repo (string, 856 classes) | pull_number (int64, 3-127k) | instance_id (string, length 12-58) | issue_numbers (sequence, length 1-5) | base_commit (string, length 40) | patch (string, length 67-1.54M) | test_patch (string, length 0-107M) | problem_statement (string, length 3-307k) | hints_text (string, length 0-908k) | created_at (timestamp[s])
---|---|---|---|---|---|---|---|---|---|
pytorch/pytorch | 79,315 | pytorch__pytorch-79315 | [
"74016"
] | dee3dc6070f09370c33b30776ad3613ff6bbf3fc | diff --git a/torch/storage.py b/torch/storage.py
--- a/torch/storage.py
+++ b/torch/storage.py
@@ -63,6 +63,8 @@ def is_shared(self) -> bool: ... # noqa: E704
@classmethod
def _new_shared_cuda(cls, *args, **kwargs) -> T: ... # noqa: E704
def _shared_incref(self, *args, **kwargs): ... # noqa: E704
+ @classmethod
+ def _free_weak_ref(cls, *args, **kwargs): ... # noqa: E704
def __str__(self):
data_str = ' ' + '\n '.join(str(self[i]) for i in range(self.size()))
@@ -633,7 +635,7 @@ def resize_(self, size):
@classmethod
def _free_weak_ref(cls, *args, **kwargs):
- return eval(cls.__module__)._UntypedStorage._free_weak_ref(*args, **kwargs)
+ return _UntypedStorage._free_weak_ref(*args, **kwargs)
def _weak_ref(self, *args, **kwargs):
return self._storage._weak_ref(*args, **kwargs)
| diff --git a/test/distributed/rpc/test_share_memory.py b/test/distributed/rpc/test_share_memory.py
new file mode 100644
--- /dev/null
+++ b/test/distributed/rpc/test_share_memory.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+# Owner(s): ["oncall: distributed"]
+
+import torch
+import torch.distributed as dist
+
+if not dist.is_available():
+ print("Distributed not available, skipping tests", file=sys.stderr)
+ sys.exit(0)
+
+import copyreg
+import os
+import contextlib
+
+from torch import multiprocessing
+import torch.multiprocessing.reductions as TorchMpReductions
+import torch.distributed.rpc as rpc
+from torch.distributed.rpc.internal import _InternalRPCPickler
+from torch.distributed.rpc.api import _use_rpc_pickler
+from torch.testing._internal.common_utils import TestCase, run_tests
+
[email protected]
+def fs_sharing():
+ prev_strategy = multiprocessing.get_sharing_strategy()
+ multiprocessing.set_sharing_strategy('file_system')
+ try:
+ yield
+ finally:
+ multiprocessing.set_sharing_strategy(prev_strategy)
+
+class ShareMemoryRPCPickler(_InternalRPCPickler):
+ def __init__(self) -> None:
+ super().__init__()
+ self._dispatch_table
+ # pyre-fixme[4]: Attribute must be annotated.
+ self._dispatch_table = copyreg.dispatch_table.copy()
+
+ for t in torch._storage_classes:
+ self._dispatch_table[t] = TorchMpReductions.reduce_storage
+
+ for t in torch._tensor_classes:
+ self._dispatch_table[t] = TorchMpReductions.reduce_tensor
+ self._dispatch_table[torch.Tensor] = TorchMpReductions.reduce_tensor
+ self._dispatch_table[
+ torch.nn.parameter.Parameter
+ ] = TorchMpReductions.reduce_tensor
+
+def worker_loop(a):
+ rpc.init_rpc('worker1', rank=1, world_size=2)
+ rpc.shutdown()
+
+def worker_fn(m):
+ pass
+
+class TestRPCPickler(TestCase):
+ def setUp(self):
+ super().setUp()
+
+ def test_case(self):
+ os.environ['MASTER_ADDR'] = 'localhost'
+ os.environ['MASTER_PORT'] = '29500'
+
+ with fs_sharing():
+ r = multiprocessing.spawn(worker_loop, join=False)
+
+ try:
+ with _use_rpc_pickler(ShareMemoryRPCPickler()):
+ rpc.init_rpc(
+ 'worker0',
+ rank=0,
+ world_size=2)
+ m = torch.nn.Linear(1, 2)
+ m.share_memory()
+ rref = rpc.remote(
+ 'worker1',
+ worker_fn,
+ args=(m,))
+
+ rref.to_here()
+ finally:
+ rpc.shutdown()
+ r.join()
+
+if __name__ == '__main__':
+ run_tests()
diff --git a/test/run_test.py b/test/run_test.py
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -176,6 +176,7 @@ def skip_test_p(name: str) -> bool:
"distributed/nn/jit/test_instantiator",
"distributed/rpc/test_faulty_agent",
"distributed/rpc/test_tensorpipe_agent",
+ "distributed/rpc/test_share_memory",
"distributed/rpc/cuda/test_tensorpipe_agent",
"distributed/pipeline/sync/skip/test_api",
"distributed/pipeline/sync/skip/test_gpipe",
@@ -227,6 +228,7 @@ def skip_test_p(name: str) -> bool:
"distributed/nn/jit/test_instantiator",
"distributed/rpc/test_faulty_agent",
"distributed/rpc/test_tensorpipe_agent",
+ "distributed/rpc/test_share_memory",
"distributed/rpc/cuda/test_tensorpipe_agent",
"distributed/_shard/checkpoint/test_checkpoint"
"distributed/_shard/checkpoint/test_file_system_checkpoint"
@@ -612,6 +614,7 @@ def test_distributed(test_module, test_directory, options):
"distributed/test_pg_wrapper": get_run_test_with_subprocess_fn(),
"distributed/rpc/test_faulty_agent": get_run_test_with_subprocess_fn(),
"distributed/rpc/test_tensorpipe_agent": get_run_test_with_subprocess_fn(),
+ "distributed/rpc/test_share_memory": get_run_test_with_subprocess_fn(),
"distributed/rpc/cuda/test_tensorpipe_agent": get_run_test_with_subprocess_fn(),
}
| AttributeError: 'NoneType' object has no attribute '_free_weak_ref'
### 🐛 Describe the bug
https://github.com/pytorch/pytorch/pull/66970#issuecomment-1063643284 produces these warnings
```
(/home/ezyang/local/pytorch-tmp-env) [[email protected] ~/local/labs] MASTER_ADDR=localhost MASTER_PORT=29500 python worker0.py
tensor([-0.6383, 0.2935, 0.6883, -0.9899], requires_grad=True)
Exception ignored in: <function StorageWeakRef.__del__ at 0x7f15538914c0>
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/multiprocessing/reductions.py", line 36, in __del__
File "/data/users/ezyang/pytorch-tmp/torch/storage.py", line 520, in _free_weak_ref
AttributeError: 'NoneType' object has no attribute '_free_weak_ref'
Exception ignored in: <function StorageWeakRef.__del__ at 0x7f15538914c0>
Traceback (most recent call last):
File "/data/users/ezyang/pytorch-tmp/torch/multiprocessing/reductions.py", line 36, in __del__
File "/data/users/ezyang/pytorch-tmp/torch/storage.py", line 520, in _free_weak_ref
AttributeError: 'NoneType' object has no attribute '_free_weak_ref'
(/home/ezyang/local/pytorch-tmp-env) [[email protected] ~/local/labs] echo $?
0
```
this shouldn't happen. Investigate.
### Versions
master
cc @ezyang @bhosmer @smessmer @ljk53 @bdhirsh
| @ezyang started seeing this in YOLOv5 CI recently:
https://github.com/ultralytics/yolov5/runs/5541120744?check_suite_focus=true
<img width="1788" alt="Screenshot 2022-03-14 at 17 55 30" src="https://user-images.githubusercontent.com/26833433/158222024-4b3b5389-515c-474c-97f8-95c17c054a8b.png">
Sorry, I'm a beginner. It seems this warning has no influence on the training result. (Please excuse my English.)
Well, the proximate cause of the problem is that `__del__` is being run during process shutdown, so the class has already been deleted.
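That shutdown-ordering explanation is the heart of the fix above: a destructor that resolves its helper through module globals (as `eval(cls.__module__)._UntypedStorage` did) can find `None` during interpreter teardown, while a reference that was bound up front keeps working. Below is a minimal sketch of the pattern with made-up class names, not PyTorch's real storage types:

```python
class _Backend:
    """Stand-in for the C binding that actually frees the weak reference."""
    @staticmethod
    def free(handle):
        print(f"freed {handle}")


class FragileWeakRef:
    def __init__(self, handle):
        self.handle = handle

    def __del__(self):
        # Resolves the helper by name at destruction time. If module globals have
        # already been torn down, the lookup can yield None and this raises
        # "'NoneType' object has no attribute 'free'", the error in this issue.
        _Backend.free(self.handle)


class RobustWeakRef:
    _free = _Backend.free  # captured once, at class-definition time

    def __init__(self, handle):
        self.handle = handle

    def __del__(self):
        # Uses the reference captured above, so module teardown order no longer matters.
        type(self)._free(self.handle)


ref = RobustWeakRef("storage-0")
del ref  # prints "freed storage-0"
```

The patch in this record takes the simpler route of referring to `_UntypedStorage` directly instead of re-evaluating the module name.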
While solving another problem, I found that this warning disappeared and no error was raised after the following operation: you may find one libiomp5md.dll in your virtual environment, e.g. D:\ANACONDA\envs\yolov5\Lib\site-packages, but another in D:\ANACONDA\envs\yolov5\Library\lib or somewhere under Library. Just move the second one elsewhere. This can help in some cases, such as memory errors.
> While solving another problem, I found that this warning disappeared and no error was raised after the following operation: you may find one libiomp5md.dll in your virtual environment, e.g. D:\ANACONDA\envs\yolov5\Lib\site-packages, but another in D:\ANACONDA\envs\yolov5\Library\lib or somewhere under Library. Just move the second one elsewhere. This can help in some cases, such as memory errors.
Sorry, that didn't work for me.
Same error.
PyTorch 1.11 has the same error; you need to downgrade to PyTorch 1.10!
A basic example using the `DataLoader` gives me the error. This happens when using multiple worker processes; when running everything in the main process, things are OK.
```python
import torch
from torch.utils.data import TensorDataset, DataLoader
print("Torch version:", torch.__version__)
x = torch.arange(12).reshape(6, 2)
ds = TensorDataset(x)
dl = DataLoader(ds, num_workers=8)
for x in dl:
print(x)
```
Gives output
```
Torch version: 1.11.0
[tensor([[0, 1]])]
[tensor([[2, 3]])]
[tensor([[4, 5]])]
[tensor([[6, 7]])]
[tensor([[8, 9]])]
[tensor([[10, 11]])]
Exception ignored in: <function StorageWeakRef.__del__ at 0x7f8c7695daf0>
Traceback (most recent call last):
File "anaconda3/lib/python3.9/site-packages/torch/multiprocessing/reductions.py", line 36, in __del__
File "anaconda3/lib/python3.9/site-packages/torch/storage.py", line 520, in _free_weak_ref
AttributeError: 'NoneType' object has no attribute '_free_weak_ref'
```
I am facing the same issue while training my YOLOV5 model.
`Exception ignored in: <function StorageWeakRef.__del__ at 0x00000248E64DF700>
Traceback (most recent call last):
File "miniconda3\envs\yo1\lib\site-packages\torch\multiprocessing\reductions.py", line 36, in __del__
File "miniconda3\envs\yo1\lib\site-packages\torch\storage.py", line 520, in _free_weak_ref
AttributeError: 'NoneType' object has no attribute '_free_weak_ref'`
I also had the same problem while training my model. The problem does not occur if `num_workers` is set to 0.
Same problem
@kurtamohler I think this should be easier to fix once torch/csrc/generic/StorageSharing.cpp becomes not generic. Then we can just refer to the C binding directly and... hopefully that fixes the module unloading ordering issue.
I can look into it after #75459
I am running into the same issue, "'NoneType' object has no attribute '_free_weak_ref'", while training a YOLOv5 model on custom data.
Any update on a resolution?
Hi guys, sorry, my English is not very good. I uninstalled PyTorch 11.3, installed 10.0, and the error was fixed:
conda install pytorch==1.10.0 torchvision==0.11.0 torchaudio==0.10.0 cudatoolkit=10.2 -c pytorch
Still present in `torch=1.11.0`
I've solved the error by installing torch 1.10.1 along with CUDA 11.1. The following command was executed:
(pip install torch==1.10.1+cu111 torchvision==0.11.2+cu111 torchaudio==0.10.1 -f https://download.pytorch.org/whl/torch_stable.html)
ERROR: Could not find a version that satisfies the requirement torchvision==0.11.2+cu111 (from versions: 0.1.6, 0.1.7, 0.1.8, 0.1.9, 0.2.0, 0.2.1, 0.2.2, 0.2.2.post2, 0.2.2.post3, 0.8.2, 0.8.2+cpu, 0.8.2+cu101, 0.8.2+cu110, 0.9.0, 0.9.0+cpu, 0.9.0+cu101, 0.9.0+cu111, 0.9.1, 0.9.1+cpu, 0.9.1+cu101, 0.9.1+cu102, 0.9.1+cu111, 0.10.0, 0.10.0+cpu, 0.10.0+cu102, 0.10.0+cu111, 0.10.1, 0.10.1+cpu, 0.10.1+cu102, 0.10.1+cu111, 0.11.0, 0.11.0+cpu, 0.11.0+cu102, 0.11.0+cu113, 0.11.1, 0.11.1+cpu, 0.11.1+cu102, 0.11.1+cu113, 0.11.2, 0.11.2+cpu, 0.11.2+cu102, 0.11.2+cu113, 0.11.3, 0.11.3+cpu, 0.11.3+cu102, 0.11.3+cu113, 0.12.0, 0.12.0+cpu, 0.12.0+cu113, 0.12.0+cu115)
ERROR: No matching distribution found for torchvision==0.11.2+cu111
> I've solved the error by installing torch 1.10.1 along with CUDA 11.1. The following command was executed: (pip install torch==1.10.1+cu111 torchvision==0.11.2+cu111 torchaudio==0.10.1 -f https://download.pytorch.org/whl/torch_stable.html)
thank you very much!
@glenn-jocher I currently get this problem on the VisDrone dataset. Any news on this, or any better solutions? I installed an older torch version and that did not help. Not sure what @iyxv meant by the workers parameter.
Error during training with:
`!python train.py --data VisDrone.yaml --cfg yolov5s.yaml --epochs 20 --batch-size 128 --name visDrone_Model_yolov5s`
```
Traceback (most recent call last):
File "c:\Users\Tobias\anaconda3\lib\site-packages\torch\multiprocessing\reductions.py", line 36, in __del__
File "c:\Users\Tobias\anaconda3\lib\site-packages\torch\storage.py", line 520, in _free_weak_ref
AttributeError: 'NoneType' object has no attribute '_free_weak_ref'
```
I also tried playing around with the epoch/batch count and it still occurred randomly.
Edit: batch size 16 seems to make it occur less often at least, but it still happened after 9/10 epochs :(
I will start working on fixing this now
Edit: Actually, probably going to wait until #78032 is merged
@kurtamohler thank you! This has been a real headache for YOLOv5 users the last couple months. | 2022-06-10T20:45:03 |
pytorch/pytorch | 79,550 | pytorch__pytorch-79550 | [
"79449"
] | 7eef78263671a6d5e9317731ce6ae53ac22847c2 | diff --git a/torch/utils/data/dataloader.py b/torch/utils/data/dataloader.py
--- a/torch/utils/data/dataloader.py
+++ b/torch/utils/data/dataloader.py
@@ -256,6 +256,7 @@ def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1,
torch.utils.data.graph_settings.apply_sharding(self.dataset, ws, rank)
elif isinstance(self.dataset, MapDataPipe):
self.dataset = _MapDataPipeSerializationWrapper(self.dataset)
+ ws, rank = _get_distributed_settings()
if num_workers > 0:
self.worker_init_fn = functools.partial(
_sharding_worker_init_fn, self.worker_init_fn, ws, rank)
| UnboundLocalError: local variable 'ws' referenced before assignment
https://github.com/pytorch/pytorch/blob/c10908cd41f6fe18a4d61704d7013d46bb05aeaf/torch/utils/data/dataloader.py#L261
`ws` is not defined within the elif statement.
cc @VitalyFedyunin @ejguan @NivekT
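For readers unfamiliar with the failure mode: a local variable assigned in only one branch is unbound when later code runs after a different branch, which is exactly what the one-line patch above fixes by re-fetching the settings inside the `elif` branch. A stripped-down, PyTorch-free illustration of the pitfall:

```python
def configure(dataset_kind: str, num_workers: int):
    if dataset_kind == "iter":
        ws, rank = 2, 0      # ws/rank assigned only on this branch
    elif dataset_kind == "map":
        pass                 # branch taken, but ws/rank never assigned

    if num_workers > 0:
        return ws, rank      # UnboundLocalError when the "map" branch ran

    return None


print(configure("iter", 4))   # (2, 0)
# configure("map", 4)         # UnboundLocalError: local variable 'ws' referenced before assignment
```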
| cc: @ejguan to take a look
Thank you for finding this bug. Sending a patch now. | 2022-06-14T19:10:59 |
|
pytorch/pytorch | 79,890 | pytorch__pytorch-79890 | [
"79828"
] | 50090861500adfd7da975a875aca0df636956df4 | diff --git a/torch/utils/data/dataloader.py b/torch/utils/data/dataloader.py
--- a/torch/utils/data/dataloader.py
+++ b/torch/utils/data/dataloader.py
@@ -561,21 +561,28 @@ def _create_warning_msg(num_worker_suggest, num_worker_created, cpuset_checked):
def _get_shared_seed(self):
if isinstance(self.dataset, IterDataPipe):
- _shared_tensor_seed = torch.empty((), dtype=torch.int64).random_(generator=self.generator)
+ _shared_seed = torch.empty((), dtype=torch.int64).random_(generator=self.generator).item()
if dist.is_available() and dist.is_initialized():
rank = dist.get_rank()
+ ws = dist.get_world_size()
+ store = dist.distributed_c10d._get_default_store()
if rank == 0:
- ws = dist.get_world_size()
- reqs = []
- for rank_id in range(1, ws):
- req = dist.isend(tensor=_shared_tensor_seed, dst=rank_id, tag=rank_id)
- reqs.append(req)
- for req in reqs:
- req.wait()
+ store.set("_dl_shared_seed", str(_shared_seed))
+ # Reset after all distributed processes have received the shared seed
+ store.add("_dl_shared_seed_recv_cnt", 1)
+ _shared_seed_recv_cnt = 1
+ while _shared_seed_recv_cnt != ws:
+ _shared_seed_recv_cnt = int(store.get("_dl_shared_seed_recv_cnt"))
+ store.set("_dl_shared_seed", "")
+ store.add("_dl_shared_seed_recv_cnt", -ws)
+ assert int(store.get("_dl_shared_seed_recv_cnt")) == 0
else:
- dist.recv(tensor=_shared_tensor_seed, src=0, tag=rank)
- _shared_seed = _shared_tensor_seed.item()
- del _shared_tensor_seed
+ _shared_seed_str = ""
+ store.wait(["_dl_shared_seed"], _utils.MP_STATUS_CHECK_INTERVAL)
+ while len(_shared_seed_str) == 0:
+ _shared_seed_str = store.get("_dl_shared_seed")
+ store.add("_dl_shared_seed_recv_cnt", 1)
+ _shared_seed = int(_shared_seed_str)
return _shared_seed
else:
return None
| Distributed shared seed in DataLoader should be moved to cuda with NCCL Process Group
### 🐛 Describe the bug
After https://github.com/pytorch/pytorch/pull/78765 landed, running a training script in a multi-GPU environment with `DataLoader` + `DataPipe` raises `RuntimeError: Tensors must be CUDA and dense`.
See the user reported issue: https://fburl.com/teyw5gen
### Versions
nightly
cc @SsnL @VitalyFedyunin @ejguan @NivekT @pietern @mrshenli @pritamdamania87 @zhaojuanmao @satgera @rohan-varma @gqchen @aazzolini @osalpekar @jiayisuse @SciPioneer @H-Huang @kwen2501
| 2022-06-20T19:32:32 |
||
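The patch above sidesteps that error by sharing the seed through the process group's key-value store instead of sending a CPU tensor point-to-point, which the NCCL backend rejects. Here is a hedged sketch of the same idea using a standalone `TCPStore`; the host, port, and helper name are illustrative, not the DataLoader's actual code, which reuses the default c10d store:

```python
import torch
import torch.distributed as dist


def share_seed_via_store(store, rank: int, generator=None) -> int:
    """Rank 0 draws a seed and publishes it; every other rank blocks until it appears."""
    if rank == 0:
        seed = int(torch.empty((), dtype=torch.int64).random_(generator=generator))
        store.set("_shared_seed", str(seed))
        return seed
    store.wait(["_shared_seed"])           # key-value traffic only, so the backend never matters
    return int(store.get("_shared_seed"))


# Hypothetical wiring for a 2-process job:
# store = dist.TCPStore("127.0.0.1", 29500, world_size=2, is_master=(rank == 0))
# seed = share_seed_via_store(store, rank)
```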
pytorch/pytorch | 81,867 | pytorch__pytorch-81867 | [
"80733"
] | 67ece03c8cd632cce9523cd96efde6f2d1cc8121 | diff --git a/torch/storage.py b/torch/storage.py
--- a/torch/storage.py
+++ b/torch/storage.py
@@ -16,7 +16,6 @@
T = TypeVar('T', bound='Union[_StorageBase, _TypedStorage]')
class _StorageBase(object):
_cdata: Any
- is_cuda: bool = False
is_sparse: bool = False
is_sparse_csr: bool = False
device: torch.device
@@ -65,6 +64,8 @@ def _new_shared_cuda(cls, *args, **kwargs) -> T: ... # noqa: E704
def _shared_incref(self, *args, **kwargs): ... # noqa: E704
@classmethod
def _free_weak_ref(cls, *args, **kwargs): ... # noqa: E704
+ @property
+ def is_cuda(self): ... # noqa: E704
def __str__(self):
data_str = ' ' + '\n '.join(str(self[i]) for i in range(self.size()))
@@ -213,6 +214,9 @@ def _untyped(self):
class _UntypedStorage(torch._C.StorageBase, _StorageBase):
pass
+ @property
+ def is_cuda(self):
+ return self.device.type == 'cuda'
def _load_from_bytes(b):
return torch.load(io.BytesIO(b))
@@ -428,7 +432,7 @@ def __init__(self, *args, device=None, dtype=None, wrap_storage=None):
@property
def is_cuda(self):
- return self._storage.device.type == 'cuda'
+ return self.device.type == 'cuda'
def _untyped(self):
return self._storage
| diff --git a/test/test_torch.py b/test/test_torch.py
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -265,6 +265,14 @@ def test_set_storage(self, device, dtype):
error_storage = a.to(error_dtype).storage()
b = torch.tensor([], device=device, dtype=dtype).set_(error_storage)
+ @onlyCUDA
+ def test_module_share_memory(self):
+ # Test fix for issue #80733
+ # See https://github.com/pytorch/pytorch/issues/80733
+ model = torch.nn.Linear(3, 1)
+ model_cuda = model.to('cuda')
+ model.share_memory()
+
@dtypes(torch.float32, torch.complex64)
def test_deepcopy(self, device, dtype):
from copy import deepcopy
| share_memory() on CUDA tensors no longer no-ops and instead crashes in version 1.12.0
### 🐛 Describe the bug
In PyTorch version 1.12.0, `model.share_memory()` on a CUDA tensor no longer no-ops and instead crashes with error `RuntimeError: _share_fd_: only available on CPU`. Version 1.11.0 correctly no-ops
```
import torch
model = torch.nn.Linear(3, 1)
model.to(torch.device('cuda:0'))
model.share_memory()
```
Running above gets the output
```
Traceback (most recent call last):
File "/home/andrew_huggingface_co/test.py", line 5, in <module>
model.share_memory()
File "/opt/conda/envs/fail/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1912, in share_memory
return self._apply(lambda t: t.share_memory_())
File "/opt/conda/envs/fail/lib/python3.9/site-packages/torch/nn/modules/module.py", line 602, in _apply
param_applied = fn(param)
File "/opt/conda/envs/fail/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1912, in <lambda>
return self._apply(lambda t: t.share_memory_())
File "/opt/conda/envs/fail/lib/python3.9/site-packages/torch/_tensor.py", line 515, in share_memory_
self.storage().share_memory_()
File "/opt/conda/envs/fail/lib/python3.9/site-packages/torch/storage.py", line 595, in share_memory_
self._storage.share_memory_()
File "/opt/conda/envs/fail/lib/python3.9/site-packages/torch/storage.py", line 194, in share_memory_
self._share_fd_cpu_()
RuntimeError: _share_fd_: only available on CPU
```
The problem seems to be in the `_StorageBase.share_memory_` function in `storage.py`: `self.is_cuda` evaluates to `False`, which then executes `self._share_fd_cpu_()`.
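The description above boils down to a class-attribute-versus-property issue: a class-level `is_cuda = False` default stays `False` unless something overrides it, while a property derived from the device can never go stale, which is what the patch switches to. A minimal sketch with stand-in classes (not the real storage types):

```python
class StorageWithFlag:
    is_cuda = False                     # class-level default, never updated for CUDA storage

    def __init__(self, device: str):
        self.device = device


class StorageWithProperty:
    def __init__(self, device: str):
        self.device = device

    @property
    def is_cuda(self) -> bool:          # always derived from the actual device
        return self.device == "cuda"


print(StorageWithFlag("cuda").is_cuda)      # False -> would wrongly take the CPU sharing path
print(StorageWithProperty("cuda").is_cuda)  # True  -> share_memory() can no-op as in 1.11
```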
### Versions
Collecting environment information...
PyTorch version: 1.12.0
Is debug build: False
CUDA used to build PyTorch: 10.2
ROCM used to build PyTorch: N/A
OS: Debian GNU/Linux 10 (buster) (x86_64)
GCC version: (Debian 8.3.0-6) 8.3.0
Clang version: Could not collect
CMake version: version 3.13.4
Libc version: glibc-2.28
Python version: 3.9.13 | packaged by conda-forge | (main, May 27 2022, 16:58:50) [GCC 10.3.0] (64-bit runtime)
Python platform: Linux-4.19.0-20-cloud-amd64-x86_64-with-glibc2.28
Is CUDA available: True
CUDA runtime version: 11.3.109
GPU models and configuration: GPU 0: Tesla T4
Nvidia driver version: 470.57.02
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] numpy==1.22.3
[pip3] torch==1.12.0
[pip3] torchvision==0.13.0
[conda] blas 1.0 mkl
[conda] cudatoolkit 10.2.89 h713d32c_10 conda-forge
[conda] ffmpeg 4.3 hf484d3e_0 pytorch
[conda] mkl 2021.4.0 h06a4308_640
[conda] mkl-service 2.4.0 py39h7e14d7c_0 conda-forge
[conda] mkl_fft 1.3.1 py39h0c7bc48_1 conda-forge
[conda] mkl_random 1.2.2 py39hde0f152_0 conda-forge
[conda] numpy 1.22.3 py39he7a7128_0
[conda] numpy-base 1.22.3 py39hf524024_0
[conda] pytorch 1.12.0 py3.9_cuda10.2_cudnn7.6.5_0 pytorch
[conda] pytorch-mutex 1.0 cuda pytorch
[conda] torchvision 0.13.0 py39_cu102 pytorch
cc @ezyang @gchanan @zou3519
| cc @kurtamohler | 2022-07-21T11:17:21 |
pytorch/pytorch | 81,873 | pytorch__pytorch-81873 | [
"78490"
] | 67ece03c8cd632cce9523cd96efde6f2d1cc8121 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -310,6 +310,7 @@ def report(*args):
# Version, create_version_file, and package_name
################################################################################
package_name = os.getenv('TORCH_PACKAGE_NAME', 'torch')
+package_type = os.getenv('PACKAGE_TYPE', 'wheel')
version = get_torch_version()
report("Building wheel {}-{}".format(package_name, version))
@@ -457,8 +458,7 @@ class build_ext(setuptools.command.build_ext.build_ext):
# Copy libiomp5.dylib inside the wheel package on OS X
def _embed_libiomp(self):
- if not IS_DARWIN:
- return
+
lib_dir = os.path.join(self.build_lib, 'torch', 'lib')
libtorch_cpu_path = os.path.join(lib_dir, 'libtorch_cpu.dylib')
if not os.path.exists(libtorch_cpu_path):
@@ -550,7 +550,8 @@ def run(self):
# It's an old-style class in Python 2.7...
setuptools.command.build_ext.build_ext.run(self)
- self._embed_libiomp()
+ if IS_DARWIN and package_type != 'conda':
+ self._embed_libiomp()
# Copy the essential export library to compile C++ extensions.
if IS_WINDOWS:
| Initializing libiomp5.dylib, but found libomp.dylib already initialized.
### 🐛 Describe the bug
The issue appears on macOS with Python 3.8; it started after updating to the latest nightly `1.13.0.dev20220525-py3.8_0` from core (previously I was at `1.12.0.dev20220309-py3.8_0`, so the issue could have been introduced earlier than May 25th). I'm receiving the following after importing numpy and pytorch together:
```
$ python -c "import numpy;import torch"
OMP: Error #15: Initializing libiomp5.dylib, but found libomp.dylib already initialized.
OMP: Hint This means that multiple copies of the OpenMP runtime have been linked into the program. That is dangerous, since it can degrade performance or cause incorrect results. The best thing to do is to ensure that only a single OpenMP runtime is linked into the process, e.g. by avoiding static linking of the OpenMP runtime in any library. As an unsafe, unsupported, undocumented workaround you can set the environment variable KMP_DUPLICATE_LIB_OK=TRUE to allow the program to continue to execute, but that may cause crashes or silently produce incorrect results. For more information, please see http://www.intel.com/software/products/support/.
```
```
python3 -mtorch.utils.collect_env
Collecting environment information...
PyTorch version: 1.13.0.dev20220525
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 12.3.1 (x86_64)
GCC version: Could not collect
Clang version: 12.0.0 (clang-1200.0.32.21)
CMake version: version 3.18.4
Libc version: N/A
Python version: 3.8.12 | packaged by conda-forge | (default, Oct 12 2021, 21:50:38) [Clang 11.1.0 ] (64-bit runtime)
Python platform: macOS-10.16-x86_64-i386-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] efficientnet-pytorch==0.7.1
[pip3] mypy==0.931
[pip3] mypy-extensions==0.4.3
[pip3] numpy==1.22.4
[pip3] torch==1.13.0.dev20220525
[pip3] torchdata==0.4.0a0+652986b
[pip3] torchvision==0.14.0a0+9a72fd6
[pip3] torchviz==0.0.2
[conda] blas 2.112 mkl conda-forge
[conda] blas-devel 3.9.0 12_osx64_mkl conda-forge
[conda] efficientnet-pytorch 0.7.1 pypi_0 pypi
[conda] libblas 3.9.0 12_osx64_mkl conda-forge
[conda] libcblas 3.9.0 12_osx64_mkl conda-forge
[conda] liblapack 3.9.0 12_osx64_mkl conda-forge
[conda] liblapacke 3.9.0 12_osx64_mkl conda-forge
[conda] mkl 2021.4.0 h89fa619_689 conda-forge
[conda] mkl-devel 2021.4.0 h694c41f_690 conda-forge
[conda] mkl-include 2021.4.0 hf224eb6_689 conda-forge
[conda] numpy 1.22.4 py38h3ad0702_0 conda-forge
[conda] pytorch 1.13.0.dev20220525 py3.8_0 pytorch-nightly
[conda] torchdata 0.4.0a0+652986b pypi_0 pypi
[conda] torchvision 0.14.0a0+9a72fd6 dev_0 <develop>
[conda] torchviz 0.0.2 pypi_0 pypi
```
Strangely, importing first torch works:
```
python -c "import torch;import numpy;print('works')"
works
```
Setting `KMP_DUPLICATE_LIB_OK=TRUE` as env var solves the issue while invoking from console:
```
KMP_DUPLICATE_LIB_OK=TRUE python -c "import numpy;import torch;print('works')"
works
```
Sometimes I get segfaults though, so this doesn't seem like a stable solution.
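As the OMP error text itself warns, `KMP_DUPLICATE_LIB_OK=TRUE` only suppresses the duplicate-runtime check and may crash or silently produce incorrect results, so treat it as a stop-gap. If used, it has to be set before the second OpenMP runtime gets loaded, for example at the very top of the script; a small illustrative sketch:

```python
import os

# Unsafe workaround from the OMP hint above; prefer removing the duplicate runtime instead.
os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "TRUE")

import numpy   # noqa: E402
import torch   # noqa: E402

print("imported without the OMP abort")
```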
### Versions
Latest Core nightly (20220525).
I ended up "fixing" the issue by wiping my conda setup, moving to py3.10, and reinstalling dependencies. The issue was definitely not related to TorchVision, as I managed to reproduce it using only Core. It could be related to some weird dependency on my system, though.
As discussed with @atalman, I'll leave the issue open just in case this is something suspicious that needs to be investigated in the future. I know there were a bunch of macOS changes recently on OK. Feel free to close or move to Core.
@datumbox you should have run `python3 -mtorch.utils.collect_env` and posted results here. What I suspect has happened: in your env you've installed PyTorch from wheels, but numpy from conda-forge
Looks like it's a conda issue; I was able to reproduce this:
conda install pytorch torchvision torchaudio -c pytorch-nightly
```
python -c "import numpy;import torch"
OMP: Error #15: Initializing libiomp5.dylib, but found libomp.dylib already initialized.
OMP: Hint This means that multiple copies of the OpenMP runtime have been linked into the program. That is dangerous, since it can degrade performance or cause incorrect results. The best thing to do is to ensure that only a single OpenMP runtime is linked into the process, e.g. by avoiding static linking of the OpenMP runtime in any library. As an unsafe, unsupported, undocumented workaround you can set the environment variable KMP_DUPLICATE_LIB_OK=TRUE to allow the program to continue to execute, but that may cause crashes or silently produce incorrect results. For more information, please see http://www.intel.com/software/products/support/.
Abort trap: 6
```
@atalman can you please run `python3 -mtorch.utils.collect_env` and post results here?
Here they are, this config was failing
```
python3 -mtorch.utils.collect_env
Collecting environment information...
PyTorch version: 1.13.0.dev20220525
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 12.3.1 (x86_64)
GCC version: Could not collect
Clang version: 13.1.6 (clang-1316.0.21.2.3)
CMake version: Could not collect
Libc version: N/A
Python version: 3.9.7 (default, Sep 16 2021, 08:50:36) [Clang 10.0.0 ] (64-bit runtime)
Python platform: macOS-10.16-x86_64-i386-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] mypy-extensions==0.4.3
[pip3] numpy==1.20.3
[pip3] numpydoc==1.1.0
[pip3] torch==1.13.0.dev20220525
[pip3] torchaudio==0.12.0.dev20220525
[pip3] torchvision==0.14.0.dev20220525
[conda] blas 1.0 mkl
[conda] mkl 2021.4.0 hecd8cb5_637
[conda] mkl-service 2.4.0 py39h9ed2024_0
[conda] mkl_fft 1.3.1 py39h4ab4a9b_0
[conda] mkl_random 1.2.2 py39hb2f4e1b_0
[conda] numpy 1.20.3 py39h4b4dc7a_0
[conda] numpy-base 1.20.3 py39he0bd621_0
[conda] numpydoc 1.1.0 pyhd3eb1b0_1
[conda] pytorch 1.13.0.dev20220525 py3.9_0 pytorch-nightly
[conda] torch 1.12.0 pypi_0 pypi
[conda] torchaudio 0.12.0.dev20220511 pypi_0 pypi
[conda] torchvision 0.13.0.dev20220511 pypi_0 pypi
```
@malfet I've updated the issue with the requested output. In my case your suspicion seems correct which is different from what @atalman reports. Hope it helps. Feel free to move this issue on PyTorch core if you want, as this is not related to TorchVision.
@datumbox @malfet
Looks like I was able to mitigate this issue by doing the following:
```
conda uninstall intel-openmp
conda install -c intel openmp
conda install numpy
conda install pytorch torchvision torchaudio -c pytorch-nightly
```
So it looks like the problem could be related to OpenMP.
```
python3 -mtorch.utils.collect_env
Collecting environment information...
PyTorch version: 1.13.0.dev20220525
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 12.3.1 (x86_64)
GCC version: Could not collect
Clang version: 13.1.6 (clang-1316.0.21.2.3)
CMake version: Could not collect
Libc version: N/A
Python version: 3.9.7 (default, Sep 16 2021, 08:50:36) [Clang 10.0.0 ] (64-bit runtime)
Python platform: macOS-10.16-x86_64-i386-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] numpy==1.22.3
[pip3] torch==1.13.0.dev20220525
[pip3] torchaudio==0.12.0.dev20220525
[pip3] torchvision==0.14.0.dev20220525
[conda] blas 1.0 mkl
[conda] mkl 2021.4.0 hecd8cb5_637
[conda] mkl-service 2.4.0 py39h9ed2024_0
[conda] mkl_fft 1.3.1 py39h4ab4a9b_0
[conda] mkl_random 1.2.2 py39hb2f4e1b_0
[conda] numpy 1.22.3 py39h2e5f0a9_0
[conda] numpy-base 1.22.3 py39h3b1a694_0
[conda] pytorch 1.13.0.dev20220525 py3.9_0 pytorch-nightly
[conda] torch 1.12.0 pypi_0 pypi
[conda] torchaudio 0.12.0.dev20220511 pypi_0 pypi
[conda] torchvision 0.13.0.dev20220511 pypi_0 pypi
```
Ok, can we please clarify the following: which conda package provides libiomp5.dylib? If it's provided by the MKL base package, then we do not need to bundle it with our package, but rather take it from conda, i.e. the following function should not be called when building conda packages for macOS: https://github.com/pytorch/pytorch/blob/18d46ea9fd5071820cdbe7595b99396a9153f716/setup.py#L432
After doing some more research I found out that this error is resolved by
```
conda remove mkl
```
However, we can reintroduce the issue by installing
```
conda install numpy-base=1.22.3
```
which installs the following packages:
```
numpy-base pkgs/main/osx-64::numpy-base-1.22.3-py310hfd2de13_0
```
| 2022-07-21T12:48:58 |
|
pytorch/pytorch | 81,952 | pytorch__pytorch-81952 | [
"81129"
] | 03b82bdd997a9983ad57587dae359b1624258609 | diff --git a/torch/nn/modules/activation.py b/torch/nn/modules/activation.py
--- a/torch/nn/modules/activation.py
+++ b/torch/nn/modules/activation.py
@@ -1085,10 +1085,10 @@ def forward(self, query: Tensor, key: Tensor, value: Tensor, key_padding_mask: O
why_not_fast_path = "add_zero_attn was enabled"
elif not self._qkv_same_embed_dim:
why_not_fast_path = "_qkv_same_embed_dim was not True"
- elif query.is_nested and (key_padding_mask is not None or attn_mask is not None):
- why_not_fast_path = "key_padding_mask and attn_mask are not supported with NestedTensor input"
- elif not query.is_nested and key_padding_mask is not None and attn_mask is not None:
- why_not_fast_path = "key_padding_mask and attn_mask were both supplied"
+ elif attn_mask is not None:
+ why_not_fast_path = "attn_mask was not None"
+ elif query.is_nested and key_padding_mask is not None:
+ why_not_fast_path = "key_padding_mask is not supported with NestedTensor input"
if not why_not_fast_path:
tensor_args = (
diff --git a/torch/nn/modules/transformer.py b/torch/nn/modules/transformer.py
--- a/torch/nn/modules/transformer.py
+++ b/torch/nn/modules/transformer.py
@@ -411,9 +411,8 @@ def forward(self, src: Tensor, src_mask: Optional[Tensor] = None,
self.self_attn.batch_first and
self.self_attn._qkv_same_embed_dim and self.activation_relu_or_gelu and
self.norm1.eps == self.norm2.eps and
- ((src_mask is None and src_key_padding_mask is None)
- if src.is_nested
- else (src_mask is None or src_key_padding_mask is None))):
+ src_mask is None and
+ not (src.is_nested and src_key_padding_mask is not None)):
tensor_args = (
src,
self.self_attn.in_proj_weight,
@@ -453,7 +452,7 @@ def forward(self, src: Tensor, src_mask: Optional[Tensor] = None,
self.linear1.bias,
self.linear2.weight,
self.linear2.bias,
- src_mask if src_mask is not None else src_key_padding_mask,
+ src_mask if src_mask is not None else src_key_padding_mask, # TODO: split into two args
)
x = src
if self.norm_first:
| diff --git a/test/test_nn.py b/test/test_nn.py
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -5826,32 +5826,6 @@ def test_multihead_attn_3d_attn_mask(self):
# output_2d in shape of [T, 1, D]
self.assertEqual(output_3d[i].unsqueeze(0).transpose(0, 1), output_2d)
- @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
- def test_self_attn_TxT_attn_mask(self):
- embed_dim = 16
- num_heads = 4
- batch_size = 10
- tgt_len = 16
-
- query = torch.rand(batch_size, tgt_len, embed_dim, device="cuda") # [N, T, D]
- attn_mask = torch.randint(0, 2, (tgt_len, tgt_len)).cuda().float() # [T, T]
- attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0))
-
- attn_mask_4d = attn_mask.expand(batch_size, num_heads, tgt_len, tgt_len)
-
- mta_model = torch.nn.MultiheadAttention(embed_dim, num_heads, batch_first=True).cuda()
- mta_model.eval()
-
- # Generate 3D results
- with torch.inference_mode():
- output_mask_4d = mta_model(query, query, query, attn_mask=attn_mask_4d)[0]
- output_mask_4d = output_mask_4d.transpose(0, 1) # [N, T, D]
-
- output_mask_TxT = mta_model(query, query, query, attn_mask=attn_mask)[0]
- output_mask_TxT = output_mask_TxT.transpose(0, 1) # [N, T, D]
-
- self.assertEqual(output_mask_4d, output_mask_TxT)
-
def test_multihead_attn_no_bias(self):
embed_dim = 8
num_heads = 4
diff --git a/test/test_transformers.py b/test/test_transformers.py
new file mode 100644
--- /dev/null
+++ b/test/test_transformers.py
@@ -0,0 +1,156 @@
+# Owner(s): ["module: nn"]
+
+import contextlib
+import torch
+import unittest
+
+from torch.testing._internal.common_nn import NNTestCase
+from torch.testing._internal.common_utils import run_tests, parametrize, instantiate_parametrized_tests
+from torch.testing._internal.common_cuda import TEST_CUDA
+
[email protected]
+def set_default_dtype(dtype):
+ saved_dtype = torch.get_default_dtype()
+ torch.set_default_dtype(dtype)
+ try:
+ yield
+ finally:
+ torch.set_default_dtype(saved_dtype)
+
+class TestTransformers(NNTestCase):
+ _do_cuda_memory_leak_check = True
+ _do_cuda_non_default_stream = True
+
+ device_list = ['cpu'] # TODO: is there a way to do parametrize for this?
+ if TEST_CUDA:
+ device_list.append('cuda')
+
+ @unittest.skip("4D mask not supported yet - activate when 4D mask supported")
+ @unittest.skipIf(not TEST_CUDA, "CUDA unavailable") # TODO: make this work for both cuda and cpu
+ def test_self_attn_TxT_attn_mask(self):
+ embed_dim = 16
+ num_heads = 4
+ batch_size = 10
+ tgt_len = 16
+
+ query = torch.rand(batch_size, tgt_len, embed_dim, device="cuda") # [N, T, D]
+ attn_mask = torch.randint(0, 2, (tgt_len, tgt_len)).cuda().float() # [T, T]
+ attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0))
+
+ attn_mask_4d = attn_mask.expand(batch_size, num_heads, tgt_len, tgt_len)
+
+ mta_model = torch.nn.MultiheadAttention(embed_dim, num_heads, batch_first=True).cuda()
+ mta_model.eval()
+
+ # Generate 3D results
+ with torch.inference_mode():
+ output_mask_4d = mta_model(query, query, query, attn_mask=attn_mask_4d)[0]
+ output_mask_4d = output_mask_4d.transpose(0, 1) # [N, T, D]
+
+ output_mask_TxT = mta_model(query, query, query, attn_mask=attn_mask)[0]
+ output_mask_TxT = output_mask_TxT.transpose(0, 1) # [N, T, D]
+
+ self.assertEqual(output_mask_4d, output_mask_TxT)
+
+ @parametrize("device", device_list)
+ def test_transformerencoderlayer_src_mask(self, device):
+ batch_size = 2
+ seqlen = 4
+ d_model = 8
+ nhead = 8
+ dim_feedforward = 32
+
+ model = torch.nn.TransformerEncoderLayer(
+ d_model=d_model,
+ nhead=nhead,
+ dim_feedforward=dim_feedforward,
+ batch_first=True).to(device)
+ src = torch.rand(batch_size, seqlen, d_model).to(device) # bs, seqlen, d_model
+ src_mask = torch.zeros(seqlen, seqlen).to(torch.bool).to(device)
+
+ model(src, src_mask=src_mask)
+ model.eval()
+ with torch.no_grad():
+ model(src, src_mask=src_mask)
+
+ @parametrize("use_torchscript", [True, False])
+ @parametrize("with_no_grad", [True, False])
+ @parametrize("training", [True, False])
+ def test_transformerencoder_fastpath_torchscript(self, use_torchscript, with_no_grad, training):
+ """
+ Test TransformerEncoder does not crash
+ """
+ model = torch.nn.TransformerEncoder(
+ torch.nn.TransformerEncoderLayer(d_model=2, nhead=2, dim_feedforward=8, batch_first=True),
+ num_layers=2,
+ enable_nested_tensor=True
+ )
+
+ if training:
+ model = model.train()
+ else:
+ model = model.eval()
+
+ if use_torchscript:
+ model = torch.jit.script(model)
+
+ x = torch.Tensor([[[1, 2], [3, 4]]]).to(torch.float)
+ mask = torch.Tensor([[0, 1]]).to(torch.bool)
+
+ if with_no_grad:
+ cm = torch.no_grad()
+ else:
+ cm = contextlib.nullcontext()
+ with cm:
+ model(x, src_key_padding_mask=mask)
+
+ @parametrize("with_no_grad", [True, False])
+ @parametrize("training", [True, False])
+ @parametrize("enable_nested_tensor", [False])
+ @parametrize("device", device_list)
+ def test_transformerencoder_square_input(self, with_no_grad, training, enable_nested_tensor, device):
+ """
+ Test for edge cases when input of shape (batch size, sequence length, embedding dimension) has
+ batch size == sequence length
+ """
+ model = torch.nn.TransformerEncoder(
+ torch.nn.TransformerEncoderLayer(d_model=4, nhead=2, dim_feedforward=16, dropout=0.0, batch_first=True),
+ num_layers=2,
+ enable_nested_tensor=enable_nested_tensor
+ ).to(device)
+
+ with torch.no_grad():
+ # set constant weights of the model
+ for idx, p in enumerate(model.parameters()):
+ x = p.data
+ sz = x.view(-1).size(0)
+ shape = x.shape
+ x = torch.cos(torch.arange(0, sz).float().view(shape))
+ p.data.copy_(x)
+
+ if training:
+ model = model.train()
+ else:
+ model = model.eval()
+ x = torch.arange(0, 16).reshape(2, 2, 4).to(torch.float).to(device)
+ src_mask = torch.Tensor([[0, 1], [0, 0]]).to(torch.bool).to(device)
+
+ if with_no_grad:
+ cm = torch.no_grad()
+ else:
+ cm = contextlib.nullcontext()
+ with cm:
+ result = model(x, mask=src_mask)
+
+ ref_output = torch.Tensor([[[2.420306205749512, 0.017629241570830, -0.607857942581177, -0.085519507527351],
+ [2.420306205749512, 0.017629241570830, -0.607857942581177, -0.085519507527351]],
+ [[2.419836044311523, 0.017548924311996, -0.608187675476074, -0.085347734391689],
+ [2.419836044311523, 0.017548924311996, -0.608187675476074, -0.085347734391689]]]
+ ).to(device)
+ self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
+ torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
+
+instantiate_parametrized_tests(TestTransformers)
+
+if __name__ == '__main__':
+ run_tests()
| Transformer and CPU path with `src_mask` raises error with torch 1.12
### 🐛 Describe the bug
The following code, which runs on torch 1.11 cpu, doesn't anymore on torch 1.12:
```python
import torch
model = torch.nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
src = torch.rand(32, 10, 512)
src_mask = torch.zeros(10, 10).to(torch.bool)
model.eval()
with torch.no_grad():
print(model(src, src_mask))
```
It raises
```
Traceback (most recent call last):
File "/Users/adm/Desktop/main.py", line 9, in <module>
print(model(src, src_mask))
File "/Users/adm/pytorch/torch/nn/modules/module.py", line 1186, in _call_impl
return forward_call(*input, **kwargs)
File "/Users/adm/pytorch/torch/nn/modules/transformer.py", line 439, in forward
return torch._transformer_encoder_layer_fwd(
RuntimeError: Expected attn_mask->sizes()[0] == batch_size to be true, but got false. (Could this error message be improved? If so, please report an enhancement request to PyTorch.)
```
The `.to(torch.bool)` is only here to silence the warning: `UserWarning: Converting mask without torch.bool dtype to bool; this will negatively affect performance. Prefer to use a boolean mask directly.`, the original code on v1.11 doesn't use it.
This also happens on an x86 Ubuntu machine when using the CPU, but it does not happen when using CUDA.
Because of this condition: https://github.com/pytorch/pytorch/blob/e9b3bc2eadb8ffe10c002abcd5a34a5b7d36f390/aten/src/ATen/native/transformers/attention.cpp#L136-L144
using `src_mask = torch.zeros(32, 10)` makes the error go away, but that can't be right, because I believe the size of `src_mask` should be `(seq_len, seq_len)`.
Instead, on torch 1.11, using `(32, 10)` was raising `RuntimeError: The shape of the 2D attn_mask is torch.Size([32, 10]), but should be (10, 10).`.
Also, the comment saying the CPU path doesn't support masks makes me wonder if I'm looking at the right code 🤔
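For reference, the two mask arguments have different documented shapes, which is why passing a `(batch, seq)` tensor as `src_mask` only appears to work: the fused kernel checks the mask's first dimension against the batch size (the assert quoted above), i.e. it is treating the mask like a padding mask. A small sketch of the intended shapes, run here without `eval()`/`no_grad()` so the fast path is not taken and the call succeeds regardless of version:

```python
import torch

batch, seq, d_model, nhead = 32, 10, 512, 8
layer = torch.nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead, batch_first=True)

src = torch.rand(batch, seq, d_model)                              # (N, S, E) with batch_first=True
src_mask = torch.zeros(seq, seq, dtype=torch.bool)                 # (S, S): True = pair may NOT attend
src_key_padding_mask = torch.zeros(batch, seq, dtype=torch.bool)   # (N, S): True = token is padding

out = layer(src, src_mask=src_mask, src_key_padding_mask=src_key_padding_mask)
print(out.shape)  # torch.Size([32, 10, 512])
```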
### Versions
Collecting environment information...
PyTorch version: 1.13.0a0+git4c57cf9
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 12.4 (arm64)
GCC version: Could not collect
Clang version: 13.1.6 (clang-1316.0.21.2.5)
CMake version: version 3.23.2
Libc version: N/A
Python version: 3.10.5 | packaged by conda-forge | (main, Jun 14 2022, 07:07:06) [Clang 13.0.1 ] (64-bit runtime)
Python platform: macOS-12.4-arm64-arm-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] numpy==1.23.0
[pip3] torch==1.13.0a0+git4c57cf9
[conda] numpy 1.23.0 py310h0a343b5_0 conda-forge
[conda] torch 1.13.0a0+git4c57cf9 dev_0 <develop>
cc @ezyang @gchanan @zou3519 @jbschlosser @bhosmer @cpuhrsch @erichan1
| 2022-07-21T23:47:21 |
|
pytorch/pytorch | 81,976 | pytorch__pytorch-81976 | [
"80306"
] | 787b469b19f0b97fbb328befa96efe4f8f356367 | diff --git a/torchgen/gen.py b/torchgen/gen.py
--- a/torchgen/gen.py
+++ b/torchgen/gen.py
@@ -2454,6 +2454,9 @@ def main() -> None:
DispatchKey.CompositeExplicitAutograd,
DispatchKey.Meta,
}
+ if options.mps:
+ functions_keys.add(DispatchKey.MPS)
+
if options.backend_whitelist:
dispatch_keys = [
k
| diff --git a/test/test_mps.py b/test/test_mps.py
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -8,16 +8,25 @@
import warnings
import subprocess
import os
+import pprint
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
+from collections import defaultdict
from torch._six import inf
from torch.nn import Parameter
-from torch.testing._internal.common_utils import run_tests, TestCase, download_file, TEST_WITH_UBSAN
+from torch.testing._internal.common_utils import \
+ (gradcheck, gradgradcheck, run_tests, TestCase, download_file,
+ TEST_WITH_UBSAN)
+from torch.testing import make_tensor
+from torch.testing._internal.common_dtype import get_all_dtypes
import torch.backends.mps
-from torch.distributions import Uniform
+from torch.distributions import Uniform, Exponential
+from functools import partial
+from torch.testing._internal.common_methods_invocations import op_db
+from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests
from torch.testing._internal.common_nn import NNTestCase
import numpy as np
import torch
@@ -364,6 +373,12 @@ def _linear_helper(self, in_features, out_features, shape, bias=True, backward_p
self.assertEqual(cpu_linear.bias.grad.size(), mps_linear.bias.grad.size())
self.assertEqual(cpu_linear.bias.grad, mps_linear.bias.grad.to("cpu"), atol=8e-04, rtol=10.4e-05)
+ def test_linear1D(self):
+ self._linear_helper(in_features=2, out_features=3, shape=([2]), bias=True, backward_pass=False)
+
+ def test_linear1D_backward(self):
+ self._linear_helper(in_features=2, out_features=3, shape=([2]), bias=True, backward_pass=True)
+
def test_linear2D(self):
self._linear_helper(in_features=2, out_features=3, shape=((4, 2)), bias=True, backward_pass=False)
@@ -780,7 +795,6 @@ def helper(input_shape, normalized_shape, eps=1e-05, elementwise_affine=True, dt
helper((2, 3, 4, 5), (4, 5), elementwise_affine=elementwise_affine)
helper((2, 3, 4, 5, 6), (4, 5, 6), elementwise_affine=elementwise_affine)
-
def test_instance_norm(self):
def helper(shape, eps=1, momentum=0.1, wts=False, channels_last=False, track_running_stats=True, test_module=False):
@@ -1442,6 +1456,14 @@ def test_to(self):
torch.tensor(4, dtype=torch.int32))
self.assertEqual(torch.tensor(-8.34, device='cpu').to('mps', torch.int),
torch.tensor(-8.34, device='cpu').to('mps').to(torch.int))
+ # Cast int8 and uint8 to float and compare results
+ # See https://github.com/pytorch/pytorch/issues/80009 for more details
+ cpu_byte = torch.tensor([60, 160, 20, 220], dtype=torch.uint8)
+ cpu_char = torch.tensor([60, -60, 20, -120], dtype=torch.uint8)
+ for x_cpu in [cpu_byte, cpu_char]:
+ x_mps = x_cpu.to('mps')
+ self.assertEqual(x_mps.to(torch.float32), x_cpu.to(torch.float32))
+
def test_setitem_scalar(self) -> None:
device = 'mps'
@@ -1458,6 +1480,108 @@ def test_setitem_scalar(self) -> None:
self.assertEqual(t[2, 1], j)
self.assertEqual(t.sum(), 1 + i + j)
+ def test_stride_of_strides(self) -> None:
+ x = torch.rand(32, 1, device='mps')
+ y = x.as_strided(size=(32, 2), stride=(1, 0))
+ # Casting stride of strided tensor to CPU use to crash with "buffer is not large enough." assert
+ # See https://github.com/pytorch/pytorch/issues/79181#issuecomment-1154683435
+ z = y.as_strided(size=(32, 3), stride=(1, 0)).to("cpu")
+ self.assertEqual(x.to("cpu").as_strided(size=(32, 3), stride=(1, 0)), z)
+
+
+class TestLogical(TestCase):
+ def _wrap_tensor(self, x, device="cpu", dtype=None, requires_grad=False):
+ return torch.tensor(x, device=device, dtype=dtype, requires_grad=requires_grad)
+
+ def test_logical_not(self):
+ def helper(x):
+ cpu_x = x
+ x = cpu_x.detach().clone().to('mps')
+
+ result = torch.logical_not(x)
+ result_cpu = torch.logical_not(cpu_x)
+
+ self.assertEqual(result, result_cpu)
+
+ helper(self._wrap_tensor([1, 1, 0, 0]))
+ helper(self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True))
+ helper(self._wrap_tensor([True, True, False, False]))
+ helper(self._wrap_tensor(1))
+ helper(self._wrap_tensor(0))
+ helper(self._wrap_tensor(True))
+ helper(self._wrap_tensor(False))
+
+ def test_logical_and(self):
+ def helper(x, other):
+ cpu_x = x
+ x = cpu_x.detach().clone().to('mps')
+
+ cpu_other = other
+ other = cpu_other.detach().clone().to('mps')
+
+ result = torch.logical_and(x, other)
+ result_cpu = torch.logical_and(cpu_x, cpu_other)
+ self.assertEqual(result, result_cpu)
+
+ helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor(([1, 0, 0, 1])))
+ helper(
+ self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True),
+ self._wrap_tensor([1, 0, 0, 1], dtype=torch.float)
+ )
+ helper(self._wrap_tensor([True, True, False, False]), self._wrap_tensor([True, False, False, True]))
+ helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(1))
+ helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(0))
+ helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(True))
+ helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(False))
+
+ def test_logical_or(self):
+ def helper(x, other):
+ cpu_x = x
+ x = cpu_x.detach().clone().to('mps')
+
+ cpu_other = other
+ other = cpu_other.detach().clone().to('mps')
+
+ result = torch.logical_or(x, other)
+ result_cpu = torch.logical_or(cpu_x, cpu_other)
+
+ self.assertEqual(result, result_cpu)
+
+ helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor(([1, 0, 0, 1])))
+ helper(
+ self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True),
+ self._wrap_tensor([1, 0, 0, 1], dtype=torch.float)
+ )
+ helper(self._wrap_tensor([True, True, False, False]), self._wrap_tensor([True, False, False, True]))
+ helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(1))
+ helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(0))
+ helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(True))
+ helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(False))
+
+ def test_logical_xor(self):
+ def helper(x, other):
+ cpu_x = x
+ x = cpu_x.detach().clone().to('mps')
+
+ cpu_other = other
+ other = cpu_other.detach().clone().to('mps')
+
+ result = torch.logical_xor(x, other)
+ result_cpu = torch.logical_xor(cpu_x, cpu_other)
+
+ self.assertEqual(result, result_cpu)
+
+ helper(self._wrap_tensor([1, 1, 0, 0]), self._wrap_tensor(([1, 0, 0, 1])))
+ helper(
+ self._wrap_tensor([1, 1, 0, 0], dtype=torch.float, requires_grad=True),
+ self._wrap_tensor([1, 0, 0, 1], dtype=torch.float)
+ )
+ helper(self._wrap_tensor([True, True, False, False]), self._wrap_tensor([True, False, False, True]))
+ helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(1))
+ helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(0))
+ helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(True))
+ helper(self._wrap_tensor((1, 0, 1, 0)), self._wrap_tensor(False))
+
class TestSmoothL1Loss(TestCase):
@@ -1499,7 +1623,6 @@ def test_smooth_l1_loss_reduction_mean_sum_backward(self):
class TestNLLLoss(TestCase):
-
def test_nll_loss_mismatched_batch(self, device='mps'):
x = torch.randn((10, 3), requires_grad=True, device=device)
# t should have size (10,)
@@ -1577,18 +1700,30 @@ def _nll_loss_helper(self, input_size, reduction, expected):
self.assertEqual(input.grad, input_mps.grad.to('cpu'))
def test_as_strided(self):
- def helper(n, c):
- values = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
- values_1 = [[1.0, 1.0], [1.0, 1.0]]
- cpu_x = torch.tensor(values, device='cpu')
- ones1 = torch.tensor(values_1, device='mps')
- x = cpu_x.detach().clone().to('mps').requires_grad_()
- strided_cpu = torch.as_strided(cpu_x, (2, 2), (1, 2))
- strided_mps = torch.as_strided(x, (2, 2), (1, 2))
+ values = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
+ values_1 = [[1.0, 1.0], [1.0, 1.0]]
+ cpu_x = torch.tensor(values, device='cpu')
+ ones1 = torch.tensor(values_1, device='mps')
+ x = cpu_x.detach().clone().to('mps').requires_grad_()
+ strided_cpu = torch.as_strided(cpu_x, (2, 2), (1, 2))
+ strided_mps = torch.as_strided(x, (2, 2), (1, 2))
+ self.assertEqual(strided_mps, strided_cpu)
+ strided_cpu_out = strided_cpu + ones1.to('cpu')
+ strided_mps_out = strided_mps + ones1
+ self.assertEqual(strided_cpu_out, strided_mps_out)
+
+ # test with storage offsets
+ cpu_x = torch.rand(3, 3, device='cpu')
+ mps_x = cpu_x.to('mps')
+ strided_cpu1 = torch.as_strided(cpu_x, (2, 2), (1, 2), 0)
+ strided_mps1 = torch.as_strided(mps_x, (2, 2), (1, 2), 0)
+ strided_cpu2 = torch.as_strided(cpu_x, (2, 2), (1, 2), 1)
+ strided_mps2 = torch.as_strided(mps_x, (2, 2), (1, 2), 1)
+ strided_cpu_out = strided_cpu1 - strided_cpu2
+ strided_mps_out = strided_mps1 - strided_mps2
+ self.assertEqual(strided_cpu_out, strided_mps_out)
- self.assertEqual(strided_mps, strided_cpu)
- helper(3, 3)
def test_sum_backward(self):
def helper(n, c):
@@ -1689,7 +1824,7 @@ def helper(shape, reduction):
helper([8, 4, 5, 7, 6], 'mean')
# Binary Cross Enropy
- def test_bce_loss(self):
+ def test_bce_loss_simple(self):
def helper(shape, reduction):
# create the criterion
loss = torch.nn.BCELoss(reduction=reduction)
@@ -1719,6 +1854,146 @@ def helper(shape, reduction):
# verify if changes in shape would cause cached graph lookup problems
helper([7, 5, 2, 4, 6], 'sum')
helper([8, 4, 5, 7, 6], 'mean')
+ helper([1, 1, 32, 32], 'mean')
+
+ def test_bce_loss_always_nonnegative(self):
+ target = torch.ones(5, device='mps')
+ input = torch.ones(5, device='mps')
+ self.assertEqual((nn.BCELoss()(input, target) < 0).sum(), 0)
+
+ target = torch.zeros(5, device='mps')
+ input = torch.zeros(5, device='mps')
+ self.assertEqual((nn.BCELoss()(input, target) < 0).sum(), 0)
+
+ def test_bce_loss_size_mismatch(self):
+ bceloss = nn.BCELoss()
+ a = torch.rand(25, device='mps')
+ b = torch.rand(25, 1, device='mps')
+ with self.assertRaisesRegex(ValueError, r'Using a target size \('):
+ bceloss(a, b)
+
+ def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss_large_tensors_with_grad(self):
+ x_size = 1024
+ y_size = 256
+ target = torch.rand(x_size, y_size, device='mps')
+
+ for reduction in ['none', 'mean', 'sum']:
+ output_sig = torch.rand(x_size, y_size, device='mps') - 0.5
+ output_logits = output_sig.clone().detach()
+
+ output_sig.requires_grad = True
+ output_logits.requires_grad = True
+ weight = torch.rand(y_size, device='mps')
+
+ loss_sig = nn.BCELoss(weight, reduction=reduction)(
+ torch.sigmoid(output_sig), target
+ )
+ loss_logits = nn.BCEWithLogitsLoss(weight, reduction=reduction)(
+ output_logits, target
+ )
+
+ self.assertEqual(loss_logits, loss_sig)
+
+ if reduction == 'none':
+ grad = torch.rand(x_size, y_size, device='mps')
+ loss_sig.backward(grad)
+ loss_logits.backward(grad)
+ else:
+ loss_sig.backward()
+ loss_logits.backward()
+
+ self.assertEqual(output_sig.grad, output_logits.grad)
+
+ def test_bce_with_logits_has_correct_grad_at_zero(self):
+ output = torch.zeros(3, 1, requires_grad=True, device='mps')
+ target = torch.zeros(3, 1, device='mps')
+ nn.BCEWithLogitsLoss(reduction='sum')(output, target).backward()
+ expected_grad = torch.empty(3, 1, device='mps').fill_(0.5)
+ self.assertEqual(output.grad, expected_grad)
+
+ def test_bce_with_logits_broadcasts_weights(self):
+ target = torch.rand(16, 4, device='mps')
+ output = torch.rand(16, 4, device='mps') - 0.5
+
+ weight = torch.rand(4, device='mps')
+ out1 = nn.BCEWithLogitsLoss(weight)(output, target)
+
+ weight = weight.expand(16, 4).contiguous()
+ out2 = nn.BCEWithLogitsLoss(weight)(output, target)
+
+ self.assertEqual(out1, out2)
+
+ weight = torch.rand(16, 1, device='mps')
+ out1 = nn.BCEWithLogitsLoss(weight)(output, target)
+
+ weight = weight.expand(16, 4).contiguous()
+ out2 = nn.BCEWithLogitsLoss(weight)(output, target)
+
+ self.assertEqual(out1, out2)
+
+ def test_bce_with_logits_ones_in_pos_weights_are_the_same_as_none(self):
+ target = torch.rand(64, 4, device='mps')
+ output = torch.rand(64, 4, device='mps') - 0.5
+ pos_weight = torch.ones(64, 4, device='mps')
+
+ self.assertEqual(nn.BCEWithLogitsLoss()(output, target),
+ nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target))
+
+ def test_bce_with_logits_broadcasts_pos_weights(self):
+ target = torch.rand(64, 4, device='mps')
+ output = torch.rand(64, 4, device='mps') - 0.5
+ pos_weight = torch.rand(4, device='mps')
+ out1 = nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target)
+
+ pos_weight1 = pos_weight.expand(1, 4)
+ out2 = nn.BCEWithLogitsLoss(pos_weight=pos_weight1)(output, target)
+
+ pos_weight2 = pos_weight.expand(64, 4)
+ out3 = nn.BCEWithLogitsLoss(pos_weight=pos_weight2)(output, target)
+
+ self.assertEqual(out1, out2)
+ self.assertEqual(out1, out3)
+
+ def test_bce_with_logits_with_pos_weight_has_correct_grad_at_zero(self):
+ output = torch.zeros(3, 1, requires_grad=True, device='mps')
+ target = torch.zeros(3, 1, device='mps')
+ pos_weight = torch.ones(3, 1, device='mps')
+ nn.BCEWithLogitsLoss(pos_weight=pos_weight, reduction='sum')(output, target).backward()
+ expected_grad = torch.empty(3, 1, device='mps').fill_(0.5)
+ grad = output.grad
+ self.assertEqual(grad, expected_grad)
+
+ def test_bce_with_logits_stability(self):
+ output = torch.tensor([0., -120.], device='mps')
+ target = torch.tensor([0., 1.], device='mps')
+ pos_weight = torch.tensor([1., 1.], device='mps')
+
+ out1 = nn.BCEWithLogitsLoss()(output, target)
+ self.assertTrue(torch.isfinite(out1).all().item())
+
+ out2 = nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target)
+ self.assertTrue(torch.isfinite(out2).all().item())
+
+ def test_bce_loss_broadcasts_weights(self):
+ sigmoid = nn.Sigmoid()
+ target = torch.rand(16, 4, device='mps')
+ output = torch.rand(16, 4, device='mps') - 0.5
+
+ weight = torch.rand(4, device='mps')
+ out1 = nn.BCELoss(weight)(sigmoid(output), target)
+
+ weight = weight.expand(16, 4).contiguous()
+ out2 = nn.BCELoss(weight)(sigmoid(output), target)
+
+ self.assertEqual(out1, out2)
+
+ weight = torch.rand(16, 1, device='mps')
+ out1 = nn.BCELoss(weight)(sigmoid(output), target)
+
+ weight = weight.expand(16, 4).contiguous()
+ out2 = nn.BCELoss(weight)(sigmoid(output), target)
+
+ self.assertEqual(out1, out2)
def test_log_softmax(self):
values = [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]]
@@ -1881,9 +2156,14 @@ def helper(shape):
helper((2, 3, 4, 5))
- # Test forward argmax
- def test_argmax(self):
- def helper(n, c, h, w, dtype=torch.float32):
+    # Test forward argmin and argmax
+ def test_argmin_argmax(self):
+ def helper(n, c, h, w, reduction_type, dtype=torch.float32):
+ if reduction_type == "max":
+ arg_reduction_fn = torch.argmax
+ else:
+ arg_reduction_fn = torch.argmin
+
cpu_x = None
x = None
if(dtype not in [torch.float32, torch.bool]):
@@ -1896,46 +2176,50 @@ def helper(n, c, h, w, dtype=torch.float32):
cpu_x = torch.randn(n, c, h, w, device='cpu', dtype=dtype, requires_grad=True)
x = cpu_x.detach().clone().to('mps').requires_grad_()
- y = torch.argmax(x)
- ref_y = torch.argmax(cpu_x)
+ y = arg_reduction_fn(x)
+ ref_y = arg_reduction_fn(cpu_x)
self.assertEqual(y, ref_y)
- y_0 = torch.argmax(x, dim=0)
- refy_0 = torch.argmax(cpu_x, dim=0)
+ y_0 = arg_reduction_fn(x, dim=0)
+ refy_0 = arg_reduction_fn(cpu_x, dim=0)
self.assertEqual(y_0, refy_0)
- y_0dim = torch.argmax(x, dim=0, keepdim=True)
- refy_0dim = torch.argmax(cpu_x, dim=0, keepdim=True)
+ y_0dim = arg_reduction_fn(x, dim=0, keepdim=True)
+ refy_0dim = arg_reduction_fn(cpu_x, dim=0, keepdim=True)
self.assertEqual(y_0dim, refy_0dim)
- y_1 = torch.argmax(x, dim=1)
- refy_1 = torch.argmax(cpu_x, dim=1)
+ y_1 = arg_reduction_fn(x, dim=1)
+ refy_1 = arg_reduction_fn(cpu_x, dim=1)
self.assertEqual(y_1, refy_1)
- y_1dim = torch.argmax(x, dim=1, keepdim=True)
- refy_1dim = torch.argmax(cpu_x, dim=1, keepdim=True)
+ y_1dim = arg_reduction_fn(x, dim=1, keepdim=True)
+ refy_1dim = arg_reduction_fn(cpu_x, dim=1, keepdim=True)
self.assertEqual(y_1dim, refy_1dim)
- y_2 = torch.argmax(x, dim=2)
- refy_2 = torch.argmax(cpu_x, dim=2)
+ y_2 = arg_reduction_fn(x, dim=2)
+ refy_2 = arg_reduction_fn(cpu_x, dim=2)
self.assertEqual(y_2, refy_2)
- y_2dim = torch.argmax(x, dim=2, keepdim=True)
- refy_2dim = torch.argmax(cpu_x, dim=2, keepdim=True)
+ y_2dim = arg_reduction_fn(x, dim=2, keepdim=True)
+ refy_2dim = arg_reduction_fn(cpu_x, dim=2, keepdim=True)
self.assertEqual(y_2dim, refy_2dim)
- y_3 = torch.argmax(x, dim=3)
- refy_3 = torch.argmax(cpu_x, dim=3)
+ y_3 = arg_reduction_fn(x, dim=3)
+ refy_3 = arg_reduction_fn(cpu_x, dim=3)
self.assertEqual(y_3, refy_3)
- y_3dim = torch.argmax(x, dim=3, keepdim=True)
- refy_3dim = torch.argmax(cpu_x, dim=3, keepdim=True)
+ y_3dim = arg_reduction_fn(x, dim=3, keepdim=True)
+ refy_3dim = arg_reduction_fn(cpu_x, dim=3, keepdim=True)
self.assertEqual(y_3dim, refy_3dim)
- helper(2, 8, 4, 4, torch.float32)
- helper(2, 8, 4, 4, torch.int32)
- helper(2, 8, 4, 4, torch.float16)
- helper(2, 8, 4, 4, torch.int64)
+ helper(2, 8, 4, 4, "max", torch.float32)
+ helper(2, 8, 4, 4, "max", torch.int32)
+ helper(2, 8, 4, 4, "max", torch.float16)
+ helper(2, 8, 4, 4, "max", torch.int64)
+ helper(2, 8, 4, 4, "min", torch.float32)
+ helper(2, 8, 4, 4, "min", torch.int32)
+ helper(2, 8, 4, 4, "min", torch.float16)
+ helper(2, 8, 4, 4, "min", torch.int64)
# Test forward max
# Note - don't test grad now
@@ -2510,6 +2794,8 @@ def helper(shape):
self.assertEqual(two_three_keepdim_std, two_three_dim_keepstd_cpu)
helper((4, 5, 6, 7))
+        # verify that a change in the input shape does not cause problems with graph caching
+ helper((9, 5, 6, 7))
# Test var
def test_var(self):
@@ -2608,6 +2894,52 @@ def helper(shape):
self.assertEqual(two_three_keepdim_var, two_three_dim_keepvar_cpu)
helper((4, 5, 6, 7))
+        # verify that a change in the input shape does not cause problems with graph caching
+ helper((9, 5, 6, 7))
+
+ # Test forward amax
+ def test_amax(self):
+ def helper(shape, dim, keepdim):
+ cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
+ x = cpu_x.detach().clone().to('mps').requires_grad_()
+
+ result = torch.amax(x, dim=dim, keepdim=keepdim)
+ result_cpu = torch.amax(cpu_x, dim=dim, keepdim=keepdim)
+
+ cpu_grad = torch.randn(result_cpu.shape)
+ grad = cpu_grad.to('mps')
+
+ result_cpu.backward(gradient=cpu_grad)
+ result.backward(gradient=grad)
+
+ self.assertEqual(result, result_cpu)
+ self.assertEqual(x.grad, cpu_x.grad)
+
+ for dim in ([], [0], [0, 1], [2, 3]):
+ for keepdim in [False, True]:
+ helper((2, 8, 4, 5), dim, keepdim)
+
+ # Test forward amin
+ def test_amin(self):
+ def helper(shape, dim, keepdim):
+ cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
+ x = cpu_x.detach().clone().to('mps').requires_grad_()
+
+ result = torch.amin(x, dim=dim, keepdim=keepdim)
+ result_cpu = torch.amin(cpu_x, dim=dim, keepdim=keepdim)
+
+ cpu_grad = torch.randn(result_cpu.shape)
+ grad = cpu_grad.to('mps')
+
+ result_cpu.backward(gradient=cpu_grad)
+ result.backward(gradient=grad)
+
+ self.assertEqual(result, result_cpu)
+ self.assertEqual(x.grad, cpu_x.grad)
+
+ for dim in ([], [0], [0, 1], [2, 3]):
+ for keepdim in [False, True]:
+ helper((2, 8, 4, 5), dim, keepdim)
# Test minimum and maximum
def test_minimum_maximum(self):
@@ -2720,16 +3052,18 @@ def helper(n, c, h, w):
def test_divmode(self):
def helper(shape, rounding_mode):
- cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
- mps_x = cpu_x.detach().clone().to('mps')
- # clamp to avoid division by 0
- cpu_y = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False).clamp_min_(0.1)
- mps_y = cpu_y.detach().clone().to('mps')
-
- result_div_cpu = torch.div(cpu_x, cpu_y, rounding_mode=rounding_mode)
- result_div_mps = torch.div(mps_x, mps_y, rounding_mode=rounding_mode)
- self.assertEqual(result_div_mps, result_div_cpu)
-
+ for dtype in [torch.float32]:
+ cpu_x = torch.randn(shape, device='cpu', dtype=dtype, requires_grad=False)
+ mps_x = cpu_x.detach().clone().to('mps')
+                # randn denominators are almost surely nonzero, so no clamping is needed here
+ cpu_y = torch.randn(shape, device='cpu', dtype=dtype, requires_grad=False)
+ mps_y = cpu_y.detach().clone().to('mps')
+
+ result_div_cpu = torch.div(cpu_x, cpu_y, rounding_mode=rounding_mode)
+ result_div_mps = torch.div(mps_x, mps_y, rounding_mode=rounding_mode)
+ self.assertEqual(result_div_mps, result_div_cpu)
+
+ helper((2, 8, 4, 5), None)
helper((2, 8, 4, 5), "floor")
helper((2, 8, 4, 5), "trunc")
@@ -2790,6 +3124,16 @@ def helper(n, c):
helper(3, 3)
+ def test_assert_topk(self):
+        # k > 16 currently raises an error, as expected
+ with self.assertRaisesRegex(RuntimeError, "Currently topk on mps works only for k<=16"):
+ xs = torch.arange(30).to('mps')
+ xs.topk(30)
+ # for k <= 16 it works fine
+ ys_cpu = torch.arange(30)
+ ys_mps = ys_cpu.to('mps')
+ self.assertEqual(ys_cpu.topk(16), ys_mps.topk(16))
+
def test_topk(self):
def helper(shape):
cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
@@ -2921,6 +3265,14 @@ def helper(shape_x, shape_y, shape_z):
# Empty test - Currently failing! Empty tensor not handled!
# helper([0, 2, 4, 5], [2, 0, 4, 5], [2, 5, 0, 5])
+ def test_constant_pad(self):
+ m = torch.nn.ConstantPad2d((-2, -2, -2, -2), 3.5)
+ input_cpu = torch.randn(1, 16, 16, 16)
+ input_mps = input_cpu.detach().clone().to("mps")
+ r_cpu = m(input_cpu)
+ r_mps = m(input_mps)
+ self.assertEqual(r_cpu, r_mps.to("cpu"))
+
def test_pad(self):
def helper(shape, padding, op):
inputCPU = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
@@ -3191,6 +3543,52 @@ def helper(shape, alpha=1.0):
for shape in [[], (2, 3), (2, 8, 4, 5)]:
for alpha in [0.000001, 1.0, 2.3, 0.34, 23]:
helper(shape, alpha)
+
+ # Test glu
+ def test_glu(self):
+ def helper(shape, dim=0):
+ cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
+ x = cpu_x.detach().clone().to('mps').requires_grad_()
+
+ for activation_func in [torch.nn.GLU(dim=dim)]:
+ glu_result = activation_func(x)
+ glu_result_cpu = activation_func(cpu_x)
+
+ cpu_grad = torch.randn(glu_result_cpu.shape)
+ grad = cpu_grad.to('mps')
+
+ glu_result.backward(gradient=grad)
+ glu_result_cpu.backward(gradient=cpu_grad)
+
+ self.assertEqual(glu_result, glu_result_cpu)
+ self.assertEqual(x.grad, cpu_x.grad)
+
+ for shape in [[4], (2, 4), (2, 8, 4, 6)]:
+ for dim in range(len(shape)):
+ helper(shape, dim)
+
+ # Test softplus
+ def test_softplus(self):
+ def helper(shape):
+ cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=True)
+ x = cpu_x.detach().clone().to('mps').requires_grad_()
+
+ softplus_result = torch.nn.Softplus(beta=0.5, threshold=0.5)(x)
+ softplus_result_cpu = torch.nn.Softplus(beta=0.5, threshold=0.5)(cpu_x)
+
+ cpu_grad = torch.randn(softplus_result.shape)
+ grad = cpu_grad.to('mps')
+
+ softplus_result.backward(gradient=grad)
+ softplus_result_cpu.backward(gradient=cpu_grad)
+
+ self.assertEqual(softplus_result, softplus_result_cpu)
+ self.assertEqual(x.grad, cpu_x.grad)
+
+ # Test empty shape too
+ for shape in [(), (2, 3), (10, 10), (2, 3, 4, 5)]:
+ helper(shape)
+
# Test silu
def test_silu(self):
@@ -3467,6 +3865,28 @@ def helper(shape):
helper((2, 8, 4, 5))
+ # Test flip
+ def test_flip(self):
+ def helper(shape, dims):
+ cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
+ x = cpu_x.detach().clone().to('mps')
+
+ flip_result = torch.flip(x, dims=dims)
+ flip_result_cpu = torch.flip(cpu_x, dims=dims)
+
+ self.assertEqual(flip_result, flip_result_cpu)
+
+ helper((2, 8, 4, 5), [0])
+ helper((8, 8, 4, 5), [0, 1])
+ helper((2, 8, 4, 5), (0, 1, 2, 3))
+ helper((2, 3, 3), (-1,))
+ # empty dims
+ helper((2, 8, 4, 5), [])
+ # input.numel() == 1
+ helper((1,), (0,))
+ # input.numel() == 0
+ helper((0,), (0,))
+
# Test index select
def test_index_select(self):
def helper(shape, dim, index, idx_dtype=torch.int32):
@@ -3886,9 +4306,6 @@ def helper(shape, x_shape, y_shape, cond_dtype=torch.bool, x_dtype=torch.float):
# Test normal
def test_normal(self):
def helper(shape, mean=0.0, std=1.0):
- cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
- x = cpu_x.detach().clone().to('mps')
-
mps_out = torch.normal(mean, std, shape, device='mps')
mean_array = np.ones(shape)
@@ -3901,6 +4318,7 @@ def helper(shape, mean=0.0, std=1.0):
cpu_std_tensor = torch.tensor(std_array, device='cpu', dtype=torch.float, requires_grad=False)
std_tensor = cpu_std_tensor.detach().clone().to('mps')
+ # test out
mps_out = torch.zeros(shape, device='mps')
torch.normal(mean_tensor, std, out=mps_out)
@@ -3910,14 +4328,22 @@ def helper(shape, mean=0.0, std=1.0):
mps_out = torch.zeros(shape, device='mps')
torch.normal(mean_tensor, std_tensor, out=mps_out)
+ # test without out
+ mps_out = torch.normal(mean_tensor, std)
+ self.assertEqual(mps_out.size(), mean_tensor.size())
+
+ mps_out = torch.normal(mean, std_tensor)
+ self.assertEqual(mps_out.size(), std_tensor.size())
+
+ inferred_shape = torch.broadcast_shapes(mean_tensor.size(), std_tensor.size())
+ mps_out = torch.normal(mean_tensor, std_tensor)
+ self.assertEqual(mps_out.size(), inferred_shape)
+
helper((2, 3, 4, 5, 6))
helper((100, 100), 2.5, 1.2)
def test_bernoulli(self):
def helper(shape, prob=0.5):
- cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
- x = cpu_x.detach().clone().to('mps')
-
prob_array = np.ones(shape)
prob_array *= prob
cpu_prob_tensor = torch.tensor(prob_array, device='cpu', dtype=torch.float, requires_grad=False)
@@ -3956,42 +4382,116 @@ def helper(shape, low, high, dtype=torch.int32):
helper([100, 100], 23, 89, dtype=torch.int64)
helper([100, 100], 0, 2, dtype=torch.bool)
- # Test add
- def test_add_binary_op(self):
- def helper(shape, alpha):
- cpu_x = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
- x = cpu_x.detach().clone().to('mps')
+ # Test exponential
+ def test_exponential(self):
+ def helper(shape, lamda, dtype=torch.float32):
- cpu_y = torch.randn(shape, device='cpu', dtype=torch.float, requires_grad=False)
- y = cpu_y.detach().clone().to('mps')
+ mps_out = torch.zeros(shape, device='mps', dtype=dtype)
+ mps_out.exponential_(lamda)
- cpu_out = torch.add(cpu_x, cpu_y, alpha=alpha)
- out = torch.add(x, y, alpha=alpha)
+            # check the sample statistics against the distribution; use loose tolerances
+            # since these are finite-sample estimates
+            self.assertEqual(mps_out.to('cpu').float().mean(), 1 / lamda, atol=0.2, rtol=0.2)
+            self.assertEqual(mps_out.to('cpu').float().std() ** 2, 1 / (lamda**2), atol=0.2, rtol=0.2)
- self.assertEqual(out, cpu_out)
+ for dtype in [torch.float32, torch.float16]:
+ helper([100, 100], 2, dtype)
+ helper([100, 100], 1, dtype)
+ helper([100, 100], 3, dtype)
+ helper([100, 100], 0.5, dtype)
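+        # Exponential(lamda) has mean 1/lamda and variance 1/lamda**2, so with 100 * 100 samples
+        # the estimates computed in helper() should land close to these values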
+
+ def test_exponential_1(self):
+ rate = torch.randn(5, 5).abs().requires_grad_()
+ rate_1d = torch.randn(1).abs().requires_grad_()
+ self.assertEqual(Exponential(rate).sample().size(), (5, 5))
+ self.assertEqual(Exponential(rate).sample((7,)).size(), (7, 5, 5))
+ self.assertEqual(Exponential(rate_1d).sample((1,)).size(), (1, 1))
+ self.assertEqual(Exponential(rate_1d).sample().size(), (1,))
+ self.assertEqual(Exponential(0.2).sample((1,)).size(), (1,))
+ self.assertEqual(Exponential(50.0).sample((1,)).size(), (1,))
+ # Test add
+ def test_add_binary_op(self):
+ def helper(shape, alpha):
+ for dtype in [torch.float16, torch.float32]:
+ cpu_x = torch.randn(shape, device='cpu', dtype=dtype, requires_grad=False)
+ mps_x = cpu_x.detach().clone().to('mps')
+
+ cpu_y = torch.randn(shape, device='cpu', dtype=dtype, requires_grad=False)
+ mps_y = cpu_y.detach().clone().to('mps')
+
+ cpu_out = torch.add(cpu_x, cpu_y, alpha=alpha)
+ mps_out = torch.add(mps_x, mps_y, alpha=alpha)
+ # fp16 isn't accurate when alpha is passed
+ # TODO: remove or fix 'tol' when we fix problems with fp16
+ tol = 1e-3 if dtype is torch.float16 else None
+ self.assertEqual(mps_out, cpu_out, rtol=tol, atol=tol)
+ # create a scalar tensor
+ cpu_s = torch.tensor(2.3, device='cpu', dtype=dtype, requires_grad=False)
+ mps_s = cpu_s.detach().clone().to('mps')
+ # primary tensor is scalar
+ self.assertEqual(torch.add(cpu_s, cpu_y), torch.add(mps_s, mps_y))
+ # secondary tensor is scalar
+ self.assertEqual(torch.add(cpu_x, cpu_s), torch.add(mps_x, mps_s))
+
+ helper((2, 8, 4, 5), 1.0)
+ helper((2, 8, 4, 5), 0.0)
helper((2, 8, 4, 5), 0.1)
helper((2, 8, 3, 5), 0.1)
helper((2, 8, 3, 5), 0.2)
# Test add
def test_add_scalars(self):
- def helper(alpha=1.0):
- cpu_x = torch.tensor(2.3, device='cpu', dtype=torch.float, requires_grad=False)
- x = cpu_x.detach().clone().to('mps')
-
- cpu_y = torch.tensor(3.4, device='cpu', dtype=torch.float, requires_grad=False)
- y = cpu_y.detach().clone().to('mps')
+ def helper(alpha):
+ for dtype in [torch.float16, torch.float32]:
+ cpu_x = torch.tensor(2.3, device='cpu', dtype=dtype, requires_grad=False)
+ x = cpu_x.detach().clone().to('mps')
- cpu_out = torch.add(cpu_x, cpu_y, alpha=alpha)
- out = torch.add(x, y, alpha=alpha)
+ cpu_y = torch.tensor(3.4, device='cpu', dtype=dtype, requires_grad=False)
+ y = cpu_y.detach().clone().to('mps')
- self.assertEqual(out, cpu_out)
+ cpu_out = torch.add(cpu_x, cpu_y, alpha=alpha)
+ out = torch.add(x, y, alpha=alpha)
+ # fp16 isn't accurate when alpha is passed
+ tol = 1e-3 if dtype is torch.float16 else None
+ self.assertEqual(out, cpu_out, rtol=tol, atol=tol)
- helper()
+ helper(1.0)
+ helper(0.0)
helper(0.1)
helper(0.2)
+ # Test int32 tensor + int64 scalar add
+ # see https://github.com/pytorch/pytorch/issues/79835#issuecomment-1164984534
+ x = torch.ones(4, dtype=torch.int32, device='mps')
+ self.assertEqual(x + 1, torch.full((4,), 2, dtype=torch.int32, device='mps'))
+ self.assertTrue(torch.equal(x + 1.5, torch.full((4,), 2.5, device='mps')))
+
+ def test_types_binary_op(self):
+ # Float * Bool
+ cpu_x = torch.arange(5, dtype=torch.float32, device="cpu") * torch.tensor([True, False, True, False, True], device="cpu")
+ mps_x = torch.arange(5, dtype=torch.float32, device="mps") * torch.tensor([True, False, True, False, True], device="mps")
+ self.assertEqual(cpu_x, mps_x)
+ # Float * Int64
+ cpu_y = torch.arange(5, dtype=torch.float32, device="cpu") * torch.tensor([1, 0, 1, 0, 1], device="cpu")
+ mps_y = torch.arange(5, dtype=torch.float32, device="mps") * torch.tensor([1, 0, 1, 0, 1], device="mps")
+ self.assertEqual(cpu_y, mps_y)
+
+ def test_unary_ops(self):
+ def helper(shape, op):
+ for dtypef in [torch.float32]:
+ cpu_x = torch.randn(shape, device='cpu', dtype=dtypef, requires_grad=False)
+ mps_x = cpu_x.detach().clone().to('mps')
+ self.assertEqual(op(cpu_x), op(mps_x))
+
+ for dtypei in [torch.int32, torch.int16]:
+ cpu_x = torch.randint(0, 1000, shape, device='cpu', dtype=dtypei, requires_grad=False)
+ mps_x = cpu_x.to('mps')
+ self.assertEqual(op(cpu_x), op(mps_x), rtol=1e-4, atol=1e-4)
+
+ helper((2, 8, 4, 5), torch.exp)
+ helper((2, 8, 3, 5), torch.exp2)
+ helper((2, 8, 3, 5), torch.log)
+ helper((2, 8, 3, 5), torch.cos)
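+        # for the integer inputs, exp/exp2/log/cos promote to the default float dtype, so the CPU
+        # and MPS results are compared as float32 and the small rtol/atol absorb rounding differences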
+
def test_atan2(self):
def helper(shape):
input_cpu = torch.randn(shape)
@@ -4301,6 +4801,845 @@ def maybe_transpose(cond, m):
m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
self._test_addmm_addmv(torch.addmm, M, m1, m2, transpose_out=t4)
+class TestGatherScatter(TestCase):
+ def test_slicing_with_step(self):
+ # Slicing with step
+ # https://github.com/pytorch/pytorch/issues/78886
+ x_mps = torch.zeros(10, dtype=torch.float32, device="mps")
+ x_mps[::2] = 1.0
+
+ x_cpu = torch.zeros(10, dtype=torch.float32, device="cpu")
+ x_cpu[::2] = 1.0
+
+ self.assertEqual(x_cpu, x_mps)
+
+ def test_slicing_replace_column(self):
+ # https://github.com/pytorch/pytorch/issues/78074
+ def _helper(tensor_data):
+ x_cpu = torch.tensor(tensor_data)
+ x_mps = x_cpu.to('mps')
+
+ x_cpu[:, 0] = 7
+ x_mps[:, 0] = 7
+
+ self.assertEqual(x_cpu, x_mps)
+
+ _helper([[1, 2, 3], [4, 5, 6]])
+ _helper([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ _helper([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
+
+ def test_inplace_scatter(self):
+ # https://github.com/pytorch/pytorch/issues/79672
+ a_mps = torch.ones((2, 2),).to(torch.device("mps"))
+ b_mps = torch.ones((2, 2),).to(torch.device("mps"))
+
+ a_cpu = torch.ones((2, 2),).to(torch.device("cpu"))
+ b_cpu = torch.ones((2, 2),).to(torch.device("cpu"))
+
+ a_mps[:, 0] += b_mps[:, 0]
+ a_cpu[:, 0] += b_cpu[:, 0]
+ self.assertEqual(a_cpu, a_mps)
+
+ a_mps[:, 0] = a_mps[:, 0] + b_mps[:, 0]
+ a_cpu[:, 0] = a_cpu[:, 0] + b_cpu[:, 0]
+ self.assertEqual(a_cpu, a_mps)
+
+# These tests were taken from test/test_view_ops.py
+# They are a subset of those tests, as currently only this subset is working.
+# This whole `class` will be removed when we add generic device testing. There
+# are no additional tests added apart from what is part of test_view_ops.py
+class TestViewOpsMPS(TestCase):
+ exact_dtype = True
+
+ def is_view_of(self, base, other):
+ if (not other._is_view() or
+ other is base or
+ other._base is not base or
+ base.device != other.device):
+ return False
+ # Note: only validates storage on native device types
+ # because some accelerators, like XLA, do not expose storage
+ if base.device.type == 'mps':
+ if base.storage().data_ptr() != other.storage().data_ptr():
+ return False
+
+ return True
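+        # e.g. t.view(-1) or t[:2] of an MPS tensor t should pass this check,
+        # while t.clone() or a copy on another device should not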
+
+ # Returns true if v1 and v2 are views of the same base
+ def is_view_of_same_base(self, v1, v2):
+ if (not v1._is_view() or v1 is v2):
+ return False
+ return self.is_view_of(v1._base, v2)
+
+    # Returns the input as-is if contiguous=True, otherwise returns a transposed (non-contiguous) view
+ def _do_transpose(self, x, contiguous=False, dim0=0, dim1=1):
+ if contiguous:
+ return x
+ else:
+ return x.transpose(dim0, dim1)
+
+ def test_diagonal_view(self, device="mps"):
+ t = torch.ones((5, 5), device=device)
+ v = torch.diagonal(t)
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0] = 0
+ self.assertEqual(t[0, 0], v[0])
+
+ t = torch.ones((3, 3, 3), device="mps")
+ v = torch.diagonal(t, offset=1, dim1=1, dim2=2)
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0, 0] = 0
+ self.assertEqual(t[0, 0, 1], v[0, 0])
+
+ def test_select_view(self, device="mps") -> None:
+ t = torch.ones((5, 5), device=device)
+ v = t.select(0, 2)
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0] = 0
+ self.assertEqual(t[2, 0], v[0])
+
+ def test_unbind_view(self, device="mps") -> None:
+ t = torch.zeros((5, 5), device=device)
+ tup = torch.unbind(t)
+
+ for idx, v in enumerate(tup):
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0] = idx + 1
+ self.assertEqual(t[idx, 0], v[0])
+
+ def test_expand_view(self, device="mps") -> None:
+ t = torch.ones((5, 1), device=device)
+ v = t.expand(5, 5)
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[2, 2] = 0
+ self.assertEqual(t[2, 0], v[2, 2])
+
+ def test_expand_as_view(self, device="mps"):
+ t = torch.ones((5, 1), device=device)
+ e = torch.empty((5, 5), device=device)
+ v = t.expand_as(e)
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[2, 2] = 0
+ self.assertEqual(t[2, 0], v[2, 2])
+
+ def test_narrow_view(self, device="mps"):
+ t = torch.ones((5, 5), device=device)
+ v = torch.narrow(t, 1, 2, 2)
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0, 0] = 0
+ self.assertEqual(t[0, 2], v[0, 0])
+
+ def test_permute_view(self, device="mps") -> None:
+ t = torch.ones((5, 5), device=device)
+ v = t.permute(1, 0)
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0, 1] = 0
+ self.assertEqual(t[1, 0], v[0, 1])
+
+ def test_transpose_view(self, device="mps"):
+ for fn in (torch.swapdims, torch.swapaxes, torch.transpose):
+ t = torch.ones((5, 5), device=device)
+ v = fn(t, 0, 1)
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0, 1] = 0
+ self.assertEqual(t[1, 0], v[0, 1])
+
+ def test_transpose_inplace_view(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ v = t.view_as(t)
+ v = v.swapdims_(0, 1)
+ self.assertTrue(self.is_view_of(t, v))
+ v[0, 1] = 0
+ self.assertEqual(t[1, 0], v[0, 1])
+
+ t = torch.ones(5, 5, device=device)
+ v = t.view_as(t)
+ v = v.swapaxes_(0, 1)
+ self.assertTrue(self.is_view_of(t, v))
+ v[0, 1] = 0
+ self.assertEqual(t[1, 0], v[0, 1])
+
+ t = torch.ones(5, 5, device=device)
+ v = t.view_as(t)
+ v = v.transpose_(0, 1)
+ self.assertTrue(self.is_view_of(t, v))
+ v[0, 1] = 0
+ self.assertEqual(t[1, 0], v[0, 1])
+
+ def test_t_view(self, device="mps"):
+ t = torch.ones((5, 5), device=device)
+ v = t.t()
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0, 1] = 0
+ self.assertEqual(t[1, 0], v[0, 1])
+
+ def test_t_inplace_view(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ v = t.view_as(t)
+ v = v.t_()
+ self.assertTrue(self.is_view_of(t, v))
+ v[0, 1] = 0
+ self.assertEqual(t[1, 0], v[0, 1])
+
+ def test_T_view(self, device="mps"):
+ for op in ("T", "H", "mT", "mH"):
+ t = torch.ones((5, 5), device=device)
+ v = getattr(t, op)
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0, 1] = 0
+ self.assertEqual(t[1, 0], v[0, 1])
+
+ # requires aten::unfold
+ # def test_unfold_view(self, device="mps"):
+ # t = torch.ones(10, device=device)
+ # v = t.unfold(0, 3, 2)
+ # self.assertTrue(self.is_view_of(t, v))
+
+ # v[1, 0] = 0
+ # self.assertEqual(t[2], v[1, 0])
+
+ def test_squeeze_view(self, device="mps"):
+ t = torch.ones(5, 1, 5, device=device)
+ v = torch.squeeze(t)
+ self.assertTrue(self.is_view_of(t, v))
+ v[0, 1] = 0
+ self.assertTrue(t is v._base)
+
+ def test_squeeze_inplace_view(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ v = t.view_as(t)
+ v = v.squeeze_()
+ self.assertTrue(self.is_view_of(t, v))
+ v[0, 1] = 0
+ self.assertTrue(t is v._base)
+
+ def test_unsqueeze_view(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ v = torch.unsqueeze(t, 1)
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0, 0, 1] = 0
+ self.assertEqual(t[0, 1], v[0, 0, 1])
+
+ def test_unsqueeze_inplace_view(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ v = t.view_as(t)
+ v = v.unsqueeze_(1)
+ self.assertTrue(self.is_view_of(t, v))
+ v[0, 0, 1] = 0
+ self.assertEqual(t[0, 1], v[0, 0, 1])
+
+ def test_as_strided_view(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ v = torch.as_strided(t, (25,), (1,))
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[6] = 0
+ self.assertEqual(t[1, 1], v[6])
+
+ def test_as_strided_inplace_view(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ v = t.view_as(t)
+ v = v.as_strided_((25,), (1,))
+ self.assertTrue(self.is_view_of(t, v))
+ v[6] = 0
+ self.assertEqual(t[1, 1], v[6])
+
+ def test_view_view(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ v = t.view(25)
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[6] = 0
+ self.assertEqual(t[1, 1], v[6])
+
+ def test_view_as_view(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ e = torch.empty((25,))
+ v = t.view_as(e)
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[6] = 0
+ self.assertEqual(t[1, 1], v[6])
+
+ def test_contiguous_self(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ s = t.contiguous()
+ self.assertTrue(s is t)
+
+ def test_contiguous_nonview(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ nv = t.t().contiguous()
+ self.assertTrue(not self.is_view_of(t, nv))
+
+ nv[0, 0] = 0
+ self.assertNotEqual(t[0, 0], nv[0, 0])
+
+ def test_reshape_view(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ v = torch.reshape(t, (25,))
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[6] = 0
+ self.assertEqual(t[1, 1], v[6])
+
+ def test_reshape_as_view(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ e = torch.empty((25,), device=device)
+ v = t.reshape_as(e)
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[6] = 0
+ self.assertEqual(t[1, 1], v[6])
+
+ def test_reshape_nonview(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ nv = torch.reshape(t.t(), (25,))
+ self.assertTrue(not self.is_view_of(t, nv))
+
+ nv[6] = 0
+ self.assertNotEqual(t[1, 1], nv[6])
+
+ def test_flatten_view(self, device="mps"):
+ def test_writes_propagate(t, v):
+ idx_t = (0,) * t.ndim
+ idx_v = (0,) * v.ndim
+ v[idx_v] = 0
+ self.assertEqual(t[idx_t], v[idx_v])
+
+ t = torch.ones(1, 2, 3, 4, device=device)
+ v = t.flatten()
+ self.assertTrue(self.is_view_of(t, v))
+ test_writes_propagate(t, v)
+
+ # zero-dimensional tensor
+ t = torch.tensor(1, device=device)
+ v = t.flatten()
+ test_writes_propagate(t, v)
+ self.assertTrue(self.is_view_of(t, v))
+
+ t = torch.ones(1, 2, 3, 4, device=device).transpose(2, 3)
+ v = t.flatten(0, 1)
+ test_writes_propagate(t, v)
+ self.assertTrue(self.is_view_of_same_base(t, v))
+
+ # stride[i] = stride[i + 1] * size[i + 1] is satisfied for 3 groups:
+ t = torch.ones(720, device=device) \
+ .as_strided((2, 3, 2, 3, 5, 4), (6, 2, 15, 5, 1, 0))
+ # [--1--|---2---|-3-] [--1--|----2---|-3-]
+ v1 = t.flatten(0, 1)
+ v2 = v1.flatten(1, 3)
+ v3 = v2.flatten(2, 2)
+ test_writes_propagate(t, v1)
+ self.assertTrue(self.is_view_of_same_base(t, v1))
+ test_writes_propagate(t, v2)
+ self.assertTrue(self.is_view_of_same_base(t, v2))
+ test_writes_propagate(t, v3)
+ self.assertTrue(self.is_view_of_same_base(t, v3))
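+        # the (0, 1) flatten above stays a view because stride[0] == stride[1] * size[1]
+        # (6 == 2 * 3) holds for the as_strided tensor, matching the first group in the sketch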
+
+ def test_flatten_nonview(self, device="mps"):
+ def assert_is_nonview(t, nv):
+ idx_t = (0,) * t.ndim
+ idx_nv = (0,) * nv.ndim
+ self.assertTrue(not nv._is_view())
+ nv[idx_nv] = 0
+ self.assertNotEqual(t[idx_t], nv[idx_nv])
+ t = torch.ones(2, 3, 2, 3, device=device).transpose(2, 3)
+ nv = t.flatten(1, 3)
+ assert_is_nonview(t, nv)
+
+ t = torch.ones(2, 2, device=device).T
+ nv = t.flatten()
+ assert_is_nonview(t, nv)
+
+ # flatten returns the original object if start_dim=end_dim
+        t = torch.ones(2, 2, device=device)
+ nv = t.flatten(1, 1)
+ self.assertTrue(t is nv)
+
+ def test_basic_indexing_slice_view(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ v = t[:2, :3]
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0, 0] = 0
+ self.assertEqual(t[0, 0], v[0, 0])
+
+ def test_basic_indexing_ellipses_view(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ v = t[..., :2]
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0, 0] = 0
+ self.assertEqual(t[0, 0], v[0, 0])
+
+ def test_basic_indexing_newaxis_view(self, device="mps"):
+ t = torch.ones(5, 5, device=device)
+ v = t[None, :2, 3]
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0, 0] = 0
+ self.assertEqual(t[0, 3], v[0, 0])
+
+ def test_chunk_view(self, device="mps"):
+ t = torch.zeros(3, 3, device=device)
+ l = torch.chunk(t, 3)
+
+ for idx, v in enumerate(l):
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0, 0] = idx + 1
+ self.assertEqual(t[idx, 0], v[0, 0])
+
+ def test_split_view(self, device="mps"):
+ t = torch.zeros(3, 3, device=device)
+ l = torch.split(t, [1, 1, 1])
+
+ for idx, v in enumerate(l):
+ self.assertTrue(self.is_view_of(t, v))
+
+ v[0, 0] = idx + 1
+ self.assertEqual(t[idx, 0], v[0, 0])
+
+ def test_movedim_view(self, device="mps"):
+ def run_test(device, op):
+ t = torch.zeros(3, 3, device=device)
+ out = op(t)
+
+ self.assertTrue(self.is_view_of(t, out))
+
+ # Randomly change values in output
+ # and verify that original is changed
+ # as well.
+ for _ in range(3):
+ idx_1, idx_2 = random.randint(0, 2), random.randint(0, 2)
+ out[idx_1, idx_2] = random.random()
+ self.assertEqual(t[idx_2, idx_1], out[idx_1, idx_2])
+
+ for fn in [torch.movedim, torch.moveaxis]:
+ op = partial(fn, source=(0, 1), destination=(1, 0))
+ run_test(device, op)
+
+ op = partial(fn, source=0, destination=1)
+ run_test(device, op)
+
+ # Testing that the generated view_copy kernel and its derivative are implemented correctly
+ def test_view_copy(self, device="mps"):
+ a = torch.randn(4, device=device, requires_grad=True)
+ a_ref = a.clone().detach().requires_grad_()
+ a_view = a_ref.view(2, 2)
+ a_view_copy = torch.view_copy(a, (2, 2))
+
+ # view_copy ops don't preserve view relationship
+ self.assertTrue(self.is_view_of(a_ref, a_view))
+ self.assertFalse(self.is_view_of(a, a_view_copy))
+
+ a_view_copy.sum().backward()
+ a_view.sum().backward()
+
+ # forward and backward give the same shape + result
+ self.assertEqual(a_view_copy, a_view)
+ self.assertEqual(a.grad, a_ref.grad)
+
+ def test_view_copy_out(self, device="mps"):
+ a = torch.randn(2, 2, device=device)
+ out = torch.empty(2, device=device)
+
+ torch.diagonal_copy(a, out=out)
+ expected = torch.diagonal_copy(a)
+
+ self.assertEqual(expected, out)
+
+ a = torch.randn(4, device=device)
+ out1 = torch.empty(2, device=device)
+ out2 = torch.empty(2, device=device)
+
+ torch.split_copy(a, 2, out=(out1, out2))
+ expected1, expected2 = torch.split_copy(a, 2)
+
+ self.assertEqual(expected1, out1)
+ self.assertEqual(expected2, out2)
+
+ def test_empty_reshape(self, device="mps"):
+ x = torch.randn(0, 6, device=device)
+ self.assertEqual((1, 0, 6, 1, 1), x.reshape(1, 0, 6, 1, 1).shape)
+ # should be viewable -- i.e. data_ptr is the same.
+ self.assertEqual(x.data_ptr(), x.reshape(1, 0, 6, 1, 1).data_ptr())
+
+ # match NumPy semantics -- don't infer the size of dimension with a degree of freedom
+ self.assertRaises(RuntimeError, lambda: x.reshape(0, -1))
+
+ def test_expand(self, device="mps"):
+ tensor = torch.rand(1, 8, 1, device=device)
+ tensor2 = torch.rand(5, device=device)
+ template = torch.rand(4, 8, 5, device=device)
+ target = template.size()
+ self.assertEqual(tensor.expand_as(template).size(), target)
+ self.assertEqual(tensor.expand(4, 8, 5).size(), target)
+ self.assertEqual(tensor.expand(target).size(), target)
+ self.assertEqual(tensor2.expand_as(template).size(), target)
+ self.assertEqual(tensor2.expand(4, 8, 5).size(), target)
+ self.assertEqual(tensor2.expand(target).size(), target)
+
+ # test double expand
+ self.assertEqual(tensor2.expand(1, 5).expand(2, 2, 5), tensor2.repeat(2, 2, 1))
+
+ # test non-contiguous
+ noncontig = torch.randn(5, 2, 1, 3, device=device)[:, 0]
+ self.assertFalse(noncontig.is_contiguous())
+ self.assertEqual(noncontig.expand(2, 5, 4, 3), noncontig.contiguous().repeat(2, 1, 4, 1))
+
+ # make sure it's compatible with unsqueeze
+ expanded = tensor2.expand(1, 1, 5)
+ unsqueezed = tensor2.unsqueeze(0).unsqueeze(1)
+ self.assertEqual(expanded, unsqueezed)
+ self.assertEqual(expanded.stride(), unsqueezed.stride())
+
+ # test -1 as target size
+ self.assertEqual(tensor.expand(4, -1, 5), tensor.expand(4, 8, 5))
+ self.assertRaises(RuntimeError, lambda: tensor2.expand(-1, -1))
+
+ # test expanding empty to empty
+ self.assertEqual(torch.zeros(0, device=device).expand((0,)), torch.zeros(0, device=device))
+
+ def test_view_empty(self, device="mps"):
+ x = torch.randn(0, 6, device=device)
+ self.assertEqual((1, 0, 6, 1, 1), x.view(1, 0, 6, 1, 1).shape)
+
+ def test_reshape(self, device="mps"):
+ x = torch.randn(3, 3, device=device)
+ self.assertEqual(x.data_ptr(), x.reshape(-1).data_ptr())
+ self.assertEqual(x.data_ptr(), x.reshape(1, 9, 1).data_ptr())
+ self.assertEqual(torch.reshape(x, (9,)), x.reshape(9))
+ self.assertRaises(RuntimeError, lambda: x.reshape(-1, -1))
+
+ y = torch.randn(4, 4, 4, device=device)[:, 0, :]
+ # .data_ptr() on meta tensors is always 0 so they are equal regardless of the reshape
+ if device != "meta":
+ self.assertNotEqual(y.data_ptr(), y.reshape(-1).data_ptr())
+ self.assertEqual(y.contiguous().view(-1), y.reshape(-1))
+ self.assertEqual(y.reshape(2, 2, 4).data_ptr(), y.data_ptr())
+
+ s = torch.randn((), device=device)
+ self.assertEqual(s.data_ptr(), s.reshape(()).data_ptr())
+ self.assertEqual(s.reshape(-1).shape, (1,))
+ self.assertRaises(RuntimeError, lambda: s.reshape(2))
+
+ empty = torch.tensor([], device=device)
+ self.assertEqual(empty, empty.reshape(-1))
+ self.assertEqual(empty, empty.reshape([0]))
+ # TODO: fix these once we have multi-dimensional empty tensors
+ self.assertEqual(empty.reshape([0, 1]).shape, (0, 1))
+ self.assertEqual(empty.reshape([1, -1]).shape, (1, 0))
+ self.assertRaises(RuntimeError, lambda: empty.reshape(1))
+
+ x = torch.randn(3, 3, device=device)
+ self.assertEqual(x.data_ptr(), x.reshape_as(torch.rand(9)).data_ptr())
+ self.assertEqual(x.data_ptr(), x.reshape_as(torch.rand(1, 9, 1)).data_ptr())
+ self.assertRaises(RuntimeError, lambda: x.reshape_as(torch.rand(10, device=device)))
+
+ def test_narrow(self, device="mps"):
+ x = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
+ self.assertEqual(x.narrow(0, 0, 1), torch.tensor([[0, 1, 2]]))
+ self.assertEqual(x.narrow(0, 0, 2), torch.tensor([[0, 1, 2], [3, 4, 5]]))
+ self.assertEqual(x.narrow(0, 1, 1), torch.tensor([[3, 4, 5]]))
+ self.assertEqual(x.narrow(0, -1, 1), torch.tensor([[6, 7, 8]]))
+ self.assertEqual(x.narrow(0, -2, 2), torch.tensor([[3, 4, 5], [6, 7, 8]]))
+ self.assertEqual(x.narrow(0, -3, 3), torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
+ self.assertEqual(x.narrow(-1, -1, 1), torch.tensor([[2], [5], [8]]))
+ self.assertEqual(x.narrow(-2, -1, 1), torch.tensor([[6, 7, 8]]))
+
+ def test_narrow_tensor(self, device="mps"):
+ x = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
+ self.assertEqual(x.narrow(0, torch.tensor(0), 1), torch.tensor([[0, 1, 2]]))
+ with self.assertRaises(Exception):
+ x.narrow(0, torch.tensor(0.), 1)
+ with self.assertRaises(Exception):
+ x.narrow(0, torch.tensor([0]), 1)
+ with self.assertRaises(Exception):
+ x.narrow(0, torch.tensor([0, 1]), 1)
+
+ def test_t(self, device="mps"):
+ # Test 0D tensors
+ x = torch.randn(())
+ self.assertEqual(x, x.t())
+ x = x.to_sparse()
+ self.assertEqual(x, x.t())
+
+ # Test 1D tensors
+ x = torch.arange(4)
+ self.assertEqual(x, x.t())
+ x = x.to_sparse()
+ self.assertEqual(x, x.t())
+
+ # Test 2D tensors
+ x = torch.rand((2, 2))
+ self.assertEqual(x.t(), x.transpose(0, 1))
+ x = x.to_sparse()
+ self.assertEqual(x.t(), x.transpose(0, 1))
+
+ # Test 3D tensor
+ x = torch.rand((2, 2, 2))
+ with self.assertRaisesRegex(RuntimeError, 'expects a tensor with <= 2 dimensions, but self is 3D'):
+ x.t()
+ x = x.to_sparse()
+ with self.assertRaisesRegex(RuntimeError, 'expects a tensor with <= 2 sparse and 0 dense dimensions'):
+ x.t()
+
+ def test_split(self, device="mps"):
+ tensor = torch.rand(7, 4)
+ split_size = 3
+ dim = 0
+ target_sizes = ([3, 4], [3, 4], [1, 4])
+ splits = tensor.split(split_size, dim)
+ start = 0
+ for target_size, split in zip(target_sizes, splits):
+ self.assertEqual(split.size(), target_size)
+ self.assertEqual(tensor.narrow(dim, start, target_size[dim]), split, atol=0, rtol=0)
+ start = start + target_size[dim]
+
+ # Variable sections split
+ tensor = torch.randn(20, 10)
+ dim = 0
+ split_sizes = [5, 5, 10]
+ target_sizes = ([[5, 10], [5, 10], [10, 10]])
+ splits = tensor.split(split_sizes, dim)
+ start = 0
+ for target_size, split in zip(target_sizes, splits):
+ self.assertEqual(split.size(), target_size)
+ self.assertEqual(tensor.narrow(dim, start, target_size[dim]), split, atol=0, rtol=0)
+ start = start + target_size[dim]
+
+ split_sizes = [2, 2, 6]
+ target_sizes = ([20, 2], [20, 2], [20, 6])
+ dim = 1
+ splits = tensor.split(split_sizes, dim)
+ start = 0
+ for target_size, split in zip(target_sizes, splits):
+ self.assertEqual(split.size(), target_size)
+ self.assertEqual(tensor.narrow(dim, start, target_size[dim]), split, atol=0, rtol=0)
+ start = start + target_size[dim]
+
+ def test_chunk(self, device="mps"):
+ tensor = torch.rand(4, 7)
+ num_chunks = 3
+ dim = 1
+ target_sizes = ([4, 3], [4, 3], [4, 1])
+ splits = tensor.chunk(num_chunks, dim)
+ start = 0
+ for target_size, split in zip(target_sizes, splits):
+ self.assertEqual(split.size(), target_size)
+ self.assertEqual(tensor.narrow(dim, start, target_size[dim]), split,
+ atol=0, rtol=0)
+ start = start + target_size[dim]
+
+ # Invalid chunk sizes
+ error_regex = 'chunk expects.*greater than 0'
+ with self.assertRaisesRegex(RuntimeError, error_regex):
+ tensor.chunk(0)
+ with self.assertRaisesRegex(RuntimeError, error_regex):
+ tensor.chunk(-2)
+
+ def test_unsqueeze(self, device="mps") -> None:
+ x = torch.randn(2, 3, 4)
+ y = x.unsqueeze(1)
+ self.assertEqual(y, x.view(2, 1, 3, 4))
+ y = x.clone().unsqueeze_(2)
+ self.assertEqual(y, x.view(2, 3, 1, 4))
+
+ x = x[:, 1]
+ self.assertFalse(x.is_contiguous())
+ y = x.unsqueeze(1)
+ self.assertEqual(y, x.contiguous().view(2, 1, 4))
+ y = x.clone().unsqueeze_(2)
+ self.assertEqual(y, x.contiguous().view(2, 4, 1))
+
+ # unit test for special case transposed copy (see ATen/native/Copy.cpp for details)
+ def test_big_transpose(self, device="mps"):
+ t = torch.rand(456, 789, device=device)
+ t1 = t.t().contiguous()
+ t2 = torch.from_numpy(t.cpu().numpy().transpose())
+ self.assertEqual(t1, t2)
+
+ def test_T(self, device="mps"):
+ a = torch.randn(2, 3, 4, device=device)
+ t1 = a.T
+ t2 = a.permute(2, 1, 0)
+ self.assertEqual(t2, t1)
+ b = torch.randn(10, device=device)
+ self.assertEqual(b, b.T)
+ scalar = torch.tensor(5, device=device)
+ self.assertEqual(scalar, scalar.T)
+
+ def test_transposes(self, device="mps", dtype=torch.float32):
+ for op in ("T", "H", "mT", "mH", "adjoint"):
+ shapes = ((), (2, 3), (2, 3, 4)) if op[0] == "m" or op == "adjoint" else ((), (2, 3),)
+ for shape in shapes:
+ a = make_tensor(shape, device=device, dtype=dtype)
+ t1 = getattr(a, op)
+ if op == "adjoint":
+ t1 = t1()
+ t2 = a
+ if a.ndim != 0:
+ t2 = t2.transpose(-2, -1)
+ if op[-1] == "H" or op == "adjoint":
+ t2 = t2.conj()
+ self.assertEqual(t2, t1)
+
+ def test_transposes_errors(self, device="mps", dtype=torch.float32):
+ for op in ("H", "mT", "mH", "adjoint"):
+ shapes = ((2,), (2, 3, 4)) if op == "H" else ((2,),)
+ for shape in shapes:
+ a = make_tensor(shape, device=device, dtype=dtype)
+ with self.assertRaisesRegex(RuntimeError, "only supported on matrices"):
+ t1 = getattr(a, op)
+ if op == "adjoint":
+ t1 = t1()
+
+ def test_python_types(self, device="mps"):
+ a1 = torch.randn((1, 2), device=device, dtype=torch.float32)
+ a2 = torch.randn((1, 2), device=device, dtype=torch.float32)
+ self.assertEqual(a1.dtype, a2.dtype)
+
+ b1 = torch.arange(10, 20, dtype=torch.int64, device=device)
+ b2 = torch.arange(10, 20, dtype=int, device=device)
+ self.assertEqual(b1.dtype, b2.dtype)
+
+ c1 = torch.tensor([True, False], dtype=torch.bool, device=device)
+ c2 = torch.tensor([True, False], dtype=bool, device=device)
+ self.assertEqual(c1.dtype, c2.dtype)
+
+ # TODO: is resize best put in test_view_ops?
+ def test_resize_as_preserves_strides(self, device="mps"):
+ x = torch.empty(2, 3).t()
+ old_strides = x.stride()
+ x.resize_as_(x)
+ self.assertEqual(x.stride(), old_strides)
+
+ def test_memory_format_resize_as(self, device="mps"):
+ def test_helper(shape, memory_format, device="mps"):
+ xc = torch.randn(shape, device=device).contiguous(memory_format=memory_format)
+ flat = torch.randn(xc.numel(), device=device)
+ flat.resize_as_(xc, memory_format=torch.preserve_format)
+ self.assertTrue(flat.is_contiguous(memory_format=memory_format))
+
+ test_helper((10, 3, 32, 32), torch.channels_last, device="mps")
+ test_helper((3, 10, 3, 32, 32), torch.channels_last_3d, device="mps")
+
+ def test_memory_format_resize_(self, device="mps"):
+ def test_helper(shape, numel, memory_format, device="mps"):
+ flat = torch.randn(numel, device=device)
+ flat.resize_(shape, memory_format=memory_format)
+ self.assertTrue(flat.is_contiguous(memory_format=memory_format))
+
+ test_helper((10, 3, 32, 32), 10 * 3 * 32 * 32, torch.channels_last, device="mps")
+ test_helper((3, 10, 3, 32, 32), 3 * 10 * 3 * 32 * 32, torch.channels_last_3d, device="mps")
+
+ # TODO: OpInfo this
+ def _test_atleast(self, device, torch_fn):
+ # 0-dim
+ s = torch.tensor(0.5, dtype=torch.double, requires_grad=True)
+
+ gradcheck(lambda x: torch_fn(x), s)
+ gradgradcheck(lambda x: torch_fn(x), s)
+
+ # 1-dim
+ a = torch.rand(4, dtype=torch.double, requires_grad=True)
+
+ gradcheck(lambda x: torch_fn(x), a)
+ gradgradcheck(lambda x: torch_fn(x), a)
+
+ # 2,3,4-dim
+ b = torch.rand(4, 3, dtype=torch.double, requires_grad=True)
+ c = torch.rand(4, 3, 2, dtype=torch.double, requires_grad=True)
+ d = torch.rand(4, 3, 2, 1, dtype=torch.double, requires_grad=True)
+
+ input_tuple = (s, a, b, c, d)
+ gradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
+ gradgradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
+
+ def test_atleast_gradient(self, device="mps"):
+ self._test_atleast(device, torch.atleast_1d)
+ self._test_atleast(device, torch.atleast_2d)
+ self._test_atleast(device, torch.atleast_3d)
+
+ def test_view(self, device="mps"):
+ tensor = torch.rand(15, device=device)
+ template = torch.rand(3, 5, device=device)
+ empty = torch.empty(0, device=device)
+ target = template.size()
+ self.assertEqual(tensor.view_as(template).size(), target)
+ self.assertEqual(tensor.view(3, 5).size(), target)
+ self.assertEqual(tensor.view(torch.Size([3, 5])).size(), target)
+ self.assertEqual(tensor.view(-1, 5).size(), target)
+ self.assertEqual(tensor.view(3, -1).size(), target)
+ tensor_view = tensor.view(5, 3)
+ tensor_view.fill_(random.uniform(0, 1))
+ self.assertEqual(empty.view_as(empty), empty)
+ self.assertEqual(empty.view(0), empty)
+ self.assertEqual(empty.view(0, 3, 0, 1).size(), torch.Size([0, 3, 0, 1]))
+ self.assertEqual(empty.view(0, 3, 0, 1).view(0), empty)
+
+ # test size inference with empty tensors
+ self.assertEqual(empty.view(-1).size(), torch.Size([0]))
+ self.assertEqual(empty.view(10, 3, -1).size(), torch.Size([10, 3, 0]))
+
+ with self.assertRaisesRegex(RuntimeError, r"because the unspecified dimension size -1 can be any value"):
+ empty.view(-1, 0)
+
+ with self.assertRaisesRegex(RuntimeError, r"because the unspecified dimension size -1 can be any value"):
+ empty.view(3, 0, -1, 0)
+
+ self.assertRaises(RuntimeError, lambda: tensor.view(15, 0))
+ self.assertRaises(RuntimeError, lambda: tensor.view(7, -1))
+ self.assertRaises(RuntimeError, lambda: tensor.view(15, -1, -1))
+
+ # RuntimeError: Invalid device for storage: mps
+ # def test_contiguous(self, device="mps"):
+ # x = torch.randn(1, 16, 5, 5, device=device)
+ # self.assertTrue(x.is_contiguous())
+ # stride = list(x.stride())
+ # stride[0] = 20
+ # # change the stride in dimension 0. the tensor is still contiguous because size[0] is 1
+ # x.set_(x.storage(), 0, x.size(), stride)
+ # self.assertTrue(x.is_contiguous())
+
+ def test_resize_all_dtypes_and_devices(self, device="mps"):
+ shape = (2, 2)
+ for dt in (torch.half, torch.bfloat16, torch.bool):
+ x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device)
+ x.resize_(shape)
+ self.assertEqual(shape, x.shape)
+
+ def test_resize_as_all_dtypes_and_devices(self, device="mps"):
+ for dt in (torch.half, torch.bfloat16, torch.bool):
+ x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device)
+ y = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=dt, device=device)
+ x.resize_as_(y)
+ self.assertEqual(y.shape, x.shape)
+
+ def test_resize_overflow(self, device="mps"):
+ x = torch.empty((), dtype=torch.float64)
+ with self.assertRaisesRegex(RuntimeError, 'Storage size calculation overflowed'):
+ x.resize_([2, 4, 2**29, 2**29])
+ with self.assertRaisesRegex(RuntimeError, 'overflow'):
+ x.resize_([8, 8, 2**29, 2**29])
+
+ def test_view_all_dtypes_and_devices(self, device="mps"):
+ for dt in (torch.float, torch.bool):
+ x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device)
+ self.assertEqual(x.view(6).shape, [6])
class TestRNNMPS(TestCase):
def test_lstm_1(self, device="mps", dtype=torch.float32):
@@ -4310,15 +5649,34 @@ def test_lstm_1(self, device="mps", dtype=torch.float32):
hx = torch.zeros(2, 3, 4, device="cpu")
cx = torch.zeros(2, 3, 4, device="cpu")
- cpu_output, _ = rnn(input, (hx, cx))
+ cpu_output, (cpu_hn, cpu_cn) = rnn(input, (hx, cx))
+
+ rnn = rnn.to(device)
+ input = input.to(device)
+ hx = hx.to(device)
+ cx = cx.to(device)
+ output, (hn, cn) = rnn(input, (hx, cx))
+
+ self.assertEqual(cpu_output, output)
+ self.assertEqual(cpu_hn, hn)
+ self.assertEqual(cpu_cn, cn)
+
+ # test batch_first
+ rnn = nn.LSTM(1, 4, 2, device="cpu", batch_first=True)
+ input = torch.randn(3, 2, 1, device="cpu")
+ hx = torch.zeros(2, 3, 4, device="cpu")
+ cx = torch.zeros(2, 3, 4, device="cpu")
+ cpu_output, (cpu_hn, cpu_cn) = rnn(input, (hx, cx))
- device = torch.device("mps")
rnn = rnn.to(device)
input = input.to(device)
hx = hx.to(device)
cx = cx.to(device)
- output, _ = rnn(input, (hx, cx))
+ output, (hn, cn) = rnn(input, (hx, cx))
+
self.assertEqual(cpu_output, output)
+ self.assertEqual(cpu_hn, hn)
+ self.assertEqual(cpu_cn, cn)
@unittest.skipIf(True, "Backward of lstm returns wrong result")
def test_lstm_2(self, device="mps", dtype=torch.float32):
@@ -4425,8 +5783,11 @@ def test_assert_close(self):
with self.assertRaisesRegex(AssertionError, "Tensor-likes are not close!"):
torch.testing.assert_close(a, inf)
- with self.assertRaisesRegex(AssertionError, "Tensor-likes are not close!"):
- torch.testing.assert_close(a, nan)
+ # TODO: The NaN test is failing when all the tests in test_mps are run
+ # together but passes when run separately. There seems to be memory
+ # corruption which needs to be fixed for this test to be enabled.
+ # with self.assertRaisesRegex(AssertionError, "Tensor-likes are not close!"):
+ # torch.testing.assert_close(a, nan)
def test_double_error(self):
with self.assertRaisesRegex(TypeError, "the MPS framework doesn't support float64"):
@@ -4442,6 +5803,605 @@ def test_legacy_constructor(self):
b = a.new(1)
+MPS_DTYPES = get_all_dtypes()
+for t in [torch.double, torch.cdouble, torch.cfloat, torch.int8, torch.bfloat16]:
+    MPS_DTYPES.remove(t)
+
+class TestConsistency(TestCase):
+ # TODO: This is only used while some ops are being added.
+    # This list should eventually contain all ops and dtypes.
+    # It can be regenerated automatically into the `new_mps_allowlist.txt` file
+    # by running `EXPECTTEST_ACCEPT=1 python test_mps.py TestConsistencyCPU`.
+    # You most likely do NOT want to modify this manually.
+ ALLOWLIST_OP = {
+ '__radd__': ['torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ '__rand__': ['torch.bool',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ '__rmul__': ['torch.bool',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ '__ror__': ['torch.bool',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ '__rxor__': ['torch.bool',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ '_masked.normalize': ['torch.float32'],
+ 'abs': ['torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.uint8'],
+ 'add': ['torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ 'addcdiv': ['torch.float32'],
+ 'addcmul': ['torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ 'addmv': ['torch.float32'],
+ 'addr': ['torch.float32'],
+ 'all': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ 'any': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ 'argmax': ['torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ 'asin': ['torch.float32'],
+ 'asinh': ['torch.float32'],
+ 'atan': ['torch.float32'],
+ 'atan2': ['torch.float32'],
+ 'atanh': ['torch.float32'],
+ 'atleast_1d': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'atleast_2d': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'atleast_3d': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'baddbmm': ['torch.float32'],
+ 'bitwise_and': ['torch.bool',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'bitwise_left_shift': ['torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'bitwise_not': ['torch.bool',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'bitwise_or': ['torch.bool',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'bitwise_right_shift': ['torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'bitwise_xor': ['torch.bool',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'bmm': ['torch.float32'],
+ 'ceil': ['torch.float32'],
+ 'chunk': ['torch.float16', 'torch.float32', 'torch.int64'],
+ 'clone': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'column_stack': ['torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'conj': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'conj_physical': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'contiguous': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'corrcoef': ['torch.float32'],
+ 'deg2rad': ['torch.float32'],
+ 'diag': ['torch.float32', 'torch.int32'],
+ 'diagflat': ['torch.int32'],
+ 'diff': ['torch.float32'],
+ 'dist': ['torch.float32'],
+ 'dot': ['torch.float32', 'torch.int32'],
+ 'einsum': ['torch.float32'],
+ 'erf': ['torch.float32'],
+ 'fill': ['torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ 'flatten': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ 'floor': ['torch.float32'],
+ 'hstack': ['torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ 'index_select': ['torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ 'isinf': ['torch.float16', 'torch.float32'],
+ 'isnan': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'kron': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'linalg.norm': ['torch.float16',
+ 'torch.float32',
+ 'torch.float16',
+ 'torch.float32'],
+ 'linalg.svd': ['torch.float32'],
+ 'linalg.vector_norm': ['torch.float16'],
+ 'log1p': ['torch.float32'],
+ 'log_softmax': ['torch.float32'],
+ 'logaddexp': ['torch.float32'],
+ 'logaddexp2': ['torch.float32'],
+ 'masked_select': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'mm': ['torch.float32'],
+ 'mv': ['torch.float32'],
+ 'neg': ['torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32'],
+ 'nn.functional.adaptive_max_pool1d': ['torch.float32'],
+ 'nn.functional.adaptive_max_pool2d': ['torch.float32'],
+ 'nn.functional.binary_cross_entropy': ['torch.float32'],
+ 'nn.functional.celu': ['torch.float32'],
+ 'nn.functional.elu': ['torch.float32'],
+ 'nn.functional.embedding': ['torch.float16', 'torch.float32'],
+ 'nn.functional.feature_alpha_dropout': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'nn.functional.hardtanh': ['torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ 'nn.functional.hinge_embedding_loss': ['torch.float32'],
+ 'nn.functional.kl_div': ['torch.float32'],
+ 'nn.functional.l1_loss': ['torch.float32'],
+ 'nn.functional.huber_loss': ['torch.float32'],
+ 'nn.functional.leaky_relu': ['torch.float32'],
+ 'nn.functional.mse_loss': ['torch.float16', 'torch.float32'],
+ 'nn.functional.relu': ['torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'nn.functional.relu6': ['torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'nn.functional.selu': ['torch.float32'],
+ 'nn.functional.silu': ['torch.float32'],
+ 'nn.functional.smooth_l1_loss': ['torch.float32'],
+ 'nn.functional.softmin': ['torch.float32'],
+ 'nn.functional.threshold': ['torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'nn.functional.upsample_bilinear': ['torch.float32'],
+ 'norm': ['torch.float32', 'torch.float16', 'torch.float32'],
+ 'positive': ['torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'rad2deg': ['torch.float32'],
+ 'ravel': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'real': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'repeat_interleave': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'resize_': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'resize_as_': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'resolve_conj': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'resolve_neg': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'round': ['torch.float32'],
+ 'sgn': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'sign': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.uint8'],
+ 'sin': ['torch.float32'],
+ 'sinh': ['torch.float32'],
+ 'softmax': ['torch.float32'],
+ 'split': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'sqrt': ['torch.float32'],
+ 'square': ['torch.float32'],
+ 'squeeze': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'stack': ['torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'sub': ['torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ 'sum_to_size': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'svd': ['torch.float32'],
+ 't': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'tanh': ['torch.float32'],
+ 'tensordot': ['torch.float32'],
+ 'topk': ['torch.float32'],
+ 'tril': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'triu': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'true_divide': ['torch.float32'],
+ 'trunc': ['torch.float32'],
+ 'unsqueeze': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'view': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'view_as': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'vsplit': ['torch.bool',
+ 'torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8'],
+ 'vstack': ['torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64'],
+ 'zero_': ['torch.float16',
+ 'torch.float32',
+ 'torch.int16',
+ 'torch.int32',
+ 'torch.int64',
+ 'torch.uint8']}
+
+ # These ops that are problematic. So never run them even when
+ # generating the new allowlist.
+ # If the dtype list is None, all dtypes are excluded.
+ # All the entries in this list should be removed
+ BLOCKLIST = {
+ # Functions that hang
+ 'masked_fill': [torch.bool, torch.uint8, torch.float32], 'where': [torch.bool],
+ # Functions that hard crash
+ 'nn.functional.kl_div': [torch.int16, torch.int32, torch.int64],
+ 'nn.functional.nll_loss': [torch.float32],
+ 'nn.functional.padreflect': [torch.float32], 'nn.functional.padreplicate': [torch.float32],
+ 'nn.functional.smooth_l1_loss': [torch.float16], 'std': [torch.float16],
+ 'stft': [torch.float32], 'var': [torch.float16],
+
+ # These were moved from ALLOWLIST to BLOCK as they are not working
+ # locally
+ 'tile': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
+ 'repeat': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
+ '__radd__': ['torch.bool', 'torch.uint8'],
+ '__rmul__': ['torch.uint8'],
+ 'add': ['torch.bool', 'torch.uint8'],
+ 'square': ['torch.int32', 'torch.int64', 'torch.uint8'],
+ 'addr': ['torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
+ 'diag': ['torch.int64'],
+ 'diagflat': ['torch.int64'],
+
+ # Functions that are flaky
+ # These are detected as "ok" by the expect case but actually fail to run sometimes
+ 'H': None,
+ 'T': None,
+ 'as_strided': None,
+ 'broadcast_tensors': None,
+ 'broadcast': None,
+ 'broadcast_to': None,
+ 'diagonal': None,
+ 'divfloor_rounding': None,
+ 'divno_rounding_mode': None,
+ 'divtrunc_rounding': None,
+ 'dsplit': None,
+ 'hsplit': None,
+ 'empty': None,
+ 'expand_as': None,
+ 'expand': None,
+ 'ge': None,
+ 'ne': None,
+ 'le': None,
+ 'lt': None,
+ 'gt': None,
+ 'transpose': None,
+ 'splitlist_args': None,
+ 'select': None,
+ 'reshape': None,
+ 'reshape_as': None,
+ 'permute': None,
+ 'norm': None,
+ 'nn.functional.pixel_unshuffle': None,
+ 'nn.functional.pixel_shuffle': None,
+ 'nn.functional.cross_entropy': None,
+ 'nn.functional.one_hot': None,
+ 'narrow': None,
+ 'movedim': None,
+ 'minreduction_with_dim': None,
+ 'minreduction_no_dim': None,
+ 'minbinary': None,
+ 'meshgridvariadic_tensors': None,
+ 'meshgridlist_of_tensors': None,
+ 'maxreduction_with_dim': None,
+ 'maxreduction_no_dim': None,
+ 'maxbinary': None,
+ 'maximum': None,
+ 'minimum': None,
+ 'mT': None,
+ 'mH': None,
+ 'outer': None,
+ 'softmaxwith_dtype': None,
+ 'rounddecimals_neg_3': None,
+ 'rounddecimals_3': None,
+ 'rounddecimals_0': None,
+ 'normnuc': None,
+ 'nn.functional.softminwith_dtype': None,
+ 'nn.functional.feature_alpha_dropoutwith_train': None,
+ 'log_softmaxdtype': None,
+ 'split_with_sizes': None,
+ 'trapezoid': None,
+ 'eq': None,
+ 'mul': None,
+ 'cartesian_prod': None,
+ 'nonzero': None,
+ 'bool': None,
+ 'inner': None,
+ 'dstack': None,
+ 'take_along_dim': None,
+ }
+
+ # Used for accept mode only
+ NEW_ALLOW_LIST = defaultdict(list)
+
+ @ops(op_db, allowed_dtypes=MPS_DTYPES)
+ def test_output_match(self, device, dtype, op):
+ self.assertEqual(device, "cpu")
+ if not torch.backends.mps.is_available():
+ self.skipTest("MPS is not available")
+
+ key = op.name + op.variant_test_name
+ if key in self.BLOCKLIST:
+ if self.BLOCKLIST[key] is None or dtype in self.BLOCKLIST[key]:
+ self.skipTest(f"Running test with {op.name} hangs so skipping")
+
+ # Make this an expecttest manually
+ # When this env variable is set, generate a new ALLOWLIST_OP
+ # that reflects the current state of what passes or not
+ if os.environ.get("EXPECTTEST_ACCEPT", None) == "1":
+ generate_new_truth = True
+ else:
+ generate_new_truth = False
+
+ if not generate_new_truth:
+ if op.name not in self.ALLOWLIST_OP:
+ self.skipTest(f"{op.name} is not in the allow list for test on MPS")
+ else:
+ if str(dtype) not in self.ALLOWLIST_OP[op.name]:
+ self.skipTest(f"{op.name} is in the allow list for MPS but {dtype} is excluded")
+
+ try:
+ cpu_samples = op.sample_inputs(device, dtype)
+
+ for cpu_sample in cpu_samples:
+ mps_sample = cpu_sample.transform(lambda x: x.to("mps") if isinstance(x, torch.Tensor) else x)
+
+ # TODO: This checks only the function variant. We should also check the method and inplace version
+ # when they exist
+ cpu_args = [cpu_sample.input] + list(cpu_sample.args)
+ cpu_kwargs = cpu_sample.kwargs
+ mps_args = [mps_sample.input] + list(mps_sample.args)
+ mps_kwargs = mps_sample.kwargs
+
+ cpu_out = op(*cpu_args, **cpu_kwargs)
+ mps_out = op(*mps_args, **mps_kwargs)
+ self.assertEqual(cpu_out, mps_out)
+ except Exception as e:
+ if not generate_new_truth:
+ raise e
+ else:
+ if generate_new_truth:
+ self.NEW_ALLOW_LIST[op.name].append(str(dtype))
+
+ # We could write it only once. But I don't know how to detect that the current test is the last one
+ # So each test append to the dict and write it.
+ with open("new_mps_allowlist.txt", "w") as f:
+ pprint.pprint(self.NEW_ALLOW_LIST, stream=f)
+
+# TODO: Actually instantiate that test for the "mps" device to better reflect what it is doing.
+# This requires mps to be properly registered in the device generic test framework which is not the
+# case right now.
+instantiate_device_type_tests(TestConsistency, globals(), only_for="cpu")
if __name__ == "__main__":
run_tests()
| LSTM Output Transposed w/MPS on 1.13 nightly build
### 🐛 Describe the bug
With the 1.13 nightly build, sending an LSTM model to `device="mps"` reverses the expected order of the batch and seq dimensions in the output.
Please see this discussion for code examples and further details:
https://discuss.pytorch.org/t/lstm-output-transposed/154820/2
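Since the actual code examples live behind the forum link above, here is a minimal, hypothetical sketch (not the reporter's code) of the kind of check that exposes the reported behaviour; it assumes `batch_first=True`, in which case the output should be `(batch, seq, hidden)` on every device:

```python
import torch

lstm = torch.nn.LSTM(input_size=8, hidden_size=16, batch_first=True)
x = torch.randn(4, 10, 8)  # (batch=4, seq=10, features=8)

out_cpu, _ = lstm(x)
print(out_cpu.shape)  # torch.Size([4, 10, 16]), as documented

lstm_mps = lstm.to("mps")
out_mps, _ = lstm_mps(x.to("mps"))
print(out_mps.shape)  # reported to come back transposed, e.g. torch.Size([10, 4, 16])
```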
### Versions
Collecting environment information...
PyTorch version: 1.13.0.dev20220620
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 12.4 (arm64)
GCC version: Could not collect
Clang version: 13.1.6 (clang-1316.0.21.2.5)
CMake version: Could not collect
Libc version: N/A
Python version: 3.9.13 | packaged by conda-forge | (main, May 27 2022, 17:01:00) [Clang 13.0.1 ] (64-bit runtime)
Python platform: macOS-12.4-arm64-arm-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] numpy==1.22.4
[pip3] torch==1.13.0.dev20220620
[conda] numpy 1.22.4 pypi_0 pypi
[conda] pytorch 1.13.0.dev20220620 py3.9_0 pytorch-nightly
cc @kulinseth @albanD
| 2022-07-22T08:34:59 |
|
pytorch/pytorch | 86,851 | pytorch__pytorch-86851 | [
"44964"
] | f37023b03f3e92dadb247e89fd4e024eb4a0eb8a | diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -396,8 +396,34 @@
# Disable docstring inheritance
autodoc_inherit_docstrings = False
-# Disable displaying type annotations, these can be very verbose
-autodoc_typehints = 'none'
+# Show type hints in the description
+autodoc_typehints = 'description'
+
+# Add parameter types if the parameter is documented in the docstring
+autodoc_typehints_description_target = 'documented_params'
+
+# Type aliases for common types
+# Sphinx type aliases only works with Postponed Evaluation of Annotations
+# (PEP 563) enabled (via `from __future__ import annotations`), which keeps the
+# type annotations in string form instead of resolving them to actual types.
+# However, PEP 563 does not work well with JIT, which uses the type information
+# to generate the code. Therefore, the following dict does not have any effect
+# until PEP 563 is supported by JIT and enabled in files.
+autodoc_type_aliases = {
+ "_size_1_t": "int or tuple[int]",
+ "_size_2_t": "int or tuple[int, int]",
+ "_size_3_t": "int or tuple[int, int, int]",
+ "_size_4_t": "int or tuple[int, int, int, int]",
+ "_size_5_t": "int or tuple[int, int, int, int, int]",
+ "_size_6_t": "int or tuple[int, int, int, int, int, int]",
+ "_size_any_opt_t": "int or None or tuple",
+ "_size_2_opt_t": "int or None or 2-tuple",
+ "_size_3_opt_t": "int or None or 3-tuple",
+ "_ratio_2_t": "float or tuple[float, float]",
+ "_ratio_3_t": "float or tuple[float, float, float]",
+ "_ratio_any_t": "float or tuple",
+ "_tensor_list_t": "Tensor or tuple[Tensor]",
+}
# Enable overriding of function signatures in the first line of the docstring.
autodoc_docstring_signature = True
| Improve how type annotations show up in html docs
Maybe some simpler type annotations can be introduced.
Currently it's `kernel_size: Union[T, Tuple[T, ...]], stride: Optional[Union[T, Tuple[T, ...]]]`, and it's hard to parse, especially since it's so common and clutters the signatures. Can we introduce some type alias `int_or_inttuple` or something in this spirit?

cc @jlin27 @ezyang @malfet @rgommers @xuzhao9 @gramster
| Such aliases are already used in the module definitions, but apparently are unwrapped when documentation is generated:
https://github.com/pytorch/pytorch/blob/acc2a1e5fabe3c322927be72e0aa5d2e02bae46c/torch/nn/modules/pooling.py#L17-L19
maybe it's also worth promoting them to "first class" by removing the leading underscore from "_size_any_t" and giving it a more comprehensible name such as "int_or_inttuple" (or something else; the semantics of the word "any" here are not very clear)
The right fix here is to remove the type annotations from all signatures in the generated html docs I believe. The parameter descriptions already have type information, and there will be many cases where the type annotations will make little sense to the user (e.g., returning typevar `T`, long unions).
That of course does mean there can be mismatches between docs and annotations, but that may be on purpose sometimes. For example, for backwards compat often multiple types of inputs are supported, but there's one preferred one which is the only one documented. For example, I just saw that `ModuleDict.update` accepts not only `Mapping[str, Module]`, but also a length-2 list of str-module tuples. In such cases, the type annotations should be made complete if that's not too cumbersome (otherwise ignores can be added), but the docs should recommend just the preferred form.
Well, the type annotations could go into a hover-only message with the `title` HTML attribute. Types provide a fast parse over function arguments, so I think they are still valuable (at least to me).
And a first-class, public alias of `Union[int, Tuple[int]]` may be useful anyway, as many functions accept such types, and users could even use these type definitions in their code
+ sometimes types are not explicitly mentioned in the parameter description (e.g. whether a torch scalar or a Python scalar is supported, and if a Python scalar, whether only int or float), or that description is slightly outdated. So having it generated somewhere in the docs is useful; maybe the HTML `title` attribute could be a compromise.
> Well, the type annotations could go to hover-only message with `title` html attribute.
That would be nice, unfortunately there's no `autodoc` setting for that. So I'd defer that as a separate enhancement idea; would need a Sphinx or https://github.com/agronholm/sphinx-autodoc-typehints PR. Right now it's choosing between:
<img width="568" alt="image" src="https://user-images.githubusercontent.com/98330/101998635-54013d00-3cd5-11eb-8474-b9ccdca4989e.png">
and
<img width="568" alt="image" src="https://user-images.githubusercontent.com/98330/101998639-5f546880-3cd5-11eb-87f8-ab075c1d2bbe.png">
In recent versions of Sphinx (>= 3.0) there's a third option, `autodoc_typehints = 'description'`, which moves the annotations into the parameter description. I will have a look if that works without having to change each individual docstring with a marker for where to put that type info.
Oh wait, one more thing to try: `autodoc_type_aliases` to leave aliases unevaluated, that would be useful perhaps.
Also, if `stride=None` is supported, the type should be `Optional[something]`...
Still, we can use aliases for common things like `_TensorOrTensors` and feed those into `autodoc_type_aliases` as a dictionary, but for one-off unions that'd not be practical.
> Also, if `stride=None` is supported, the type should be `Optional[something]`...
Yep, lots of bugs left in the `nn.modules` annotations - first PRs to enable mypy on files in `nn/modules/` are for review now, that should catch a lot of these issues.
Unfortunately Sphinx 3.x doesn't seem to work yet; 2.4.4 does. So we can't use any of the newer features for autodoc type annotation handling, just the on/off toggle. @mattip I couldn't easily find where Sphinx versions are stored. I assume you tried with Sphinx 3.x at some point, do you know the status of that?
I don't remember trying with sphinx3. Based on NumPy's experience, it would be a non-trivial migration. The version used is in [docs/requirements.txt](https://github.com/pytorch/pytorch/blob/master/docs/requirements.txt) and [docs/cpp/requirements.txt](https://github.com/pytorch/pytorch/blob/master/docs/cpp/requirements.txt). The `cpp` one uses `sphinx=3.1.2`
Type annotations no longer show up in the html docs after gh-49294. I'll leave this issue open because it contains some good ideas to make use of type annotations in the docs without making a mess; those can be picked up after migrating the doc build to Sphinx 3.x
Just checking if there is an update? Closed #78311 for this.
Sphinx has made a little progress here I believe. The `autodoc_typehints_description_target` and `autodoc_type_aliases` options are relevant, and available in Sphinx >= 3.3 (so [docs/cpp/requirements.txt](https://github.com/pytorch/pytorch/blob/master/docs/cpp/requirements.txt) would need a version bump).
See https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_typehints_description_target
It'd still be a large job to make this usable in the PyTorch HTML docs though. What would need doing is to use [Type aliases](https://mypy.readthedocs.io/en/latest/kinds_of_types.html#type-aliases) consistently, to get readable types rather than things like `Union[T, [Tuple[T, ...]], <etc>`.
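For illustration only (this sketch is not from the thread, and the alias definition is simplified relative to `torch.nn.common_types`), pairing an alias with `autodoc_type_aliases` looks roughly like this:

```python
# Library code; note that autodoc_type_aliases only takes effect with PEP 563
# (from __future__ import annotations), which currently does not play well with JIT.
from __future__ import annotations
from typing import Tuple, Union

_size_2_t = Union[int, Tuple[int, int]]

def pool2d(kernel_size: _size_2_t, stride: _size_2_t = 1) -> None:
    ...

# Matching Sphinx setting in docs/source/conf.py:
# autodoc_type_aliases = {"_size_2_t": "int or tuple[int, int]"}
```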
For the documentation, between having no type annotation at all and having sometimes-complicated type annotation, I prefer the latter.
The human-entered documentation does not always contain the necessary type information, so under the current design the reader is sometimes forced to read the source code, which contains the same complicated type information anyway. If the documentation contains the original type hints, regardless of whether a reader thinks the type is too complicated, he has an option to read it or ignore it; he still has a choice. | 2022-10-12T23:43:34 |
|
pytorch/pytorch | 86,921 | pytorch__pytorch-86921 | [
"86168"
] | 311b47b72c73509a82ed0b613a7c563e6628332b | diff --git a/torch/onnx/_internal/jit_utils.py b/torch/onnx/_internal/jit_utils.py
--- a/torch/onnx/_internal/jit_utils.py
+++ b/torch/onnx/_internal/jit_utils.py
@@ -5,7 +5,8 @@
import dataclasses
import re
-from typing import Any, Dict, Iterable, Sequence, Tuple, Union
+import typing
+from typing import Any, Dict, Iterable, Optional, Sequence, Tuple, Union
import torch
from torch import _C
@@ -306,3 +307,17 @@ def _add_attribute(node: _C.Node, key: str, value: Any, aten: bool):
else:
kind = "i"
return getattr(node, f"{kind}_")(name, value)
+
+
+# TODO: Expose this to user when migrating symbolic helper functions to here.
+@_beartype.beartype
+def _is_tensor(x: _C.Value) -> bool:
+ return x.type().isSubtypeOf(_C.TensorType.get())
+
+
+@_beartype.beartype
+def get_device_from_value(value: _C.Value) -> Optional[torch.device]:
+ if not _is_tensor(value):
+ return None
+ tensor_type = typing.cast(_C.TensorType, value.type())
+ return tensor_type.device()
diff --git a/torch/onnx/symbolic_opset9.py b/torch/onnx/symbolic_opset9.py
--- a/torch/onnx/symbolic_opset9.py
+++ b/torch/onnx/symbolic_opset9.py
@@ -205,6 +205,7 @@
"prim_shape",
"prim_tolist",
"prim_tuple_construct",
+ "prim_type",
"prim_unchecked_cast",
"prim_uninitialized",
"rand_like",
@@ -2067,6 +2068,21 @@ def eq(g: jit_utils.GraphContext, self, other):
# ONNX doesn't have devices, so consider them all to be equal.
# The no-op check for equality will get constant-folded.
return g.op("Constant", value_t=torch.tensor(True, dtype=torch.bool))
+ self_node = self.node()
+ other_node = other.node()
+ if self_node.kind() == other_node.kind() == "onnx::Constant":
+ if self_node.kindOf("value") == other_node.kindOf("value") == "s":
+ # Exporting strings to ONNX is not supported.
+ # If both strings are constant, we can compare them directly.
+ # The no-op check for equality will get constant-folded.
+ return g.op(
+ "Constant",
+ value_t=torch.tensor(
+ self_node.s("value") == other_node.s("value"),
+ dtype=torch.bool,
+ ),
+ )
+
return g.op("Equal", self, other)
@@ -6621,6 +6637,21 @@ def prim_constant(g: jit_utils.GraphContext, *inputs, **attrs):
)
+@_onnx_symbolic("prim::type")
+@_beartype.beartype
+def prim_type(g: jit_utils.GraphContext, device_value: _C.Value, *args, **kwargs):
+ if device_value.node().kind() == "prim::device":
+ device = jit_utils.get_device_from_value(device_value.node().input())
+ if device is not None:
+ return g.op("Constant", value_s=str(device))
+
+ return symbolic_helper._unimplemented(
+ "prim::type",
+ "Device type cannot be statically determined.",
+ device_value,
+ )
+
+
@_onnx_symbolic("onnx::Placeholder")
@_beartype.beartype
def onnx_placeholder(g: jit_utils.GraphContext, *inputs, **attrs):
| diff --git a/test/onnx/test_pytorch_onnx_onnxruntime.py b/test/onnx/test_pytorch_onnx_onnxruntime.py
--- a/test/onnx/test_pytorch_onnx_onnxruntime.py
+++ b/test/onnx/test_pytorch_onnx_onnxruntime.py
@@ -9974,11 +9974,7 @@ def forward(self, boxes, scores):
self.run_test(Module(), (boxes, scores))
- @unittest.skip(
- "Broken in recent TorchVision, see https://github.com/pytorch/pytorch/issues/81121"
- )
@skipIfUnsupportedMinOpsetVersion(11)
- # TODO: Fails with vision 0.13. See #77671
def test_batched_nms(self):
num_boxes = 100
boxes = torch.rand(num_boxes, 4)
| [ONNX] Support device().type() string comparison with constant
Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom):
* #86216
* #86169
* __->__ #86168
Fixes `batched_nms` export for torch vision.
| 2022-10-13T17:54:11 |
|
pytorch/pytorch | 87,122 | pytorch__pytorch-87122 | [
"73619"
] | c40a04415c59edc45b2ceba682377b255f2091c7 | diff --git a/torch/onnx/symbolic_helper.py b/torch/onnx/symbolic_helper.py
--- a/torch/onnx/symbolic_helper.py
+++ b/torch/onnx/symbolic_helper.py
@@ -31,6 +31,7 @@
"check_training_mode",
"dequantize_helper",
"is_caffe2_aten_fallback",
+ "is_complex_value",
"parse_args",
"pytorch_name_to_type",
"quantize_helper",
@@ -503,6 +504,16 @@ def _is_tuple_construct(x: _C.Value) -> bool:
return x.node().kind() == "prim::TupleConstruct"
+@_beartype.beartype
+def is_complex_value(x: _C.Value) -> bool:
+ assert _is_value(x)
+ return _type_utils.JitScalarType.from_name(x.type().scalarType()) in {
+ _type_utils.JitScalarType.COMPLEX32,
+ _type_utils.JitScalarType.COMPLEX64,
+ _type_utils.JitScalarType.COMPLEX128,
+ }
+
+
@_beartype.beartype
def is_caffe2_aten_fallback() -> bool:
return (
diff --git a/torch/onnx/symbolic_opset9.py b/torch/onnx/symbolic_opset9.py
--- a/torch/onnx/symbolic_opset9.py
+++ b/torch/onnx/symbolic_opset9.py
@@ -268,6 +268,8 @@
"unsafe_split_with_sizes",
"unsafe_split",
"unsqueeze",
+ "unsupported_complex_operators",
+ "noop_complex_operators",
"unused",
"var_mean",
"var",
@@ -6660,3 +6662,35 @@ def onnx_placeholder(g: jit_utils.GraphContext, *inputs, **attrs):
env = g.env
return torch._C._jit_onnx_convert_pattern_from_subblock(block, node, env)
+
+
+@_onnx_symbolic("aten::resolve_conj")
+@_onnx_symbolic("aten::resolve_neg")
+@_beartype.beartype
+def noop_complex_operators(g: jit_utils.GraphContext, input: _C.Value):
+ # ONNX does not have operators to *directly* manipulate real/imaginary components
+ # However, a few torch APIs (e.g. .tolist()) use complex operations when input is real,
+ # which results in failures due to missing operators for complex numbers
+
+ # `aten::resolve_conj` and `aten::resolve_neg` can safely be implemented as no-op
+ return input
+
+
+@_onnx_symbolic("aten::_conj")
+@_onnx_symbolic("aten::conj_physical")
+@_beartype.beartype
+def unsupported_complex_operators(g: jit_utils.GraphContext, input: _C.Value):
+ # ONNX does not have operators to *directly* manipulate real/imaginary components
+ # However, a few torch APIs (e.g. .tolist()) use complex operations when input is real,
+ # which results in failures due to missing operators for complex numbers
+
+ # While `aten::_conj` and `aten::conj_phisical` raise exception when input is complex
+ if symbolic_helper.is_complex_value(input):
+ # FIXME(justinchuby): report correct name for symbolic being executed
+ return symbolic_helper._onnx_unsupported(
+ "aten::_conj, aten::conj_physical",
+ input,
+ )
+
+ # they can safely be implemented as no-op for real numbers only
+ return noop_complex_operators(g, input)
| diff --git a/test/onnx/test_pytorch_onnx_onnxruntime.py b/test/onnx/test_pytorch_onnx_onnxruntime.py
--- a/test/onnx/test_pytorch_onnx_onnxruntime.py
+++ b/test/onnx/test_pytorch_onnx_onnxruntime.py
@@ -12476,6 +12476,33 @@ def forward(self, x):
self.run_test(LerpModel(), torch.rand(5, 4, 3))
+ @common_utils.parametrize("input_dtype", [torch.cfloat, torch.float])
+ @skipIfUnsupportedMinOpsetVersion(9)
+ def test_print_tensor_within_torch_nn_module(self, input_dtype: torch.dtype):
+ class PrintTensorOnMyModel(torch.nn.Module):
+ def forward(self, x):
+ # 'print' has side effect calling 'resolve_conj' and 'resolve_neg'.
+ x_firsts = x[:, 0]
+ print(f"x_firsts: {x_firsts}")
+ # 'tolist' has side effect calling 'resolve_conj' and 'resolve_neg'.
+ # Annotation added to pass torch script.
+ _: List[float] = x.tolist()
+ return x_firsts
+
+ m = PrintTensorOnMyModel()
+ x = torch.randn(10, 5, dtype=input_dtype)
+ if input_dtype == torch.cfloat:
+ with self.assertRaises(RuntimeError):
+ self.run_test(
+ m,
+ x,
+ )
+ else:
+ self.run_test(
+ m,
+ x,
+ )
+
# Cannot export with older opsets because of "ConstantFill" op
# ConstantFill was a temp op removed at opset 8. This is no longer supported by onnxruntime
# There are still some issues prevent us from enabling script test for these scenarios:
| ONNX export fails with a `resolve_conj` op when a tensor slice is printed.
### 🐛 Describe the bug
Exporting a module to ONNX fails when the module passes a tensor slice (and possibly any other view) to `print()`. The export fails with a message stating that the `resolve_conj` op is not available.
It appears that printing a view invokes `resolve_conj`, which causes ONNX exports to fail even in cases where the branch is extraneous. As far as I understand, the ONNX optimiser can remove these kinds of extraneous branches, but in this case it fails before pruning.
Example:
```python
import torch
from torch import nn
class BadFirst(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
x_firsts = x[:, 0]
print(f"x_firsts: {x_firsts}") # removing this line fixes the issue.
return x_firsts
m = BadFirst().eval()
x = torch.rand(10, 5)
res = m(x) # this works
torch.onnx.export(m, x, "m.onnx") # this fails due to a resolve_conj op
```
Observed Output:
```
x_firsts: tensor([0.8206, 0.7061, 0.0366, 0.7012, 0.4380, 0.0920, 0.7127, 0.1474, 0.5061,
0.9991])
x_firsts: tensor([0.8206, 0.7061, 0.0366, 0.7012, 0.4380, 0.0920, 0.7127, 0.1474, 0.5061,
0.9991])
Traceback (most recent call last):
File "~\scratch\pytorch\example.py", line 18, in <module>
torch.onnx.export(m, x, "m.onnx") # this fails due to a resolve_conj op
File "~\.virtualenvs\pytorch-jk_rFARN\lib\site-packages\torch\onnx\__init__.py", line 316, in export
return utils.export(model, args, f, export_params, verbose, training,
File "~\.virtualenvs\pytorch-jk_rFARN\lib\site-packages\torch\onnx\utils.py", line 107, in export
_export(model, args, f, export_params, verbose, training, input_names, output_names,
File "~\.virtualenvs\pytorch-jk_rFARN\lib\site-packages\torch\onnx\utils.py", line 724, in _export
_model_to_graph(model, args, verbose, input_names,
File "~\.virtualenvs\pytorch-jk_rFARN\lib\site-packages\torch\onnx\utils.py", line 497, in _model_to_graph
graph = _optimize_graph(graph, operator_export_type,
File "~\.virtualenvs\pytorch-jk_rFARN\lib\site-packages\torch\onnx\utils.py", line 216, in _optimize_graph
graph = torch._C._jit_pass_onnx(graph, operator_export_type)
File "~\.virtualenvs\pytorch-jk_rFARN\lib\site-packages\torch\onnx\__init__.py", line 373, in _run_symbolic_function
return utils._run_symbolic_function(*args, **kwargs)
File "~\.virtualenvs\pytorch-jk_rFARN\lib\site-packages\torch\onnx\utils.py", line 1028, in _run_symbolic_function
symbolic_fn = _find_symbolic_in_registry(domain, op_name, opset_version, operator_export_type)
File "~\.virtualenvs\pytorch-jk_rFARN\lib\site-packages\torch\onnx\utils.py", line 982, in _find_symbolic_in_registry
return sym_registry.get_registered_op(op_name, domain, opset_version)
File "~\.virtualenvs\pytorch-jk_rFARN\lib\site-packages\torch\onnx\symbolic_registry.py", line 125, in get_registered_op
raise RuntimeError(msg)
RuntimeError: Exporting the operator resolve_conj to ONNX opset version 9 is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub.
```
I think I understand why `resolve_conj` is invoked here, but causing an ONNX export failure feels like an unintentional side effect that could potentially be avoided by pruning the graph before invoking the ONNX optimiser.
If `torch.jit.script` is run first, then exporting the ONNX model succeeds, but `torch.jit.trace` fails in the same way.
### Versions
Test 1 (python 3.9.6/torch 1.10.2):
```
PyTorch version: 1.10.2+cpu
Is debug build: False
CUDA used to build PyTorch: Could not collect
ROCM used to build PyTorch: N/A
OS: Microsoft Windows 10 Enterprise
GCC version: Could not collect
Clang version: Could not collect
CMake version: version 3.21.0-rc3
Libc version: N/A
Python version: 3.9.6 (tags/v3.9.6:db3ff76, Jun 28 2021, 15:26:21) [MSC v.1929 64 bit (AMD64)] (64-bit runtime)
Python platform: Windows-10-10.0.19042-SP0
Is CUDA available: False
CUDA runtime version: 10.0.130
GPU models and configuration: GPU 0: Quadro P400
Nvidia driver version: 461.55
cuDNN version: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.0\bin\cudnn64_7.dll
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] numpy==1.22.2
[pip3] torch==1.10.2
[conda] Could not collect
```
Test 2 (python 3.9.10/torch 1.9.1):
```
PyTorch version: 1.9.1+cpu
Is debug build: False
CUDA used to build PyTorch: Could not collect
ROCM used to build PyTorch: N/A
OS: Microsoft Windows 10 Enterprise
GCC version: Could not collect
Clang version: Could not collect
CMake version: Could not collect
Libc version: N/A
Python version: 3.9.10 (tags/v3.9.10:f2f3f53, Jan 17 2022, 15:14:21) [MSC v.1929 64 bit (AMD64)] (64-bit runtime)
Python platform: Windows-10-10.0.19042-SP0
Is CUDA available: False
CUDA runtime version: Could not collect
GPU models and configuration: GPU 0: Quadro P2000
Nvidia driver version: 462.30
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] numpy==1.22.2
[pip3] torch==1.9.1
[conda] Could not collect
```
| This is being tracked internally by Microsoft at https://msdata.visualstudio.com/Vienna/_workitems/edit/1685669
I am not able to open the Microsoft link, but I am encountering the same error when exporting the `MarianMTModel`, specifically `Helsinki-NLP/opus-mt-fr-en`, from the HF transformers package to onnx.
I seem to be unable to find the line(s) causing it, though. Is there a work-around, besides `torch.jit.script`, which is not possible for different reasons?
Additionally, @mvhv, can you explain why this function is triggered with the print statement?
@Kroshtan `torch.conj()` creates a view of a tensor with the conjugate bit set to avoid excessive copying. If a later op can't operate on a view then `torch.resolve_conj()` is called which materialises the conjugate on-demand.
I don't know the exact workings (and don't really care enough to check further), but my guess is that whatever string formatter is used for displaying tensors makes a copy to avoid hogging memory from the runtime while blocking on IO, and that copy triggers resolve_conj because you may as well materialise it now in case you need it later.
The ops should just be pruned off the tree by ONNX, but it can't know that until it's loaded the graph, and it can't load the graph while it has unknown ops in it.
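For illustration (not part of the original thread), a small sketch of the conjugate-bit mechanics described above:

```python
import torch

z = torch.tensor([1 + 2j, 3 - 4j])
zc = z.conj()             # lazy view: only the conjugate bit is set, no data is copied
print(zc.is_conj())       # True
zr = zc.resolve_conj()    # materialises the conjugated values into a plain tensor
print(zr.is_conj())       # False

# For a real-valued tensor conj() is effectively a no-op, but formatting/printing code
# paths may still call resolve_conj()/resolve_neg(), which is what shows up in the trace.
```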
@BowenBao reproducible in nightly
```
Torch IR graph at exception: graph(%0 : Float(10, 5, strides=[5, 1], requires_grad=0, device=cpu)):
%174 : Long(device=cpu) = prim::Constant[value={0}](), scope: __main__.BadFirst::
%175 : Long(device=cpu) = prim::Constant[value={9223372036854775807}](), scope: __main__.BadFirst::
%176 : Long(device=cpu) = prim::Constant[value={1}](), scope: __main__.BadFirst::
%93 : Float(10, 5, strides=[5, 1], requires_grad=0, device=cpu) = aten::slice(%0, %174, %174, %175, %176), scope: __main__.BadFirst:: # <ipython-input-3-cc5d0ee6a95d>:9:0
%inp : Float(10, strides=[5], requires_grad=0, device=cpu) = aten::select(%93, %176, %174), scope: __main__.BadFirst:: # <ipython-input-3-cc5d0ee6a95d>:9:0
%172 : Float(10, strides=[5], requires_grad=0, device=cpu) = aten::resolve_conj(%inp), scope: __main__.BadFirst:: # pytorch/torch/_tensor_str.py:232:0
%173 : Float(10, strides=[5], requires_grad=0, device=cpu) = aten::resolve_neg(%172), scope: __main__.BadFirst:: # pytorch/torch/_tensor_str.py:232:0
return (%173)
``` | 2022-10-17T19:52:10 |
pytorch/pytorch | 87,457 | pytorch__pytorch-87457 | [
"86325"
] | 0c0df0be7497112022804df03aeeb0fcbadc9243 | diff --git a/torch/onnx/utils.py b/torch/onnx/utils.py
--- a/torch/onnx/utils.py
+++ b/torch/onnx/utils.py
@@ -90,7 +90,7 @@ def select_model_mode_for_export(model, mode: _C_onnx.TrainingMode):
)
originally_training: bool = False
- if not isinstance(model, torch.jit.ScriptFunction):
+ if hasattr(model, "training"):
originally_training = model.training
# ONNX opset 12 has better support for training amenable models, with updated
@@ -119,10 +119,7 @@ def select_model_mode_for_export(model, mode: _C_onnx.TrainingMode):
try:
yield
finally:
- if not (
- isinstance(model, torch.jit.ScriptFunction)
- or mode == _C_onnx.TrainingMode.PRESERVE
- ):
+ if hasattr(model, "training") and not mode == _C_onnx.TrainingMode.PRESERVE:
model.train(originally_training)
| diff --git a/test/onnx/test_utility_funs.py b/test/onnx/test_utility_funs.py
--- a/test/onnx/test_utility_funs.py
+++ b/test/onnx/test_utility_funs.py
@@ -3,9 +3,11 @@
import copy
import functools
import io
+import warnings
from typing import Callable
import onnx
+import parameterized
import torch
import torch.onnx
@@ -17,7 +19,7 @@
skipIfUnsupportedMaxOpsetVersion,
skipIfUnsupportedMinOpsetVersion,
)
-from torch.onnx import OperatorExportTypes, TrainingMode, utils
+from torch.onnx import _constants, OperatorExportTypes, TrainingMode, utils
from torch.onnx._globals import GLOBALS
from torch.onnx.symbolic_helper import _unpack_list, parse_args
from torch.testing._internal import common_utils
@@ -128,8 +130,15 @@ def test_it_returns_empty_list_when_all_ops_convertible(
self.assertEqual(unconvertible_ops, [])
-class TestUtilityFuns_opset9(_BaseTestCase):
- opset_version = 9
[email protected]_class(
+ [
+ {"opset_version": opset}
+ for opset in range(_constants.ONNX_BASE_OPSET, _constants.ONNX_MAX_OPSET + 1)
+ ],
+ class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_opset_{params_dict['opset_version']}",
+)
+class TestUtilityFuns(_BaseTestCase):
+ opset_version = None
def test_is_in_onnx_export(self):
test_self = self
@@ -148,8 +157,6 @@ def forward(self, x):
self.assertFalse(torch.onnx.is_in_onnx_export())
def test_validate_dynamic_axes_invalid_input_output_name(self):
- import warnings
-
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
utils._validate_dynamic_axes(
@@ -792,6 +799,31 @@ def forward(self, x):
# verify that the model state is preserved
self.assertEqual(model.training, old_state)
+ def test_export_does_not_fail_on_frozen_scripted_module(self):
+ class Inner(torch.nn.Module):
+ def forward(self, x):
+ if x > 0:
+ return x
+ else:
+ return x * x
+
+ class Outer(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.inner = torch.jit.script(Inner())
+
+ def forward(self, x):
+ return self.inner(x)
+
+ x = torch.zeros(1)
+ # Freezing is only implemented in eval mode. So we need to call eval()
+ outer_module = Outer().eval()
+ module = torch.jit.trace_module(outer_module, {"forward": (x)})
+ # jit.freeze removes the training attribute in the module
+ module = torch.jit.freeze(module)
+
+ torch.onnx.export(module, (x,), io.BytesIO(), opset_version=self.opset_version)
+
@skipIfUnsupportedMinOpsetVersion(15)
def test_local_function(self):
class N(torch.nn.Module):
@@ -1059,20 +1091,20 @@ def forward(self, x, y, z):
model = M(3)
expected_scope_names = {
- "test_utility_funs.TestUtilityFuns_opset9.test_node_scope.<locals>.M::/"
+ "test_utility_funs.TestUtilityFuns.test_node_scope.<locals>.M::/"
"torch.nn.modules.activation.GELU::gelu1",
- "test_utility_funs.TestUtilityFuns_opset9.test_node_scope.<locals>.M::/"
+ "test_utility_funs.TestUtilityFuns.test_node_scope.<locals>.M::/"
"torch.nn.modules.activation.GELU::gelu2",
- "test_utility_funs.TestUtilityFuns_opset9.test_node_scope.<locals>.M::/"
+ "test_utility_funs.TestUtilityFuns.test_node_scope.<locals>.M::/"
"torch.nn.modules.normalization.LayerNorm::lns.0",
- "test_utility_funs.TestUtilityFuns_opset9.test_node_scope.<locals>.M::/"
+ "test_utility_funs.TestUtilityFuns.test_node_scope.<locals>.M::/"
"torch.nn.modules.normalization.LayerNorm::lns.1",
- "test_utility_funs.TestUtilityFuns_opset9.test_node_scope.<locals>.M::/"
+ "test_utility_funs.TestUtilityFuns.test_node_scope.<locals>.M::/"
"torch.nn.modules.normalization.LayerNorm::lns.2",
- "test_utility_funs.TestUtilityFuns_opset9.test_node_scope.<locals>.M::/"
- "test_utility_funs.TestUtilityFuns_opset9.test_node_scope.<locals>.N::relu/"
+ "test_utility_funs.TestUtilityFuns.test_node_scope.<locals>.M::/"
+ "test_utility_funs.TestUtilityFuns.test_node_scope.<locals>.N::relu/"
"torch.nn.modules.activation.ReLU::relu",
- "test_utility_funs.TestUtilityFuns_opset9.test_node_scope.<locals>.M::",
+ "test_utility_funs.TestUtilityFuns.test_node_scope.<locals>.M::",
}
graph, _, _ = self._model_to_graph(
@@ -1130,7 +1162,7 @@ def forward(self, x):
# so we expect 3 constants with different scopes. The 3 constants are for the 3 layers.
# If CSE in exporter is improved later, this test needs to be updated.
# It should expect 1 constant, with same scope as root.
- scope_prefix = "test_utility_funs.TestUtilityFuns_opset9.test_scope_of_constants_when_combined_by_cse_pass.<locals>"
+ scope_prefix = "test_utility_funs.TestUtilityFuns.test_scope_of_constants_when_combined_by_cse_pass.<locals>"
expected_root_scope_name = f"{scope_prefix}.N::"
expected_layer_scope_name = f"{scope_prefix}.M::layers"
expected_constant_scope_name = [
@@ -1180,7 +1212,7 @@ def forward(self, x):
graph, _, _ = self._model_to_graph(
N(), (torch.randn(2, 3)), input_names=[], dynamic_axes={}
)
- scope_prefix = "test_utility_funs.TestUtilityFuns_opset9.test_scope_of_nodes_when_combined_by_cse_pass.<locals>"
+ scope_prefix = "test_utility_funs.TestUtilityFuns.test_scope_of_nodes_when_combined_by_cse_pass.<locals>"
expected_root_scope_name = f"{scope_prefix}.N::"
expected_layer_scope_name = f"{scope_prefix}.M::layers"
expected_add_scope_names = [
@@ -1884,29 +1916,5 @@ def forward(self, x):
torch.onnx.unregister_custom_op_symbolic("::cat", _onnx_opset_version)
-class TestUtilityFuns_opset10(TestUtilityFuns_opset9):
- opset_version = 10
-
-
-class TestUtilityFuns_opset11(TestUtilityFuns_opset9):
- opset_version = 11
-
-
-class TestUtilityFuns_opset12(TestUtilityFuns_opset9):
- opset_version = 12
-
-
-class TestUtilityFuns_opset13(TestUtilityFuns_opset9):
- opset_version = 13
-
-
-class TestUtilityFuns_opset14(TestUtilityFuns_opset9):
- opset_version = 14
-
-
-class TestUtilityFuns_opset15(TestUtilityFuns_opset9):
- opset_version = 15
-
-
if __name__ == "__main__":
common_utils.run_tests()
| [ONNX] onnx.export fails on frozen top-level ScriptedModule
### 🐛 Describe the bug
Here, onnx.export fails if called on a ScriptedModule that has been frozen:
```python
import torch
class Inner(torch.nn.Module):
def forward(self, x):
if x > 0 :
return x
else:
return x*x
class Outer(torch.nn.Module):
def __init__(self):
super().__init__()
i = Inner()
self.inner = torch.jit.script(i)
def forward(self, x):
return self.inner(x)
x = torch.zeros(1)
o=Outer()
o.eval()
m = torch.jit.trace_module(o, { 'forward' : (x)})
# borisf: passes if you comment this line out
m = torch.jit.optimize_for_inference(torch.jit.freeze(m))
torch.onnx.export(m, (x,), 'test.onnx')
```
Result:
python3 not-part.py
Traceback (most recent call last):
File "not-part.py", line 26, in <module>
torch.onnx.export(m, (x,), 'test.onnx')
File "/opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py", line 479, in export
_export(
File "/opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py", line 1390, in _export
with exporter_context(model, training, verbose):
File "/opt/conda/lib/python3.8/contextlib.py", line 113, in __enter__
return next(self.gen)
File "/opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py", line 175, in exporter_context
with select_model_mode_for_export(
File "/opt/conda/lib/python3.8/contextlib.py", line 113, in __enter__
return next(self.gen)
File "/opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py", line 93, in select_model_mode_for_export
originally_training = model.training
File "/opt/conda/lib/python3.8/site-packages/torch/jit/_script.py", line 785, in __getattr__
return super(RecursiveScriptModule, self).__getattr__(attr)
File "/opt/conda/lib/python3.8/site-packages/torch/jit/_script.py", line 502, in __getattr__
return super(ScriptModule, self).__getattr__(attr)
File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1261, in __getattr__
raise AttributeError("'{}' object has no attribute '{}'".format(
AttributeError: 'RecursiveScriptModule' object has no attribute 'training'
### Versions
Collecting environment information...
PyTorch version: 1.13.0a0+d321be6
Is debug build: False
CUDA used to build PyTorch: 11.7
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.4 LTS (x86_64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0
Clang version: Could not collect
CMake version: version 3.23.3
Libc version: glibc-2.31
Python version: 3.8.13 | packaged by conda-forge | (default, Mar 25 2022, 06:04:10) [GCC 10.3.0] (64-bit runtime)
Python platform: Linux-5.15.0-46-generic-x86_64-with-glibc2.10
Is CUDA available: True
CUDA runtime version: 11.7.99
GPU models and configuration: GPU 0: NVIDIA RTX A5000
Nvidia driver version: 515.65.01
cuDNN version: Probably one of the following:
/usr/lib/x86_64-linux-gnu/libcudnn.so.8.5.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.5.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.5.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.5.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.5.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.5.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.5.0
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] k2==1.19.dev20220911+cuda11.7.torch1.13.0a0
[pip3] numpy==1.22.4
[pip3] pytorch-lightning==1.7.5
[pip3] pytorch-quantization==2.1.2
[pip3] torch==1.13.0a0+d321be6
[pip3] torch-tensorrt==1.2.0a0
[pip3] torchaudio==0.13.0
[pip3] torchmetrics==0.9.3
[pip3] torchvision==0.14.0a0
[conda] k2 1.19.dev20220911+cuda11.7.torch1.13.0a0 pypi_0 pypi
[conda] mkl 2020.4 h726a3e6_304 conda-forge
[conda] mkl-include 2020.4 h726a3e6_304 conda-forge
[conda] numpy 1.22.4 py38h99721a1_0 conda-forge
[conda] pytorch-lightning 1.7.5 pypi_0 pypi
[conda] pytorch-quantization 2.1.2 pypi_0 pypi
[conda] torch 1.13.0a0+d321be6 pypi_0 pypi
[conda] torch-tensorrt 1.2.0a0 pypi_0 pypi
[conda] torchaudio 0.13.0 pypi_0 pypi
[conda] torchmetrics 0.9.3 pypi_0 pypi
[conda] torchvision 0.14.0a0 pypi_0 pypi
| Possible fix in select_model_mode_for_export:
```diff
root@30f44d0dcd3f:/git/radtts# diff -ur /opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py.orig /opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py
--- /opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py.orig 2022-10-05 21:20:37.425721244 +0000
+++ /opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py 2022-10-05 21:37:07.644794060 +0000
@@ -89,7 +89,7 @@
)
originally_training: bool = False
- if not isinstance(model, torch.jit.ScriptFunction):
+ if hasattr(model, 'training'):
originally_training = model.training
# ONNX opset 12 has better support for training amenable models, with updated
@@ -118,10 +118,7 @@
try:
yield
finally:
- if not (
- isinstance(model, torch.jit.ScriptFunction)
- or mode == _C_onnx.TrainingMode.PRESERVE
- ):
+ if hasattr(model, 'training') and not mode == _C_onnx.TrainingMode.PRESERVE:
model.train(originally_training)
```
Thanks for reporting this issue!
Let me show a workaround for anyone who hits the same issue.
You may get an error `'RecursiveScriptModule' object has no attribute 'training'` when you try to convert your TorchScript into ONNX like the following:
```python
import torch
# loading your TorchScript
model = torch.jit.load("model.pt")
# converting the model to ONNX
dummy_input = ...
torch.onnx.export(model, dummy_input, "model.onnx")
```
You can avoid the error by adding the field explicitly:
```python
import torch
# loading your TorchScript
model = torch.jit.load("model.pt")
# ADD THIS LINE
model.training = False
# converting the model to ONNX
dummy_input = ...
torch.onnx.export(model, dummy_input, "model.onnx")
```
> Possible fix in select_model_mode_for_export:
>
Thanks for the suggestion! | 2022-10-21T14:01:17 |
pytorch/pytorch | 89,924 | pytorch__pytorch-89924 | [
"88049"
] | a81f9b3b21f5e45677f66ee7882aa5b085c71228 | diff --git a/.github/scripts/generate_binary_build_matrix.py b/.github/scripts/generate_binary_build_matrix.py
--- a/.github/scripts/generate_binary_build_matrix.py
+++ b/.github/scripts/generate_binary_build_matrix.py
@@ -219,9 +219,9 @@ def generate_wheels_matrix(os: str,
"container_image": WHEEL_CONTAINER_IMAGES[arch_version],
"package_type": package_type,
"pytorch_extra_install_requirements":
- "nvidia-cuda-runtime-cu11;"
- "nvidia-cudnn-cu11==8.5.0.96;"
- "nvidia-cublas-cu11==11.10.3.66",
+ "nvidia-cuda-runtime-cu11; platform_system == 'Linux' | "
+ "nvidia-cudnn-cu11==8.5.0.96; platform_system == 'Linux' | "
+ "nvidia-cublas-cu11==11.10.3.66; platform_system == 'Linux'",
"build_name":
f"{package_type}-py{python_version}-{gpu_arch_type}{gpu_arch_version}-with-pypi-cudnn"
.replace(
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -848,7 +848,7 @@ def configure_extension_build():
pytorch_extra_install_requirements = os.getenv("PYTORCH_EXTRA_INSTALL_REQUIREMENTS", "")
if pytorch_extra_install_requirements:
report(f"pytorch_extra_install_requirements: {pytorch_extra_install_requirements}")
- extra_install_requires += pytorch_extra_install_requirements.split(";")
+ extra_install_requires += pytorch_extra_install_requirements.split("|")
# Cross-compile for M1
| PyTorch 1.13 cannot be installed using `poetry` on Mac M1
### 🐛 Describe the bug
This is an installation issue.
Poetry parses the endpoint at `https://pypi.org/pypi/torch/1.13/json` to get dependency metadata. The endpoint returns:
```
"requires_dist": [
"typing-extensions",
"nvidia-cuda-runtime-cu11 (==11.7.99)",
"nvidia-cudnn-cu11 (==8.5.0.96)",
"nvidia-cublas-cu11 (==11.10.3.66)",
"nvidia-cuda-nvrtc-cu11 (==11.7.99)",
"opt-einsum (>=3.3) ; extra == 'opt-einsum'"
],
```
However, the packages `nvidia-cuda-runtime-cu11`, `nvidia-cudnn-cu11`, `nvidia-cuda-nvrtc-cu11`, and `nvidia-cublas-cu11` do not exist for M1, and `poetry update` fails with
```
Unable to find installation candidates for nvidia-cublas-cu11 (11.10.3.66)
Unable to find installation candidates for nvidia-cuda-nvrtc-cu11 (11.7.99)
Unable to find installation candidates for nvidia-cuda-runtime-cu11 (11.7.99)
Unable to find installation candidates for nvidia-cudnn-cu11 (8.5.0.96)
```
This is specific to v1.13. This problem did not exist in v1.12.1
### Versions
This probably isn't relevant until after it's installed, but I get:
```
Collecting environment information...
PyTorch version: 1.13.0
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 12.6 (arm64)
GCC version: Could not collect
Clang version: 14.0.0 (clang-1400.0.29.102)
CMake version: Could not collect
Libc version: N/A
Python version: 3.10.8 (main, Oct 13 2022, 09:48:40) [Clang 14.0.0 (clang-1400.0.29.102)] (64-bit runtime)
Python platform: macOS-12.6-arm64-arm-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] mypy==0.982
[pip3] mypy-extensions==0.4.3
[pip3] numpy==1.23.4
[pip3] torch==1.13.0
[conda] Could not collect
```
cc @ezyang @seemethere @malfet
| cc @malfet @seemethere
This issue maybe related to pypi repo ? I just downloaded the M1 version of wheel from pypi:
```
https://files.pythonhosted.org/packages/79/b3/eaea3fc35d0466b9dae1e3f9db08467939347b3aaa53c0fd81953032db33/torch-1.13.0-cp310-none-macosx_11_0_arm64.whl
```
I see following Metadata requirements:
```
Requires-Python: >=3.7.0
Description-Content-Type: text/markdown
License-File: LICENSE
License-File: NOTICE
Requires-Dist: typing-extensions
Provides-Extra: opt-einsum
Requires-Dist: opt-einsum (>=3.3) ; extra == 'opt-einsum'
```
No mentions of the
```
"nvidia-cuda-runtime-cu11 (==11.7.99)",
"nvidia-cudnn-cu11 (==8.5.0.96)",
"nvidia-cublas-cu11 (==11.10.3.66)",
"nvidia-cuda-nvrtc-cu11 (==11.7.99)",
```
These are Linux-specific requirements that should not be used on Mac M1.
Dependencies on Linux-only packages should have a `; platform_system=="Linux"` suffix to make them friendlier to Poetry.
FWIW, this also affects Macs with Intel chips because (at a minimum), nvidia-cuda-nvrtc-cu11 does not supply any Mac wheels on PyPI. Hence, Poetry goes out and finds nothing and throws a RuntimeError about installing torch 1.13.0.
Specifying the `; platform_system=="Linux"` should address this for Intel Macs, though.
The issue here is that Poetry does not support split metadata across wheels; instead, markers should be used with unified metadata. This is not just a Poetry decision; PyPI's metadata API currently gathers metadata from the first distfile to be uploaded. Until such time that [PEP 658](https://peps.python.org/pep-0658/) is implemented in PyPI, having inconsistent metadata between distfiles is likely to confuse non-Pip tools.
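To make the mechanism concrete (illustrative snippet, not from the thread): tools that rely on the JSON API see a single `requires_dist` list for the release, taken from the first-uploaded distfile, regardless of which wheel will actually be installed:

```python
import json
import urllib.request

# Same endpoint quoted in the issue above; requires network access.
with urllib.request.urlopen("https://pypi.org/pypi/torch/1.13/json") as resp:
    meta = json.load(resp)

print(meta["info"]["requires_dist"])  # one list for the whole release, not per-wheel
```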
`torch>1.12` also can't be installed on Windows with an Intel chip. `pip` and `conda` installation works fine, however, and installing `torch<1.13` via `poetry` works. This might not be a `torch` issue and seems to be related to how `poetry` chooses the wheel files, but I just wanted to mention that pinning `torch<1.13` can be a workaround for anyone who must use `poetry`.
I was really looking to get the improved M1 support in 1.13. For now, my work-around is to hard-code a path to the wheel in `pyproject.toml`:
```
[tool.poetry.dependencies]
torch = {url = "https://files.pythonhosted.org/packages/79/b3/eaea3fc35d0466b9dae1e3f9db08467939347b3aaa53c0fd81953032db33/torch-1.13.0-cp310-none-macosx_11_0_arm64.whl"}
```
It seems that the PyPi API doesn't support different requirements on different platforms.
I submitted https://github.com/pytorch/pytorch/pull/88826 to address this for future releases. 🤞
It doesn't solve the [larger issue](https://github.com/pytorch/pytorch/issues/88049#issuecomment-1302555269) @neersighted mentioned of split metadata across wheels, but should at least have the platform marker. I guess the opposite problem of Poetry inspecting metadata for one of the non-Linux wheels and thus _omitting_ these Linux deps on Linux systems might be possible (I'm not sure if the order the wheels are picked is deterministic?).
It's based on the order we get them back from PyPI (and PyPI uses upload order iirc; this is modeled on the fact that the PyPI JSON API uses the first artifact to be uploaded). | 2022-11-30T17:18:54 |
|
pytorch/pytorch | 90,044 | pytorch__pytorch-90044 | [
"87313"
] | ae2fe4033cf3b17259b17f351020b988fa893f91 | diff --git a/torch/onnx/utils.py b/torch/onnx/utils.py
--- a/torch/onnx/utils.py
+++ b/torch/onnx/utils.py
@@ -1739,17 +1739,22 @@ def _add_output_to_block(block: _C.Block, value: _C.Value) -> int:
@_beartype.beartype
def _should_aten_fallback(
- name: str,
- opset_version: int,
- operator_export_type: _C_onnx.OperatorExportTypes,
+ name: str, opset_version: int, operator_export_type: _C_onnx.OperatorExportTypes
):
+ # For BUILD_CAFFE2=0 builds, if domain=="aten" and operator_export_type==ONNX_ATEN,
+ # an aten::ATen operator is created regardless of symbolics existence
+ # For BUILD_CAFFE2=1, the same applies only if there is no symbolic available
+
is_exportable_aten_op = registration.registry.is_registered_op(name, opset_version)
is_onnx_aten_export = operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN
is_aten_fallback_export = (
operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
)
- return is_onnx_aten_export or (
- not is_exportable_aten_op and is_aten_fallback_export
+ is_caffe2_build = _C_onnx._CAFFE2_ATEN_FALLBACK
+
+ return name.startswith("aten::") and (
+ ((is_onnx_aten_export or is_aten_fallback_export) and not is_caffe2_build)
+ or (not is_exportable_aten_op and is_aten_fallback_export)
)
@@ -1844,6 +1849,21 @@ def _run_symbolic_function(
env=env,
)
+ # Direct ATen export requested
+ if _should_aten_fallback(ns_op_name, opset_version, operator_export_type):
+ attrs = {
+ k + "_" + node.kindOf(k)[0]: symbolic_helper._node_get(node, k)
+ for k in node.attributeNames()
+ }
+ outputs = node.outputsSize()
+ attrs["outputs"] = outputs
+ return graph_context.at(
+ op_name,
+ *inputs,
+ overload_name=_get_aten_op_overload_name(node),
+ **attrs,
+ )
+
try:
# Caffe2-specific: Quantized op symbolics are registered for opset 9 only.
if symbolic_helper.is_caffe2_aten_fallback() and opset_version == 9:
@@ -1861,6 +1881,7 @@ def _run_symbolic_function(
if symbolic_function_group is not None:
symbolic_fn = symbolic_function_group.get(opset_version)
if symbolic_fn is not None:
+ # TODO Wrap almost identical attrs assignment or comment the difference.
attrs = {
k: symbolic_helper._node_get(node, k) for k in node.attributeNames()
}
@@ -1874,18 +1895,6 @@ def _run_symbolic_function(
# Clone node to trigger ONNX shape inference
return graph_context.op(op_name, *inputs, **attrs, outputs=node.outputsSize()) # type: ignore[attr-defined]
- if _should_aten_fallback(ns_op_name, opset_version, operator_export_type):
- # Direct ATen export requested
- outputs = node.outputsSize()
- attrs["outputs"] = outputs
- # `overload_name` is set for non-Caffe2 builds only
- return graph_context.at(
- op_name,
- *inputs,
- overload_name=_get_aten_op_overload_name(node),
- **attrs,
- )
-
raise errors.UnsupportedOperatorError(
domain,
op_name,
| diff --git a/test/onnx/test_pytorch_onnx_no_runtime.py b/test/onnx/test_pytorch_onnx_no_runtime.py
--- a/test/onnx/test_pytorch_onnx_no_runtime.py
+++ b/test/onnx/test_pytorch_onnx_no_runtime.py
@@ -32,6 +32,7 @@ def export_to_onnx(
mocks: Optional[Iterable] = None,
operator_export_type: torch.onnx.OperatorExportTypes = torch.onnx.OperatorExportTypes.ONNX,
opset_version: int = GLOBALS.export_onnx_opset_version,
+ **torch_onnx_export_kwargs,
) -> onnx.ModelProto:
"""Exports `model(input)` to ONNX and returns it.
@@ -44,6 +45,7 @@ def export_to_onnx(
mocks: list of mocks to use during export
operator_export_type: export type as described by `torch.onnx.export(...operator_export_type,...)`
opset_version: ONNX opset version as described by `torch.onnx.export(...opset_version,...)`
+ torch_onnx_export_kwargs: extra torch.onnx.export kwargs arguments
Returns:
A valid ONNX model (`onnx.ModelProto`)
"""
@@ -60,6 +62,7 @@ def export_to_onnx(
f,
operator_export_type=operator_export_type,
opset_version=opset_version,
+ **torch_onnx_export_kwargs,
)
# Validate ONNX graph before returning it
@@ -777,6 +780,91 @@ def forward(self, x):
model, inputs, f, dynamic_axes={"x": [0, 1]}, input_names=["x"]
)
+ @common_utils.skipIfNoCaffe2
+ def test_caffe2_aten_fallback(self):
+ class ModelWithAtenNotONNXOp(torch.nn.Module):
+ def forward(self, x, y):
+ abcd = x + y
+ defg = torch.linalg.qr(abcd)
+ return defg
+
+ x = torch.rand(3, 4)
+ y = torch.rand(3, 4)
+ f = io.BytesIO()
+ torch.onnx.export(
+ ModelWithAtenNotONNXOp(),
+ (x, y),
+ f,
+ do_constant_folding=False,
+ operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
+ # support for linalg.qr was added in later op set versions.
+ opset_version=9,
+ )
+ onnx_model = onnx.load(io.BytesIO(f.getvalue()))
+ self.assertAtenOp(onnx_model, "linalg_qr")
+
+ @common_utils.skipIfNoCaffe2
+ def test_caffe2_onnx_aten(self):
+ class ModelWithAtenFmod(torch.nn.Module):
+ def forward(self, x, y):
+ return torch.fmod(x, y)
+
+ x = torch.randn(3, 4, dtype=torch.float32)
+ y = torch.randn(3, 4, dtype=torch.float32)
+ f = io.BytesIO()
+ torch.onnx.export(
+ ModelWithAtenFmod(),
+ (x, y),
+ f,
+ do_constant_folding=False,
+ operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN,
+ opset_version=10, # or higher
+ )
+ onnx_model = onnx.load(io.BytesIO(f.getvalue()))
+ assert onnx_model.graph.node[0].op_type == "Mod"
+
+ @common_utils.skipIfCaffe2
+ def test_aten_fallback(self):
+ class ModelWithAtenNotONNXOp(torch.nn.Module):
+ def forward(self, x, y):
+ abcd = x + y
+ defg = torch.linalg.qr(abcd)
+ return defg
+
+ x = torch.rand(3, 4)
+ y = torch.rand(3, 4)
+ f = io.BytesIO()
+ torch.onnx.export(
+ ModelWithAtenNotONNXOp(),
+ (x, y),
+ f,
+ do_constant_folding=False,
+ operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
+ # support for linalg.qr was added in later op set versions.
+ opset_version=9,
+ )
+ onnx_model = onnx.load(io.BytesIO(f.getvalue()))
+ self.assertAtenOp(onnx_model, "linalg_qr")
+
+ @common_utils.skipIfCaffe2
+ def test_onnx_aten(self):
+ class ModelWithAtenFmod(torch.nn.Module):
+ def forward(self, x, y):
+ return torch.fmod(x, y)
+
+ x = torch.randn(3, 4, dtype=torch.float32)
+ y = torch.randn(3, 4, dtype=torch.float32)
+ f = io.BytesIO()
+ torch.onnx.export(
+ ModelWithAtenFmod(),
+ (x, y),
+ f,
+ do_constant_folding=False,
+ operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN,
+ )
+ onnx_model = onnx.load(io.BytesIO(f.getvalue()))
+ self.assertAtenOp(onnx_model, "fmod", "Tensor")
+
if __name__ == "__main__":
common_utils.run_tests()
| Improper model conversion from PyTorch to ONNX with torch.onnx.OperatorExportTypes.ONNX_ATEN flag
### 🐛 Describe the bug
I have spotted an improper model conversion to ONNX when using the `torch.onnx.OperatorExportTypes.ONNX_ATEN` flag with `PyTorch >= 1.12.0`.
My understanding from the [documentation](https://pytorch.org/docs/stable/onnx.html#aten-operators)
> OperatorExportTypes.ONNX_ATEN: All ATen ops (in the TorchScript namespace “aten”) are exported as ATen ops (in opset domain “org.pytorch.aten”). [ATen](https://pytorch.org/cppdocs/#aten) is PyTorch’s built-in tensor library, so this instructs the runtime to use PyTorch’s implementation of these ops.
is that when one uses the `torch.onnx.OperatorExportTypes.ONNX_ATEN` flag, all ATen operators have to be exported to the ONNX graph as ATen ops. When I run the code snippet below:
```python
import torch
class ModelWithAtenFmod(torch.nn.Module):
def forward(self, x, y):
return torch.fmod(x, y)
x = torch.randn(3, 4, dtype=torch.float32)
y = torch.randn(3, 4, dtype=torch.float32)
OUT = torch.onnx.export_to_pretty_string(
ModelWithAtenFmod(), (x, y),
add_node_names=False,
do_constant_folding=False,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN)
print(OUT)
```
with `PyTorch 1.11.0` I obtain an ONNX graph with an ATen operator, which is correct:
```
### PyTorch 1.11.0
ModelProto {
producer_name: "pytorch"
domain: ""
doc_string: ""
graph:
GraphProto {
name: "torch-jit-export"
inputs: [{name: "aten::ATen_0", type:Tensor dtype: 1, Tensor dims: 3 4},{name: "aten::ATen_1", type:Tensor dtype: 1, Tensor dims: 3 4}]
outputs: [{name: "2", type:Tensor dtype: 1, Tensor dims: ? ?}]
value_infos: []
initializers: []
nodes: [
Node {type: "ATen", inputs: [aten::ATen_0,aten::ATen_1], outputs: [2], attributes: [{ name: 'operator', type: string, value: 'fmod'}]}
]
}
opset_import: [OperatorSetIdProto { domain: , version: 9}OperatorSetIdProto { domain: org.pytorch.aten, version: 1}],
}
```
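The same expectation can also be checked programmatically. The sketch below is only an illustration (it reuses `ModelWithAtenFmod`, `x` and `y` from the snippet above, and follows the node/domain assertion style used in the exporter tests): it loads the exported protobuf and asserts that an ATen node in the `org.pytorch.aten` domain is present.
```python
import io
import onnx

f = io.BytesIO()
torch.onnx.export(
    ModelWithAtenFmod(), (x, y), f,
    do_constant_folding=False,
    operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN,
)
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
# Collect all nodes that were exported as ATen ops in the PyTorch ATen domain.
aten_nodes = [
    n for n in onnx_model.graph.node
    if n.op_type == "ATen" and n.domain == "org.pytorch.aten"
]
assert aten_nodes, "expected torch.fmod to be exported as an ATen node"
```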
However, when I run the same code with `PyTorch 1.12.1`, the ATen operator gets substituted with a native ONNX operator, which is not the expected behaviour:
```
### PyTorch 1.12.1
ModelProto {
producer_name: "pytorch"
domain: ""
doc_string: ""
graph:
GraphProto {
name: "torch_jit"
inputs: [{name: "onnx::Mod_0", type:Tensor dtype: 1, Tensor dims: 3 4},{name: "onnx::Mod_1", type:Tensor dtype: 1, Tensor dims: 3 4}]
outputs: [{name: "2", type:Tensor dtype: 1, Tensor dims: 3 4}]
value_infos: []
initializers: []
nodes: [
Node {type: "Mod", inputs: [onnx::Mod_0,onnx::Mod_1], outputs: [2], attributes: [{ name: 'fmod', type: int, value: 1}]}
]
}
opset_import: [OperatorSetIdProto { domain: , version: 13}],
}
```
### Versions
PyTorch version: 1.12.1
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 12.6 (arm64)
GCC version: Could not collect
Clang version: 14.0.6
CMake version: version 3.24.1
Libc version: N/A
Python version: 3.8.13 | packaged by conda-forge | (default, Mar 25 2022, 06:04:14) [Clang 12.0.1 ] (64-bit runtime)
Python platform: macOS-12.6-arm64-arm-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] numpy==1.23.4
[pip3] torch==1.12.1
[pip3] torchvision==0.13.1
[conda] numpy 1.23.4 py38h09ac2d9_0 conda-forge
[conda] pytorch 1.12.1 py3.8_0 pytorch
[conda] torchvision 0.13.1 py38_cpu pytorch
 | I took the example code snippet from an existing unit test:
https://github.com/pytorch/pytorch/blob/664058fa83f1d8eede5d66418abff6e20bd76ca8/test/jit/test_export_modes.py#L107-L121
Operator substitution is happening inside the `_optimize_graph` function in the code below:
https://github.com/pytorch/pytorch/blob/664058fa83f1d8eede5d66418abff6e20bd76ca8/torch/onnx/utils.py#L726-L743
The `graph` before the `_optimize_graph` call looks like:
```
graph(%0 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu),
%1 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)):
%6 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu) = aten::fmod(%0, %1)
return (%6)
```
And after the call:
```
graph(%0 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu),
%1 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)):
%2 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu) = onnx::Mod[fmod=1](%0, %1)
return (%2)
```
despite `operator_export_type` being equal to `OperatorExportTypes.ONNX_ATEN`
This is happening inside `graph = _C._jit_pass_onnx(graph, operator_export_type)`
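For reference, the intended dispatch rule for the two export types can be sketched as follows. This is illustrative only, not the actual exporter source; `has_onnx_symbolic` is a hypothetical stand-in for the symbolic-registry lookup:
```python
from enum import Enum, auto

class ExportType(Enum):
    ONNX_ATEN = auto()           # every aten:: op becomes an ATen node
    ONNX_ATEN_FALLBACK = auto()  # ATen node only when no ONNX symbolic exists

def should_emit_aten_node(op_name: str, export_type: ExportType, has_onnx_symbolic) -> bool:
    # Ops outside the aten namespace are never exported as ATen nodes.
    if not op_name.startswith("aten::"):
        return False
    if export_type is ExportType.ONNX_ATEN:
        return True
    return export_type is ExportType.ONNX_ATEN_FALLBACK and not has_onnx_symbolic(op_name)
```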
ca374773b4 - [ONNX] update default opset_version to 13 (#73898) introduced this regression. Looking into it further
Fix: https://github.com/pytorch/pytorch/pull/87735 | 2022-12-02T05:43:36 |
pytorch/pytorch | 90,104 | pytorch__pytorch-90104 | [
"87313"
] | a81f9b3b21f5e45677f66ee7882aa5b085c71228 | diff --git a/torch/onnx/utils.py b/torch/onnx/utils.py
--- a/torch/onnx/utils.py
+++ b/torch/onnx/utils.py
@@ -1752,10 +1752,21 @@ def _should_aten_fallback(
)
is_caffe2_build = _C_onnx._CAFFE2_ATEN_FALLBACK
- return name.startswith("aten::") and (
- ((is_onnx_aten_export or is_aten_fallback_export) and not is_caffe2_build)
- or (not is_exportable_aten_op and is_aten_fallback_export)
- )
+ if not name.startswith("aten::"):
+ return False
+
+ if is_caffe2_build:
+ if (
+ is_onnx_aten_export or is_aten_fallback_export
+ ) and not is_exportable_aten_op:
+ return True
+ else:
+ if is_onnx_aten_export or (
+ is_aten_fallback_export and not is_exportable_aten_op
+ ):
+ return True
+
+ return False
@_beartype.beartype
| diff --git a/test/onnx/test_pytorch_onnx_no_runtime.py b/test/onnx/test_pytorch_onnx_no_runtime.py
--- a/test/onnx/test_pytorch_onnx_no_runtime.py
+++ b/test/onnx/test_pytorch_onnx_no_runtime.py
@@ -9,16 +9,18 @@
import unittest.mock
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
+import numpy as np
+
import onnx
import onnx.numpy_helper
import torch
import torch.nn.functional as F
from torch import Tensor
-from torch.onnx import symbolic_helper, utils
+from torch.onnx import OperatorExportTypes, symbolic_helper, utils
from torch.onnx._globals import GLOBALS
from torch.onnx._internal import registration
-from torch.testing._internal import common_utils
+from torch.testing._internal import common_quantization, common_utils
def export_to_onnx(
@@ -781,50 +783,60 @@ def forward(self, x):
)
@common_utils.skipIfNoCaffe2
- def test_caffe2_aten_fallback(self):
+ def test_caffe2_aten_fallback_must_fallback(self):
class ModelWithAtenNotONNXOp(torch.nn.Module):
def forward(self, x, y):
abcd = x + y
defg = torch.linalg.qr(abcd)
return defg
- x = torch.rand(3, 4)
- y = torch.rand(3, 4)
- f = io.BytesIO()
- torch.onnx.export(
- ModelWithAtenNotONNXOp(),
- (x, y),
- f,
- do_constant_folding=False,
- operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
- # support for linalg.qr was added in later op set versions.
- opset_version=9,
- )
- onnx_model = onnx.load(io.BytesIO(f.getvalue()))
- self.assertAtenOp(onnx_model, "linalg_qr")
+ # TODO: Refactor common_utils._decide_skip_caffe2 to support parametrize
+ for operator_export_type in (
+ OperatorExportTypes.ONNX_ATEN,
+ OperatorExportTypes.ONNX_ATEN_FALLBACK,
+ ):
+ x = torch.rand(3, 4)
+ y = torch.rand(3, 4)
+ f = io.BytesIO()
+ torch.onnx.export(
+ ModelWithAtenNotONNXOp(),
+ (x, y),
+ f,
+ do_constant_folding=False,
+ operator_export_type=operator_export_type,
+ # support for linalg.qr was added in later op set versions.
+ opset_version=9,
+ )
+ onnx_model = onnx.load(io.BytesIO(f.getvalue()))
+ self.assertAtenOp(onnx_model, "linalg_qr")
@common_utils.skipIfNoCaffe2
- def test_caffe2_onnx_aten(self):
+ def test_caffe2_onnx_aten_must_not_fallback(self):
class ModelWithAtenFmod(torch.nn.Module):
def forward(self, x, y):
return torch.fmod(x, y)
- x = torch.randn(3, 4, dtype=torch.float32)
- y = torch.randn(3, 4, dtype=torch.float32)
- f = io.BytesIO()
- torch.onnx.export(
- ModelWithAtenFmod(),
- (x, y),
- f,
- do_constant_folding=False,
- operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN,
- opset_version=10, # or higher
- )
- onnx_model = onnx.load(io.BytesIO(f.getvalue()))
- assert onnx_model.graph.node[0].op_type == "Mod"
+ # TODO: Refactor common_utils._decide_skip_caffe2 to support parametrize
+ for operator_export_type in (
+ OperatorExportTypes.ONNX_ATEN_FALLBACK,
+ OperatorExportTypes.ONNX_ATEN,
+ ):
+ x = torch.randn(3, 4, dtype=torch.float32)
+ y = torch.randn(3, 4, dtype=torch.float32)
+ f = io.BytesIO()
+ torch.onnx.export(
+ ModelWithAtenFmod(),
+ (x, y),
+ f,
+ do_constant_folding=False,
+ operator_export_type=operator_export_type,
+ opset_version=10, # or higher
+ )
+ onnx_model = onnx.load(io.BytesIO(f.getvalue()))
+ assert onnx_model.graph.node[0].op_type == "Mod"
@common_utils.skipIfCaffe2
- def test_aten_fallback(self):
+ def test_aten_fallback_must_fallback(self):
class ModelWithAtenNotONNXOp(torch.nn.Module):
def forward(self, x, y):
abcd = x + y
@@ -865,6 +877,106 @@ def forward(self, x, y):
onnx_model = onnx.load(io.BytesIO(f.getvalue()))
self.assertAtenOp(onnx_model, "fmod", "Tensor")
+ @common_utils.skipIfCaffe2
+ def test_onnx_aten_fallback_must_not_fallback(self):
+ # For BUILD_CAFFE2=0, aten fallback only when not exportable
+ class ONNXExportable(torch.nn.Module):
+ def __init__(self):
+ super(ONNXExportable, self).__init__()
+ self.quant = torch.quantization.QuantStub()
+ self.fc1 = torch.nn.Linear(12, 8)
+ self.fc2 = torch.nn.Linear(8, 4)
+ self.fc3 = torch.nn.Linear(4, 6)
+ self.dequant = torch.quantization.DeQuantStub()
+
+ def forward(self, x):
+ x = self.quant(x)
+ x = x.view((-1, 12))
+ h = F.relu(self.fc1(x))
+ h = F.relu(self.fc2(h))
+ h = F.relu(self.fc3(h))
+ h = self.dequant(h)
+ return h
+
+ dummy_input = torch.randn(12)
+ f = io.BytesIO()
+ torch.onnx.export(
+ ONNXExportable(),
+ (dummy_input,),
+ f,
+ do_constant_folding=False,
+ operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
+ )
+ onnx_model = onnx.load(io.BytesIO(f.getvalue()))
+ all_aten_nodes = [
+ p
+ for p in onnx_model.graph.node
+ if p.op_type == "ATen" and p.domain == "org.pytorch.aten"
+ ]
+ self.assertEqual(len(all_aten_nodes), 0)
+
+
+class TestQuantizeEagerONNXExport(common_utils.TestCase):
+ def _test_lower_graph_impl(self, model, data):
+ model.qconfig = torch.ao.quantization.default_qconfig
+ model = torch.ao.quantization.prepare(model)
+ model = torch.ao.quantization.convert(model)
+
+ _ = model(data)
+ input_names = ["x"]
+
+ def _export_to_onnx(model, input, input_names):
+ traced = torch.jit.trace(model, input)
+ buf = io.BytesIO()
+ torch.jit.save(traced, buf)
+ buf.seek(0)
+
+ model = torch.jit.load(buf)
+ f = io.BytesIO()
+ torch.onnx.export(
+ model,
+ input,
+ f,
+ input_names=input_names,
+ operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
+ opset_version=9,
+ )
+
+ _export_to_onnx(model, data, input_names)
+
+ @common_quantization.skipIfNoFBGEMM
+ @common_utils.skipIfNoCaffe2
+ def test_lower_graph_linear(self):
+ model = torch.ao.quantization.QuantWrapper(
+ torch.nn.Linear(5, 10, bias=True)
+ ).to(dtype=torch.float)
+ data_numpy = np.random.rand(1, 2, 5).astype(np.float32)
+ data = torch.from_numpy(data_numpy).to(dtype=torch.float)
+ self._test_lower_graph_impl(model, data)
+
+ @common_quantization.skipIfNoFBGEMM
+ @common_utils.skipIfNoCaffe2
+ def test_lower_graph_conv2d(self):
+ model = torch.ao.quantization.QuantWrapper(
+ torch.nn.Conv2d(3, 5, 2, bias=True)
+ ).to(dtype=torch.float)
+ data_numpy = np.random.rand(1, 3, 6, 6).astype(np.float32)
+ data = torch.from_numpy(data_numpy).to(dtype=torch.float)
+ self._test_lower_graph_impl(model, data)
+
+ @common_quantization.skipIfNoFBGEMM
+ @unittest.skip(
+ "onnx opset9 does not support quantize_per_tensor and caffe2 \
+ does not support conv3d"
+ )
+ def test_lower_graph_conv3d(self):
+ model = torch.ao.quantization.QuantWrapper(
+ torch.nn.Conv3d(3, 5, 2, bias=True)
+ ).to(dtype=torch.float)
+ data_numpy = np.random.rand(1, 3, 6, 6, 6).astype(np.float32)
+ data = torch.from_numpy(data_numpy).to(dtype=torch.float)
+ self._test_lower_graph_impl(model, data)
+
if __name__ == "__main__":
common_utils.run_tests()
| Improper model conversion from PyTorch to ONNX with torch.onnx.OperatorExportTypes.ONNX_ATEN flag
### 🐛 Describe the bug
I have spotted improper model conversion to ONNX when using the `torch.onnx.OperatorExportTypes.ONNX_ATEN` flag with `PyTorch >=1.12.0`
My understanding from the [documentation](https://pytorch.org/docs/stable/onnx.html#aten-operators)
> OperatorExportTypes.ONNX_ATEN: All ATen ops (in the TorchScript namespace “aten”) are exported as ATen ops (in opset domain “org.pytorch.aten”). [ATen](https://pytorch.org/cppdocs/#aten) is PyTorch’s built-in tensor library, so this instructs the runtime to use PyTorch’s implementation of these ops.
is that when one uses the `torch.onnx.OperatorExportTypes.ONNX_ATEN` flag, all ATen operators should be exported to the ONNX graph as ATen nodes. When I run the code snippet below:
```python
import torch
class ModelWithAtenFmod(torch.nn.Module):
def forward(self, x, y):
return torch.fmod(x, y)
x = torch.randn(3, 4, dtype=torch.float32)
y = torch.randn(3, 4, dtype=torch.float32)
OUT = torch.onnx.export_to_pretty_string(
ModelWithAtenFmod(), (x, y),
add_node_names=False,
do_constant_folding=False,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN)
print(OUT)
```
with `PyTorch 1.11.0` I obtain an ONNX graph with the ATen operator, which is correct:
```
### PyTorch 1.11.0
ModelProto {
producer_name: "pytorch"
domain: ""
doc_string: ""
graph:
GraphProto {
name: "torch-jit-export"
inputs: [{name: "aten::ATen_0", type:Tensor dtype: 1, Tensor dims: 3 4},{name: "aten::ATen_1", type:Tensor dtype: 1, Tensor dims: 3 4}]
outputs: [{name: "2", type:Tensor dtype: 1, Tensor dims: ? ?}]
value_infos: []
initializers: []
nodes: [
Node {type: "ATen", inputs: [aten::ATen_0,aten::ATen_1], outputs: [2], attributes: [{ name: 'operator', type: string, value: 'fmod'}]}
]
}
opset_import: [OperatorSetIdProto { domain: , version: 9}OperatorSetIdProto { domain: org.pytorch.aten, version: 1}],
}
```
However, when I run the same code with `PyTorch 1.12.1`, the ATen operator gets substituted with a native ONNX operator, which is not the expected behaviour:
```
### PyTorch 1.12.1
ModelProto {
producer_name: "pytorch"
domain: ""
doc_string: ""
graph:
GraphProto {
name: "torch_jit"
inputs: [{name: "onnx::Mod_0", type:Tensor dtype: 1, Tensor dims: 3 4},{name: "onnx::Mod_1", type:Tensor dtype: 1, Tensor dims: 3 4}]
outputs: [{name: "2", type:Tensor dtype: 1, Tensor dims: 3 4}]
value_infos: []
initializers: []
nodes: [
Node {type: "Mod", inputs: [onnx::Mod_0,onnx::Mod_1], outputs: [2], attributes: [{ name: 'fmod', type: int, value: 1}]}
]
}
opset_import: [OperatorSetIdProto { domain: , version: 13}],
}
```
### Versions
PyTorch version: 1.12.1
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 12.6 (arm64)
GCC version: Could not collect
Clang version: 14.0.6
CMake version: version 3.24.1
Libc version: N/A
Python version: 3.8.13 | packaged by conda-forge | (default, Mar 25 2022, 06:04:14) [Clang 12.0.1 ] (64-bit runtime)
Python platform: macOS-12.6-arm64-arm-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] numpy==1.23.4
[pip3] torch==1.12.1
[pip3] torchvision==0.13.1
[conda] numpy 1.23.4 py38h09ac2d9_0 conda-forge
[conda] pytorch 1.12.1 py3.8_0 pytorch
[conda] torchvision 0.13.1 py38_cpu pytorch
 | I took the example code snippet from an existing unit test:
https://github.com/pytorch/pytorch/blob/664058fa83f1d8eede5d66418abff6e20bd76ca8/test/jit/test_export_modes.py#L107-L121
Operator substitution is happening inside the `_optimize_graph` function in the code below:
https://github.com/pytorch/pytorch/blob/664058fa83f1d8eede5d66418abff6e20bd76ca8/torch/onnx/utils.py#L726-L743
The `graph` before the `_optimize_graph` call looks like:
```
graph(%0 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu),
%1 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)):
%6 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu) = aten::fmod(%0, %1)
return (%6)
```
And after the call:
```
graph(%0 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu),
%1 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)):
%2 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu) = onnx::Mod[fmod=1](%0, %1)
return (%2)
```
despite `operator_export_type` being equal to `OperatorExportTypes.ONNX_ATEN`
This is happening inside `graph = _C._jit_pass_onnx(graph, operator_export_type)`
ca374773b4 - [ONNX] update default opset_version to 13 (#73898) introduced this regression. Looking into it further
Fix: https://github.com/pytorch/pytorch/pull/87735 | 2022-12-03T03:00:22 |
pytorch/pytorch | 95,388 | pytorch__pytorch-95388 | [
"92615"
] | 4bd5c1e4f4018e3776c1ea7cac8c912f540f12ac | diff --git a/torchgen/api/python.py b/torchgen/api/python.py
--- a/torchgen/api/python.py
+++ b/torchgen/api/python.py
@@ -1109,6 +1109,7 @@ def dispatch_lambda_arg(cpp_arg: Binding) -> DispatchLambdaArgument:
"::std::tuple<at::Tensor,at::Tensor,at::Tensor>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
+ "::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,int64_t>",
"::std::tuple<at::Tensor,at::Tensor,double,int64_t>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t>",
| diff --git a/test/forward_backward_compatibility/check_forward_backward_compatibility.py b/test/forward_backward_compatibility/check_forward_backward_compatibility.py
--- a/test/forward_backward_compatibility/check_forward_backward_compatibility.py
+++ b/test/forward_backward_compatibility/check_forward_backward_compatibility.py
@@ -269,7 +269,10 @@
("aten::dsplit.int", datetime.date(2022, 9, 1)),
("aten::hsplit.array", datetime.date(2022, 9, 1)),
("aten::hsplit.int", datetime.date(2022, 9, 1)),
- ("aten::lstm_mps_backward.out", datetime.date(2022, 9, 1)),
+ ("aten::lstm_mps_backward.out", datetime.date(2023, 9, 1)),
+ ("aten::lstm_mps_backward", datetime.date(2023, 9, 1)),
+ ("aten::_lstm_mps.out", datetime.date(2023, 9, 1)),
+ ("aten::_lstm_mps", datetime.date(2023, 9, 1)),
("aten::miopen_rnn_backward.out", datetime.date(2022, 9, 1)),
("aten::quantize_per_tensor.tensors_out", datetime.date(2022, 9, 1)),
("aten::split", datetime.date(2022, 9, 1)),
diff --git a/test/test_mps.py b/test/test_mps.py
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -8926,64 +8926,91 @@ def test_cpu_indices(self, device="mps"):
class TestRNNMPS(TestCaseMPS):
def test_lstm_1(self, device="mps", dtype=torch.float32):
+ for layers in [1] if product_version < 13.0 else [1, 2, 5]:
+ torch.random.manual_seed(42)
+ rnn = nn.LSTM(7, 4, layers, device="cpu")
+ input = torch.randn(2, 3, 7, device="cpu")
+ hx = torch.randn(layers, 3, 4, device="cpu")
+ cx = torch.randn(layers, 3, 4, device="cpu")
+
+ cpu_output, (cpu_hn, cpu_cn) = rnn(input, (hx, cx))
+
+ rnn = rnn.to(device)
+ input = input.to(device)
+ hx = hx.to(device)
+ cx = cx.to(device)
+ output, (hn, cn) = rnn(input, (hx, cx))
+
+ self.assertEqual(cpu_output, output)
+ self.assertEqual(cpu_hn, hn)
+ self.assertEqual(cpu_cn, cn)
+
+ # test batch_first
+ rnn = nn.LSTM(7, 4, layers, device="cpu", batch_first=True)
+ input = torch.randn(3, 2, 7, device="cpu")
+ hx = torch.randn(layers, 3, 4, device="cpu")
+ cx = torch.randn(layers, 3, 4, device="cpu")
+ cpu_output, (cpu_hn, cpu_cn) = rnn(input, (hx, cx))
+
+ rnn = rnn.to(device)
+ input = input.to(device)
+ hx = hx.to(device)
+ cx = cx.to(device)
+ output, (hn, cn) = rnn(input, (hx, cx))
+
+ self.assertEqual(cpu_output, output)
+ self.assertEqual(cpu_hn, hn)
+ self.assertEqual(cpu_cn, cn)
+
+ def test_lstm_backward(self, device="mps", dtype=torch.float32):
+ for layers in [1] if product_version < 13.0 else [1, 2, 5]:
+ lstm = nn.LSTM(2, 4, layers) # initialized globally for consistent parameters init
+ lstm.train()
+
+ def get_results(device, inp, hx, cx):
+ rnn = lstm.to(device)
+ inp, hx, cx = inp.to(device), hx.to(device), cx.to(device)
+
+ output, _ = rnn(inp, (hx, cx))
+ f = output.sum()
+
+ param_names, params = zip(*rnn.named_parameters())
+ param_grads = zip(param_names, torch.autograd.grad(f, params, retain_graph=True))
+
+ input_grad, hx_grad, cx_grad = torch.autograd.grad(f, [inp, hx, cx])
+ return output, param_grads, input_grad, hx_grad, cx_grad
+
+ inp = torch.randn((5, 3, 2), requires_grad=True, dtype=dtype, device=device)
+ hx = torch.randn((layers, 3, 4), requires_grad=True, dtype=dtype, device=device)
+ cx = torch.randn((layers, 3, 4), requires_grad=True, dtype=dtype, device=device)
+
+ cpu_output, cpu_weights_grad, cpu_input_grad, cpu_hx_grad, cpu_cx_grad = get_results("cpu", inp, hx, cx)
+ mps_output, mps_weights_grad, mps_input_grad, mps_hx_grad, mps_cx_grad = get_results(device, inp, hx, cx)
+
+ self.assertEqual(cpu_hx_grad, mps_hx_grad)
+ self.assertEqual(cpu_cx_grad, mps_cx_grad)
+ self.assertEqual(cpu_output, mps_output)
+ self.assertEqual(cpu_input_grad, mps_input_grad)
+ for (cpu_name, cpu_weight_grad), (mps_name, mps_weight_grad) in zip(cpu_weights_grad, mps_weights_grad):
+ self.assertEqual(cpu_weight_grad, mps_weight_grad, f"mismatch in cpu:{cpu_name} vs mps:{mps_name}")
+
+ # test batch_first backward
+ lstm = nn.LSTM(2, 4, layers, batch_first=True)
+ lstm.train()
+
+ hx = torch.randn((layers, 5, 4), requires_grad=True, dtype=dtype, device=device)
+ cx = torch.randn((layers, 5, 4), requires_grad=True, dtype=dtype, device=device)
+
+ cpu_output, cpu_weights_grad, cpu_input_grad, cpu_hx_grad, cpu_cx_grad = get_results("cpu", inp, hx, cx)
+ mps_output, mps_weights_grad, mps_input_grad, mps_hx_grad, mps_cx_grad = get_results(device, inp, hx, cx)
+
+ self.assertEqual(cpu_hx_grad, mps_hx_grad)
+ self.assertEqual(cpu_cx_grad, mps_cx_grad)
+ self.assertEqual(cpu_output, mps_output)
+ self.assertEqual(cpu_input_grad, mps_input_grad)
+ for (cpu_name, cpu_weight_grad), (mps_name, mps_weight_grad) in zip(cpu_weights_grad, mps_weights_grad):
+ self.assertEqual(cpu_weight_grad, mps_weight_grad, f"mismatch in cpu:{cpu_name} vs mps:{mps_name}")
- rnn = nn.LSTM(1, 4, 2, device="cpu")
- input = torch.randn(2, 3, 1, device="cpu")
- hx = torch.zeros(2, 3, 4, device="cpu")
- cx = torch.zeros(2, 3, 4, device="cpu")
-
- cpu_output, (cpu_hn, cpu_cn) = rnn(input, (hx, cx))
-
- rnn = rnn.to(device)
- input = input.to(device)
- hx = hx.to(device)
- cx = cx.to(device)
- output, (hn, cn) = rnn(input, (hx, cx))
-
- self.assertEqual(cpu_output, output)
- self.assertEqual(cpu_hn, hn)
- self.assertEqual(cpu_cn, cn)
-
- # test batch_first
- rnn = nn.LSTM(1, 4, 2, device="cpu", batch_first=True)
- input = torch.randn(3, 2, 1, device="cpu")
- hx = torch.zeros(2, 3, 4, device="cpu")
- cx = torch.zeros(2, 3, 4, device="cpu")
- cpu_output, (cpu_hn, cpu_cn) = rnn(input, (hx, cx))
-
- rnn = rnn.to(device)
- input = input.to(device)
- hx = hx.to(device)
- cx = cx.to(device)
- output, (hn, cn) = rnn(input, (hx, cx))
-
- self.assertEqual(cpu_output, output)
- self.assertEqual(cpu_hn, hn)
- self.assertEqual(cpu_cn, cn)
-
- @unittest.skipIf(True, "Backward of lstm returns wrong result")
- def test_lstm_2(self, device="mps", dtype=torch.float32):
- def get_results(device):
- rnn = nn.LSTM(1, 4, 1, device=device)
- inp = torch.randn(2, 3, 1, device=device, requires_grad=True)
- hx = torch.zeros(1, 3, 4, device=device)
- cx = torch.zeros(1, 3, 4, device=device)
-
- output, _ = rnn(inp, (hx, cx))
- output.sum().backward()
-
- weight_grad = rnn.weight_ih_l0.grad.clone()
- input_grad = inp.grad.clone()
-
- return output, weight_grad, input_grad
-
-
- cpu_output, cpu_weight_grad, cpu_input_grad = get_results("cpu")
- mps_output, mps_weight_grad, mps_input_grad = get_results("mps")
-
- self.assertEqual(cpu_output, mps_output)
- self.assertEqual(cpu_input_grad, mps_input_grad)
- self.assertEqual(cpu_weight_grad, mps_weight_grad)
def test_RNN_cell_no_broadcasting(self):
def test(cell_module, input, hx, input_size, hidden_size):
| MPS backend produces bad training results in comparison to other backends
### 🐛 Describe the bug
Using the [MPS backend](https://pytorch.org/docs/stable/notes/mps.html) to train a model produces much worse results than using other backends (e.g. CPU or CUDA). To be clear, I am not talking about the speed of the training, but rather about the metrics for the quality (loss, perplexity) of the model after it has been trained. For example, if you run the training of the [word_language_model](https://github.com/pytorch/examples/tree/40289773aa4916fad0d50967917b3ae8aa534fd6/word_language_model) in the [pytorch/examples](https://github.com/pytorch/examples/tree/40289773aa4916fad0d50967917b3ae8aa534fd6) repository with either the CPU or the CUDA backend, your values for `loss` and `ppl` will be similar to this:
```
$ python main.py --cuda --epochs 1
| epoch 1 | 200/ 2983 batches | lr 20.00 | ms/batch 18.51 | loss 7.63 | ppl 2063.29
| epoch 1 | 400/ 2983 batches | lr 20.00 | ms/batch 17.47 | loss 6.86 | ppl 950.96
| epoch 1 | 600/ 2983 batches | lr 20.00 | ms/batch 17.47 | loss 6.48 | ppl 653.41
| epoch 1 | 800/ 2983 batches | lr 20.00 | ms/batch 17.46 | loss 6.29 | ppl 539.53
| epoch 1 | 1000/ 2983 batches | lr 20.00 | ms/batch 17.50 | loss 6.14 | ppl 465.56
| epoch 1 | 1200/ 2983 batches | lr 20.00 | ms/batch 17.54 | loss 6.07 | ppl 430.74
| epoch 1 | 1400/ 2983 batches | lr 20.00 | ms/batch 17.52 | loss 5.95 | ppl 384.86
| epoch 1 | 1600/ 2983 batches | lr 20.00 | ms/batch 17.55 | loss 5.96 | ppl 387.05
| epoch 1 | 1800/ 2983 batches | lr 20.00 | ms/batch 17.54 | loss 5.82 | ppl 337.38
| epoch 1 | 2000/ 2983 batches | lr 20.00 | ms/batch 17.52 | loss 5.80 | ppl 329.33
| epoch 1 | 2200/ 2983 batches | lr 20.00 | ms/batch 17.54 | loss 5.67 | ppl 289.19
| epoch 1 | 2400/ 2983 batches | lr 20.00 | ms/batch 17.53 | loss 5.67 | ppl 290.80
| epoch 1 | 2600/ 2983 batches | lr 20.00 | ms/batch 17.53 | loss 5.66 | ppl 285.86
| epoch 1 | 2800/ 2983 batches | lr 20.00 | ms/batch 17.54 | loss 5.55 | ppl 256.72
-----------------------------------------------------------------------------------------
| end of epoch 1 | time: 54.77s | valid loss 5.53 | valid ppl 252.11
-----------------------------------------------------------------------------------------
=========================================================================================
| End of training | test loss 5.44 | test ppl 230.14
=========================================================================================
```
Running the same training with MPS enabled consistently results in significantly worse values for `loss` and `ppl` (tested with pytorch `v1.13.1` and `v2.0.0.dev20230119`):
```
$ python main.py --mps --epochs 1
| epoch 1 | 200/ 2983 batches | lr 20.00 | ms/batch 115.26 | loss 7.99 | ppl 2959.03
| epoch 1 | 400/ 2983 batches | lr 20.00 | ms/batch 114.31 | loss 7.52 | ppl 1849.67
| epoch 1 | 600/ 2983 batches | lr 20.00 | ms/batch 114.39 | loss 7.38 | ppl 1603.87
| epoch 1 | 800/ 2983 batches | lr 20.00 | ms/batch 113.73 | loss 7.30 | ppl 1475.25
| epoch 1 | 1000/ 2983 batches | lr 20.00 | ms/batch 113.39 | loss 7.26 | ppl 1421.42
| epoch 1 | 1200/ 2983 batches | lr 20.00 | ms/batch 113.48 | loss 7.25 | ppl 1406.03
| epoch 1 | 1400/ 2983 batches | lr 20.00 | ms/batch 113.49 | loss 7.18 | ppl 1317.59
| epoch 1 | 1600/ 2983 batches | lr 20.00 | ms/batch 113.44 | loss 7.19 | ppl 1330.15
| epoch 1 | 1800/ 2983 batches | lr 20.00 | ms/batch 114.70 | loss 7.16 | ppl 1280.67
| epoch 1 | 2000/ 2983 batches | lr 20.00 | ms/batch 113.67 | loss 7.16 | ppl 1285.51
| epoch 1 | 2200/ 2983 batches | lr 20.00 | ms/batch 113.82 | loss 7.16 | ppl 1288.31
| epoch 1 | 2400/ 2983 batches | lr 20.00 | ms/batch 113.25 | loss 7.13 | ppl 1247.44
| epoch 1 | 2600/ 2983 batches | lr 20.00 | ms/batch 113.25 | loss 7.18 | ppl 1313.82
| epoch 1 | 2800/ 2983 batches | lr 20.00 | ms/batch 113.55 | loss 7.17 | ppl 1301.76
-----------------------------------------------------------------------------------------
| end of epoch 1 | time: 362.77s | valid loss 7.01 | valid ppl 1112.40
-----------------------------------------------------------------------------------------
=========================================================================================
| End of training | test loss 6.93 | test ppl 1019.79
=========================================================================================
```
Increasing the number of epochs will only increase the difference between the results of the two training configurations.
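To localise the problem independently of the full training script, a minimal CPU-vs-MPS comparison of a single LSTM forward/backward pass can be used. The sketch below is not part of the original report; shapes are arbitrary small values, along the lines of the comparison added in this record's test patch:
```python
import torch
import torch.nn as nn

torch.manual_seed(0)
ref = nn.LSTM(2, 4, 1)          # small, arbitrary sizes for illustration
inp = torch.randn(5, 3, 2)

def run(device):
    m = nn.LSTM(2, 4, 1).to(device)
    m.load_state_dict(ref.state_dict())   # identical weights on both devices
    x = inp.detach().clone().to(device).requires_grad_(True)
    out, _ = m(x)
    out.sum().backward()
    return out.detach().cpu(), x.grad.detach().cpu()

cpu_out, cpu_grad = run("cpu")
mps_out, mps_grad = run("mps")
print("forward  max abs diff:", (cpu_out - mps_out).abs().max().item())
print("backward max abs diff:", (cpu_grad - mps_grad).abs().max().item())
```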
### Versions
**Latest:**
```
PyTorch version: 1.13.1
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 13.1 (arm64)
GCC version: Could not collect
Clang version: 14.0.0 (clang-1400.0.29.202)
CMake version: version 3.23.2
Libc version: N/A
Python version: 3.10.9 (main, Jan 11 2023, 09:18:18) [Clang 14.0.6 ] (64-bit runtime)
Python platform: macOS-13.1-arm64-arm-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] numpy==1.23.5
[pip3] torch==1.13.1
[conda] numpy 1.23.5 py310hb93e574_0
[conda] numpy-base 1.23.5 py310haf87e8b_0
[conda] pytorch 1.13.1 py3.10_0 pytorch
```
**Nightly:**
```
PyTorch version: 2.0.0.dev20230119
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 13.1 (arm64)
GCC version: Could not collect
Clang version: 14.0.0 (clang-1400.0.29.202)
CMake version: version 3.23.2
Libc version: N/A
Python version: 3.10.9 (main, Jan 11 2023, 09:18:18) [Clang 14.0.6 ] (64-bit runtime)
Python platform: macOS-13.1-arm64-arm-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] numpy==1.23.5
[pip3] torch==2.0.0.dev20230119
[conda] numpy 1.23.5 py310hb93e574_0
[conda] numpy-base 1.23.5 py310haf87e8b_0
[conda] pytorch 2.0.0.dev20230119 py3.10_0 pytorch-nightly
```
cc @kulinseth @albanD @malfet @DenisVieriu97 @razarmehr @abhudev
 | Hello, the bad results come from a numerical correctness issue with LSTM on the MPS backend. We're working on fixing this. | 2023-02-23T18:18:52 |
pytorch/pytorch | 95,397 | pytorch__pytorch-95397 | [
"95266"
] | 96f627dcdeadb5028da918e602add1e041239e3a | diff --git a/torch/_dynamo/variables/torch.py b/torch/_dynamo/variables/torch.py
--- a/torch/_dynamo/variables/torch.py
+++ b/torch/_dynamo/variables/torch.py
@@ -481,9 +481,34 @@ def get_state_from_generator():
if self.value == torch._C._nn.scaled_dot_product_attention:
# See:[Note] SDPA_flash's meta function returns incorrect Philox seed and offset
# in pytorch/torch/_meta_registrations.py
- fake_query = args[0].as_proxy().node.meta["example_value"]
- fake_key = args[1].as_proxy().node.meta["example_value"]
- fake_value = args[2].as_proxy().node.meta["example_value"]
+ all_kwargs = kwargs.copy()
+ all_kwargs.update(
+ dict(
+ zip(
+ (
+ "query",
+ "key",
+ "value",
+ "attn_mask",
+ "dropout_p",
+ "is_causal",
+ ),
+ args,
+ )
+ )
+ )
+ fake_query = all_kwargs["query"].as_proxy().node.meta["example_value"]
+ fake_key = all_kwargs["key"].as_proxy().node.meta["example_value"]
+ fake_value = all_kwargs["value"].as_proxy().node.meta["example_value"]
+ fake_mask = all_kwargs.get("attn_mask")
+ if isinstance(fake_mask, TensorVariable):
+ fake_mask = fake_mask.as_proxy().node.meta["example_value"]
+ else:
+ fake_mask = None
+ dropout_p = kwargs.get("dropout_p")
+ dropout_p = dropout_p.value if dropout_p is not None else 0.0
+ is_causal = kwargs.get("is_causal")
+ is_causal = is_causal.value if is_causal is not None else False
# We look through the stack to find a cuda autocast context
# If we do we will convert the fake tensors to torch.float16
is_cuda_autocast_context = False
@@ -502,15 +527,10 @@ def get_state_from_generator():
fake_value = fake_value.clone().to(amp_dtype)
backend_choice = torch._fused_sdp_choice(
- fake_query, fake_key, fake_value
+ fake_query, fake_key, fake_value, fake_mask, dropout_p, is_causal
)
if backend_choice == torch.backends.cuda.SDPBackend.FLASH_ATTENTION:
- dropout_p = kwargs.get("dropout_p")
- # Lets see if they passed it in as not an arg
- if len(args) >= 5:
- dropout_p = args[4]
-
- if dropout_p is not None and dropout_p.value != 0.0:
+ if dropout_p is not None and dropout_p != 0.0:
unimplemented(
"FlashAttention with dropout is not supported in cuda graphs"
)
| diff --git a/test/dynamo/test_dynamic_shapes.py b/test/dynamo/test_dynamic_shapes.py
--- a/test/dynamo/test_dynamic_shapes.py
+++ b/test/dynamo/test_dynamic_shapes.py
@@ -60,6 +60,11 @@ def make_dynamic_cls(cls):
# Cannot call sizes() on tensor with symbolic sizes/strides
)
+unittest.expectedFailure(
+ DynamicShapesMiscTests.test_parsing_sdpa_dynamic_shapes
+ # Cannot call sizes() on tensor with symbolic sizes/strides
+)
+
# DynamicShapesSubGraphTests
unittest.expectedFailure(
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -3145,6 +3145,53 @@ def forward(self, query, key, value):
self.assertEqual(compiled.device.index, 0)
self.assertEqual(compiled.dtype, torch.float16)
+ @unittest.skipIf(
+ not PLATFORM_SUPPORTS_FUSED_SDPA or not SM80OrLater,
+ "Can't run fused SDPA on this platform",
+ )
+ def test_parsing_sdpa(self):
+ class MyModule(torch.nn.Module):
+ def forward(self, query, key, value):
+ out = F.scaled_dot_product_attention(query, key, value, None, 0, True)
+ out = F.scaled_dot_product_attention(
+ query=query,
+ key=key,
+ value=value,
+ attn_mask=None,
+ dropout_p=0,
+ is_causal=True,
+ )
+ out = F.scaled_dot_product_attention(
+ query,
+ key=key,
+ value=value,
+ attn_mask=None,
+ dropout_p=0,
+ is_causal=True,
+ )
+ out = F.scaled_dot_product_attention(
+ query, key, value, None, dropout_p=0, is_causal=True
+ )
+ return out
+
+ device = "cuda"
+ dtype = torch.float16
+ seq_len_q = 1
+ seq_len_k = 1
+ head_dim = 8
+ query = torch.ones(
+ 1, 8, seq_len_q, head_dim, device=device, dtype=dtype, requires_grad=True
+ )
+ key = torch.ones(
+ 1, 8, seq_len_k, head_dim, device=device, dtype=dtype, requires_grad=True
+ )
+ value = torch.ones(
+ 1, 8, seq_len_k, head_dim, device=device, dtype=dtype, requires_grad=True
+ )
+ module = MyModule()
+ opt_mod = torch._dynamo.optimize("inductor")(module)
+ opt_mod(query, key, value)
+
def test_autocast_cpu(self):
class MyModule(torch.nn.Module):
def forward(self, x):
| Strange “IndexError” when compiling
### 🐛 Describe the bug
I ran into a strange error where the compiler failed to identify/match the signature of the function.
```python
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, n_embd=768, bias=False):
super().__init__()
self.n_head = 6
self.embd = nn.Embedding(50257, 768)
self.c_attn = nn.Linear(in_features=n_embd, out_features=3 * n_embd, bias=bias)
self.dropout = 0
def forward(self, x):
x = self.embd(x)
(B, T, C) = x.size()
q, k, v = self.c_attn(x).chunk(chunks=3, dim=-1)
k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
y = torch.nn.functional.scaled_dot_product_attention(
query=q, key=k, value=v, attn_mask=None, dropout_p=self.dropout, is_causal=True
)
return y
```
The forward pass works correctly:
```python
model = Model().cuda()
batch_size = 2
x = torch.randint(0, 50257, (batch_size, 1024)).cuda()
y = model(x)
```
However, if I compile the model:
```python
model = torch.compile(model)
_ = model(x)
```
The compiler throws an indexing error (see below for the full error message).
Strangely enough, the compiler works correctly when I remove the parameter names from the function call (i.e. “query/key/value”):
```python
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, n_embd=768, bias=False):
super().__init__()
self.n_head = 6
self.embd = nn.Embedding(50257, 768)
self.c_attn = nn.Linear(in_features=n_embd, out_features=3 * n_embd, bias=bias)
self.dropout = 0
def forward(self, x):
x = self.embd(x)
(B, T, C) = x.size()
q, k, v = self.c_attn(x).chunk(chunks=3, dim=-1)
k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
y = torch.nn.functional.scaled_dot_product_attention(
q, k, v, attn_mask=None, dropout_p=self.dropout, is_causal=True
)
return y
```
```python
model = Model().cuda()
batch_size = 2
x = torch.randint(0, 50257, (batch_size, 1024)).cuda()
model = torch.compile(model)
_ = model(x)
y = model(x)
```
Posted this on the PyTorch Forum too:
* [Strange “IndexError” when compiling](https://discuss.pytorch.org/t/strange-indexerror-when-compiling/173191)
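The traceback below points at `torch/_dynamo/variables/torch.py`, which looks up `args[0]`/`args[1]`/`args[2]` for the query/key/value tensors, so a keyword-only call leaves `args` empty. A minimal sketch of the normalization that the patch in this record applies (illustrative only, not the actual dynamo source):
```python
def normalize_sdpa_call(args, kwargs):
    # Map positional arguments onto their parameter names so the handler can
    # work uniformly, whether the user wrote f(q, k, v, ...) or f(query=q, ...).
    names = ("query", "key", "value", "attn_mask", "dropout_p", "is_causal")
    all_kwargs = dict(kwargs)
    all_kwargs.update(zip(names, args))
    return all_kwargs

# Example: a keyword-only call no longer needs args[0]/args[1]/args[2].
normalized = normalize_sdpa_call((), {"query": "q", "key": "k", "value": "v"})
assert normalized["query"] == "q"
```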
## Full Error Message
```python
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py:324, in _compile(code, globals, locals, builtins, compiler_fn, one_graph, export, hooks, frame)
323 try:
--> 324 out_code = transform_code_object(code, transform)
325 orig_code_map[out_code] = code
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/bytecode_transformation.py:445, in transform_code_object(code, transformations, safe)
443 propagate_line_nums(instructions)
--> 445 transformations(instructions, code_options)
446 return clean_and_assemble_instructions(instructions, keys, code_options)[1]
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py:311, in _compile.<locals>.transform(instructions, code_options)
299 tracer = InstructionTranslator(
300 instructions,
301 code,
(...)
309 mutated_closure_cell_contents,
310 )
--> 311 tracer.run()
312 output = tracer.output
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:1738, in InstructionTranslator.run(self)
1737 _step_logger()(logging.INFO, f"torchdynamo start tracing {self.f_code.co_name}")
-> 1738 super().run()
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:588, in InstructionTranslatorBase.run(self)
584 self.output.push_tx(self)
585 while (
586 self.instruction_pointer is not None
587 and not self.output.should_exit
--> 588 and self.step()
589 ):
590 pass
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:552, in InstructionTranslatorBase.step(self)
551 unimplemented(f"missing: {inst.opname}")
--> 552 getattr(self, inst.opname)(inst)
554 return inst.opname != "RETURN_VALUE"
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:342, in break_graph_if_unsupported.<locals>.decorator.<locals>.wrapper(self, inst)
341 try:
--> 342 return inner_fn(self, inst)
343 except Unsupported as excp:
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:1026, in InstructionTranslatorBase.CALL_FUNCTION_KW(self, inst)
1025 assert len(kwargs) == len(argnames)
-> 1026 self.call_function(fn, args, kwargs)
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:486, in InstructionTranslatorBase.call_function(self, fn, args, kwargs)
485 raise AssertionError(f"Attempt to trace forbidden callable {inner_fn}")
--> 486 self.push(fn.call_function(self, args, kwargs))
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/variables/torch.py:484, in TorchVariable.call_function(self, tx, args, kwargs)
481 if self.value == torch._C._nn.scaled_dot_product_attention:
482 # See:[Note] SDPA_flash's meta function returns incorrect Philox seed and offset
483 # in pytorch/torch/_meta_registrations.py
--> 484 fake_query = args[0].as_proxy().node.meta["example_value"]
485 fake_key = args[1].as_proxy().node.meta["example_value"]
IndexError: list index out of range
from user code:
File "/tmp/ipykernel_24294/2653966783.py", line 19, in forward
y = torch.nn.functional.scaled_dot_product_attention(
Set torch._dynamo.config.verbose=True for more information
You can suppress this exception and fall back to eager by setting:
torch._dynamo.config.suppress_errors = True
The above exception was the direct cause of the following exception:
InternalTorchDynamoError Traceback (most recent call last)
Cell In[5], line 2
1 model = torch.compile(model)
----> 2 _ = model(x)
File /opt/conda/lib/python3.10/site-packages/torch/nn/modules/module.py:1501, in Module._call_impl(self, *args, **kwargs)
1496 # If we don't have any hooks, we want to skip the rest of the logic in
1497 # this function, and just call forward.
1498 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1499 or _global_backward_pre_hooks or _global_backward_hooks
1500 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501 return forward_call(*args, **kwargs)
1502 # Do not call functions when jit is used
1503 full_backward_hooks, non_full_backward_hooks = [], []
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py:82, in OptimizedModule.forward(self, *args, **kwargs)
81 def forward(self, *args, **kwargs):
---> 82 return self.dynamo_ctx(self._orig_mod.forward)(*args, **kwargs)
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py:209, in _TorchDynamoContext.__call__.<locals>._fn(*args, **kwargs)
207 dynamic_ctx.__enter__()
208 try:
--> 209 return fn(*args, **kwargs)
210 finally:
211 set_eval_frame(prior)
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py:337, in catch_errors_wrapper.<locals>.catch_errors(frame, cache_size)
334 return hijacked_callback(frame, cache_size, hooks)
336 with compile_lock:
--> 337 return callback(frame, cache_size, hooks)
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py:404, in convert_frame.<locals>._convert_frame(frame, cache_size, hooks)
402 counters["frames"]["total"] += 1
403 try:
--> 404 result = inner_convert(frame, cache_size, hooks)
405 counters["frames"]["ok"] += 1
406 return result
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py:104, in wrap_convert_context.<locals>._fn(*args, **kwargs)
102 torch.fx.graph_module._forward_from_src = fx_forward_from_src_skip_result
103 try:
--> 104 return fn(*args, **kwargs)
105 finally:
106 torch._C._set_grad_enabled(prior_grad_mode)
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py:262, in convert_frame_assert.<locals>._convert_frame_assert(frame, cache_size, hooks)
259 global initial_grad_state
260 initial_grad_state = torch.is_grad_enabled()
--> 262 return _compile(
263 frame.f_code,
264 frame.f_globals,
265 frame.f_locals,
266 frame.f_builtins,
267 compiler_fn,
268 one_graph,
269 export,
270 hooks,
271 frame,
272 )
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/utils.py:163, in dynamo_timed.<locals>.dynamo_timed_inner.<locals>.time_wrapper(*args, **kwargs)
161 compilation_metrics[key] = []
162 t0 = time.time()
--> 163 r = func(*args, **kwargs)
164 time_spent = time.time() - t0
165 # print(f"Dynamo timer: key={key}, latency={latency:.2f} sec")
File /opt/conda/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py:394, in _compile(code, globals, locals, builtins, compiler_fn, one_graph, export, hooks, frame)
392 except Exception as e:
393 exception_handler(e, code, frame)
--> 394 raise InternalTorchDynamoError() from e
InternalTorchDynamoError:
```
### Versions
Collecting environment information...
PyTorch version: 2.0.0.dev20230220+cu118
Is debug build: False
CUDA used to build PyTorch: 11.8
ROCM used to build PyTorch: N/A
OS: Ubuntu 22.04.1 LTS (x86_64)
GCC version: (Ubuntu 11.3.0-1ubuntu1~22.04) 11.3.0
Clang version: Could not collect
CMake version: version 3.25.0
Libc version: glibc-2.35
Python version: 3.10.9 (main, Jan 11 2023, 15:21:40) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-5.15.0-60-generic-x86_64-with-glibc2.35
Is CUDA available: True
CUDA runtime version: 11.8.89
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration: GPU 0: NVIDIA RTX A6000
Nvidia driver version: 525.60.13
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 46 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 36
On-line CPU(s) list: 0-35
Vendor ID: GenuineIntel
Model name: Intel(R) Core(TM) i9-10980XE CPU @ 3.00GHz
CPU family: 6
Model: 85
Thread(s) per core: 2
Core(s) per socket: 18
Socket(s): 1
Stepping: 7
CPU max MHz: 4800.0000
CPU min MHz: 1200.0000
BogoMIPS: 6000.00
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb cat_l3 cdp_l3 invpcid_single ssbd mba ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid cqm mpx rdt_a avx512f avx512dq rdseed adx smap clflushopt clwb intel_pt avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts hwp hwp_act_window hwp_epp hwp_pkg_req avx512_vnni md_clear flush_l1d arch_capabilities
Virtualization: VT-x
L1d cache: 576 KiB (18 instances)
L1i cache: 576 KiB (18 instances)
L2 cache: 18 MiB (18 instances)
L3 cache: 24.8 MiB (1 instance)
NUMA node(s): 1
NUMA node0 CPU(s): 0-35
Vulnerability Itlb multihit: KVM: Mitigation: VMX disabled
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Mitigation; Clear CPU buffers; SMT vulnerable
Vulnerability Retbleed: Mitigation; Enhanced IBRS
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Mitigation; TSX disabled
Versions of relevant libraries:
[pip3] intel-extension-for-pytorch==1.13.100
[pip3] numpy==1.24.1
[pip3] torch==2.0.0.dev20230220+cu118
[pip3] torchaudio==2.0.0.dev20230221+cu118
[pip3] torchvision==0.15.0.dev20230221+cu118
[conda] intel-extension-for-pytorch 1.13.100 pypi_0 pypi
[conda] numpy 1.24.1 pypi_0 pypi
[conda] torch 2.0.0.dev20230220+cu118 pypi_0 pypi
[conda] torchaudio 2.0.0.dev20230221+cu118 pypi_0 pypi
[conda] torchvision 0.15.0.dev20230221+cu118 pypi_0 pypi
cc @ezyang @soumith @msaroufim @wconstab @ngimel @bdhirsh
| cc @drisspg
Will take a look
I will push a fix
@drisspg thank you! | 2023-02-23T19:16:54 |
pytorch/pytorch | 95,840 | pytorch__pytorch-95840 | [
"95775"
] | c04134cdb136b4118b9dff5c187111f12a03e9c5 | diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -294,15 +294,6 @@ def __init__(self):
"""
)
- if config.triton.convolution != "aten":
- self.header.splice(
- """
- from torch._inductor.triton_ops.conv_perf_model import early_config_prune
- from torch._inductor.triton_ops.conv_perf_model import estimate_conv_time
- from torch._inductor.triton_ops.autotune import conv_heuristics
- """
- )
-
self.write_prefix()
for name, value in V.graph.constants.items():
diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py
--- a/torch/_inductor/config.py
+++ b/torch/_inductor/config.py
@@ -153,9 +153,6 @@ class triton:
# Synchronize after every kernel launch, to help pinpoint bugs
debug_sync_kernel = False
- # choose conv backend, "aten" or "triton"
- convolution = "aten"
-
# Always load full blocks (rather than broadcasting inside the block)
dense_indexing = False
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -2184,20 +2184,10 @@ def simplify_and_reorder(self):
for reads_name in body.reads_name2expr.keys()
]
priority_idx = []
- if config.triton.convolution == "aten":
- memory_addrs = [
- *body.reads_name2expr.values(),
- *body.writes_name2expr.values(),
- ]
- else:
- # prioritize reads layout/loop_ordering over writes
- if len(body.reads_name2expr.values()) > 0:
- memory_addrs = [*body.reads_name2expr.values()]
- else:
- memory_addrs = [*body.writes_name2expr.values()]
- for i, reads_buf in enumerate(reads_bufs):
- if isinstance(reads_buf, Convolution):
- priority_idx.append(i)
+ memory_addrs = [
+ *body.reads_name2expr.values(),
+ *body.writes_name2expr.values(),
+ ]
index_vars = []
reduce_vars = []
index_size = []
@@ -3140,12 +3130,8 @@ def create(
)
req_stride_order = get_stride_order(output.stride())
- if config.triton.convolution == "aten":
- weight = cls.require_stride_order(weight, req_stride_order)
- x = cls.require_stride_order(x, req_stride_order)
- else:
- x = cls.require_stride1(cls.realize_input(x))
- weight = cls.require_stride1(cls.realize_input(weight))
+ weight = cls.require_stride_order(weight, req_stride_order)
+ x = cls.require_stride_order(x, req_stride_order)
stride = tuple(stride_)
padding = tuple(padding_)
@@ -3163,7 +3149,7 @@ def create(
_, _, *kernel_size = weight_shape
# choose runtime kernel
- config_conv = config.triton.convolution
+ config_conv = "aten"
if (
config_conv == "aten"
or len(kernel_size) != 2 # triton conv only supports conv2d
@@ -3196,7 +3182,7 @@ def create(
)
# for conv2d or conv3d, prefer channels last format
- transform_x_layout = config.triton.convolution != "aten"
+ transform_x_layout = False
if kernel == "triton_ops.conv":
output_layout_str = "torch.channels_last"
else:
| Compatibility issue of torch._inductor.triton_ops.conv_perf_model and latest nightly triton
### 🐛 Describe the bug
In <https://github.com/openai/triton/commit/20100a7254e62efd0fced864b52a877c520fc38a#diff-e15e164b247309ce1b9a886812ae86b7043b4350af85f1b7cffe9a1f4dc45c05L361> (`python/src/triton.cc`, `init_triton_runtime()`), triton removed several attributes of `triton._C.libtriton.triton.runtime`, such as `cc` and `max_shared_memory`, which are used by `triton_ops/conv_perf_model` in inductor. This issue can be triggered by setting `torch._inductor.config.triton.convolution` to `"triton"` and compiling a model that contains a convolution.
Pinning triton to a version prior to this change (such as `2.0.0.dev20221030`) works around this issue.
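For reference, the compute capability that the old `_triton.runtime.cc(backend, device)` call returned can also be derived from the public torch API. The sketch below only illustrates that equivalence and is not the fix that eventually landed (the `triton` convolution config was removed instead of patching the perf model):
```python
import torch

def compute_capability(device=None):
    # e.g. (8, 0) on an A100 -> 80; assumed to match the integer form
    # that the conv perf model's pruning heuristic compares against.
    major, minor = torch.cuda.get_device_capability(device)
    return major * 10 + minor
```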
### Error logs
```
Traceback (most recent call last):
File "/home/username/pytorch2/test3.py", line 34, in <module>
print(model(torch.randn(1, 3, 224, 224).cuda()))
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py", line 82, in __call__
return self.dynamo_ctx(self._orig_mod.__call__)(*args, **kwargs)
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py", line 215, in _fn
return fn(*args, **kwargs)
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/username/pytorch2/test3.py", line 11, in forward
def forward(self, x):
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py", line 215, in _fn
return fn(*args, **kwargs)
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py", line 2821, in forward
return compiled_fn(full_args)
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py", line 1222, in g
return f(*args)
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py", line 2388, in debug_compiled_function
return compiled_function(*args)
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py", line 1900, in runtime_wrapper
all_outs = call_func_with_args(
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py", line 1247, in call_func_with_args
out = normalize_as_list(f(args))
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py", line 1222, in g
return f(*args)
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/autograd/function.py", line 506, in apply
return super().apply(*args, **kwargs) # type: ignore[misc]
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py", line 2153, in forward
fw_outs = call_func_with_args(
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py", line 1247, in call_func_with_args
out = normalize_as_list(f(args))
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_inductor/compile_fx.py", line 250, in run
return model(new_inputs)
File "/tmp/torchinductor_username/xf/cxfscyw26zjrfhp4tamvn7hmhsnr6qtiq232xccma7l6sdo3u7dr.py", line 90, in call
buf2 = triton_ops.conv(buf1, primals_1, None, (1, 1), (0, 0), (1, 1), False, (0, 0), 1)
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_inductor/triton_ops/conv.py", line 732, in forward
return _conv._call(
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_inductor/triton_ops/conv.py", line 606, in _call
_kernel_delta_x[grid](
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 75, in run
pruned_configs = self.prune_configs(kwargs)
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 95, in prune_configs
pruned_configs = self.early_config_prune(self.configs, self.nargs)
File "/staff/username/miniconda3/envs/pytorch2/lib/python3.10/site-packages/torch/_inductor/triton_ops/conv_perf_model.py", line 100, in early_config_prune
cc = _triton.runtime.cc(backend, device)
AttributeError: module 'triton._C.libtriton.triton.runtime' has no attribute 'cc'
```
### Minified repro
```python
# it seems that the minifier failed to generate minified code with `TORCHDYNAMO_REPRO_AFTER="aot"`
# so I will just put my MRE here.
import torch
import torch._dynamo
import torch._inductor.config
import logging
class ModuleTest2(torch.nn.Module):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.weight = torch.nn.Parameter(torch.randn(1, 3, 224, 224))
def forward(self, x):
x = x + torch.ones_like(x)
res = torch.nn.functional.conv2d(x, self.weight)
res = res + torch.ones_like(res)
return res
torch._dynamo.config.log_level = logging.DEBUG
torch._dynamo.config.output_code = True
torch._dynamo.config.verbose = True
torch._inductor.config.triton.convolution = "triton"
torch._inductor.config.debug = True
model = ModuleTest2().cuda()
model = torch.compile(model)
print(model)
print(model(torch.randn(1, 3, 224, 224).cuda()))
```
### Versions
```
Collecting environment information...
PyTorch version: 2.0.0.dev20230228
Is debug build: False
CUDA used to build PyTorch: 11.8
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.5 LTS (x86_64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0
Clang version: 15.0.1 (https://github.com/exaloop/llvm-project 55b0b8fa1c9f9082b535628fc9fa6313280c0b9a)
CMake version: version 3.25.2
Libc version: glibc-2.31
Python version: 3.10.9 (main, Jan 11 2023, 15:21:40) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-5.4.0-137-generic-x86_64-with-glibc2.31
Is CUDA available: True
CUDA runtime version: Could not collect
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration: GPU 0: NVIDIA A100-PCIE-40GB
Nvidia driver version: 465.19.01
cuDNN version: Probably one of the following:
/usr/lib/x86_64-linux-gnu/libcudnn.so.8.8.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.8.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.8.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.8.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.8.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.8.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.8.0
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
Address sizes: 46 bits physical, 57 bits virtual
CPU(s): 128
On-line CPU(s) list: 0-127
Thread(s) per core: 2
Core(s) per socket: 32
Socket(s): 2
NUMA node(s): 2
Vendor ID: GenuineIntel
CPU family: 6
Model: 106
Model name: Intel(R) Xeon(R) Platinum 8358 CPU @ 2.60GHz
Stepping: 6
Frequency boost: enabled
CPU MHz: 900.000
CPU max MHz: 3400.0000
CPU min MHz: 800.0000
BogoMIPS: 5200.00
Virtualization: VT-x
L1d cache: 3 MiB
L1i cache: 2 MiB
L2 cache: 80 MiB
L3 cache: 96 MiB
NUMA node0 CPU(s): 0-31,64-95
NUMA node1 CPU(s): 32-63,96-127
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Mitigation; Clear CPU buffers; SMT vulnerable
Vulnerability Retbleed: Not affected
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb cat_l3 invpcid_single ssbd mba ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid cqm rdt_a avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb intel_pt avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local wbnoinvd dtherm ida arat pln pts avx512vbmi umip pku ospke avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg tme avx512_vpopcntdq rdpid md_clear pconfig flush_l1d arch_capabilities
Versions of relevant libraries:
[pip3] numpy==1.23.5
[pip3] torch==2.0.0.dev20230228
[pip3] torchaudio==2.0.0.dev20230301
[pip3] torchvision==0.15.0.dev20230228
[conda] blas 1.0 mkl
[conda] mkl 2021.4.0 h06a4308_640
[conda] mkl-service 2.4.0 py310h7f8727e_0
[conda] mkl_fft 1.3.1 py310hd6ae3a3_0
[conda] mkl_random 1.2.2 py310h00e6091_0
[conda] numpy 1.23.5 py310hd5efca6_0
[conda] numpy-base 1.23.5 py310h8e6c178_0
[conda] pytorch 2.0.0.dev20230228 py3.10_cuda11.8_cudnn8.7.0_0 pytorch-nightly
[conda] pytorch-cuda 11.8 h7e8668a_3 pytorch-nightly
[conda] pytorch-mutex 1.0 cuda pytorch-nightly
[conda] torchaudio 2.0.0.dev20230301 py310_cu118 pytorch-nightly
[conda] torchvision 0.15.0.dev20230228 py310_cu118 pytorch-nightly
```
cc @ezyang @gchanan @zou3519 @soumith @msaroufim @wconstab @ngimel @bdhirsh
| Reproduced this error with v2.0.0 release candidates as well. The versions are below:
pytorch-triton 2.0.0+b8b470bc59
torch 2.0.0+cu117
torchaudio 2.0.0+cu117
torchvision 0.15.0+cu117
As @taoky noted, this bug will be triggered if the source code includes "torch._inductor.config.triton.convolution = "triton"".
I'm not sure whether this bug should block the release, i.e. is torch._inductor.config.triton.convolution = "triton" a commonly used configuration?
Would be good to get @ezyang and @albanD input here.
cc @seemethere @malfet @atalman
Removing `releng` label as it has nothing to do with release engineering activities.
As the name suggests, `torch._inductor` is an internal package, and so not all permutations of the tweaks available there are guaranteed to work. But if this can be triggered via `torch.compile` kwargs, then it is indeed a release-blocking issue.
cc: @jansel @ngimel
This will be fixed by https://github.com/pytorch/pytorch/pull/95556 for nightlies. For the release, I'd suggest just removing the flag.
https://github.com/pytorch/pytorch/pull/95840 and https://github.com/pytorch/pytorch/pull/95842 for removing the config. | 2023-03-02T00:09:02 |
|
pytorch/pytorch | 96,450 | pytorch__pytorch-96450 | [
"95671"
] | 9cfa076da885b7feffbffb8d7b7a184e448d2f2a | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1024,6 +1024,7 @@ def main():
'typing-extensions',
'sympy',
'networkx',
+ 'jinja2',
]
extras_require = {
| Manual Installation of Jinja (for wheel env) or networkx (for conda) Package is Required to Use Torch.Compile
### 🐛 Describe the bug
While testing v2.0.0 release candidates and nightlies, using
pip install torch torchvision torchAudio --index-url https://download.pytorch.org/whl/nightly/cu117
and the resnet18 code snippet in https://github.com/pytorch/pytorch/issues/95223,
the code will produce the following error (see the Error Logs section).
https://github.com/pytorch/pytorch/blob/b818b3fe1c1fa90529b8500cd8ef800bec8415e8/setup.py#L1035 tells us that jinja2 would only be installed if torch is installed like "pip install torch[dynamo]". However, our recommended installation command is usually without the "[dynamo]".
Should we resolve this issue? Or do we expect users to do an extra step of "pip install jinja2"?
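For illustration only, here is a hedged, minimal sketch of the packaging mechanics at play (placeholder names, not the real setup.py): a dependency listed only under `extras_require` is skipped by a plain `pip install`, which is why moving it into the base requirements, as the patch above does, fixes the default install.
```python
# Hypothetical, minimal setup() illustrating extras vs. base requirements.
from setuptools import setup

setup(
    name="example",
    install_requires=["sympy", "networkx", "jinja2"],  # pulled in by `pip install example`
    extras_require={"dynamo": ["jinja2"]},             # only pulled in by `pip install example[dynamo]`
)
```
Until that lands, the manual workaround implied by the question above would be an extra `pip install jinja2` (and `pip install networkx` for the conda case discussed below).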
cc @ezyang @gchanan @zou3519 @soumith @msaroufim @wconstab @ngimel @bdhirsh @malfet @ng
### Error logs
[2023-02-28 00:33:39,306] torch._inductor.graph: [ERROR] Error from lowering
Traceback (most recent call last):
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/graph.py", line 342, in call_function
out = lowerings[target](*args, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/lowering.py", line 226, in wrapped
out = decomp_fn(*args, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/kernel/mm.py", line 133, in tuned_addmm
mm_template.generate(
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/select_algorithm.py", line 349, in generate
assert self.template, "requires jinja2"
AssertionError: requires jinja2
Traceback (most recent call last):
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/graph.py", line 342, in call_function
out = lowerings[target](*args, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/lowering.py", line 226, in wrapped
out = decomp_fn(*args, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/kernel/mm.py", line 133, in tuned_addmm
mm_template.generate(
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/select_algorithm.py", line 349, in generate
assert self.template, "requires jinja2"
AssertionError: requires jinja2
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/output_graph.py", line 708, in call_user_compiler
compiled_fn = compiler_fn(gm, self.fake_example_inputs())
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/debug_utils.py", line 1055, in debug_wrapper
compiled_gm = compiler_fn(gm, example_inputs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/__init__.py", line 1393, in __call__
return compile_fx(model_, inputs_, config_patches=self.config)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/compile_fx.py", line 455, in compile_fx
return aot_autograd(
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/backends/common.py", line 48, in compiler_fn
cg = aot_module_simplified(gm, example_inputs, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_functorch/aot_autograd.py", line 2805, in aot_module_simplified
compiled_fn = create_aot_dispatcher_function(
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/utils.py", line 163, in time_wrapper
r = func(*args, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_functorch/aot_autograd.py", line 2498, in create_aot_dispatcher_function
compiled_fn = compiler_fn(flat_fn, fake_flat_args, aot_config)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_functorch/aot_autograd.py", line 1713, in aot_wrapper_dedupe
return compiler_fn(flat_fn, leaf_flat_args, aot_config)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_functorch/aot_autograd.py", line 2133, in aot_dispatch_autograd
compiled_fw_func = aot_config.fw_compiler(
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/utils.py", line 163, in time_wrapper
r = func(*args, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/compile_fx.py", line 430, in fw_compiler
return inner_compile(
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/debug_utils.py", line 595, in debug_wrapper
compiled_fn = compiler_fn(gm, example_inputs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/debug.py", line 239, in inner
return fn(*args, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/compile_fx.py", line 176, in compile_fx_inner
graph.run(*example_inputs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/utils.py", line 163, in time_wrapper
r = func(*args, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/graph.py", line 203, in run
return super().run(*args)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/fx/interpreter.py", line 136, in run
self.env[node] = self.run_node(node)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/graph.py", line 421, in run_node
result = super().run_node(n)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/fx/interpreter.py", line 177, in run_node
return getattr(self, n.op)(n.target, args, kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_inductor/graph.py", line 346, in call_function
raise LoweringException(e, target, args, kwargs) from e
torch._inductor.exc.LoweringException: AssertionError: requires jinja2
target: aten.addmm.default
args[0]: TensorBox(StorageBox(
InputBuffer(name='primals_62', layout=FixedLayout('cuda', torch.float32, size=[1000], stride=[1]))
))
args[1]: TensorBox(StorageBox(
ComputedBuffer(name='buf183', layout=FixedLayout('cuda', torch.float32, size=(16, 512), stride=[512, 1]), data=Pointwise(
'cuda',
torch.float32,
tmp0 = load(buf182, i1 + 512 * i0)
tmp1 = index_expr(49, torch.float32)
tmp2 = tmp0 / tmp1
return tmp2
,
ranges=(16, 512),
origins={view}
))
))
args[2]: TensorBox(
ReinterpretView(
StorageBox(
InputBuffer(name='primals_61', layout=FixedLayout('cuda', torch.float32, size=[1000, 512], stride=[512, 1]))
),
FixedLayout('cuda', torch.float32, size=[512, 1000], stride=[1, 512]),
origins=
)
)
While executing %addmm : [#users=1] = call_function[target=torch.ops.aten.addmm.default](args = (%primals_62, %view, %permute), kwargs = {})
Original traceback:
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torchvision/models/resnet.py", line 280, in _forward_impl
x = self.fc(x)
| File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torchvision/models/resnet.py", line 285, in forward
return self._forward_impl(x)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/weiwangmeta/rn18.py", line 10, in <module>
out = compiled_model(x)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/eval_frame.py", line 82, in __call__
return self.dynamo_ctx(self._orig_mod.__call__)(*args, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/eval_frame.py", line 215, in _fn
return fn(*args, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/eval_frame.py", line 343, in catch_errors
return callback(frame, cache_size, hooks)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/convert_frame.py", line 404, in _convert_frame
result = inner_convert(frame, cache_size, hooks)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/convert_frame.py", line 104, in _fn
return fn(*args, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/convert_frame.py", line 262, in _convert_frame_assert
return _compile(
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/utils.py", line 163, in time_wrapper
r = func(*args, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/convert_frame.py", line 324, in _compile
out_code = transform_code_object(code, transform)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/bytecode_transformation.py", line 530, in transform_code_object
transformations(instructions, code_options)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/convert_frame.py", line 311, in transform
tracer.run()
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 1862, in run
super().run()
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 619, in run
and self.step()
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 583, in step
getattr(self, inst.opname)(inst)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/symbolic_convert.py", line 1941, in RETURN_VALUE
self.output.compile_subgraph(
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/output_graph.py", line 555, in compile_subgraph
self.compile_and_call_fx_graph(tx, list(reversed(stack_values)), root)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/output_graph.py", line 626, in compile_and_call_fx_graph
compiled_fn = self.call_user_compiler(gm)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/utils.py", line 163, in time_wrapper
r = func(*args, **kwargs)
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torch/_dynamo/output_graph.py", line 713, in call_user_compiler
raise BackendCompilerFailed(self.compiler_fn, e) from e
torch._dynamo.exc.BackendCompilerFailed: debug_wrapper raised LoweringException: AssertionError: requires jinja2
target: aten.addmm.default
args[0]: TensorBox(StorageBox(
InputBuffer(name='primals_62', layout=FixedLayout('cuda', torch.float32, size=[1000], stride=[1]))
))
args[1]: TensorBox(StorageBox(
ComputedBuffer(name='buf183', layout=FixedLayout('cuda', torch.float32, size=(16, 512), stride=[512, 1]), data=Pointwise(
'cuda',
torch.float32,
tmp0 = load(buf182, i1 + 512 * i0)
tmp1 = index_expr(49, torch.float32)
tmp2 = tmp0 / tmp1
return tmp2
,
ranges=(16, 512),
origins={view}
))
))
args[2]: TensorBox(
ReinterpretView(
StorageBox(
InputBuffer(name='primals_61', layout=FixedLayout('cuda', torch.float32, size=[1000, 512], stride=[512, 1]))
),
FixedLayout('cuda', torch.float32, size=[512, 1000], stride=[1, 512]),
origins=
)
)
While executing %addmm : [#users=1] = call_function[target=torch.ops.aten.addmm.default](args = (%primals_62, %view, %permute), kwargs = {})
Original traceback:
File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torchvision/models/resnet.py", line 280, in _forward_impl
x = self.fc(x)
| File "/home/weiwangmeta/.conda/envs/clean-nightly-20230226/lib/python3.9/site-packages/torchvision/models/resnet.py", line 285, in forward
return self._forward_impl(x)
Set torch._dynamo.config.verbose=True for more information
You can suppress this exception and fall back to eager by setting:
torch._dynamo.config.suppress_errors = True
### Minified repro
conda create -n test_nightly python=3.10
conda activate test_nightly
python resnet18.py (where resnet18.py is the above code snippet)
### Versions
nightly up to 02/27/2023
v2.0.0-rc up to rc2 and present in release/2.0 top commits as well.
| pip install torch torchvision torchAudio --index-url https://download.pytorch.org/whl/test/cu117 does not seem to reproduce the issue, only pip install torch torchvision torchAudio --index-url https://download.pytorch.org/whl/nightly/cu117 can, not sure why.
For conda env:
conda create -n test_rc2_test_conda python=3.10
conda activate test_rc2_test_conda
conda install pytorch torchvision torchaudio pytorch-cuda=11.7 -c pytorch-test -c nvidia
python rn18.py
failed with:
torch._dynamo.exc.BackendCompilerFailed: debug_wrapper raised RuntimeError: Need **networkx** installed to perform smart recomputation heuristics
Verified that conda nightly binaries would fail in the same way (need networkx)
So below is what we got so far:
|       | nightly | RC2 (as of 02/28/2023) |
|:------|:--------|:-----------------------|
| wheel | **needs jinja2** | Ok |
| conda | needs **networkx** | needs **networkx** |
For conda nightly: after "pip install networkx", it fails similarly to the wheels (nightly)
torch._dynamo.exc.BackendCompilerFailed: debug_wrapper raised LoweringException: AssertionError: requires jinja2
For conda test channel (i.e. RC2), after "pip install networkx", the error disappeared (similar to the wheels case).
So the jinja2 issue might be specific to the master branch, while for conda it is indeed an RC2 issue. | 2023-03-09T21:34:43 |
|
pytorch/pytorch | 96,452 | pytorch__pytorch-96452 | [
"91516"
] | 9cfa076da885b7feffbffb8d7b7a184e448d2f2a | diff --git a/torch/utils/tensorboard/summary.py b/torch/utils/tensorboard/summary.py
--- a/torch/utils/tensorboard/summary.py
+++ b/torch/utils/tensorboard/summary.py
@@ -380,7 +380,7 @@ def make_histogram(values, bins, max_bins=None):
limits = new_limits
# Find the first and the last bin defining the support of the histogram:
- cum_counts = np.cumsum(np.greater(counts, 0, dtype=np.int32))
+ cum_counts = np.cumsum(np.greater(counts, 0))
start, end = np.searchsorted(cum_counts, [0, cum_counts[-1] - 1], side="right")
start = int(start)
end = int(end) + 1
| Tensorboard SummaryWriter `add_histogram` fails with NumPy 1.24+
### 🐛 Describe the bug
The use of `np.greater(counts, 0, dtype=np.int32)` in `add_histogram` was deprecated a while ago, and the deprecation expired in NumPy 1.24+:
> The `dtype=` argument to comparison ufuncs is now applied correctly. That
> means that only `bool` and `object` are valid values and `dtype=object` is
> enforced.
Source: https://numpy.org/doc/stable/release/1.24.0-notes.html#expired-deprecations
Minimal example:
```python
from torch.utils.tensorboard import SummaryWriter
import numpy as np
writer = SummaryWriter()
for i in range(10):
x = np.random.random(1000)
writer.add_histogram('distribution centers', x + i, i)
writer.close()
```
Error message with NumPy 1.24+:
```
Traceback (most recent call last):
File "main.py", line 6, in <module>
writer.add_histogram('distribution centers', x + i, i)
File "/home/johnson/venv/lib/python3.8/site-packages/torch/utils/tensorboard/writer.py", line 485, in add_histogram
histogram(tag, values, bins, max_bins=max_bins), global_step, walltime
File "/home/johnson/venv/lib/python3.8/site-packages/torch/utils/tensorboard/summary.py", line 358, in histogram
hist = make_histogram(values.astype(float), bins, max_bins)
File "/home/johnson/venv/lib/python3.8/site-packages/torch/utils/tensorboard/summary.py", line 386, in make_histogram
cum_counts = np.cumsum(np.greater(counts, 0, dtype=np.int32))
TypeError: No loop matching the specified signature and casting was found for ufunc greater
```
This error does not occur in NumPy 1.23.5. NumPy should have printed a deprecation warning about this:
```
DeprecationWarning: using `dtype=` in comparisons is only useful for `dtype=object` (and will do nothing for bool). This operation will fail in the future.
```
but I think PyTorch has suppressed the warning.
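As a hedged illustration (an editorial addition, not part of the original report) of why the patch above can simply drop the `dtype=` argument: the plain boolean comparison is enough, since `np.cumsum` promotes it to an integer dtype on its own.
```python
import numpy as np

counts = np.array([0, 3, 0, 5, 2])

# The old call, np.greater(counts, 0, dtype=np.int32), raises TypeError on NumPy 1.24+.
# Without dtype=, np.greater returns a bool array and np.cumsum promotes it for us.
cum_counts = np.cumsum(np.greater(counts, 0))
print(cum_counts)  # [0 1 1 2 3], with a platform-default integer dtype
```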
### Versions
```
Collecting environment information...
PyTorch version: 1.13.1+cu117
Is debug build: False
CUDA used to build PyTorch: 11.7
ROCM used to build PyTorch: N/A
OS: Ubuntu 18.04.6 LTS (x86_64)
GCC version: (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0
Clang version: 6.0.0-1ubuntu2 (tags/RELEASE_600/final)
CMake version: version 3.25.1
Libc version: glibc-2.27
Python version: 3.8.0 (default, Feb 25 2021, 22:10:10) [GCC 8.4.0] (64-bit runtime)
Python platform: Linux-5.4.0-42-generic-x86_64-with-glibc2.27
Is CUDA available: True
CUDA runtime version: Could not collect
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration: GPU 0: Quadro RTX 6000
Nvidia driver version: 520.61.05
cuDNN version: Probably one of the following:
/usr/local/cuda-11.7/targets/x86_64-linux/lib/libcudnn.so.8.4.1
/usr/local/cuda-11.7/targets/x86_64-linux/lib/libcudnn_adv_infer.so.8.4.1
/usr/local/cuda-11.7/targets/x86_64-linux/lib/libcudnn_adv_train.so.8.4.1
/usr/local/cuda-11.7/targets/x86_64-linux/lib/libcudnn_cnn_infer.so.8.4.1
/usr/local/cuda-11.7/targets/x86_64-linux/lib/libcudnn_cnn_train.so.8.4.1
/usr/local/cuda-11.7/targets/x86_64-linux/lib/libcudnn_ops_infer.so.8.4.1
/usr/local/cuda-11.7/targets/x86_64-linux/lib/libcudnn_ops_train.so.8.4.1
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] mypy==0.931
[pip3] mypy-extensions==0.4.3
[pip3] numpy==1.24.1
[pip3] torch==1.13.1
[conda] blas 1.0 mkl
[conda] mkl 2020.1 217
[conda] mkl-service 2.3.0 py38he904b0f_0
[conda] mkl_fft 1.1.0 py38h23d657b_0
[conda] mkl_random 1.1.1 py38h0573a6f_0
[conda] numpy 1.18.5 py38ha1c710e_0
[conda] numpy-base 1.18.5 py38hde5b4d6_0
[conda] numpydoc 1.1.0 py_0
```
| (related: there are a few more numpy 1.24 compatibility issues at https://github.com/pytorch/pytorch/issues/91329) | 2023-03-09T21:47:17 |
|
pytorch/pytorch | 96,462 | pytorch__pytorch-96462 | [
"95958"
] | c9913cf66fda1ccec49a70ffb0304401eadd9d52 | diff --git a/torch/_decomp/__init__.py b/torch/_decomp/__init__.py
--- a/torch/_decomp/__init__.py
+++ b/torch/_decomp/__init__.py
@@ -308,6 +308,7 @@ def core_aten_decompositions() -> Dict[OpOverload, Callable]:
aten.trace,
aten.transpose.int,
aten.tril.default,
+ aten.triu.default,
aten.unfold,
aten.unfold_backward,
aten.upsample_bilinear2d,
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -1505,30 +1505,6 @@ def fn(index):
)
-@register_lowering(aten.triu)
-def triu(x, diagonal=0):
- x_loader = x.make_loader()
- dtype = x.get_dtype()
-
- def inner_fn(index):
- *_, i, j = index
- return ops.where(
- ops.ge(
- ops.index_expr(j - i - diagonal, torch.int32),
- ops.constant(0, torch.int32),
- ),
- x_loader(index),
- ops.constant(0, dtype),
- )
-
- return Pointwise.create(
- device=x.get_device(),
- dtype=dtype,
- inner_fn=inner_fn,
- ranges=list(x.get_size()),
- )
-
-
@register_lowering(aten.select_scatter, type_promotion_kind=None)
def select_scatter(x, src, dim: int, index: int):
assert x.get_dtype() == src.get_dtype()
| diff --git a/test/inductor/test_torchinductor_opinfo.py b/test/inductor/test_torchinductor_opinfo.py
--- a/test/inductor/test_torchinductor_opinfo.py
+++ b/test/inductor/test_torchinductor_opinfo.py
@@ -448,6 +448,7 @@ def wrapper_set_seed(op, *args, **kwargs):
"mT",
"mH",
"rsub",
+ "triu",
}
| compile() breaks TransformerEncoder mask dtype check
### 🐛 Describe the bug
Running `compile()` on a function that builds an attention mask and passes a tensor through a `TransformerEncoder` results in the dtype of the mask getting changed, triggering an exception:
```
import torch
import torch.nn as nn
def transformer_encoder(inputs, input_seq_len):
encoder_layer = nn.TransformerEncoderLayer(
d_model=16,
nhead=2,
dim_feedforward=32,
dropout=0.1,
activation='relu',
batch_first=True,
)
encoder_norm = nn.LayerNorm(16)
encoder = nn.TransformerEncoder(
encoder_layer, 2, encoder_norm
)
src_mask = torch.ones(inputs.shape[1], inputs.shape[1], dtype=torch.bool).triu_(diagonal=1)
padding_mask = (torch.arange(inputs.shape[1])[None, :].cpu() >= input_seq_len[:, None])
print(src_mask,src_mask.dtype, )
print(padding_mask,padding_mask.dtype, )
return encoder(inputs,
mask=src_mask,
src_key_padding_mask=padding_mask,
)
transformer_encoder_opt = torch.compile(transformer_encoder)
inputs = torch.randn(2,3,16)
input_seq_len = torch.tensor([3,2])
transformer_encoder_opt(inputs, input_seq_len)
```
Output:
```
tensor([[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]) torch.bool
tensor([[False, False, False],
[False, False, True]]) torch.bool
```
```
File /mnt/ext/phd/research/pt2/lib/python3.9/site-packages/torch/nn/functional.py:5000, in _canonical_mask(mask, mask_name, other_type, other_name, target_type, check_other)
4998 _mask_is_float = torch.is_floating_point(mask)
4999 if _mask_dtype != torch.bool and not _mask_is_float:
-> 5000 raise AssertionError(
5001 f"only bool and floating types of {mask_name} are supported")
5002 if check_other and other_type is not None:
5003 if _mask_dtype != other_type:
AssertionError: only bool and floating types of attn_mask are supported
```
Output for the non-compiled function:
```
tensor([[False, True, True],
[False, False, True],
[False, False, False]]) torch.bool
tensor([[False, False, False],
[False, False, True]]) torch.bool
```
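A minimal sketch (an editorial addition, not taken from the report) that isolates the mask construction so the eager and compiled dtypes can be compared directly; a dtype mismatch here is exactly what the `_canonical_mask` assertion shown above rejects.
```python
import torch

def make_src_mask(n: int) -> torch.Tensor:
    # Same construction as src_mask in the repro above.
    return torch.ones(n, n, dtype=torch.bool).triu_(diagonal=1)

eager_mask = make_src_mask(3)
compiled_mask = torch.compile(make_src_mask)(3)
print(eager_mask.dtype, compiled_mask.dtype)  # both should be torch.bool on a fixed build
```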
### Error logs
_No response_
### Minified repro
_No response_
### Versions
Collecting environment information...
PyTorch version: 2.1.0.dev20230302+cu118
Is debug build: False
CUDA used to build PyTorch: 11.8
ROCM used to build PyTorch: N/A
OS: Ubuntu 22.04.2 LTS (x86_64)
GCC version: (Ubuntu 11.3.0-1ubuntu1~22.04) 11.3.0
Clang version: Could not collect
CMake version: version 3.25.0
Libc version: glibc-2.35
Python version: 3.9.13 (main, Oct 13 2022, 21:15:33) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-5.15.0-60-generic-x86_64-with-glibc2.35
Is CUDA available: True
CUDA runtime version: Could not collect
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration: GPU 0: NVIDIA GeForce RTX 2080 Ti
Nvidia driver version: 525.78.01
cuDNN version: Probably one of the following:
/usr/lib/x86_64-linux-gnu/libcudnn.so.7.6.5
/usr/lib/x86_64-linux-gnu/libcudnn.so.8.2.2
/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.2.2
/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.2.2
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.2.2
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.2.2
/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.2.2
/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.2.2
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 39 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 8
On-line CPU(s) list: 0-7
Vendor ID: GenuineIntel
Model name: Intel(R) Core(TM) i7-9700 CPU @ 3.00GHz
CPU family: 6
Model: 158
Thread(s) per core: 1
Core(s) per socket: 8
Socket(s): 1
Stepping: 13
CPU max MHz: 4700.0000
CPU min MHz: 800.0000
BogoMIPS: 6000.00
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_
perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_
timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2
erms invpcid mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d arch_capabilities
Virtualisation: VT-x
L1d cache: 256 KiB (8 instances)
L1i cache: 256 KiB (8 instances)
L2 cache: 2 MiB (8 instances)
L3 cache: 12 MiB (1 instance)
NUMA node(s): 1
NUMA node0 CPU(s): 0-7
Vulnerability Itlb multihit: KVM: Mitigation: VMX disabled
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Mitigation; Clear CPU buffers; SMT disabled
Vulnerability Retbleed: Mitigation; Enhanced IBRS
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence
Vulnerability Srbds: Mitigation; Microcode
Vulnerability Tsx async abort: Mitigation; TSX disabled
Versions of relevant libraries:
[pip3] numpy==1.24.1
[pip3] pytorch-triton==2.0.0+b8b470bc59
[pip3] torch==2.1.0.dev20230302+cu118
[pip3] torchaudio==2.0.0.dev20230302+cu118
[pip3] torchvision==0.15.0.dev20230302+cu118
[conda] numpy 1.21.5 pypi_0 pypi
[conda] torch 1.7.1 pypi_0 pypi
[conda] torchseq 3.0.0a0 dev_0 <develop>
cc @ezyang @soumith @msaroufim @wconstab @ngimel @bdhirsh
| 2023-03-09T22:37:17 |
|
pytorch/pytorch | 97,885 | pytorch__pytorch-97885 | [
"95781"
] | c263bd43e8e8502d4726643bc6fd046f0130ac0e | diff --git a/torch/optim/adam.py b/torch/optim/adam.py
--- a/torch/optim/adam.py
+++ b/torch/optim/adam.py
@@ -529,7 +529,6 @@ def _fused_adam(
capturable: bool, # Needed for consistency.
differentiable: bool,
) -> None:
- grouped_tensors = _group_tensors_by_device_and_dtype([params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
grad_scale_dict = {grad_scale.device: grad_scale} if grad_scale is not None else None
found_inf_dict = {found_inf.device: found_inf} if found_inf is not None else None
grouped_tensors = _group_tensors_by_device_and_dtype([params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
@@ -542,16 +541,15 @@ def _fused_adam(
device_max_exp_avg_sqs,
device_state_steps,
) = grouped_tensors[(device, dtype)]
- if grad_scale is not None and found_inf is not None:
+ device_grad_scale, device_found_inf = None, None
+ if grad_scale is not None:
if device not in grad_scale_dict:
grad_scale_dict[device] = grad_scale.to(device, non_blocking=True)
+ device_grad_scale = grad_scale_dict[device]
+ if found_inf is not None:
if found_inf not in found_inf_dict:
found_inf_dict[device] = found_inf.to(device, non_blocking=True)
- device_grad_scale = grad_scale_dict[device]
device_found_inf = found_inf_dict[device]
- else:
- device_grad_scale = None
- device_found_inf = None
torch._foreach_add_(device_state_steps, 1)
torch._fused_adam_(
device_params,
diff --git a/torch/optim/adamw.py b/torch/optim/adamw.py
--- a/torch/optim/adamw.py
+++ b/torch/optim/adamw.py
@@ -602,16 +602,15 @@ def _fused_adamw(
device_max_exp_avg_sqs,
device_state_steps,
) = grouped_tensors[(device, dtype)]
- if grad_scale is not None and found_inf is not None:
+ device_grad_scale, device_found_inf = None, None
+ if grad_scale is not None:
if device not in grad_scale_dict:
grad_scale_dict[device] = grad_scale.to(device, non_blocking=True)
+ device_grad_scale = grad_scale_dict[device]
+ if found_inf is not None:
if found_inf not in found_inf_dict:
found_inf_dict[device] = found_inf.to(device, non_blocking=True)
- device_grad_scale = grad_scale_dict[device]
device_found_inf = found_inf_dict[device]
- else:
- device_grad_scale = None
- device_found_inf = None
torch._foreach_add_(device_state_steps, 1)
torch._fused_adamw_(
device_params,
| diff --git a/test/test_optim.py b/test/test_optim.py
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -1619,11 +1619,13 @@ def test_functional_fused_optimizer_with_foundinf(self):
from torch.optim import adam, adamw
num_tensors = 5
- for functional_optim, amsgrad in itertools.product((adam.adam, adamw.adamw), (False, True)):
- params, grads, exp_avgs, exp_avg_sqs = [[torch.ones((1,), device="cuda") for _ in range(num_tensors)] for _ in range(4)]
+ for functional_optim, amsgrad, no_grad_scale in itertools.product((adam.adam, adamw.adamw), (False, True), (False, True)):
+ params, grads, exp_avgs, exp_avg_sqs = [
+ [torch.ones((1,), device="cuda") for _ in range(num_tensors)] for _ in range(4)]
+ prev_params = [t.clone().detach() for t in params]
max_exp_avg_sqs = [torch.ones((1,), device="cuda") for _ in range(num_tensors)] if amsgrad else []
state_steps = [torch.ones((1,), dtype=torch.float32, device="cuda") for _ in range(num_tensors)]
- grad_scale = torch.ones((1,), dtype=torch.float32, device="cuda")
+ grad_scale = None if no_grad_scale else torch.ones((1,), dtype=torch.float32, device="cuda")
found_inf = torch.ones((1,), dtype=torch.float32, device="cuda")
functional_optim(
@@ -1654,6 +1656,7 @@ def test_functional_fused_optimizer_with_foundinf(self):
for _ in range(num_tensors)
],
)
+ self.assertEqual(params, prev_params)
def test_empty_grad(self):
optimizers = [
| Fused AdamW causes NaN loss
### 🐛 Describe the bug
There already has been an extended discussion of this issue over on the nanoGPT repository:
- karpathy/nanoGPT#167
I have been encouraged to submit a separate bug report here, since the issue seems to lie with pytorch. I have prepared a preconfigured [fork of nanoGPT](https://github.com/oddlama/nanoGPT_nan) where I provided my configuration and part of my dataset which causes the issue to appear. It now immediately produces NaNs after the first training step, but only if fused adam is in use.
The settings are basically the nanoGPT shakespeare configuration, but using a blocksize of 343 and vocab size of 2006. The data is quite sparse (only 1.3% is not 0, which significantly accelerates getting to the issue). I've included 1000 batches of my own real data so you will have the same conditions as I do. I'm training on a single 2080Ti; please look at a diff of the latest commit in this repository to see what I changed exactly - it's not much.
```bash
> git clone https://github.com/oddlama/nanoGPT_nan
> cd nanoGPT_nan
```
```bash
> python train.py config/nan.py --allow_fused=True
[...]
number of parameters: 11.39M
using fused AdamW: True
compiling the model... (takes a ~minute)
step 0: train loss 6.6010, val loss 6.6009
[2023-03-01 15:19:43,485] torch._inductor.utils: [WARNING] using triton random, expect difference from eager
/projects/venv/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py:1251: UserWarning: Your compiler for AOTAutograd is returning a a function that doesn't take boxed arguments. Please wrap it with functorch.compile.make_boxed_func or handle the boxed arguments yourself. See https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670 for rationale.
warnings.warn(
iter 0: loss 6.6793, time 22316.39ms, mfu -100.00%
iter 1: loss nan, time 3249.91ms, mfu -100.00%
```
```bash
> python train.py config/nan.py --allow_fused=False
[...]
number of parameters: 11.39M
using fused AdamW: False
compiling the model... (takes a ~minute)
step 0: train loss 6.6010, val loss 6.6009
[2023-03-01 15:20:26,110] torch._inductor.utils: [WARNING] using triton random, expect difference from eager
/projects/nanoGPT_nan/venv/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py:1251: UserWarning: Your compiler for AOTAutograd is returning a a function that doesn't take boxed arguments. Please wrap it with functorch.compile.make_boxed_func or handle the boxed arguments yourself. See https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670 for rationale.
warnings.warn(
iter 0: loss 6.6793, time 22286.14ms, mfu -100.00%
iter 1: loss 6.6811, time 3534.44ms, mfu -100.00%
iter 2: loss 6.6770, time 3365.06ms, mfu -100.00%
iter 3: loss 4.7208, time 3366.52ms, mfu -100.00%
iter 4: loss 3.0927, time 3365.22ms, mfu -100.00%
iter 5: loss 2.0694, time 3362.80ms, mfu 6.51%
iter 6: loss 1.4752, time 3360.81ms, mfu 6.51%
# continues to work fine
```
Hope this helps.
P.S.:
Can anyone explain what the warning means? Doesn't seem to be related to the issue, but is it a user error that should be fixed?
```
[2023-03-01 15:19:43,485] torch._inductor.utils: [WARNING] using triton random, expect difference from eager
/projects/venv/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py:1251: UserWarning: Your compiler for AOTAutograd is returning a a function that doesn't take boxed arguments. Please wrap it with functorch.compile.make_boxed_func or handle the boxed arguments yourself. See https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670 for rationale.
warnings.warn(
```
### Versions
```
Collecting environment information...
PyTorch version: 2.0.0.dev20230220+cu118
Is debug build: False
CUDA used to build PyTorch: 11.8
ROCM used to build PyTorch: N/A
OS: Arch Linux (x86_64)
GCC version: (GCC) 12.2.1 20230201
Clang version: 15.0.7
CMake version: version 3.25.0
Libc version: glibc-2.37
Python version: 3.10.9 (main, Dec 19 2022, 17:35:49) [GCC 12.2.0] (64-bit runtime)
Python platform: Linux-5.15.79.1-microsoft-standard-WSL2-x86_64-with-glibc2.37
Is CUDA available: True
CUDA runtime version: 11.8.89
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration: GPU 0: NVIDIA GeForce RTX 2080 Ti
Nvidia driver version: 516.94
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 48 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 24
On-line CPU(s) list: 0-23
Vendor ID: AuthenticAMD
Model name: AMD Ryzen 9 5900X 12-Core Processor
CPU family: 25
Model: 33
Thread(s) per core: 2
Core(s) per socket: 12
Socket(s): 1
Stepping: 0
BogoMIPS: 7400.01
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl tsc_reliable nonstop_tsc cpuid extd_apicid pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 erms rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves clzero xsaveerptr arat umip vaes vpclmulqdq rdpid fsrm
Hypervisor vendor: Microsoft
Virtualization type: full
L1d cache: 384 KiB (12 instances)
L1i cache: 384 KiB (12 instances)
L2 cache: 6 MiB (12 instances)
L3 cache: 32 MiB (1 instance)
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Retbleed: Not affected
Vulnerability Spec store bypass: Vulnerable
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Retpolines, IBPB conditional, IBRS_FW, STIBP conditional, RSB filling, PBRSB-eIBRS Not affected
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Versions of relevant libraries:
[pip3] mypy-extensions==1.0.0
[pip3] numpy==1.24.1
[pip3] pytorch-triton==2.0.0+d54c04abe2
[pip3] torch==2.0.0.dev20230220+cu118
[pip3] torchaudio==2.0.0.dev20230223+cu118
[pip3] torchinfo==1.7.2
[pip3] torchsummary==1.5.1
[pip3] torchvision==0.15.0.dev20230223+cu118
[conda] Could not collect
```
cc @ezyang @gchanan @zou3519 @vincentqb @jbschlosser @albanD @janeyx99
| cc @crcrpar This is pretty concerning--do you have any thoughts? Also, we most likely want to add AMP x fused adam(w) unit tests as it looks to be a common source of wrongness.
I'll look into it for sure.
though just so you know, there's a case of fused ones with grad scalers: https://github.com/pytorch/pytorch/blob/e5a959a2d49e627905c2ad58314a85c9e2f2c1df/test/test_cuda.py#L2462
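For context, a minimal, self-contained sketch (an editorial illustration with placeholder shapes, not code from this issue) of the AMP + fused AdamW pattern such a unit test would exercise:
```python
import torch

model = torch.nn.Linear(16, 16).cuda()
opt = torch.optim.AdamW(model.parameters(), lr=1e-3, fused=True)
scaler = torch.cuda.amp.GradScaler()

for _ in range(3):
    opt.zero_grad(set_to_none=True)
    with torch.autocast("cuda", dtype=torch.float16):
        loss = model(torch.randn(4, 16, device="cuda")).float().pow(2).mean()
    scaler.scale(loss).backward()
    scaler.step(opt)   # with fused=True, the scaler hands inf/nan tracking to the fused kernel
    scaler.update()
```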
> Can anyone explain what the warning means? Doesn't seem to be related to the issue, but is it a user error that should be fixed?
```
[2023-03-01 15:19:43,485] torch._inductor.utils: [WARNING] using triton random, expect difference from eager
/projects/venv/lib/python3.10/site-packages/torch/_functorch/aot_autograd.py:1251: UserWarning: Your compiler for AOTAutograd is returning a a function that doesn't take boxed arguments. Please wrap it with functorch.compile.make_boxed_func or handle the boxed arguments yourself. See https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670 for rationale.
warnings.warn(
```
@oddlama nope - the first warning is just letting you know that the results of operators involving randomness won't give identical results across eager mode and `torch.compile`. The second warning is a bug that you can ignore - it was recently fixed at https://github.com/pytorch/pytorch/pull/95521 | 2023-03-29T15:50:29 |
pytorch/pytorch | 97,886 | pytorch__pytorch-97886 | [
"96755"
] | c263bd43e8e8502d4726643bc6fd046f0130ac0e | diff --git a/torch/cuda/amp/grad_scaler.py b/torch/cuda/amp/grad_scaler.py
--- a/torch/cuda/amp/grad_scaler.py
+++ b/torch/cuda/amp/grad_scaler.py
@@ -286,7 +286,9 @@ def unscale_(self, optimizer):
def _maybe_opt_step(self, optimizer, optimizer_state, *args, **kwargs):
retval = None
- if not sum(v.item() for v in optimizer_state["found_inf_per_device"].values()):
+ # NOTE(crcrpar): Gradients could be inf/nan after `GradScaler.unscale_(optimizer)`
+ # especially when gradient clipping is applied.
+ if not sum(v.item() for v in self._check_inf_per_device(optimizer).values()):
retval = optimizer.step(*args, **kwargs)
return retval
| diff --git a/test/test_cuda.py b/test/test_cuda.py
--- a/test/test_cuda.py
+++ b/test/test_cuda.py
@@ -2512,6 +2512,43 @@ def _grad_scaling_autocast_fused_optimizers(self, optimizer_ctor, optimizer_kwar
actual = actual.squeeze()
self.assertEqual(state_control[k], actual)
+ def test_grads_invalidated_between_unscale_and_step(self):
+ for optimizer_ctor, optimizer_kwargs in product(
+ (torch.optim.Adam, torch.optim.AdamW),
+ (
+ {"foreach": False, "fused": False},
+ {"foreach": True, "fused": False},
+ {"foreach": False, "fused": True},
+ ),
+ ):
+ with self.subTest(optimizer=optimizer_ctor, optimizer_kwargs=optimizer_kwargs):
+ self._test_grads_invalidated_between_unscale_and_step(optimizer_ctor, optimizer_kwargs)
+
+ def _test_grads_invalidated_between_unscale_and_step(self, optimizer_ctor, optimizer_kwargs):
+ model, _, optimizer, _, data, loss_fn, _ = self._create_scaling_case(
+ optimizer_ctor=optimizer_ctor, optimizer_kwargs=optimizer_kwargs,
+ )
+ scaler = torch.cuda.amp.GradScaler(init_scale=128.0)
+
+ orig_params = [p.clone().detach() for p in model.parameters()]
+
+ for i, (input, target) in enumerate(data):
+ optimizer.zero_grad()
+ with torch.autocast('cuda', enabled=True):
+ output = model(input)
+ loss = loss_fn(output, target)
+ scaler.scale(loss).backward()
+ scaler.unscale_(optimizer)
+
+ # deliberately break grads
+ for j, param in enumerate(model.parameters()):
+ param.grad.copy_(torch.inf if j % 2 else torch.nan)
+
+ scaler.step(optimizer)
+ scaler.update()
+
+ self.assertEqual(orig_params, list(model.parameters()))
+
def test_grad_scaling_clipping(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
| Fused AdamW has worse loss than Apex and unfused AdamW for fp16/AMP
### 🐛 Describe the bug
@stas00 recently benchmarked on an 80GB A100 and discovered fused AdamW is worse than both the Apex equivalent and the default implementation. Moreover, fused AdamW is not more performant in this case.
| Variation | Train samples per second | Diff % | Train loss |
|:--------------------------|----------:|-------:|--------:|
| --optim adamw_torch_fused | 389.41 | 0 | 2.66 |
| --optim adamw_torch | 389.37 | 0 | 2.55 |
| --optim adamw_apex_fused | 399.27 | 3 | 2.53 |
Repro:
See @stas00's comment below!
The batch size (--per_device_train_batch_size) can be adjusted to be smaller.
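The NOTE in the patch above points at gradients turning inf/nan between `unscale_` and `step`; here is a minimal, self-contained sketch (an assumption about the training loop, not taken from the benchmark) of the unscale-then-clip sequence that the re-check guards:
```python
import torch

model = torch.nn.Linear(8, 8).cuda()
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, fused=True)
scaler = torch.cuda.amp.GradScaler()

with torch.autocast("cuda", dtype=torch.float16):
    loss = model(torch.randn(4, 8, device="cuda")).pow(2).mean()
scaler.scale(loss).backward()
scaler.unscale_(optimizer)                               # grads are now unscaled fp32
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)  # clipping can spread inf/nan across grads
scaler.step(optimizer)                                   # the fix re-checks for inf/nan right here
scaler.update()
```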
cc @ezyang @gchanan @zou3519 @vincentqb @jbschlosser @albanD @crcrpar
### Versions
Master/latest nightly
| 2023-03-29T15:50:32 |
|
pytorch/pytorch | 98,381 | pytorch__pytorch-98381 | [
"97720"
] | 3aae95a884fac86412ddbba0e8f78bf7e11ffdda | diff --git a/torch/_dynamo/variables/nn_module.py b/torch/_dynamo/variables/nn_module.py
--- a/torch/_dynamo/variables/nn_module.py
+++ b/torch/_dynamo/variables/nn_module.py
@@ -399,12 +399,28 @@ def gen_source(source, name):
)
elif name == "__getitem__":
assert not kwargs and len(args) == 1
- assert type(module).__getitem__ in (
+ builtin_supported = (
torch.nn.ModuleDict.__getitem__,
torch.nn.ModuleList.__getitem__,
torch.nn.ParameterList.__getitem__,
torch.nn.Sequential.__getitem__,
- ), typestr(module)
+ )
+
+ if type(module).__getitem__ not in builtin_supported:
+ assert isinstance(args[0], variables.ConstantVariable), typestr(args[0])
+ key = args[0].as_python_constant()
+ assert isinstance(key, (str, int))
+ fn = getattr(module, name).__func__
+
+ assert isinstance(fn, types.FunctionType)
+
+ src = AttrSource(AttrSource(self.source, name), "__func__")
+ return tx.inline_user_function_return(
+ variables.UserFunctionVariable(fn, source=src, **options),
+ [self] + list(args),
+ kwargs,
+ )
+
assert self.source
if isinstance(args[0], SliceVariable):
| diff --git a/test/dynamo/test_modules.py b/test/dynamo/test_modules.py
--- a/test/dynamo/test_modules.py
+++ b/test/dynamo/test_modules.py
@@ -295,6 +295,31 @@ def forward(self, x):
return x
+class CustomGetItemModuleList(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.layers = torch.nn.ModuleList(
+ [
+ torch.nn.Linear(10, 10),
+ torch.nn.ReLU(),
+ torch.nn.Linear(10, 10),
+ torch.nn.ReLU(),
+ ]
+ )
+
+ def __getitem__(self, idx: int):
+ return self.layers[idx]
+
+ def __len__(self) -> int:
+ return len(self.layers)
+
+ def forward(self, x):
+ for i in range(len(self)):
+ x = self[i](x)
+
+ return x
+
+
class ModuleDict(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -310,6 +335,23 @@ def forward(self, x):
return x
+class CustomGetItemModuleDict(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.layers = torch.nn.ModuleDict(
+ {
+ "0": torch.nn.Linear(10, 10),
+ }
+ )
+
+ def __getitem__(self, key: str) -> torch.nn.Module:
+ return self.layers[key]
+
+ def forward(self, x):
+ x = self["0"](x)
+ return x
+
+
class TensorList(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -728,7 +770,9 @@ class NNModuleTests(torch._dynamo.test_case.TestCase):
test_cfgmod = make_test(CfgModule())
test_stringmember = make_test(StringMember())
test_modulelist = make_test(ModuleList())
+ test_modulelist = make_test(CustomGetItemModuleList())
test_moduledict = make_test(ModuleDict())
+ test_moduledict = make_test(CustomGetItemModuleDict())
test_super1 = make_test(SuperModule())
test_super2 = make_test(SuperModule2())
test_super_class_method = make_test(SuperChildCallsClassMethod())
| TorchDynamo does not support modules with custom __getitem__ implementation
### 🐛 Describe the bug
When writing a custom module which implements the `__getitem__` method, dynamo fails with an AssertionError.
For more context on this (e.g. why a custom module is used for this), see:
and
https://github.com/ludwig-ai/ludwig/blob/master/ludwig/features/feature_utils.py#L150
cc @ezyang @soumith @msaroufim @wconstab @ngimel @bdhirsh @voznesenskym @penguinwu @anijain2305 @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @Xia-Weiwen @wenzhe-nrv @jiayisunx @desertfire @jansel @agunapal
### Error logs
```
Traceback (most recent call last):
File "/Users/mreso/Projects/playground/ludwig/moduledict.py", line 31, in <module>
cfoo(1)
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py", line 82, in forward
return self.dynamo_ctx(self._orig_mod.forward)(*args, **kwargs)
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py", line 209, in _fn
return fn(*args, **kwargs)
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py", line 337, in catch_errors
return callback(frame, cache_size, hooks)
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py", line 404, in _convert_frame
result = inner_convert(frame, cache_size, hooks)
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py", line 104, in _fn
return fn(*args, **kwargs)
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py", line 262, in _convert_frame_assert
return _compile(
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/utils.py", line 163, in time_wrapper
r = func(*args, **kwargs)
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py", line 324, in _compile
out_code = transform_code_object(code, transform)
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/bytecode_transformation.py", line 445, in transform_code_object
transformations(instructions, code_options)
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py", line 311, in transform
tracer.run()
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py", line 1726, in run
super().run()
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py", line 576, in run
and self.step()
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py", line 540, in step
getattr(self, inst.opname)(inst)
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py", line 342, in wrapper
return inner_fn(self, inst)
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py", line 148, in impl
self.push(fn_var.call_function(self, self.popn(nargs), {}))
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/variables/builtin.py", line 566, in call_function
result = handler(tx, *args, **kwargs)
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/variables/builtin.py", line 790, in call_getitem
return args[0].call_method(tx, "__getitem__", args[1:], kwargs)
File "/Users/mreso/miniconda3/envs/pt2/lib/python3.10/site-packages/torch/_dynamo/variables/nn_module.py", line 402, in call_method
assert type(module).__getitem__ in (
AssertionError: FeatureDict
from user code:
File "/Users/mreso/Projects/playground/ludwig/moduledict.py", line 25, in forward
return self.ld["key1"](x)
Set torch._dynamo.config.verbose=True for more information
You can suppress this exception and fall back to eager by setting:
torch._dynamo.config.suppress_errors = True
```
### Minified repro
```python
import torch
class FeatureDict(torch.nn.Module):
#Torch ModuleDict wrapper that permits keys with any name.
def __init__(self):
super().__init__()
self.module_dict = torch.nn.ModuleDict()
def __getitem__(self, key) -> torch.nn.Module:
return self.module_dict[key]
def __setitem__(self, key: str, module: torch.nn.Module) -> None:
self.module_dict[key] = module
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.ld = FeatureDict()
self.ld["key1"] = torch.nn.Linear(1,1)
def forward(self, x):
return self.ld["key1"](x)
foo = Foo()
cfoo = torch.compile(foo)
cfoo(1)
```
### Versions
```
Collecting environment information...
PyTorch version: 2.0.0+cpu
Is debug build: False
CUDA used to build PyTorch: Could not collect
ROCM used to build PyTorch: N/A
OS: Ubuntu 18.04.6 LTS (x86_64)
GCC version: (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0
Clang version: Could not collect
CMake version: version 3.21.3
Libc version: glibc-2.27
Python version: 3.9.16 (main, Mar 8 2023, 14:00:05) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-5.4.0-1097-aws-x86_64-with-glibc2.27
Is CUDA available: False
CUDA runtime version: 10.0.130
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: Could not collect
Nvidia driver version: Could not collect
cuDNN version: Probably one of the following:
/usr/local/cuda-10.1/targets/x86_64-linux/lib/libcudnn.so.7.6.5
/usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn.so.7.6.5
/usr/local/cuda-11.0/targets/x86_64-linux/lib/libcudnn.so.8.0.5
/usr/local/cuda-11.0/targets/x86_64-linux/lib/libcudnn_adv_infer.so.8.0.5
/usr/local/cuda-11.0/targets/x86_64-linux/lib/libcudnn_adv_train.so.8.0.5
/usr/local/cuda-11.0/targets/x86_64-linux/lib/libcudnn_cnn_infer.so.8.0.5
/usr/local/cuda-11.0/targets/x86_64-linux/lib/libcudnn_cnn_train.so.8.0.5
/usr/local/cuda-11.0/targets/x86_64-linux/lib/libcudnn_ops_infer.so.8.0.5
/usr/local/cuda-11.0/targets/x86_64-linux/lib/libcudnn_ops_train.so.8.0.5
/usr/local/cuda-11.1/targets/x86_64-linux/lib/libcudnn.so.8.0.5
/usr/local/cuda-11.1/targets/x86_64-linux/lib/libcudnn_adv_infer.so.8.0.5
/usr/local/cuda-11.1/targets/x86_64-linux/lib/libcudnn_adv_train.so.8.0.5
/usr/local/cuda-11.1/targets/x86_64-linux/lib/libcudnn_cnn_infer.so.8.0.5
/usr/local/cuda-11.1/targets/x86_64-linux/lib/libcudnn_cnn_train.so.8.0.5
/usr/local/cuda-11.1/targets/x86_64-linux/lib/libcudnn_ops_infer.so.8.0.5
/usr/local/cuda-11.1/targets/x86_64-linux/lib/libcudnn_ops_train.so.8.0.5
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
CPU(s): 16
On-line CPU(s) list: 0-15
Thread(s) per core: 2
Core(s) per socket: 8
Socket(s): 1
NUMA node(s): 1
Vendor ID: GenuineIntel
CPU family: 6
Model: 85
Model name: Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz
Stepping: 7
CPU MHz: 3600.005
BogoMIPS: 5999.99
Hypervisor vendor: KVM
Virtualization type: full
L1d cache: 32K
L1i cache: 32K
L2 cache: 1024K
L3 cache: 36608K
NUMA node0 CPU(s): 0-15
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single pti fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves ida arat pku ospke
Versions of relevant libraries:
[pip3] numpy==1.24.1
[pip3] torch==2.0.0+cpu
[pip3] torchaudio==2.0.1+cpu
[pip3] torchvision==0.15.1+cpu
[conda] numpy 1.24.1 pypi_0 pypi
[conda] torch 2.0.0+cpu pypi_0 pypi
[conda] torchaudio 2.0.1+cpu pypi_0 pypi
[conda] torchvision 0.15.1+cpu pypi_0 pypi
```
| We can support them by inlining the ```__getitem__``` function, but need to update guards, seems non-trivial. For a quick fix, we should remove the assertion and fallback to python for unsupported cases. | 2023-04-05T03:28:20 |
pytorch/pytorch | 98,523 | pytorch__pytorch-98523 | [
"96975"
] | 8963d77163e50ec5156972b6a867cb51e7751585 | diff --git a/torch/utils/data/dataloader.py b/torch/utils/data/dataloader.py
--- a/torch/utils/data/dataloader.py
+++ b/torch/utils/data/dataloader.py
@@ -34,7 +34,6 @@
Dataset,)
from torch.utils.data.datapipes.datapipe import _IterDataPipeSerializationWrapper, _MapDataPipeSerializationWrapper
-from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES
from . import _utils
@@ -104,6 +103,7 @@ def _get_distributed_settings():
def _sharding_worker_init_fn(worker_init_fn, world_size, rank_id, worker_id):
+ global_worker_id = worker_id
info = torch.utils.data.get_worker_info()
assert info is not None
total_workers = info.num_workers
@@ -111,10 +111,10 @@ def _sharding_worker_init_fn(worker_init_fn, world_size, rank_id, worker_id):
assert isinstance(datapipe, (IterDataPipe, MapDataPipe))
# To distribute elements across distributed process evenly, we should shard data on distributed
# processes first then shard on worker processes
- torch.utils.data.graph_settings.apply_sharding(
- datapipe, world_size, rank_id, sharding_group=SHARDING_PRIORITIES.DISTRIBUTED)
- torch.utils.data.graph_settings.apply_sharding(
- datapipe, total_workers, worker_id, sharding_group=SHARDING_PRIORITIES.MULTIPROCESSING)
+ total_workers *= world_size
+ global_worker_id = global_worker_id * world_size + rank_id
+ # For BC, use default SHARDING_PRIORITIES
+ torch.utils.data.graph_settings.apply_sharding(datapipe, total_workers, global_worker_id)
if worker_init_fn is not None:
worker_init_fn(worker_id)
@@ -666,8 +666,8 @@ def __init__(self, loader):
# Adds forward compatibilities so classic DataLoader can work with DataPipes:
# Taking care of distributed sharding
if isinstance(self._dataset, (IterDataPipe, MapDataPipe)):
- torch.utils.data.graph_settings.apply_sharding(
- self._dataset, self._world_size, self._rank, sharding_group=SHARDING_PRIORITIES.DISTRIBUTED)
+ # For BC, use default SHARDING_PRIORITIES
+ torch.utils.data.graph_settings.apply_sharding(self._dataset, self._world_size, self._rank)
self._dataset_fetcher = _DatasetKind.create_fetcher(
self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last)
diff --git a/torch/utils/data/datapipes/iter/sharding.py b/torch/utils/data/datapipes/iter/sharding.py
--- a/torch/utils/data/datapipes/iter/sharding.py
+++ b/torch/utils/data/datapipes/iter/sharding.py
@@ -13,15 +13,18 @@
"ShardingFilterIterDataPipe",
]
+
class SHARDING_PRIORITIES(IntEnum):
DEFAULT = 1
DISTRIBUTED = 2
MULTIPROCESSING = 3
+
class _ShardingIterDataPipe(IterDataPipe):
- def apply_sharding(self, num_of_instances, instance_id, sharding_group):
+ def apply_sharding(self, num_of_instances: int, instance_id: int, sharding_group: SHARDING_PRIORITIES):
raise NotImplementedError
+
@functional_datapipe('sharding_filter')
class ShardingFilterIterDataPipe(_ShardingIterDataPipe):
r"""
diff --git a/torch/utils/data/graph_settings.py b/torch/utils/data/graph_settings.py
--- a/torch/utils/data/graph_settings.py
+++ b/torch/utils/data/graph_settings.py
@@ -35,6 +35,14 @@ def _get_all_graph_pipes_helper(graph: DataPipeGraph, id_cache: Set[int]) -> Lis
return results
+def _is_sharding_datapipe(datapipe: DataPipe) -> bool:
+ if isinstance(datapipe, _ShardingIterDataPipe):
+ return True
+ if hasattr(datapipe, "apply_sharding") and inspect.ismethod(datapipe.apply_sharding):
+ return True
+ return False
+
+
def apply_sharding(datapipe: DataPipe,
num_of_instances: int,
instance_id: int,
@@ -48,11 +56,16 @@ def apply_sharding(datapipe: DataPipe,
def _helper(graph, prev_applied=None):
for _, (dp, sub_graph) in graph.items():
applied = None
- if isinstance(dp, _ShardingIterDataPipe):
+ if _is_sharding_datapipe(dp):
if prev_applied is not None:
raise RuntimeError("Sharding twice on a single pipeline is likely unintended and will cause data loss. "
f"Sharding already applied to {prev_applied} while trying to apply to {dp}")
- dp.apply_sharding(num_of_instances, instance_id, sharding_group=sharding_group)
+ # For BC, only provide sharding_group if accepted
+ sig = inspect.signature(dp.apply_sharding)
+ if len(sig.parameters) < 3:
+ dp.apply_sharding(num_of_instances, instance_id)
+ else:
+ dp.apply_sharding(num_of_instances, instance_id, sharding_group=sharding_group)
applied = dp
if applied is None:
applied = prev_applied
| diff --git a/test/test_datapipe.py b/test/test_datapipe.py
--- a/test/test_datapipe.py
+++ b/test/test_datapipe.py
@@ -2656,6 +2656,23 @@ def test_circular_serialization_with_dill(self):
})}
self.assertEqual(res2, exp_res_2)
+
+class CustomShardingIterDataPipe(IterDataPipe):
+ def __init__(self, dp):
+ self.dp = dp
+ self.num_of_instances = 1
+ self.instance_id = 0
+
+ def apply_sharding(self, num_of_instances, instance_id):
+ self.num_of_instances = num_of_instances
+ self.instance_id = instance_id
+
+ def __iter__(self):
+ for i, d in enumerate(self.dp):
+ if i % self.num_of_instances == self.instance_id:
+ yield d
+
+
class TestSharding(TestCase):
def _get_pipeline(self):
@@ -2762,6 +2779,12 @@ def construct_sharded_pipe():
with self.assertRaises(Exception):
dp.apply_sharding(2, 1, sharding_group=LEGACY_SHARDING_PRIORITIES.DEFAULT)
+ def test_legacy_custom_sharding(self):
+ dp = self._get_pipeline()
+ sharded_dp = CustomShardingIterDataPipe(dp)
+ torch.utils.data.graph_settings.apply_sharding(sharded_dp, 3, 1)
+ items = list(sharded_dp)
+ self.assertEqual([1, 20], items)
def test_sharding_length(self):
numbers_dp = dp.iter.IterableWrapper(range(13))
@@ -2796,6 +2819,19 @@ def test_old_dataloader(self):
self.assertEqual(sorted(expected), sorted(items))
+ def test_legacy_custom_sharding_with_old_dataloader(self):
+ dp0 = self._get_pipeline()
+ expected = list(dp0)
+
+ dp0 = self._get_pipeline()
+ dp0 = CustomShardingIterDataPipe(dp0)
+ dl = DataLoader(dp0, batch_size=1, shuffle=False, num_workers=2)
+ items = []
+ for i in dl:
+ items.append(i)
+
+ self.assertEqual(sorted(expected), sorted(items))
+
def test_multi_sharding(self):
# Raises Error when multiple sharding on the single branch
numbers_dp = dp.iter.IterableWrapper(range(13))
| (bug)(torch2.0/datapipes) Potentially backwards incompatible change with DataLoader and `is_shardable` Datapipes
### 🐛 Describe the bug
[PR-94095](https://github.com/pytorch/pytorch/pull/94095) breaks BC for custom `DataPipe` implementations that implement `is_shardable()` and `apply_sharding()`.
The BC-breaking change is that the `apply_sharding()` method is no longer called on the datapipe by the `DataLoader` unless the datapipe is a subclass of `_ShardingIterDataPipe`. Therefore, with custom datapipes that implement `is_shardable()` and `apply_sharding()`, you end up getting duplicate elements in the batch when using a DataLoader with `num_workers > 1`.
(please correct me if I'm wrong but) I didn't see any mention of this BC breaking change in the release notes for:
1. `torchdata-0.6.0`: https://github.com/pytorch/data/releases
2. `torch-2.0`: https://github.com/pytorch/pytorch/releases/tag/v2.0.0
Since the PR introduces a "private" class (`_ShardingIterDataPipe`), it is unnatural for existing custom sharded datapipes to subclass it. I believe that the intent of the PR was to have users use `ShardingFilterIterDataPipe` (e.g. chain the source pipe with `.sharding_filter()`), but this may be infeasible due to:
1. Custom sharding strategies where `sharding_filter`'s modulo-n style sharding does not suffice.
2. The need to change all the call-sites so that callers additionally call `my_datapipe().sharding_filter()`, as `my_datapipe` is no longer implicitly sharded.
I'm wondering what the best next steps are. Subclass `_ShardingIterDataPipe`? Or fix the BC break? (We could use a `@runtime_checkable` protocol such as the following.)
```python
from typing import Protocol, runtime_checkable


@runtime_checkable
class Shardable(Protocol):
    """
    Indicates whether a datapipe is shardable. A shardable datapipe distributes (shards) the data records
    amongst the dataloader worker instances in such a way that the same record is not processed twice
    when the dataloader ``num_workers > 1``.

    .. note::
        A shardable datapipe must implement BOTH the indicator method ``is_shardable()``
        AND the sharding logic ``apply_sharding(num_instances, instance_id)``.

    Use with ``isinstance()`` to check whether a datapipe is shardable

    .. doctest::

        >>> from torchdata.datapipes.iter import IterableWrapper
        >>> from ape.datapipes import Shardable
        >>> dp = IterableWrapper([1,2,3]).sharding_filter()
        >>> isinstance(dp, Shardable)
        True
    """

    def is_shardable(self) -> bool:  # pragma: no cover
        ...

    def apply_sharding(
        self, num_instances: int, instance_id: int
    ) -> None:  # pragma: no cover
        ...
```
### Versions
ubuntu@ip-10-2-87-95% python collect_env.py ~/tmp
Collecting environment information...
PyTorch version: 2.0.0+cu117
Is debug build: False
CUDA used to build PyTorch: 11.7
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.5 LTS (x86_64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0
Clang version: Could not collect
CMake version: version 3.26.0
Libc version: glibc-2.31
Python version: 3.9.15 (main, Feb 13 2023, 21:22:03) [GCC 9.4.0] (64-bit runtime)
Python platform: Linux-5.15.0-1015-aws-x86_64-with-glibc2.31
Is CUDA available: True
CUDA runtime version: 11.6.124
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration:
GPU 0: NVIDIA A100-SXM4-40GB
GPU 1: NVIDIA A100-SXM4-40GB
GPU 2: NVIDIA A100-SXM4-40GB
GPU 3: NVIDIA A100-SXM4-40GB
GPU 4: NVIDIA A100-SXM4-40GB
GPU 5: NVIDIA A100-SXM4-40GB
GPU 6: NVIDIA A100-SXM4-40GB
GPU 7: NVIDIA A100-SXM4-40GB
Nvidia driver version: 510.73.08
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
Address sizes: 46 bits physical, 48 bits virtual
CPU(s): 96
On-line CPU(s) list: 0-95
Thread(s) per core: 2
Core(s) per socket: 24
Socket(s): 2
NUMA node(s): 2
Vendor ID: GenuineIntel
CPU family: 6
Model: 85
Model name: Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz
Stepping: 7
CPU MHz: 2999.998
BogoMIPS: 5999.99
Hypervisor vendor: KVM
Virtualization type: full
L1d cache: 1.5 MiB
L1i cache: 1.5 MiB
L2 cache: 48 MiB
L3 cache: 71.5 MiB
NUMA node0 CPU(s): 0-23,48-71
NUMA node1 CPU(s): 24-47,72-95
Vulnerability Itlb multihit: KVM: Mitigation: VMX unsupported
Vulnerability L1tf: Mitigation; PTE Inversion
Vulnerability Mds: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
Vulnerability Meltdown: Mitigation; PTI
Vulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
Vulnerability Spec store bypass: Vulnerable
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Retpolines, STIBP disabled, RSB filling
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq monitor ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single pti fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves ida arat pku ospke
Versions of relevant libraries:
[pip3] mypy==0.991
[pip3] mypy-boto3-batch==1.26.34
[pip3] mypy-boto3-ec2==1.26.71
[pip3] mypy-boto3-iam==1.26.62
[pip3] mypy-boto3-s3==1.26.62
[pip3] mypy-extensions==1.0.0
[pip3] numpy==1.24.2
[pip3] torch==2.0.0
[pip3] torch-tb-profiler==0.4.1
[pip3] torchdata==0.6.0
[pip3] torchmetrics==0.11.1
[pip3] torchsnapshot-nightly==2023.2.17
[pip3] torchx==0.5.0.dev0
[pip3] triton==2.0.0
[conda] No relevant packages
cc @ezyang @gchanan @zou3519 @SsnL @VitalyFedyunin @ejguan @NivekT @dzhulgakov
| cc: @wenleix
@kiukchung To unblock you ASAP, would you please subclass from `_ShardingIterDataPipe` for now? I plan to make `apply_sharding` work with any `DataPipe` that has an `apply_sharding` method.
And, yes, we should include it in TorchData's release notes as a BC-breaking change. cc: @NivekT
Thanks @ejguan! That's exactly what I've done to get myself unblocked for now. But I found a few more interesting quirks when I subclassed `_ShardingIterDataPipe` that might be worth documenting in the instructions to "migrate" over to the new shardable paradigm:
1. The `is_shardable()` method is now gone (things will work if you forget to remove it, but for cleanliness you should remove it).
2. The `apply_sharding()` method introduces a new `sharding_group` parameter to the method signature. At first glance, the method signature is BC since `sharding_group` defaults to `SHARDING_PRIORITIES.DEFAULT`. But the way `DataLoader` calls `apply_sharding()` breaks BC if you don't modify the `apply_sharding()` logic. Prior to 2.0, the DataLoader would call `apply_sharding()` once if `is_shardable` attribute existed and returned `True`. And the `num_instances` and `instance_id` would already account for process group rank and world_size. In 2.0 the dataloader calls `apply_sharding()` twice and what gets passed as the `instance_id` and `num_workers` parameter is dependent on the `sharding_group`:
a. If `sharding_group=MULTIPROCESSING`, then `instance_id` and `num_instances` are the dataloader worker's rank and the number of dataloader workers (on the host).
b. If `sharding_group=DISTRIBUTED`, then `instance_id` and `num_instances` are the process group's (trainer's) rank and world size.
So in essence if my prior implementation looked like this:
```python
def apply_sharding(self, num_instances, instance_id):
    self.global_world_size = num_instances  # world_size * num_dataloader_workers
    self.global_rank = instance_id  # dataloader_worker_rank * num_dataloader_workers + rank * world_size

# sharding would be done by
shard_idx % self.global_world_size == self.global_rank
```
It would NOT be enough to simply add the `sharding_group` param, but rather it has to change to account for the two calls that the dataloader worker will make:
```python
def apply_sharding(self, num_instances, instance_id, sharding_group=SHARDING_PRIORITIES.DEFAULT):
    if sharding_group == SHARDING_PRIORITIES.MULTIPROCESSING:
        self.instance_id = instance_id
        self.num_instances = num_instances
    elif sharding_group == SHARDING_PRIORITIES.DISTRIBUTED:
        self.rank = instance_id
        self.world_size = num_instances
    else:  # SHARDING_PRIORITIES.DEFAULT
        self.global_world_size = num_instances
        self.global_rank = instance_id

# sharding would be done by
if self.global_world_size == -1:
    self.global_world_size = self.num_instances * self.world_size
if self.global_rank == -1:
    self.global_rank = self.instance_id * self.num_instances + self.rank * self.world_size
shard_idx % self.global_world_size == self.global_rank
```
@kiukchung Sorry for the inconvenience; we should have realized it when the change was made in https://github.com/pytorch/pytorch/pull/88424
Since we have already made this BC-breaking change, to resolve this issue going forward we will do the following work:
1. Update the TorchData release notes to reflect those changes
2. Update `apply_sharding` to accept any DataPipe with an `apply_sharding` method, tracked in https://github.com/pytorch/data/issues/1081
 - Add a feature to inspect the `dp.apply_sharding` method. If it does not accept the trailing `sharding_group` argument, we will treat it as `DEFAULT` and skip passing this extra argument to `dp.apply_sharding` (see the sketch below).
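A minimal sketch of that compatibility shim (the helper name here is illustrative; it mirrors the `inspect.signature` check in the patch above):
```python
import inspect

def _apply_sharding_compat(dp, num_of_instances, instance_id, sharding_group):
    # Only forward sharding_group if the datapipe's apply_sharding accepts it;
    # older custom datapipes keep the two-argument signature.
    sig = inspect.signature(dp.apply_sharding)
    if len(sig.parameters) < 3:
        dp.apply_sharding(num_of_instances, instance_id)
    else:
        dp.apply_sharding(num_of_instances, instance_id, sharding_group=sharding_group)
```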
BTW, are you still relying on `is_shardable` to determine if you want to apply sharding to the DataPipe?
@ejguan thanks for the quick updates! sounds like a plan for me. I'm currently unblocked since I went ahead and subclassed from `_ShardingIterDataPipe`.
> are you still relying on is_shardable to determine if you want to apply sharding to the DataPipe?
Not anymore, I removed the `is_shardable()` method since it is no longer used by the DataLoader. I don't manually call the `apply_sharding()` method anywhere.
I used to have a runtime_checkable Protocol called `Shardable` (as shown at the top) but this is also obsolete since I can always do an `isinstance(pipe, _ShardingIterDataPipe)` check if needed (but I don't have a use-case for this other than in certain sanity tests).
I'll comment on https://github.com/pytorch/data/issues/1081 for specifics on the forward fix.
Closing in favor of tracking https://github.com/pytorch/data/issues/1081 since I have a workaround for now.
I am actually thinking about forward fixing it directly since PyTorch might do a minor release.
@ejguan @kiukchung Please note:
This issue is in the milestone [https://github.com/pytorch/pytorch/milestone/36?closed=1](https://github.com/pytorch/pytorch/milestone/36?closed=1). If you want to see your fix included in this minor release, please post it as a cherry-pick into the [[v2.0.1] Release Tracker](https://github.com/pytorch/pytorch/issues/97272).
**The deadline is April 14, 5PM PST.**
**Only issues that have ‘cherry-picks’ will be considered for the release.**
Common FAQs:
Q1: Where can I find more information on the release process and terminology?
A: [pytorch/RELEASE.md at master · pytorch/pytorch · GitHub](https://github.com/pytorch/pytorch/blob/master/RELEASE.md)
Q2: Am I guaranteed to be included in the cherry-pick if I do above?
A: No, it is not guaranteed, the Release Team will review all submissions against the listed criteria before making the final decision on what to include on 4/17.
Q3: When is 2.1 going to be released?
A: We do not have a formal date at this time but will update the community when we do. Our immediate focus is 2.0.1. Note that 1.12 was released on 6/28/22, 1.13 on 10/28/22 and 2.0 on 3/15/23.
Q4: **I missed the 4/14 5PM PST deadline, is there any option to have an extension?**
A: **No, in order to meet our 4/28 goal, we must hold 4/14 as our deadline and will not accept any requests after the fact. We are over communicating the timelines and process with the community to avoid such issues.**
Q5: Where should I double check to see if my issue is in the cherry pick tracker?
A: [[v2.0.1] Release Tracker · Issue #97272 · pytorch/pytorch · GitHub](https://github.com/pytorch/pytorch/issues/97272)
Q6: Where can I find the Release Compatibility Matrix for PyTorch?
A: [pytorch/RELEASE.md at master · pytorch/pytorch · GitHub](https://github.com/pytorch/pytorch/blob/master/RELEASE.md#release-compatibility-matrix)
Please contact OSS Releng team members if you have any questions/comments. Again, we appreciate everyone’s time and commitment to the community, PyTorch, and the 2.0 and 2.0.1 releases!
Please refer to this post for more details: https://dev-discuss.pytorch.org/t/pytorch-release-2-0-1-important-information/1176 | 2023-04-06T18:06:29 |
pytorch/pytorch | 98,777 | pytorch__pytorch-98777 | [
"97207"
] | f08d20ade03ca2badc48a0eb430ba72cf9cdd4c9 | diff --git a/torch/storage.py b/torch/storage.py
--- a/torch/storage.py
+++ b/torch/storage.py
@@ -306,14 +306,37 @@ def _isint(x):
else:
return isinstance(x, int)
+_always_warn_typed_storage_removal = False
+
+def _get_always_warn_typed_storage_removal():
+ return _always_warn_typed_storage_removal
+
+def _set_always_warn_typed_storage_removal(always_warn):
+ global _always_warn_typed_storage_removal
+ assert isinstance(always_warn, bool)
+ _always_warn_typed_storage_removal = always_warn
+
def _warn_typed_storage_removal(stacklevel=2):
- message = (
- "TypedStorage is deprecated. It will be removed in the future and "
- "UntypedStorage will be the only storage class. This should only matter "
- "to you if you are using storages directly. To access UntypedStorage "
- "directly, use tensor.untyped_storage() instead of tensor.storage()"
- )
- warnings.warn(message, UserWarning, stacklevel=stacklevel + 1)
+ global _always_warn_typed_storage_removal
+
+ def is_first_time():
+ if not hasattr(_warn_typed_storage_removal, 'has_warned'):
+ return True
+ else:
+ return not _warn_typed_storage_removal.__dict__['has_warned']
+
+ if _get_always_warn_typed_storage_removal() or is_first_time():
+ message = (
+ "TypedStorage is deprecated. It will be removed in the future and "
+ "UntypedStorage will be the only storage class. This should only matter "
+ "to you if you are using storages directly. To access UntypedStorage "
+ "directly, use tensor.untyped_storage() instead of tensor.storage()"
+ )
+ warnings.warn(message, UserWarning, stacklevel=stacklevel + 1)
+ _warn_typed_storage_removal.__dict__['has_warned'] = True
+
+def _reset_warn_typed_storage_removal():
+ _warn_typed_storage_removal.__dict__['has_warned'] = False
class TypedStorage:
is_sparse = False
| diff --git a/test/test_torch.py b/test/test_torch.py
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -37,7 +37,8 @@
skipCUDAMemoryLeakCheckIf, BytesIOContext,
skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
- skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like)
+ skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
+ AlwaysWarnTypedStorageRemoval)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
expectedFailureMeta,
@@ -6714,15 +6715,39 @@ def test_typed_storage_deprecation_warning(self):
# Check that each of the TypedStorage function calls produce a warning
# if warnings are reset between each
for f in funcs:
- with warnings.catch_warnings(record=True) as w:
- warnings.resetwarnings()
- f()
- self.assertEqual(len(w), 1, msg=str([str(a) for a in w]))
- warning = w[0].message
- self.assertTrue(warning, DeprecationWarning)
- self.assertTrue(re.search(
- '^TypedStorage is deprecated',
- str(warning)))
+ with AlwaysWarnTypedStorageRemoval(True):
+ with warnings.catch_warnings(record=True) as w:
+ warnings.resetwarnings()
+ f()
+ self.assertEqual(len(w), 1, msg=str([str(a) for a in w]))
+ warning = w[0].message
+ self.assertTrue(warning, DeprecationWarning)
+ self.assertTrue(re.search(
+ '^TypedStorage is deprecated',
+ str(warning)))
+
+ # Test that only the first warning is raised by default
+ torch.storage._reset_warn_typed_storage_removal()
+ with warnings.catch_warnings(record=True) as w:
+ warnings.resetwarnings()
+ torch.FloatStorage()
+ torch.randn(10).storage()
+ self.assertEqual(len(w), 1, msg=str([str(a) for a in w]))
+ warning = w[0].message
+ self.assertTrue(re.search(
+ '^TypedStorage is deprecated',
+ str(warning)))
+ # Check the line of code from the warning's stack
+ with open(w[0].filename, encoding="utf-8") as f:
+ code_line = f.readlines()[w[0].lineno - 1]
+ self.assertTrue(re.search(re.escape('torch.FloatStorage()'), code_line))
+
+ # Check that warnings are not emitted if it happened in the past
+ with warnings.catch_warnings(record=True) as w:
+ warnings.resetwarnings()
+ torch.FloatStorage()
+ torch.randn(10).storage()
+ self.assertEqual(len(w), 0, msg=str([str(a) for a in w]))
def test_from_file(self):
def assert_with_filename(filename):
diff --git a/torch/testing/_internal/common_utils.py b/torch/testing/_internal/common_utils.py
--- a/torch/testing/_internal/common_utils.py
+++ b/torch/testing/_internal/common_utils.py
@@ -1123,6 +1123,18 @@ def __exit__(self, exception_type, exception_value, traceback):
self.deterministic_restore,
warn_only=self.warn_only_restore)
+class AlwaysWarnTypedStorageRemoval:
+ def __init__(self, always_warn):
+ assert isinstance(always_warn, bool)
+ self.always_warn = always_warn
+
+ def __enter__(self):
+ self.always_warn_restore = torch.storage._get_always_warn_typed_storage_removal()
+ torch.storage._set_always_warn_typed_storage_removal(self.always_warn)
+
+ def __exit__(self, exception_type, exception_value, traceback):
+ torch.storage._set_always_warn_typed_storage_removal(self.always_warn_restore)
+
# Context manager for setting cuda sync debug mode and reset it
# to original value
# we are not exposing it to the core because sync debug mode is
| repeated warning `UserWarning: TypedStorage is deprecated`
### 🐛 Describe the bug
I noticed that the warning `UserWarning: TypedStorage is deprecated` that results from calling `Tensor.storage()` is issued repeatedly, even though the documentation about `torch.set_warn_always` says that by default pytorch warnings should only be issued once per process. While modifying my code to call `untyped_storage()` instead of `storage()` could get rid of the excessive warnings, I would prefer not to since I want to keep my code compatible with pre-2.0.0 versions for now, and `untyped_storage()` is only available >=2.0.0.
Example:
```
import torch
torch.tensor([1,2,3]).storage()
torch.tensor([1,2,3]).storage()
torch.tensor([1,2,3]).storage()
```
Output:
```
/path_to_file/test_warning.py:3: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
torch.tensor([1,2,3]).storage()
/path_to_file/test_warning.py:4: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
torch.tensor([1,2,3]).storage()
/path_to_file/test_warning.py:5: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
torch.tensor([1,2,3]).storage()
```
### Versions
```
Collecting environment information...
PyTorch version: 2.0.0
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 13.2.1 (arm64)
GCC version: Could not collect
Clang version: 12.0.0 (clang-1200.0.32.28)
CMake version: version 3.26.0
Libc version: N/A
Python version: 3.9.16 (main, Mar 1 2023, 12:19:04) [Clang 14.0.6 ] (64-bit runtime)
Python platform: macOS-13.2.1-arm64-arm-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Apple M1 Pro
Versions of relevant libraries:
[pip3] numpy==1.24.2
[pip3] torch==2.0.0
[conda] numpy 1.24.2 pypi_0 pypi
[conda] torch 2.0.0 pypi_0 pypi
```
cc @albanD
| cc @kurtamohler could you fix that?
As a temporary workaround, you can silence this warning with a regular Python warning filter: `python -W ignore::UserWarning foo.py`
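Or, as a sketch of the equivalent in-code filter (the `message` argument is a regex matched against the start of the warning text):
```python
import warnings

import torch

# Hide just this deprecation warning for the whole process.
warnings.filterwarnings("ignore", message="TypedStorage is deprecated")

torch.tensor([1, 2, 3]).storage()  # no warning is printed
```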
The complication is that the warning message contains the line of code that triggered the warning, with `warnings.warn(..., stacklevel=...)`:
https://github.com/pytorch/pytorch/blob/0b094ca37f6ac0d3c1899653ea069dbaaf33176b/torch/storage.py#L365-L372
So one option is to remove the `stacklevel` arg. But having the stack in the message is helpful for finding where the warning came from (https://github.com/pytorch/pytorch/pull/89867)
Another option is to only emit the warning the first time `_warn_typed_storage_removal()` gets called. For testing purposes, we could have a flag to make it always warn. Does that seem reasonable?
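A minimal sketch of that warn-once behavior (this is essentially what the fix in the patch above does; the flag name is illustrative):
```python
import warnings

_always_warn_typed_storage_removal = False  # test-only override

def _warn_typed_storage_removal(stacklevel=2):
    # Emit the deprecation warning only the first time, unless the override is set.
    has_warned = _warn_typed_storage_removal.__dict__.get("has_warned", False)
    if _always_warn_typed_storage_removal or not has_warned:
        warnings.warn(
            "TypedStorage is deprecated. ...",
            UserWarning,
            stacklevel=stacklevel + 1,
        )
        _warn_typed_storage_removal.__dict__["has_warned"] = True
```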
cc @ezyang
Yeah, let's just have this warn only once and never again. If you're trying to scrub these, you can comment it out | 2023-04-10T19:43:35 |
pytorch/pytorch | 99,092 | pytorch__pytorch-99092 | [
"97409"
] | f08d20ade03ca2badc48a0eb430ba72cf9cdd4c9 | diff --git a/torch/nn/modules/activation.py b/torch/nn/modules/activation.py
--- a/torch/nn/modules/activation.py
+++ b/torch/nn/modules/activation.py
@@ -1229,20 +1229,27 @@ def merge_masks(self, attn_mask: Optional[Tensor], key_padding_mask: Optional[Te
check_other=False,
)
- if attn_mask is not None:
- mask_type = 0
- merged_mask = attn_mask
if key_padding_mask is not None:
mask_type = 1
merged_mask = key_padding_mask
- if (attn_mask is not None) and (key_padding_mask is not None):
+
+ if attn_mask is not None:
# In this branch query can't be a nested tensor, so it has a shape
batch_size, seq_len, _ = query.shape
mask_type = 2
- key_padding_mask_expanded = key_padding_mask.view(batch_size, 1, 1, seq_len) \
- .expand(-1, self.num_heads, -1, -1)
- attn_mask_expanded = attn_mask.view(1, 1, seq_len, seq_len).expand(batch_size, self.num_heads, -1, -1)
- merged_mask = attn_mask_expanded + key_padding_mask_expanded
+
+ # Always expands attn_mask to 4D
+ if attn_mask.dim() == 3:
+ attn_mask_expanded = attn_mask.view(batch_size, -1, seq_len, seq_len)
+ else: # attn_mask.dim() == 2:
+ attn_mask_expanded = attn_mask.view(1, 1, seq_len, seq_len).expand(batch_size, self.num_heads, -1, -1)
+ merged_mask = attn_mask_expanded
+
+ if key_padding_mask is not None:
+ key_padding_mask_expanded = key_padding_mask.view(batch_size, 1, 1, seq_len).expand(-1, self.num_heads, -1, -1)
+ merged_mask = attn_mask_expanded + key_padding_mask_expanded
+
+ # no attn_mask and no key_padding_mask, returns None, None
return merged_mask, mask_type
| diff --git a/test/test_transformers.py b/test/test_transformers.py
--- a/test/test_transformers.py
+++ b/test/test_transformers.py
@@ -34,7 +34,6 @@
if TEST_FAIRSEQ:
import fairseq.models.transformer as fairseq_transformer
-
@contextlib.contextmanager
def use_deterministic_algorithims(mode: bool, warn_only: bool):
r"""
@@ -155,6 +154,37 @@ def test_train_with_pad_and_catch_error(self, device):
l1_bool = nn.L1Loss()(test_train_bool[:, 0:2, :], test_eval_bool[:, 0:2, :]).item()
self.assertTrue(l1_bool < 1e-4, "Eval/Train difference in pad_mask BOOL")
+ @parametrize("device", device_list)
+ @parametrize("attn_mask_dim", [2, 3, None])
+ @parametrize("key_padding_mask_dim", [2, None])
+ def test_multiheadattention_fastpath_attn_mask(self, device, attn_mask_dim, key_padding_mask_dim):
+ with torch.no_grad():
+ B = 2
+ L = 4
+ D = 8
+ H = 4
+
+
+ if attn_mask_dim == 2:
+ attn_mask = torch.randn(L, L, device=device) > 0
+ elif attn_mask_dim == 3:
+ attn_mask = torch.randn(B * H, L, L, device=device) > 0
+ elif attn_mask_dim is None:
+ attn_mask = None
+
+ if key_padding_mask_dim == 2:
+ key_padding_mask = torch.randn(B, L, device=device) > 0
+ elif key_padding_mask_dim is None:
+ key_padding_mask = None
+
+ mha = nn.MultiheadAttention(D, H, batch_first=True, device=device)
+ X = torch.randn(B, L, D, device=device)
+
+ mha.train() # disable fast path
+ out, _ = mha(X, X, X, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)
+ mha.eval() # enable fast path
+ out, _ = mha(X, X, X, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)
+
@parametrize("device", device_list)
@parametrize("nhead", [1, 4, 8])
def test_transformerencoderlayer_src_mask(self, device, nhead):
| nn.MultiheadAttention breaks for mask_type=2 when fast path is enabled (e.g. with batched attn_mask)
### 🐛 Describe the bug
**TLDR:** When `nn.MultiheadAttention` is used with a batched `attn_mask`, which [should be](https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html) of shape (N*H, L, S) (where S=L for self-attn), **_and_** the fast path is enabled, it crashes.
_It works as expected when the fast path is **not** enabled._
### Minimal example to reproduce:
```python
import torch
import torch.nn as nn
with torch.no_grad():
B = 8
L = 100
D = 512
H = 8
mha = nn.MultiheadAttention(D, H, batch_first=True)
X = torch.randn(B, L, D)
M = torch.randn(B * H, L, L) > 0
mha.train() # disable fast path
out, _ = mha(X, X, X, attn_mask=M, need_weights=False) # works
mha.eval() # enable fast path
out, _ = mha(X, X, X, attn_mask=M, need_weights=False) # crashes
```
Gives:
```
RuntimeError: For mask_type == 2 mask shape should match input shape
```
I did a little debugging in the torch code around [here](https://github.com/pytorch/pytorch/blob/fe0afc58529bdf148bfda580a60e3d778b5b8f1d/torch/nn/modules/activation.py#L1176) and it seems that when `self.merge_masks` is called the `mask_type` returned is 0. However, when `torch._native_multi_head_attention` is called with `mask_type=0` but a (N*H, L, S) `attn_mask`, it seems to ignore the `mask_type` flag (see error above).
I'm on pytorch 2.0 (conda) on M2 Max macOS 13.2.1.
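For reference, the fix that landed (see the patch above) always expands `attn_mask` to 4-D before merging; a simplified, standalone sketch of that merging logic (assuming float masks and `query` of shape `(B, L, D)`):
```python
import torch

def merge_masks_4d(attn_mask, key_padding_mask, query, num_heads):
    # attn_mask: (L, L) or (B*H, L, L); key_padding_mask: (B, L) or None.
    batch_size, seq_len, _ = query.shape
    if attn_mask.dim() == 3:
        attn_mask = attn_mask.view(batch_size, -1, seq_len, seq_len)
    else:  # 2-D mask shared across the batch
        attn_mask = attn_mask.view(1, 1, seq_len, seq_len).expand(batch_size, num_heads, -1, -1)
    merged = attn_mask
    if key_padding_mask is not None:
        merged = merged + key_padding_mask.view(batch_size, 1, 1, seq_len).expand(-1, num_heads, -1, -1)
    return merged  # always 4-D, i.e. mask_type == 2
```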
### Versions
Collecting environment information...
PyTorch version: 2.0.0
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: macOS 13.2.1 (arm64)
GCC version: Could not collect
Clang version: 14.0.0 (clang-1400.0.29.202)
CMake version: Could not collect
Libc version: N/A
Python version: 3.9.16 (main, Jan 11 2023, 10:02:19) [Clang 14.0.6 ] (64-bit runtime)
Python platform: macOS-13.2.1-arm64-arm-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Apple M2 Max
Versions of relevant libraries:
[pip3] lion-pytorch==0.0.4
[pip3] mypy-extensions==0.4.3
[pip3] numpy==1.23.5
[pip3] pytorch-lightning==2.0.0
[pip3] pytorch-wavelets==1.3.0
[pip3] torch==2.0.0
[pip3] torch-yin==0.1.3
[pip3] torchaudio==2.0.0
[pip3] torchmetrics==0.11.1
[pip3] xitorch==0.4.0.dev0+3327cc0
[conda] lion-pytorch 0.0.4 pypi_0 pypi
[conda] numpy 1.23.5 py39h1398885_0
[conda] numpy-base 1.23.5 py39h90707a3_0
[conda] pytorch 2.0.0 py3.9_0 pytorch
[conda] pytorch-lightning 2.0.0 pypi_0 pypi
[conda] pytorch-wavelets 1.3.0 pypi_0 pypi
[conda] torch-yin 0.1.3 pypi_0 pypi
[conda] torchaudio 2.0.0 py39_cpu pytorch
[conda] torchmetrics 0.11.1 pypi_0 pypi
[conda] xitorch 0.4.0.dev0+3327cc0 pypi_0 pypi
cc @ezyang @gchanan @zou3519 @jbschlosser @bhosmer @cpuhrsch @erichan1
| +1 having same issue
Not exactly the same, but I am also having issues with the fast path since upgrading to 2.0:
#97128
cc @mikekgfb
+1 having same issue
Marking this for high priority due to traffic to make sure it gets discussed during triage review. cc @jisaacso @drisspg @jbschlosser
@erichan1 - Once a fix is out can you mark it for cherry pick as well? cc @atalman
@erichan1 Please post a cherry pick by April 14, 5PM PST.
This issue is in the milestone [https://github.com/pytorch/pytorch/milestone/36?closed=1](https://github.com/pytorch/pytorch/milestone/36?closed=1). If you want to see your fix included in this minor release, please post it as a cherry-pick into the [[v2.0.1] Release Tracker](https://github.com/pytorch/pytorch/issues/97272).
**The deadline is April 14, 5PM PST.**
**Only issues that have ‘cherry-picks’ will be considered for the release.**
Common FAQs:
Q1: Where can I find more information on the release process and terminology?
A: [pytorch/RELEASE.md at master · pytorch/pytorch · GitHub](https://github.com/pytorch/pytorch/blob/master/RELEASE.md)
Q2: Am I guaranteed to be included in the cherry-pick if I do above?
A: No, it is not guaranteed, the Release Team will review all submissions against the listed criteria before making the final decision on what to include on 4/17.
Q3: When is 2.1 going to be released?
A: We do not have a formal date at this time but will update the community when we do. Our immediate focus is 2.0.1. Note that 1.12 was released on 6/28/22, 1.13 on 10/28/22 and 2.0 on 3/15/23.
Q4: **I missed the 4/14 5PM PST deadline, is there any option to have an extension?**
A: **No, in order to meet our 4/28 goal, we must hold 4/14 as our deadline and will not accept any requests after the fact. We are over communicating the timelines and process with the community to avoid such issues.**
Q5: Where should I double check to see if my issue is in the cherry pick tracker?
A: [[v2.0.1] Release Tracker · Issue #97272 · pytorch/pytorch · GitHub](https://github.com/pytorch/pytorch/issues/97272)
Q6: Where can I find the Release Compatibility Matrix for PyTorch?
A: [pytorch/RELEASE.md at master · pytorch/pytorch · GitHub](https://github.com/pytorch/pytorch/blob/master/RELEASE.md#release-compatibility-matrix)
Please contact OSS Releng team members if you have any questions/comments. Again, we appreciate everyone’s time and commitment to the community, PyTorch, and the 2.0 and 2.0.1 releases!
Please refer to this post for more details: https://dev-discuss.pytorch.org/t/pytorch-release-2-0-1-important-information/1176
Posted guidelines for a proper fix [here](https://github.com/pytorch/pytorch/pull/98375#issuecomment-1499504721) on the open PR. | 2023-04-13T21:55:01 |
pytorch/pytorch | 99,103 | pytorch__pytorch-99103 | [
"95892"
] | 34dd578b918ceb07dfa70f40f4e120e9f8f104fa | diff --git a/torch/distributed/checkpoint/optimizer.py b/torch/distributed/checkpoint/optimizer.py
--- a/torch/distributed/checkpoint/optimizer.py
+++ b/torch/distributed/checkpoint/optimizer.py
@@ -255,7 +255,8 @@ def load_sharded_optimizer_state_dict(
sharding_spec = ChunkShardingSpec(
dim=0,
placements=[
- f"rank:{i}/cuda:{i}" for i in range(dist.get_world_size())
+ f"rank:{i}/cuda:{i % torch.cuda.device_count()}"
+ for i in range(dist.get_world_size())
],
)
else:
| ```load_sharded_optimizer_state_dict``` error on multi node
### 🐛 Describe the bug
I am hitting the following error when loading the optimizer state using FSDP with 2 nodes. There are no issues when saving/loading the optimizer with a single node.
```
optim_state = load_sharded_optimizer_state_dict(
model_state_dict=state_dict["model"],
optimizer_key="optimizer",
storage_reader=checkpoint.FileSystemReader(checkpoint_dir),
)
```
Error:
```
optim_state = load_sharded_optimizer_state_dict(
File "/opt/conda/lib/python3.9/site-packages/torch/distributed/checkpoint/optimizer.py", line 281, in load_sharded_optimizer_state_dict
state_dict[key] = _shard_tensor(
File "/opt/conda/lib/python3.9/site-packages/torch/distributed/_shard/api.py", line 69, in _shard_tensor
st = sharding_spec.shard(tensor, src_rank=src_rank, process_group=process_group)
File "/opt/conda/lib/python3.9/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py", line 166, in shard
local_tensor = torch.empty(
RuntimeError: CUDA error: invalid device ordinal
CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
```
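For reference, the fix in the patch above maps each global rank onto a local CUDA index when building the resharding spec; a minimal sketch (assuming one process per GPU and an initialized process group):
```python
import torch
import torch.distributed as dist
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

# With 2 nodes x 8 GPUs, rank 9 should land on cuda:1, not the nonexistent cuda:9.
sharding_spec = ChunkShardingSpec(
    dim=0,
    placements=[
        f"rank:{i}/cuda:{i % torch.cuda.device_count()}"
        for i in range(dist.get_world_size())
    ],
)
```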
### Versions
```
PyTorch version: 2.0.0.dev20230227+cu117
```
cc @mrshenli @pritamdamania87 @zhaojuanmao @satgera @rohan-varma @gqchen @aazzolini @osalpekar @jiayisuse @H-Huang @kwen2501 @awgu
| cc @wz337 for distributed checkpoint issues
> RuntimeError: CUDA error: invalid device ordinal
Looks like the checkpoint was saved on a node with more devices than where it is loaded?
@cavdard
load_sharded_optimizer_state_dict needs to be used in conjunction with FSDP sharded optimizer state.
Question: did you set the state_dict_type to sharded_state_dict? Example: https://github.com/wz337/pytorch/blob/master/torch/distributed/checkpoint/examples/fsdp_checkpoint_example.py#L91
Encounter the same problem here.
It seems to try to create a tensor on device like `cuda:31`, which is a global rank index.
@wz337
Yes I am using `StateDictType.SHARDED_STATE_DICT`
```
with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
state_dict = {
"model": model.state_dict(),
"scheduler": scheduler.state_dict(),
.....
}
```
> Encounter the same problem here. It seems to try to create a tensor on device like `cuda:31`, which is a global rank index.
Could you provide a bit context of your setup as well? Thanks!
> > Encounter the same problem here. It seems to try to create a tensor on device like `cuda:31`, which is a global rank index.
>
> Could you provide a bit context of your setup as well? Thanks!
I used 8 nodes with 8 V100 on each node, and save/load the sharded state dict using the following code.
```python
with FSDP.state_dict_type(
model,
StateDictType.SHARDED_STATE_DICT,
):
state_dict = {
'model': model.state_dict(),
'optimizer': FSDP.optim_state_dict(model, optimizer)
}
save_state_dict(state_dict, FileSystemWriter(ckpt_path))
```
```python
with FSDP.state_dict_type(
model,
StateDictType.SHARDED_STATE_DICT,
):
storage_reader = FileSystemReader(ckpt_path)
state_dict = {
'model': model.state_dict(),
}
load_state_dict(state_dict, storage_reader)
state_dict |= load_sharded_optimizer_state_dict(
model_state_dict=state_dict['model'],
optimizer_key='optimizer',
storage_reader=storage_reader,
)
```
Error occurred during `load_sharded_optimizer_state_dict` call. | 2023-04-13T23:10:01 |
|
pytorch/pytorch | 108,259 | pytorch__pytorch-108259 | [
"108472"
] | c9cbdaf24fa35d8d3f51b2cc39c08fa404b720cb | diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -331,12 +331,13 @@ def write_header(self):
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
- from torch import empty_strided, as_strided, device
+ from torch import empty_strided, device
from {codecache.__name__} import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
aten = torch.ops.aten
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
+ reinterpret_tensor = torch.ops.inductor._reinterpret_tensor
async_compile = AsyncCompile()
"""
@@ -788,7 +789,8 @@ def make_buffer_reuse(self, old, new):
return f"{self.declare}{new.get_name()} = {old.get_name()}{del_line} {self.comment} reuse"
return (
- f"{self.declare}{new.get_name()} = {self.namespace}as_strided({old.get_name()}, "
+ f"{self.declare}{new.get_name()} = reinterpret_tensor("
+ f"{old.get_name()}, "
f"{self.codegen_shape_tuple(new.get_size())}, "
f"{self.codegen_shape_tuple(new.get_stride())}){del_line} {self.comment} reuse"
)
@@ -945,6 +947,7 @@ def write_header(self):
self.header.splice(
"""
#include <torch/csrc/inductor/inductor_ops.h>
+ #define reinterpret_tensor torch::inductor::_reinterpret_tensor
"""
)
diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py
--- a/torch/_inductor/graph.py
+++ b/torch/_inductor/graph.py
@@ -432,7 +432,7 @@ def get_dtype(self, buffer_name: str):
return self.name_to_buffer[buffer_name].get_dtype()
if buffer_name in self.graph_inputs:
return self.graph_inputs[buffer_name].get_dtype()
- m = re.match(r"as_strided\(([a-zA-Z0-9_]+),", buffer_name)
+ m = re.match(r"(as_strided|reinterpret_tensor)\(([a-zA-Z0-9_]+),", buffer_name)
if m:
return self.get_dtype(m.group(1))
raise KeyError(f"could not find {buffer_name}")
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -1873,12 +1873,10 @@ def codegen_reference(self):
size = V.graph.wrapper_code.codegen_shape_tuple(self.layout.size)
stride = V.graph.wrapper_code.codegen_shape_tuple(self.layout.stride)
offset = V.graph.wrapper_code.codegen_sizevar(self.layout.offset)
- namespace = V.graph.wrapper_code.namespace
- if offset != "0":
- return (
- f"{namespace}as_strided({self.get_name()}, {size}, {stride}, {offset})"
- )
- return f"{namespace}as_strided({self.get_name()}, {size}, {stride})"
+ # reinterpret_tensor is similar to as_strided except:
+ # - offset is added to the existing offset (rather than replacing it)
+ # - view tracking is disabled similar to unsafe_view
+ return f"reinterpret_tensor({self.get_name()}, {size}, {stride}, {offset})"
class SliceView(View):
| diff --git a/test/inductor/test_cpp_wrapper.py b/test/inductor/test_cpp_wrapper.py
--- a/test/inductor/test_cpp_wrapper.py
+++ b/test/inductor/test_cpp_wrapper.py
@@ -172,6 +172,7 @@ class BaseTest(NamedTuple):
BaseTest("test_dtype_sympy_expr"),
BaseTest("test_embedding_bag"), # test default FallbackKernel
BaseTest("test_index_put_deterministic_fallback"),
+ BaseTest("test_adding_tensor_offsets"),
BaseTest("test_int_div", "", test_cpu_repro.CPUReproTests()),
BaseTest("test_linear1"),
BaseTest("test_linear2"),
@@ -253,6 +254,7 @@ class BaseTest(NamedTuple):
BaseTest("test_custom_op"),
BaseTest("test_embedding_bag"), # test default FallbackKernel
BaseTest("test_index_put_deterministic_fallback"),
+ BaseTest("test_adding_tensor_offsets"),
BaseTest("test_index_tensor"),
BaseTest("test_linear1"),
BaseTest("test_linear2"),
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -4671,6 +4671,16 @@ def fn(ind, x, src):
args = [torch.tensor([1], dtype=torch.int64), torch.randn(8, 4), torch.randn(4)]
self.common(fn, args)
+ def test_adding_tensor_offsets(self):
+ @torch.compile(fullgraph=True)
+ def fn(x):
+ return x[16:32]
+
+ with torch.no_grad():
+ x = torch.randn(1024, device=self.device)
+ self.assertEqual(fn(x[0:]), x[16:][:16])
+ self.assertEqual(fn(x[128:]), x[128 + 16 :][:16])
+
# from GPT2ForSequenceClassification
def test_index_tensor(self):
def fn(x, y):
| Silent correctness issue if input tensors have existing storage offsets
### 🐛 Describe the bug
This is already fixed on main by https://github.com/pytorch/pytorch/pull/108168
```
def test_adding_tensor_offsets(self):
@torch.compile(fullgraph=True)
def fn(x):
return x[16:32]
with torch.no_grad():
x = torch.randn(1024, device=self.device)
self.assertEqual(fn(x[0:]), x[16:][:16])
self.assertEqual(fn(x[128:]), x[128 + 16 :][:16])
```
Fails with
```
Traceback (most recent call last):
File "/home/jansel/conda/envs/pytorch/lib/python3.10/unittest/case.py", line 59, in testPartExecutor
yield
File "/home/jansel/conda/envs/pytorch/lib/python3.10/unittest/case.py", line 591, in run
self._callTestMethod(testMethod)
File "/home/jansel/conda/envs/pytorch/lib/python3.10/unittest/case.py", line 549, in _callTestMethod
method()
File "/home/jansel/pytorch/torch/testing/_internal/common_utils.py", line 2388, in wrapper
method(*args, **kwargs)
File "/home/jansel/pytorch/test/inductor/test_torchinductor.py", line 6997, in new_test
return value(self)
File "/home/jansel/pytorch/test/inductor/test_torchinductor.py", line 4682, in test_adding_tensor_offsets
self.assertEqual(fn(x[128:]), x[128 + 16 :][:16])
File "/home/jansel/pytorch/torch/testing/_internal/common_utils.py", line 3285, in assertEqual
raise error_metas.pop()[0].to_error(
AssertionError: Tensor-likes are not close!
Mismatched elements: 16 / 16 (100.0%)
Greatest absolute difference: 2.224123239517212 at index (13,) (up to 1e-05 allowed)
Greatest relative difference: 12.916085243225098 at index (2,) (up to 1.3e-06 allowed)
```
The existing storage_offset is discarded by inductor.
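A small standalone illustration (not inductor's generated code) of why dropping the input's offset changes the result: `torch.as_strided` takes an absolute storage offset, so computing it from zero ignores the view's own `storage_offset()`:
```python
import torch

x = torch.randn(1024)
v = x[128:]  # view with v.storage_offset() == 128

# Offset recomputed from zero: silently reads x[16:32] instead of v[16:32].
wrong = torch.as_strided(v, (16,), (1,), 16)
# Offset added to the view's existing offset: reads x[144:160] == v[16:32].
right = torch.as_strided(v, (16,), (1,), v.storage_offset() + 16)

print(torch.equal(wrong, x[16:32]), torch.equal(right, v[16:32]))  # True True
```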
### Versions
release/2.1 branch
cc @ezyang @gchanan @zou3519 @kadeng @msaroufim @wconstab @bdhirsh @anijain2305 @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @Xia-Weiwen @wenzhe-nrv @jiayisunx @peterbell10 @ipiszy @ngimel @yf225 @chenyang78 @muchulee8 @aakhundov
| 2023-08-30T17:47:58 |
|
pytorch/pytorch | 108,385 | pytorch__pytorch-108385 | [
"106085"
] | da1ccca830dda18423ec8191d942c9ebcc953cb9 | diff --git a/torch/nn/modules/rnn.py b/torch/nn/modules/rnn.py
--- a/torch/nn/modules/rnn.py
+++ b/torch/nn/modules/rnn.py
@@ -30,6 +30,17 @@ def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tens
class RNNBase(Module):
+ r"""Base class for RNN modules (RNN, LSTM, GRU).
+
+ Implements aspects of RNNs shared by the RNN, LSTM, and GRU classes, such as module initialization
+ and utility methods for parameter storage management.
+
+ .. note::
+ The forward method is not implemented by the RNNBase class.
+
+ .. note::
+ LSTM and GRU classes override some methods implemented by RNNBase.
+ """
__constants__ = ['mode', 'input_size', 'hidden_size', 'num_layers', 'bias',
'batch_first', 'dropout', 'bidirectional', 'proj_size']
__jit_unused_properties__ = ['all_weights']
@@ -351,9 +362,10 @@ def _replicate_for_data_parallel(self):
class RNN(RNNBase):
- r"""Applies a multi-layer Elman RNN with :math:`\tanh` or :math:`\text{ReLU}` non-linearity to an
- input sequence.
+ r"""__init__(self,input_size,hidden_size,num_layers=1,nonlinearity='tanh',bias=True,batch_first=False,dropout=0.0,bidirectional=False,device=None,dtype=None)
+ Applies a multi-layer Elman RNN with :math:`\tanh` or :math:`\text{ReLU}` non-linearity to an
+ input sequence.
For each element in the input sequence, each layer computes the following
function:
@@ -453,6 +465,17 @@ class RNN(RNNBase):
>>> output, hn = rnn(input, h0)
"""
+ @overload
+ def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1,
+ nonlinearity: str = 'tanh', bias: bool = True, batch_first: bool = False,
+ dropout: float = 0., bidirectional: bool = False, device=None,
+ dtype=None) -> None:
+ ...
+
+ @overload
+ def __init__(self, *args, **kwargs):
+ ...
+
def __init__(self, *args, **kwargs):
if 'proj_size' in kwargs:
raise ValueError("proj_size argument is only supported for LSTM, not RNN or GRU")
@@ -462,7 +485,7 @@ def __init__(self, *args, **kwargs):
elif self.nonlinearity == 'relu':
mode = 'RNN_RELU'
else:
- raise ValueError(f"Unknown nonlinearity '{self.nonlinearity}'")
+ raise ValueError(f"Unknown nonlinearity '{self.nonlinearity}'. Select from 'tanh' or 'relu'.")
super().__init__(mode, *args, **kwargs)
@overload
@@ -569,10 +592,13 @@ def forward(self, input, hx=None): # noqa: F811
#
# TODO: remove the overriding implementations for LSTM and GRU when TorchScript
# support expressing these two modules generally.
+
+
class LSTM(RNNBase):
- r"""Applies a multi-layer long short-term memory (LSTM) RNN to an input
- sequence.
+ r"""__init__(self,input_size,hidden_size,num_layers=1,bias=True,batch_first=False,dropout=0.0,bidirectional=False,proj_size=0,device=None,dtype=None)
+ Applies a multi-layer long short-term memory (LSTM) RNN to an input
+ sequence.
For each element in the input sequence, each layer computes the following
function:
@@ -731,6 +757,16 @@ class LSTM(RNNBase):
>>> output, (hn, cn) = rnn(input, (h0, c0))
"""
+ @overload
+ def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, bias: bool = True,
+ batch_first: bool = False, dropout: float = 0., bidirectional: bool = False,
+ proj_size: int = 0, device=None, dtype=None) -> None:
+ ...
+
+ @overload
+ def __init__(self, *args, **kwargs):
+ ...
+
def __init__(self, *args, **kwargs):
super().__init__('LSTM', *args, **kwargs)
@@ -859,8 +895,9 @@ def forward(self, input, hx=None): # noqa: F811
class GRU(RNNBase):
- r"""Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence.
+ r"""__init__(self,input_size,hidden_size,num_layers=1,bias=True,batch_first=False,dropout=0.0,bidirectional=False,device=None,dtype=None)
+ Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence.
For each element in the input sequence, each layer computes the following
function:
@@ -988,6 +1025,16 @@ class GRU(RNNBase):
>>> output, hn = rnn(input, h0)
"""
+ @overload
+ def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, bias: bool = True,
+ batch_first: bool = False, dropout: float = 0., bidirectional: bool = False,
+ device=None, dtype=None) -> None:
+ ...
+
+ @overload
+ def __init__(self, *args, **kwargs):
+ ...
+
def __init__(self, *args, **kwargs):
if 'proj_size' in kwargs:
raise ValueError("proj_size argument is only supported for LSTM, not RNN or GRU")
| Specifying parameters for RNN classes (for signature helpers) and document RNNBase
### 📚 The doc issue
The RNN, LSTM, and GRU classes are initialized with *args and **kwargs, despite the docstring specifying a list of possible arguments. This can be a mild annoyance for those using editors with signature helpers and may confuse users about PyTorch's implementation of RNNs.
Additionally, there is no documentation of the RNNBase class to explain its purpose in the PyTorch codebase.
### Suggest a potential alternative/fix
Trivial changes to the [RNN.py](https://github.com/pytorch/pytorch/blob/main/torch/nn/modules/rnn.py#L449) file to specify the initialization parameters of each RNN class with appropriate type signatures. This will benefit those using editors with signature helpers.
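A minimal sketch of the `@overload` pattern this suggests (illustrative class, not the actual `torch.nn` module):
```python
from typing import overload

class ExampleRNN:
    # The typed @overload gives editors a concrete signature to show,
    # while the real __init__ keeps *args/**kwargs for backward compatibility.
    @overload
    def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1,
                 bias: bool = True, batch_first: bool = False, dropout: float = 0.0,
                 bidirectional: bool = False) -> None: ...
    @overload
    def __init__(self, *args, **kwargs) -> None: ...

    def __init__(self, *args, **kwargs) -> None:
        self._args, self._kwargs = args, kwargs
```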
Write a short docstring for the RNNBase class that (1) explains that it is a base implementation of all RNNs and (2) directs users to other documentation.
I'd be happy to make these changes!
cc @svekars @carljparker @albanD @mruberry @jbschlosser @walterddr @mikaylagawarecki @zou3519
| 2023-09-01T01:24:37 |
||
pytorch/pytorch | 108,523 | pytorch__pytorch-108523 | [
"101688"
] | 8a3b01776972520c6bfb047dcfb52a0a52438bc8 | diff --git a/torch/serialization.py b/torch/serialization.py
--- a/torch/serialization.py
+++ b/torch/serialization.py
@@ -1326,12 +1326,12 @@ def _load(zip_file, map_location, pickle_module, pickle_file='data.pkl', overall
byteorderdata = zip_file.get_record(byteordername)
if byteorderdata not in [b'little', b'big']:
raise ValueError('Unknown endianness type: ' + byteorderdata.decode())
- elif get_default_load_endianness() == LoadEndianness.LITTLE:
+ elif get_default_load_endianness() == LoadEndianness.LITTLE or \
+ get_default_load_endianness() is None:
byteorderdata = b'little'
elif get_default_load_endianness() == LoadEndianness.BIG:
byteorderdata = b'big'
- elif get_default_load_endianness() == LoadEndianness.NATIVE or \
- get_default_load_endianness() is None:
+ elif get_default_load_endianness() == LoadEndianness.NATIVE:
pass
else:
raise ValueError('Invalid load endianness type')
@@ -1339,14 +1339,14 @@ def _load(zip_file, map_location, pickle_module, pickle_file='data.pkl', overall
if not zip_file.has_record(byteordername) and \
get_default_load_endianness() is None and \
sys.byteorder == 'big':
- # Default behaviour should be changed in future
+ # Default behaviour was changed
# See https://github.com/pytorch/pytorch/issues/101688
warnings.warn("The default load endianness for checkpoints without a byteorder mark "
- "on big endian machines will be changed from 'native' to 'little' endian "
- "in a future release, to avoid this behavior please use "
+ "on big endian machines was changed from 'native' to 'little' endian, "
+ "to avoid this behavior please use "
"torch.serialization.set_default_load_endianness to set "
"the desired default load endianness",
- DeprecationWarning)
+ UserWarning)
def load_tensor(dtype, numel, key, location):
name = f'data/{key}'
| diff --git a/test/test_serialization.py b/test/test_serialization.py
--- a/test/test_serialization.py
+++ b/test/test_serialization.py
@@ -14,6 +14,7 @@
import pickle
import shutil
import pathlib
+import platform
from copy import deepcopy
from itertools import product
@@ -3859,6 +3860,47 @@ def test_serialization_load_bom_data_cfloat(self):
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_le_bom))
self.assertTrue(torch.equal(tensor_be_no_bom, tensor_be_bom))
+ @unittest.skipIf(platform.machine() != 's390x', "s390x-specific test")
+ def test_serialization_warning_s390x(self):
+ data_be_no_bom = (b'PK\x03\x04\x00\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x19\x00\t\x00tensor.double.BE/data.pklFB\x05\x00ZZZZZ\x80\x02'
+ b'ctorch._utils\n_rebuild_tensor_v2\nq\x00((X\x07\x00\x00\x00storageq\x01ctorch\n'
+ b'DoubleStorage\nq\x02X\x01\x00\x00\x000q\x03X\x03\x00\x00\x00cpuq\x04K\x04tq\x05'
+ b'QK\x00K\x02K\x02\x86q\x06K\x02K\x01\x86q\x07\x89ccollections\nOrderedDict\nq\x08'
+ b')Rq\ttq\nRq\x0b.PK\x07\x08S\xd3\xba&\x9b\x00\x00\x00\x9b\x00\x00\x00PK\x03\x04\x00'
+ b'\x00\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x17\x00 \x00tensor.double.BE/data/0FB\x1c\x00ZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
+ b'?\xc9^|\xff\xa4v\x97\xbf\xe9\xb0\x8dP\x8c\xbc\xce\xbf\xd3\xdb\xb7[\xef\x0e\xdc?\xde'
+ b'\x00\xf9Q\x08\xb14PK\x07\x083@\x82/ \x00\x00\x00 \x00\x00\x00PK\x03\x04\x00\x00'
+ b'\x08\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x18\x00\x1a\x00tensor.double.BE/versionFB\x16\x00ZZZZZZZZZZZZZZZZZZZZZZ3\nPK\x07'
+ b'\x08\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00PK\x01\x02\x00\x00\x00\x00\x08\x08'
+ b'\x00\x00\x00\x00\x00\x00S\xd3\xba&\x9b\x00\x00\x00\x9b\x00\x00\x00\x19\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00tensor.double.BE/da'
+ b'ta.pklPK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00\x00\x00\x00\x003@\x82/ '
+ b'\x00\x00\x00 \x00\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\xeb\x00\x00\x00tensor.double.BE/data/0PK\x01\x02\x00\x00\x00\x00\x08\x08\x00\x00'
+ b'\x00\x00\x00\x00\xd1\x9egU\x02\x00\x00\x00\x02\x00\x00\x00\x18\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00p\x01\x00\x00tensor.double.BE/versionPK\x06\x06'
+ b',\x00\x00\x00\x00\x00\x00\x00\x1e\x03-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03'
+ b'\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\xd2\x00\x00\x00\x00'
+ b'\x00\x00\x00\xd2\x01\x00\x00\x00\x00\x00\x00PK\x06\x07\x00\x00\x00\x00\xa4\x02\x00'
+ b'\x00\x00\x00\x00\x00\x01\x00\x00\x00PK\x05\x06\x00\x00\x00\x00\x03\x00\x03\x00'
+ b'\xd2\x00\x00\x00\xd2\x01\x00\x00\x00\x00')
+
+ current_load_endian = get_default_load_endianness()
+
+ buf_be_no_bom = io.BytesIO(data_be_no_bom)
+
+ try:
+ set_default_load_endianness(None)
+ with self.assertWarnsRegex(UserWarning, "The default load endianness for checkpoints "
+ "without a byteorder mark on big endian machines "
+ "was changed from 'native' to 'little' endian"):
+ tensor_be_no_bom = torch.load(buf_be_no_bom)
+ finally:
+ set_default_load_endianness(current_load_endian)
+
@parametrize('weights_only', (True, False))
@unittest.skipIf(IS_WINDOWS, "NamedTemporaryFile on windows")
def test_serialization_mmap_loading(self, weights_only):
| torch.load() may store unexpected values into tensors on big-endian
### 🐛 Describe the bug
The current `torch.load()` interprets data as native-endian if a given pickle file has no `byteorder` record.
This behavior causes practical issues on big-endian machines when we load parameters for many DNN models saved on little-endian machines (e.g. x86_64). As a result, tensors would have incorrect values. Practically, it would be better to handle the pickle file w/o `byteorder` record as little-endian.
In addition, we could add an option to handle the pickle file w/o `byteorder` record as native-endian to keep the current behavior.
```python
import torch
x = torch.load("<path to pickle file without a byteorder record>")
```
### Versions
```
Collecting environment information...
PyTorch version: 2.1.0a0+gite7681b5
Is debug build: False
CUDA used to build PyTorch: None
ROCM used to build PyTorch: N/A
OS: Ubuntu 22.04.2 LTS (s390x)
GCC version: (Ubuntu 11.3.0-1ubuntu1~22.04) 11.3.0
Clang version: Could not collect
CMake version: version 3.22.1
Libc version: glibc-2.35
Python version: 3.10.6 (main, Mar 10 2023, 10:55:28) [GCC 11.3.0] (64-bit runtime)
Python platform: Linux-5.15.0-71-generic-s390x-with-glibc2.35
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: False
CPU:
Architecture: s390x
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Big Endian
CPU(s): 8
On-line CPU(s) list: 0-7
Vendor ID: IBM/S390
Machine type: 3931
Thread(s) per core: 1
Core(s) per socket: 1
Socket(s) per book: 1
Book(s) per drawer: 1
Drawer(s): 8
CPU dynamic MHz: 5200
CPU static MHz: 5200
BogoMIPS: 26315.00
Dispatching mode: horizontal
Flags: esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te vx vxd vxe gs vxe2 vxp sort dflt vxp2 nnpa sie
Hypervisor: z/VM 7.2.0
Hypervisor vendor: IBM
Virtualization type: full
L1d cache: 1 MiB (8 instances)
L1i cache: 1 MiB (8 instances)
L2 cache: 256 MiB (8 instances)
L3 cache: 256 MiB
NUMA node(s): 1
NUMA node0 CPU(s): 0-7
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Retbleed: Not affected
Vulnerability Spec store bypass: Not affected
Vulnerability Spectre v1: Mitigation; __user pointer sanitization
Vulnerability Spectre v2: Mitigation; etokens
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Versions of relevant libraries:
[pip3] numpy==1.24.3
[pip3] torch==2.1.0a0+gite7681b5
[conda] Could not collect
```
cc @mruberry @mikaylagawarecki
| Hi, @kiszk thanks for the report! To clarify, how are you saving these objects, are you using `torch.save`? Did you disable [this argument](https://github.com/pytorch/pytorch/blob/main/torch/serialization.py#L457), I believe if `_disable_byteorder_record=False` (the default) it should be saving the endian-ness
Thank you for your comment. Your comment is correct.
Unfortunately, I do not know how the given file is saved since I did not create the original (problematic) pickle file myself.
Hm, it is interesting that the pickle file creator explicitly sets `_disable_byteorder_record=True`
Could a workaround here possibly be to add a byteorder record to the zipfile? e.g. something like
```
from torch.serialization import _open_zipfile_writer
with _open_zipfile_writer(f) as opened_zipfile:
    opened_zipfile.write_record('byteorder', 'little', len('little'))
torch.load(f)
```
Unfortunately, in practice there are many zip files without a byteorder record. For example, many DNN models saved on x86 using previous versions of PyTorch do not have one.
When we download those files from repositories over HTTPS, there is no chance to add a byteorder record.
Hey my apologies, I had not noticed that writing the byteorder record was a very recent addition and most checkpoints would not have it.
Following up on this, from offline discussion, when there is no byteorder mark we could either go with the path of
1) assume little endian
2) adding a kwarg (like `default_load_endian` as you have done)
(i) with default `little`
(ii) with default `native`
3) adding a global variable that indicates the default loading endianness
(i) with default `little`
(ii) with default `native`
(3)(ii) is the solution that we feel makes the most sense. Firstly, setting native-endian as the default is BC-compatible, and secondly, using a global variable ensures that users who don't explicitly call `load` themselves (e.g. they could be calling an API that wraps load, like `from_pretrained`) would still be able to toggle this.
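A minimal sketch of how that global toggle can be used, mirroring the new test in the patch above (the getter/setter names come from `torch.serialization`; the checkpoint path is hypothetical):
```python
import torch
from torch.serialization import (
    get_default_load_endianness,
    set_default_load_endianness,
)

# Remember the previous setting so the process-global toggle does not leak
# into unrelated code.
previous = get_default_load_endianness()
try:
    # None selects the built-in default for checkpoints without a byteorder
    # record (the behavior discussed above).
    set_default_load_endianness(None)
    tensor = torch.load("checkpoint_without_byteorder_record.pt")  # hypothetical path
finally:
    set_default_load_endianness(previous)
```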
| 2023-09-04T16:18:27 |
pytorch/pytorch | 108,596 | pytorch__pytorch-108596 | [
"103142"
] | 03e7f0b99dd5c1cb2e8418443751f5e08fc58d1f | diff --git a/torch/functional.py b/torch/functional.py
--- a/torch/functional.py
+++ b/torch/functional.py
@@ -760,8 +760,11 @@ def _unique_impl(input: Tensor, sorted: bool = True,
elements in the original input ended up in the returned unique list.
return_counts (bool): Whether to also return the counts for each unique
element.
- dim (int): the dimension to apply unique. If ``None``, the unique of the
- flattened input is returned. default: ``None``
+ dim (int, optional): the dimension to operate upon. If ``None``, the
+ unique of the flattened input is returned. Otherwise, each of the
+ tensors indexed by the given dimension is treated as one of the
+ elements to apply the unique operation upon. See examples for more
+ details. Default: ``None``
Returns:
(Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing
@@ -799,6 +802,76 @@ def _unique_impl(input: Tensor, sorted: bool = True,
tensor([[0, 2],
[1, 2]])
+ >>> a = torch.tensor([
+ ... [
+ ... [1, 1, 0, 0],
+ ... [1, 1, 0, 0],
+ ... [0, 0, 1, 1],
+ ... ],
+ ... [
+ ... [0, 0, 1, 1],
+ ... [0, 0, 1, 1],
+ ... [1, 1, 1, 1],
+ ... ],
+ ... [
+ ... [1, 1, 0, 0],
+ ... [1, 1, 0, 0],
+ ... [0, 0, 1, 1],
+ ... ],
+ ... ])
+
+ >>> # If we call `torch.unique(a, dim=0)`, each of the tensors `a[idx, :, :]`
+ >>> # will be compared. We can see that `a[0, :, :]` and `a[2, :, :]` match
+ >>> # each other, so one of them will be removed.
+ >>> (a[0, :, :] == a[2, :, :]).all()
+ tensor(True)
+ >>> a_unique_dim0 = torch.unique(a, dim=0)
+ >>> a_unique_dim0
+ tensor([[[0, 0, 1, 1],
+ [0, 0, 1, 1],
+ [1, 1, 1, 1]],
+ [[1, 1, 0, 0],
+ [1, 1, 0, 0],
+ [0, 0, 1, 1]]])
+
+ >>> # Notice which sub-tensors from `a` match with the sub-tensors from
+ >>> # `a_unique_dim0`:
+ >>> (a_unique_dim0[0, :, :] == a[1, :, :]).all()
+ tensor(True)
+ >>> (a_unique_dim0[1, :, :] == a[0, :, :]).all()
+ tensor(True)
+
+ >>> # For `torch.unique(a, dim=1)`, each of the tensors `a[:, idx, :]` are
+ >>> # compared. `a[:, 0, :]` and `a[:, 1, :]` match each other, so one of
+ >>> # them will be removed.
+ >>> (a[:, 0, :] == a[:, 1, :]).all()
+ tensor(True)
+ >>> torch.unique(a, dim=1)
+ tensor([[[0, 0, 1, 1],
+ [1, 1, 0, 0]],
+ [[1, 1, 1, 1],
+ [0, 0, 1, 1]],
+ [[0, 0, 1, 1],
+ [1, 1, 0, 0]]])
+
+ >>> # For `torch.unique(a, dim=2)`, the tensors `a[:, :, idx]` are compared.
+ >>> # `a[:, :, 0]` and `a[:, :, 1]` match each other. Also, `a[:, :, 2]` and
+ >>> # `a[:, :, 3]` match each other as well. So in this case, two of the
+ >>> # sub-tensors will be removed.
+ >>> (a[:, :, 0] == a[:, :, 1]).all()
+ tensor(True)
+ >>> (a[:, :, 2] == a[:, :, 3]).all()
+ tensor(True)
+ >>> torch.unique(a, dim=2)
+ tensor([[[0, 1],
+ [0, 1],
+ [1, 0]],
+ [[1, 0],
+ [1, 0],
+ [1, 1]],
+ [[0, 1],
+ [0, 1],
+ [1, 0]]])
"""
if has_torch_function_unary(input):
return handle_torch_function(
| torch.unique does not work with `dim`, cannot be used for batch unique
### 🐛 Describe the bug
```
import torch
t = torch.randn(2, 3, 4)
t[t>0.2] = 1
t[t<=0.2] = 0
print(torch.unique(t))
print(torch.unique(t, dim=-1))
```
the expected output shape for `print(torch.unique(t, dim=-1))` should be `[2, 3, 2]`, but it outputs the same shape as the original tensor.
### Versions
PyTorch version: 2.0.1
Is debug build: False
CUDA used to build PyTorch: 11.8
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.6 LTS (x86_64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0
Clang version: Could not collect
CMake version: version 3.16.3
Libc version: glibc-2.31
Python version: 3.10.10 (main, Mar 21 2023, 18:45:11) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-5.13.0-30-generic-x86_64-with-glibc2.31
Is CUDA available: True
CUDA runtime version: Could not collect
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration:
GPU 0: NVIDIA GeForce RTX 3090
GPU 1: NVIDIA GeForce RTX 3090
Nvidia driver version: 520.61.05
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
Address sizes: 46 bits physical, 48 bits virtual
CPU(s): 36
On-line CPU(s) list: 0-35
Thread(s) per core: 2
Core(s) per socket: 18
Socket(s): 1
NUMA node(s): 1
Vendor ID: GenuineIntel
CPU family: 6
Model: 85
Model name: Intel(R) Core(TM) i9-10980XE CPU @ 3.00GHz
Stepping: 7
CPU MHz: 3000.000
CPU max MHz: 4800.0000
CPU min MHz: 1200.0000
BogoMIPS: 6000.00
Virtualization: VT-x
L1d cache: 576 KiB
L1i cache: 576 KiB
L2 cache: 18 MiB
L3 cache: 24.8 MiB
NUMA node0 CPU(s): 0-35
Vulnerability Itlb multihit: KVM: Mitigation: VMX disabled
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Enhanced IBRS, IBPB conditional, RSB filling
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Mitigation; TSX disabled
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb cat_l3 cdp_l3 invpcid_single ssbd mba ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid cqm mpx rdt_a avx512f avx512dq rdseed adx smap clflushopt clwb intel_pt avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts hwp hwp_act_window hwp_epp hwp_pkg_req avx512_vnni md_clear flush_l1d arch_capabilities
Versions of relevant libraries:
[pip3] mypy-extensions==1.0.0
[pip3] numpy==1.23.5
[pip3] pytorch-metric-learning==2.0.1
[pip3] torch==2.0.1
[pip3] torchaudio==2.0.2
[pip3] torchvision==0.15.2
[pip3] triton==2.0.0
[conda] blas 1.0 mkl
[conda] ffmpeg 4.3 hf484d3e_0 pytorch
[conda] mkl 2021.4.0 h06a4308_640
[conda] mkl-service 2.4.0 py310h7f8727e_0
[conda] mkl_fft 1.3.1 py310hd6ae3a3_0
[conda] mkl_random 1.2.2 py310h00e6091_0
[conda] numpy 1.23.5 py310hd5efca6_0
[conda] numpy-base 1.23.5 py310h8e6c178_0
[conda] pytorch 2.0.1 py3.10_cuda11.8_cudnn8.7.0_0 pytorch
[conda] pytorch-cuda 11.8 h7e8668a_5 pytorch
[conda] pytorch-metric-learning 2.0.1 pyhd8ed1ab_0 conda-forge
[conda] pytorch-mutex 1.0 cuda pytorch
[conda] torchaudio 2.0.2 py310_cu118 pytorch
[conda] torchtriton 2.0.0 py310 pytorch
[conda] torchvision 0.15.2 py310_cu118 pytorch
cc @ezyang @gchanan @zou3519 @albanD @svekars @carljparker
| From a quick check, it seems that no value for dim= returns the expected shape, both on CPU and CUDA.
Raising priority to investigate.
Keeping high pri to investigate, but for consistency's sake we want to preserve the Python behavior of treating negative dims as counted from the end of the tensor dims.
I think it is actually working properly, and I think that the assertion that "the expected output shape for `print(torch.unique(t, dim=-1))` should be `[2, 3, 2]`" is not correct. The output shape will depend on the values--even if all elements are only either 1 or 0.
The Pytorch documentation does not describe the behavior of the `dim` arg well. It only says "dim (int) - the dimension to apply unique. If None, the unique of the flattened input is returned", which is quite ambiguous for the `int` case.
But Numpy's documentation describes it in pretty good detail, particularly in the "Notes" section: https://numpy.org/doc/stable/reference/generated/numpy.unique.html
According to that explanation, given an input array of shape `(2, 3, 4)` (like the one given in the issue description) and `dim=-1`, `unique` will first create a temporary array where the last dimension is permuted to the first dimension, giving shape `(4, 2, 3)`. Then all the dimensions except for the first are temporarily flattened, giving shape `(4, 6)`. Now we have 4 individual vectors of size 6. The vectors (not the elements within the vectors) will be sorted, and then any duplicate vectors are thrown out, leaving us with either size `(1, 6)`, `(2, 6)`, `(3, 6)`, or `(4, 6)`, depending on how many duplicates there were. Then the original shapes of the remaining vectors will be restored from `(6,)` to `(2, 3)`. Finally the first dimension will be moved back to the last dimension.
The possible shapes of the final result are: `(2, 3, 4)` if the 4 vectors were all unique, `(2, 3, 3)` if there was one duplicate, `(2, 3, 2)` if there were two duplicates, or `(2, 3, 1)` if there were three duplicates.
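In code, the procedure described above can be emulated directly (a rough sketch for illustration, not the actual kernel):
```python
import torch

def unique_along_dim(t: torch.Tensor, dim: int) -> torch.Tensor:
    moved = t.movedim(dim, 0)                  # move `dim` to the front
    flat = moved.reshape(moved.shape[0], -1)   # flatten all remaining dims
    deduped = torch.unique(flat, dim=0)        # sort rows and drop duplicates
    return deduped.reshape(-1, *moved.shape[1:]).movedim(0, dim)
```
Applying this to the tensor in the example below should give the same result as `torch.unique(t, dim=-1)`.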
Here's a specific example, like the one in the issue description:
```python
>>> import torch
>>> torch.manual_seed(123)
>>> t = torch.rand(2, 3, 4)
>>> t[t>0.2] = 1
>>> t[t<=0.2] = 0
>>> t
tensor([[[1., 1., 1., 1.],
[0., 1., 0., 0.],
[0., 1., 1., 1.]],
[[0., 0., 1., 1.],
[0., 1., 1., 1.],
[1., 1., 1., 1.]]])
```
Here, if we give `dim=-1`, then we'll have the 4 6-element vectors that we can see by drawing four vertical lines through the text printout of the array, namely `[1, 0, 0, 0, 0, 1]`, `[1, 1, 1, 0, 1, 1]`, `[1, 0, 1, 1, 1, 1]`, and `[1, 0, 1, 1, 1, 1]`. Those last two arrays are identical, so one of them will be removed, and the final result will have shape `(2, 3, 3)`. And that's what we get:
```python
>>> torch.unique(t, dim=-1)
tensor([[[1., 1., 1.],
[0., 0., 1.],
[0., 1., 1.]],
[[0., 1., 0.],
[0., 1., 1.],
[1., 1., 1.]]])
>>> torch.unique(t.cuda(), dim=-1)
tensor([[[1., 1., 1.],
[0., 0., 1.],
[0., 1., 1.]],
[[0., 1., 0.],
[0., 1., 1.],
[1., 1., 1.]]], device='cuda:0')
>>> import numpy as np
>>> np.unique(t, axis=-1)
array([[[1., 1., 1.],
[0., 0., 1.],
[0., 1., 1.]],
[[0., 1., 0.],
[0., 1., 1.],
[1., 1., 1.]]], dtype=float32)
```
I think probably we should improve the documentation to better describe this behavior
Also stumbled on this. The docs don't have any example of this :(
But it would be great to have unique supporting batch mode (maybe returning results in some sort of padded or masked format - e.g. `return_inverse = True` by itself can certainly be done in batch mode, and unique items by themselves can be returned in some padded/NestedTensor format)
Let's update the doc with more details similar to the numpy one then! | 2023-09-05T21:30:05 |
|
pytorch/pytorch | 109,608 | pytorch__pytorch-109608 | [
"109387"
] | ced78cc2a797229c3ebe2096e7e6d52ef3911221 | diff --git a/torch/_dynamo/variables/constant.py b/torch/_dynamo/variables/constant.py
--- a/torch/_dynamo/variables/constant.py
+++ b/torch/_dynamo/variables/constant.py
@@ -33,7 +33,7 @@ def __init__(self, value, **kwargs):
for disallowed_type, reason in _type_to_assert_reason.items():
assert not isinstance(value, disallowed_type), reason
- if isinstance(value, np.number):
+ if np is not None and isinstance(value, np.number):
self.value = value.item()
else:
self.value = value
diff --git a/torch/_inductor/codegen/cpp.py b/torch/_inductor/codegen/cpp.py
--- a/torch/_inductor/codegen/cpp.py
+++ b/torch/_inductor/codegen/cpp.py
@@ -9,7 +9,6 @@
from copy import copy, deepcopy
from typing import Dict, List
-import numpy
import sympy
import torch
@@ -2148,7 +2147,7 @@ def constant(val, dtype):
# VecKernel override dtype for constant
# Vectorization only support int32/fp32 now
# So if dtype = int64/fp64, we will cast it to int32/fp32 if possible
- i32_iinfo = numpy.iinfo(numpy.int32)
+ i32_iinfo = torch.iinfo(torch.int32)
if (
dtype == torch.int64
and val <= i32_iinfo.max
@@ -2156,12 +2155,12 @@ def constant(val, dtype):
):
opt_ctx.dtype = torch.int32
- f32_iinfo = numpy.finfo(numpy.float32)
+ f32_iinfo = torch.finfo(torch.float32)
if dtype == torch.double:
if (
(val <= f32_iinfo.max and val >= f32_iinfo.min)
- or (val == numpy.inf)
- or (val == -numpy.inf)
+ or (val == torch.inf)
+ or (val == -torch.inf)
):
opt_ctx.dtype = torch.float32
@@ -2206,7 +2205,7 @@ def can_use_int32():
vars_ranges = {k: ValueRanges(0, v - 1) for k, v in sizes.items()}
if not vars_ranges or len(vars_ranges) != len(free_symbols):
- i32_iinfo = numpy.iinfo(numpy.int32)
+ i32_iinfo = torch.iinfo(torch.int32)
return (
expr.is_number
and expr <= i32_iinfo.max
diff --git a/torch/_inductor/fx_passes/split_cat.py b/torch/_inductor/fx_passes/split_cat.py
--- a/torch/_inductor/fx_passes/split_cat.py
+++ b/torch/_inductor/fx_passes/split_cat.py
@@ -3,8 +3,6 @@
import operator
from typing import Callable, List, Sequence, Tuple, Union
-import numpy
-
import torch
from torch._dynamo.utils import counters
@@ -454,8 +452,7 @@ def get_simplified_split_ranges(
for user_input in user_inputs
if isinstance(user_input, tuple)
}
-
- cumulative_sizes = [0] + list(numpy.cumsum(split_sections))
+ cumulative_sizes = [0] + torch.cumsum(torch.tensor(split_sections), 0).tolist()
split_ranges = sorted(
[(cumulative_sizes[r[0]], cumulative_sizes[r[1] + 1]) for r in ranges]
)
@@ -578,7 +575,7 @@ def replace_split(
for i in range(len(split_ranges))
]
# Now assign the right getitem to the right input
- cumulative_sizes = [0] + list(numpy.cumsum(split_sections))
+ cumulative_sizes = [0] + torch.cumsum(torch.tensor(split_sections), 0).tolist()
new_user_inputs_list = []
for user_inputs in user_inputs_list:
new_user_inputs = []
| `torch.compile` can not be used out of the box in nightly/2.1
### 🐛 Describe the bug
Consider the following trivial sample:
```python
import torch
def foo(x: torch.Tensor) -> torch.Tensor:
return torch.sin(x) + torch.cos(x)
if __name__=="__main__":
x = torch.rand(3, 3, device="cuda")
x_eager = foo(x)
x_pt2 = torch.compile(foo)(x)
print(torch.allclose(x_eager, x_pt2))
```
The following sample can be executed on a vanilla `ubuntu:20.04` image using 2.0.1, but fails with the 2.1 release candidate due to a spurious hard dependency on NumPy:
```shell
% python3 foo.py
...
File "/usr/local/lib/python3.8/dist-packages/torch/_dynamo/variables/base.py", line 96, in __call__
obj = type.__call__(cls, *args, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/torch/_dynamo/variables/constant.py", line 36, in __init__
if isinstance(value, np.number):
torch._dynamo.exc.InternalTorchDynamoError: 'NoneType' object has no attribute 'number'
from user code:
File "foo.py", line 4, in foo
return torch.sin(x) + torch.cos(x)
```
Please note that this is a regression, i.e. it used to work in 2.0.
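For reference, the crash above comes from an unguarded `isinstance(value, np.number)` check when NumPy is absent; a minimal sketch of the optional-import guard applied in the patch (the helper name here is hypothetical):
```python
try:
    import numpy as np
except ImportError:
    np = None  # numpy is an optional dependency

def to_python_scalar(value):
    # Only consult np.number when numpy was actually importable.
    if np is not None and isinstance(value, np.number):
        return value.item()
    return value
```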
### Versions
2.1/nightly
cc @ezyang @msaroufim @wconstab @bdhirsh @anijain2305
| 2023-09-19T15:08:06 |
||
pytorch/pytorch | 110,969 | pytorch__pytorch-110969 | [
"109186"
] | 209f2fa8ff86652f67d75c2f19bf9cb9942fd018 | diff --git a/torch/_torch_docs.py b/torch/_torch_docs.py
--- a/torch/_torch_docs.py
+++ b/torch/_torch_docs.py
@@ -2232,22 +2232,25 @@ def merge_dicts(*dicts):
the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents
a single variable (Scalar or 1D) then its variance is returned.
-The unbiased sample covariance of the variables :math:`x` and :math:`y` is given by:
+The sample covariance of the variables :math:`x` and :math:`y` is given by:
.. math::
- \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{N~-~1}
+ \text{cov}(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{\max(0,~N~-~\delta N)}
-where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively.
+where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively, and
+:math:`\delta N` is the :attr:`correction`.
-If :attr:`fweights` and/or :attr:`aweights` are provided, the unbiased weighted covariance
+If :attr:`fweights` and/or :attr:`aweights` are provided, the weighted covariance
is calculated, which is given by:
.. math::
- \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)}{\sum^{N}_{i = 1}w_i~-~1}
+ \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)}
+ {\max(0,~\sum^{N}_{i = 1}w_i~-~\frac{\sum^{N}_{i = 1}w_ia_i}{\sum^{N}_{i = 1}w_i}~\delta N)}
-where :math:`w` denotes :attr:`fweights` or :attr:`aweights` based on whichever is provided, or
-:math:`w = fweights \times aweights` if both are provided, and
-:math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable.
+where :math:`w` denotes :attr:`fweights` or :attr:`aweights` (``f`` and ``a`` for brevity) based on whichever is
+provided, or :math:`w = f \times a` if both are provided, and
+:math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable. If not
+provided, ``f`` and/or ``a`` can be seen as a :math:`\mathbb{1}` vector of appropriate size.
Args:
input (Tensor): A 2D matrix containing multiple variables and observations, or a
@@ -2260,11 +2263,11 @@ def merge_dicts(*dicts):
will return the simple average. Defaults to ``1``.
fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of
times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`.
- Must have integral dtype. Ignored if ``None``. `Defaults to ``None``.
+ Must have integral dtype. Ignored if ``None``. Defaults to ``None``.
aweights (tensor, optional): A Scalar or 1D array of observation vector weights.
These relative weights are typically large for observations considered “important” and smaller for
observations considered less “important”. Its numel must equal the number of columns of :attr:`input`.
- Must have floating point dtype. Ignored if ``None``. `Defaults to ``None``.
+ Must have floating point dtype. Ignored if ``None``. Defaults to ``None``.
Returns:
(Tensor) The covariance matrix of the variables.
@@ -10729,7 +10732,7 @@ def merge_dicts(*dicts):
The standard deviation (:math:`\sigma`) is calculated as
-.. math:: \sigma = \sqrt{\frac{1}{N - \delta N}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
+.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
@@ -10785,7 +10788,7 @@ def merge_dicts(*dicts):
The standard deviation (:math:`\sigma`) is calculated as
-.. math:: \sigma = \sqrt{\frac{1}{N - \delta N}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
+.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
@@ -12112,7 +12115,7 @@ def merge_dicts(*dicts):
The variance (:math:`\sigma^2`) is calculated as
-.. math:: \sigma^2 = \frac{1}{N - \delta N}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
+.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
@@ -12168,7 +12171,7 @@ def merge_dicts(*dicts):
The variance (:math:`\sigma^2`) is calculated as
-.. math:: \sigma^2 = \frac{1}{N - \delta N}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
+.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
sample mean, :math:`N` is the number of samples and :math:`\delta N` is
| torch.var outputs inf for correction>N
### 🐛 Describe the bug
torch.var
```
x = torch.arange(12, dtype=torch.float32).reshape(1,3,2,2)
torch.var(x, dim=2, correction=3)
```
output:
```
>>> torch.var(x, dim=2, correction=3)
tensor([[[inf, inf],
[inf, inf],
[inf, inf]]])
>>>
```
In the example above N=2, because we reduce over dim=2 and the size of that dimension is 2.
Now, if we increase the correction value beyond that dimension size (i.e. beyond 2 in this case), the result is inf for every element.
But according to the formula in https://pytorch.org/docs/stable/generated/torch.var.html, one would instead expect a negative value (since N - δN is negative), not inf.
### Versions
Versions
Versions of relevant libraries:
[pip3] flake8==5.0.4
[pip3] mypy-extensions==0.4.3
[pip3] numpy==1.24.3
[pip3] pytorch-lightning==1.9.4
[pip3] torch==2.0.1
[pip3] torch-tb-profiler==0.4.0
[pip3] torchaudio==2.0.1
[pip3] torchdata==0.6.1
[pip3] torchmetrics==0.11.1
cc @svekars @carljparker @albanD
| In the corresponding C++ code, the factor is actually implemented as `1 / max(0, N - correction)`. This explains the `inf` as `1 / 0 = inf`. I guess we have 3 options here:
- Fix the docs to reflect the `max(0, ...)` part
- Report a warning if `correction >= N` and keep the current `inf` behavior
- Throw an error if `correction >= N` and exit
Additionally, the above problem also applies to `torch.std`.
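A rough, Python-side sketch of what option 2 could look like (the actual check for `torch.cov` lives in C++; the wrapper and its name here are hypothetical):
```python
import warnings
import torch

def var_with_dof_check(x: torch.Tensor, dim=None, correction: int = 1) -> torch.Tensor:
    # Number of observations being reduced over.
    n = x.numel() if dim is None else x.shape[dim]
    if n - correction <= 0:
        warnings.warn("var(): degrees of freedom is <= 0, result will be inf/nan")
    return torch.var(x, dim=dim, correction=correction)
```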
Thanks for looking into this.
I think that updating the doc to have the max(0, ...) is the simplest fix here. Would you be able to send a PR with this update?
> Thanks for looking into this. I think that updating the doc to have the max(0, ...) is the simplest fix here. Would you be able to send a PR with this update?
Sure, I'll prepare a PR later.
While preparing the PR, I noticed some other functions that suffer from a similar problem.
- `torch.var`
- `torch.var_mean`
- `torch.std`
- `torch.std_mean`
- `torch.cov`
Unfortunately, the behavior is slightly inconsistent, as only `torch.cov` throws a `UserWarning` if `correction>=observations`. Maybe we should add that to the other 4 functions too.
```
>>> torch.var(torch.randn(3), correction=3)
tensor(inf)
>>> torch.std(torch.randn(3), correction=3)
tensor(inf)
>>> torch.var_mean(torch.randn(3), correction=3)
(tensor(inf), tensor(-0.4473))
>>> torch.std_mean(torch.randn(3), correction=3)
(tensor(inf), tensor(0.7958))
>>> torch.cov(torch.randn(2, 3), correction=3)
<stdin>:1: UserWarning: cov(): degrees of freedom is <= 0 (Triggered internally at ../aten/src/ATen/native/Correlation.cpp:117.)
tensor([[inf, -inf],
[-inf, inf]])
```
throwing a warning sounds good as well. But that will require a bit more work to add the warning at the right place and add proper testing.
If you have time, please do both. If you have limited time, just updating the doc is the most important change I think.
For now, I created a PR for the docs change only. I'll try working on the warning too, but we can also create a new issue for that. | 2023-10-10T18:46:27 |
|
pytorch/pytorch | 111,151 | pytorch__pytorch-111151 | [
"108869"
] | 909fcf9b217530f7c793c3797e78945030efc79b | diff --git a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py
--- a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py
+++ b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py
@@ -91,20 +91,17 @@ def build_metadata(self,
for idx, placement in enumerate(self.placements):
# generate ShardMetadata for each placement device
chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)
- if chunked_dim_size > 0:
- shard_size = list(tensor_sizes)
- current_offsets = [0] * tensor_num_dim
- current_offsets[self.dim] = split_size * idx # type: ignore[index]
- shard_size[self.dim] = chunked_dim_size # type: ignore[index]
-
- shard_metadata = ShardMetadata(
- shard_offsets=current_offsets,
- shard_sizes=shard_size,
- placement=placement,
- )
- shards_metadata.append(shard_metadata)
-
- # current_offsets[self.dim] += chunked_dim_size # type: ignore[index]
+ shard_size = list(tensor_sizes)
+ current_offsets = [0] * tensor_num_dim
+ current_offsets[self.dim] = split_size * idx # type: ignore[index]
+ shard_size[self.dim] = chunked_dim_size # type: ignore[index]
+
+ shard_metadata = ShardMetadata(
+ shard_offsets=current_offsets,
+ shard_sizes=shard_size,
+ placement=placement,
+ )
+ shards_metadata.append(shard_metadata)
return sharded_tensor_meta.ShardedTensorMetadata(
shards_metadata,
| Sharded checkpointing fails on load for certain tensor sizes
### 🐛 Describe the bug
Sharded checkpointing (particularly with FSDP for the optimizer state) uses ChunkShardingSpec to save/load tensors. ChunkShardingSpec's behavior is similar to torch.chunk and will result in some chunks of size 0.
This can be reproduced by trying to save a tensor of size 6 with 4 gpus. This tensor is sharded across the first 3 gpus. The resulting size of the chunks will look like [2, 2, 2, 0]. On save, it seems like ChunkShardingSpec is aware of which gpus contain shards, so it saves the tensor with shard metadata showing the size of chunks to be [2, 2, 2].
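As a quick illustration of that split pattern (illustration only; the spec computes it via `get_chunked_dim_size`, but `torch.chunk` shows the same behavior):
```python
import torch

t = torch.arange(6)
chunks = torch.chunk(t, 4)          # ask for 4 chunks, one per rank
print(len(chunks))                  # 3 -- the 4th rank gets no chunk at all
print([c.numel() for c in chunks])  # [2, 2, 2]
```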
The problem occurs when attempting to load the sharded checkpoint. ChunkShardingSpec attempts to rebuild the metadata, this time being unaware of how many gpus originally contained shards. It knows that there is a tensor of size 6 and 4 gpus though, so it generates shard metadata with chunk sizes [2, 2, 2], [skipping the last gpu since it has size 0](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L94). Then when attempting to shard the tensor, the 4th gpu has no shard metadata, so a [local_tensor is never created](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L165), resulting in an [assertion error on the 4th rank](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L172) and a [type error on all other ranks](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/distributed_c10d.py#L808) because None is contained in the scatter_list.
There are a couple possible solutions to this.
1. Add shardmetadata for all gpus that allow for a tensor to be size 0
2. Change ChunkShardingSpec to distribute a tensor evenly across gpus (e.g. [2, 2, 1, 1] instead of [2, 2, 2, 0])
I've implemented and tested both solutions and both are backwards compatible with previously saved sharded checkpoints on versions 2.0.1, 2.1.0-rc3, and 8/27 nightly (e.g. save on 2.0.1 with the old ChunkShardingSpec and load on 2.0.1 with the new ChunkShardingSpec). Both solutions are also cross-version compatible for 2.0.1->2.1.0-rc3, 2.0.1->8/27 nightly, and 8/27 nightly->2.0.1 (e.g. save on 2.0.1 with the old ChunkShardingSpec and load on 2.1.0 with the new ChunkShardingSpec). The solutions might be version/cross-version compatible for other combinations, but I haven't tested those.
### Versions
This happens with pytorch 2.0.1, 2.1.0-rc3, and 8/27 nightly.
cc @mrshenli @pritamdamania87 @zhaojuanmao @satgera @rohan-varma @gqchen @aazzolini @osalpekar @jiayisuse @H-Huang @kwen2501 @awgu @penguinwu
| cc: @fegin @kumpera @wz337
Also this is a breaking bug with sharded checkpoints where sharded checkpoints can be saved but never loaded if a gpu doesn't contain a shard. I'm very surprised no one has encountered this issue before. | 2023-10-12T18:04:08 |
|
pytorch/pytorch | 111,694 | pytorch__pytorch-111694 | [
"110935",
"110597"
] | 5bcfb1b9b451a50e25b3bbc1f745aed55894fcb0 | diff --git a/torch/onnx/_type_utils.py b/torch/onnx/_type_utils.py
--- a/torch/onnx/_type_utils.py
+++ b/torch/onnx/_type_utils.py
@@ -170,7 +170,9 @@ def from_value(
SymbolicValueError: when value.type()'s info are empty and default is None
"""
- if not isinstance(value, (torch._C.Value, torch.Tensor)):
+ if not isinstance(value, (torch._C.Value, torch.Tensor)) or (
+ isinstance(value, torch._C.Value) and value.node().mustBeNone()
+ ):
# default value of type JitScalarType is returned when value is not valid
if default is None:
raise errors.OnnxExporterError(
diff --git a/torch/onnx/symbolic_helper.py b/torch/onnx/symbolic_helper.py
--- a/torch/onnx/symbolic_helper.py
+++ b/torch/onnx/symbolic_helper.py
@@ -476,8 +476,8 @@ def _if_scalar_type_as(self, tensor):
@_beartype.beartype
-def _is_none(x: _C.Value) -> bool:
- return x.node().mustBeNone()
+def _is_none(x: Any) -> bool:
+ return x is None or (x.node().mustBeNone() if isinstance(x, _C.Value) else False)
@_beartype.beartype
diff --git a/torch/onnx/symbolic_opset9.py b/torch/onnx/symbolic_opset9.py
--- a/torch/onnx/symbolic_opset9.py
+++ b/torch/onnx/symbolic_opset9.py
@@ -3785,7 +3785,7 @@ def new_empty(
g: jit_utils.GraphContext, self, sizes, dtype, layout, device, pin_memory=False
):
self_dtype = symbolic_helper._try_get_scalar_type(self)
- if dtype is None and self_dtype is not None:
+ if symbolic_helper._is_none(dtype) and self_dtype is not None:
dtype = self_dtype
return empty(g, sizes, dtype, layout, device, pin_memory)
@@ -3867,7 +3867,7 @@ def zeros_like(
memory_format=None,
):
shape = g.op("Shape", input)
- if dtype is None:
+ if symbolic_helper._is_none(dtype):
scalar_type = _type_utils.JitScalarType.from_value(
input, _type_utils.JitScalarType.FLOAT
)
@@ -3886,7 +3886,8 @@ def new_zeros(
g: jit_utils.GraphContext, self, sizes, dtype, layout, device, pin_memory=False
):
self_dtype = symbolic_helper._try_get_scalar_type(self)
- if dtype is None and self_dtype is not None:
+
+ if symbolic_helper._is_none(dtype) and self_dtype is not None:
dtype = self_dtype
return zeros(g, sizes, dtype, layout, device, pin_memory)
@@ -3929,7 +3930,7 @@ def ones_like(
memory_format=None,
):
shape = g.op("Shape", input)
- if dtype is None:
+ if symbolic_helper._is_none(dtype):
scalar_type = _type_utils.JitScalarType.from_value(
input, _type_utils.JitScalarType.FLOAT
)
@@ -3948,7 +3949,7 @@ def new_ones(
g: jit_utils.GraphContext, self, sizes, dtype, layout, device, pin_memory=False
):
self_dtype = symbolic_helper._try_get_scalar_type(self)
- if dtype is None and self_dtype is not None:
+ if symbolic_helper._is_none(dtype) and self_dtype is not None:
dtype = self_dtype
return ones(g, sizes, dtype, layout, device, pin_memory)
@@ -4025,7 +4026,7 @@ def new_full(
pin_memory=False,
):
self_dtype = symbolic_helper._try_get_scalar_type(self)
- if dtype is None and self_dtype is not None:
+ if symbolic_helper._is_none(dtype) and self_dtype is not None:
dtype = self_dtype
return full(g, size, fill_value, dtype, layout, device, pin_memory)
| diff --git a/test/onnx/test_pytorch_onnx_onnxruntime.py b/test/onnx/test_pytorch_onnx_onnxruntime.py
--- a/test/onnx/test_pytorch_onnx_onnxruntime.py
+++ b/test/onnx/test_pytorch_onnx_onnxruntime.py
@@ -6511,6 +6511,21 @@ def forward(self, x):
self.run_test(Zero_(), x, input_names=["x"], dynamic_axes={"x": [0, 1, 2]})
self.run_test(Zero_(), x, remained_onnx_input_idx=[])
+ @skipIfUnsupportedMinOpsetVersion(9)
+ def test_new_zeros_with_dtype(self):
+ class MyModel(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.emb = torch.nn.Embedding(50, 64)
+
+ def forward(self, x):
+ inp = x.new_zeros(x.shape)
+ return self.emb(inp)
+
+ model = MyModel()
+ x = torch.Tensor([[2, 5, 6], [3, 2, 5]]).to(torch.int64)
+ self.run_test(model, x, input_names=["x"], dynamic_axes={"x": [0, 1]})
+
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_ones(self):
class OnesModel(torch.nn.Module):
| [ONNX] Fix aten::new_zeros export
Fixes export in https://github.com/pytorch/pytorch/issues/110597
cc @justinchuby
[PyTorch 2.1 regression] TorchScript behavior changed from 2.0.1 (and older) to 2.1
### 🐛 Describe the bug
The ONNX export caught a behavior change in the TorchScript API after PyTorch 2.1 was released. Before, `torch._C.Value.node().mustBeNone()` returned `False` for a model with `aten::new_zeros` ops, while after PyTorch 2.1 it returns `True`, changing the execution path of the ONNX model export.
There is nothing in the [Pytorch 2.1 Release Notes](https://github.com/pytorch/pytorch/releases/tag/v2.1.0) that mentions a behavior change of this nature
Reproduction:
```
import torch.nn as nn
import torch
import onnxruntime as ort
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.emb = nn.Embedding(50, 64)
def forward(self, x):
inp = x.new_zeros(x.shape)
return self.emb(inp)
model = MyModel()
inp = torch.Tensor([[2, 5, 6], [3, 2, 5]]).to(torch.int64)
torch.onnx.export(model, (inp,), "model.onnx", opset_version=9)
```
The repro uses `torch.onnx.export` because it calls `torch.jit.trace` and provides an easy entry point to print `new_zeros` arguments.
The ONNX error is as shown:
```bash
/opt/pytorch/torch/onnx/utils.py:1703: UserWarning: The exported ONNX model failed ONNX shape inference. The model will not be executable by the ONNX Runtime. If this is unintended and you believe there is a bug, please report an issue at https://github.com/pytorch/pytorch/issues. Error reported by strict ONNX shape inference: [ShapeInferenceError] (op_type:Gather, node name: /emb/Gather): indices typestr: Tind, has unsupported type: tensor(float) (Triggered internally at /opt/pytorch/torch/csrc/jit/serialization/export.cpp:1445.)
_C._check_onnx_proto(proto)
Traceback (most recent call last):
File "repro_jit_type.py", line 18, in <module>
torch.onnx.export(model, (inp,), "model.onnx", opset_version=9)
File "/opt/conda/envs/ptca/lib/python3.8/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 419, in __init__
self._create_inference_session(providers, provider_options, disabled_optimizers)
File "/opt/conda/envs/ptca/lib/python3.8/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 460, in _create_inference_session
sess = C.InferenceSession(session_options, self._model_path, True, self._read_config_from_model)
onnxruntime.capi.onnxruntime_pybind11_state.InvalidGraph: [ONNXRuntimeError] : 10 : INVALID_GRAPH : Load model from model.onnx failed:This is an invalid model. Type Error: Type 'tensor(float)' of input parameter (/Constant_output_0) of operator (Gather) in node (/emb/Gather) is invalid.
```
The error above doesn't point to TorchScript, but shows the execution path has changed for ONNX exporter.
Looking into it further and printing the `new_zeros`'s `dtype` argument (from within the [ONNX exporter code](https://github.com/pytorch/pytorch/blob/144cda7f068854dd870c9567781aa2aca6d5e4cf/torch/onnx/symbolic_opset9.py#L388) which has an easy entry point to the `new_zeros` TorchScript node), we can see that on PyTorch 2.1, the `dtype` arg of `new_zeros` is captured as:
```python
> /opt/pytorch/torch/onnx/symbolic_opset9.py(3889)new_zeros()
-> self_dtype = symbolic_helper._try_get_scalar_type(self)
(Pdb) dtype
15 defined in (%15 : NoneType = prim::Constant(), scope: __main__.MyModel::
)
```
However, in PyTorch 2.0.1 and older, it was:
```python
> /opt/conda/envs/ptca/lib/python3.8/site-packages/torch/onnx/symbolic_opset9.py(3708)new_zeros()
-> self_dtype = symbolic_helper._try_get_scalar_type(self)
(Pdb) dtype
15 defined in (%15 : Long(device=cpu) = onnx::Constant[value={4}](), scope: __main__.MyModel::
)
```
### Versions
PyTorch 2.1
cc @ezyang @gchanan @zou3519 @kadeng @EikanWang @jgong5 @wenzhe-nrv @sanchitintel
| <!-- drci-comment-start -->
## :link: Helpful Links
### :test_tube: See artifacts and rendered test results at [hud.pytorch.org/pr/110935](https://hud.pytorch.org/pr/110935)
* :page_facing_up: Preview [Python docs built from this PR](https://docs-preview.pytorch.org/pytorch/pytorch/110935/index.html)
* :page_facing_up: Preview [C++ docs built from this PR](https://docs-preview.pytorch.org/pytorch/pytorch/110935/cppdocs/index.html)
* :question: Need help or want to give feedback on the CI? Visit the [bot commands wiki](https://github.com/pytorch/pytorch/wiki/Bot-commands) or our [office hours](https://github.com/pytorch/pytorch/wiki/Dev-Infra-Office-Hours)
Note: Links to docs will display an error until the docs builds have been completed.
## :x: 1 New Failure
As of commit 5d318d5fcca6e06317a0ce9c248b89ebb194caa5 with merge base 37567fdf311549dedf74731b359edc6b077f30a0 (<sub><sub><img alt="image" width=70 src="https://img.shields.io/date/1696925223?label=&color=FFFFFF&style=flat-square"></sub></sub>):
<details open><summary><b>NEW FAILURE</b> - The following job has failed:</summary><p>
* [pull / linux-focal-py3.8-clang10-onnx / test (default, 1, 2, linux.2xlarge)](https://hud.pytorch.org/pr/pytorch/pytorch/110935#17568957963) ([gh](https://github.com/pytorch/pytorch/actions/runs/6470973050/job/17568957963))
</p></details>
This comment was automatically generated by Dr. CI and updates every 15 minutes.
<!-- drci-comment-end -->
<a href="https://easycla.lfx.linuxfoundation.org/#/?version=2"><img src="https://s3.amazonaws.com/cla-project-logo-prod/cla-signed.svg" alt="CLA Signed" align="left" height="28" width="328" ></a><br/><br />The committers listed above are authorized under a signed CLA.<ul><li>:white_check_mark: login: fxmarty (4e9117f0c5b9d05e84f06cb80f2b4beeb7e7a7ac, 5d318d5fcca6e06317a0ce9c248b89ebb194caa5)</li></ul>
Could you enable the test in test_op_consistency and fix CLA? Thanks!
Thank you. I added the test in test_op_consistency (which is not passing, independently of this PR; happy to address any suggestions here) and signed the CLA.
@fxmarty I am testing https://github.com/pytorch/pytorch/pull/110956 which pushes your fix to `from_value` while we don't hear from PyTorch
What is different from the original PR:
* Generic code: The `torch._C.Value.node().mustBeNone()` is encapsulated into the high level API `JitScalarType.from_value` so users don't have to worry about it
* Specific to new_zeros: When checking `dtype`, we always must use `JitScalarType` for all arguments. `new_zeros` calls `from_value` for `self` argument but not for `dtype` too, which skips all the handling provided by `JitScalarType`. Other operators had the same issue and were fixed here too
> Could you enable the test in test_op_consistency and fix CLA? Thanks!
@justinchuby adding `new_zeros` there raised those errors. Any idea on why?
```bash
onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool SUBFAIL [0.0000s] [ 0%]
onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool SUBFAIL [0.0000s] [ 0%]
onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool SUBFAIL [0.0000s] [ 0%]
onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool SUBFAIL [0.0000s] [ 0%]
onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool SUBFAIL [0.0000s] [ 0%]
onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool SUBFAIL [0.0000s] [ 0%]
onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool SUBFAIL [0.0000s] [ 0%]
onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool PASSED [0.1885s] [ 0%]
=================================== FAILURES ===================================
_ TestOnnxModelOutputConsistency_opset9CPU.test_output_match_new_zeros_cpu_bool (inputs='(tensor(False), ())', kwargs='{}', opset=9, sample_num=0) _
Traceback (most recent call last):
File "onnx/test_op_consistency.py", line 317, in test_output_match
self.run_test(model, inputs, rtol=rtol, atol=atol)
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 204, in run_test
_run_test(model, tracing_remained_onnx_input_idx)
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 164, in _run_test
return run_model_test(
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 84, in run_model_test
return verification.verify(*args, options=options, **kwargs)
File "<@beartype(torch.onnx.verification.verify) at 0x7f2ef0a3e160>", line 352, in verify
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 890, in verify
_compare_onnx_pytorch_model(
File "<@beartype(torch.onnx.verification._compare_onnx_pytorch_model) at 0x7f2ef0a271f0>", line 160, in _compare_onnx_pytorch_model
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 451, in _compare_onnx_pytorch_model
compare_onnx_pytorch_model_with_input(input_args, input_kwargs)
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 443, in compare_onnx_pytorch_model_with_input
onnx_outs = _run_onnx(onnx_session, onnx_inputs)
File "<@beartype(torch.onnx.verification._run_onnx) at 0x7f2ef0a994c0>", line 16, in _run_onnx
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 172, in _run_onnx
raise ValueError(
ValueError: got too many positional inputs. inputs: [array(False)]. kw_inputs: {}. input names: [].
_ TestOnnxModelOutputConsistency_opset9CPU.test_output_match_new_zeros_cpu_bool (inputs='(tensor([[ True, True, False, True, True],\n [ True, True, True, True, True],\n [False, False, True, False, False],\n [False, False, False, True, False],\n [ True, True, False, False, True]]), (2, 0))', kwargs='{}', opset=9, sample_num=1) _
Traceback (most recent call last):
File "onnx/test_op_consistency.py", line 317, in test_output_match
self.run_test(model, inputs, rtol=rtol, atol=atol)
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 204, in run_test
_run_test(model, tracing_remained_onnx_input_idx)
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 164, in _run_test
return run_model_test(
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 84, in run_model_test
return verification.verify(*args, options=options, **kwargs)
File "<@beartype(torch.onnx.verification.verify) at 0x7f2ef0a3e160>", line 352, in verify
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 890, in verify
_compare_onnx_pytorch_model(
File "<@beartype(torch.onnx.verification._compare_onnx_pytorch_model) at 0x7f2ef0a271f0>", line 160, in _compare_onnx_pytorch_model
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 451, in _compare_onnx_pytorch_model
compare_onnx_pytorch_model_with_input(input_args, input_kwargs)
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 443, in compare_onnx_pytorch_model_with_input
onnx_outs = _run_onnx(onnx_session, onnx_inputs)
File "<@beartype(torch.onnx.verification._run_onnx) at 0x7f2ef0a994c0>", line 16, in _run_onnx
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 172, in _run_onnx
raise ValueError(
ValueError: got too many positional inputs. inputs: [array([[ True, True, False, True, True],
[ True, True, True, True, True],
[False, False, True, False, False],
[False, False, False, True, False],
[ True, True, False, False, True]]), array(2), array(0)]. kw_inputs: {}. input names: ['onnx::Unsqueeze_1', 'onnx::Unsqueeze_2'].
_ TestOnnxModelOutputConsistency_opset9CPU.test_output_match_new_zeros_cpu_bool (inputs='(tensor([], size=(0, 5, 0), dtype=torch.bool), (3, 2, 2))', kwargs='{}', opset=9, sample_num=2) _
Traceback (most recent call last):
File "onnx/test_op_consistency.py", line 317, in test_output_match
self.run_test(model, inputs, rtol=rtol, atol=atol)
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 204, in run_test
_run_test(model, tracing_remained_onnx_input_idx)
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 164, in _run_test
return run_model_test(
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 84, in run_model_test
return verification.verify(*args, options=options, **kwargs)
File "<@beartype(torch.onnx.verification.verify) at 0x7f2ef0a3e160>", line 352, in verify
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 890, in verify
_compare_onnx_pytorch_model(
File "<@beartype(torch.onnx.verification._compare_onnx_pytorch_model) at 0x7f2ef0a271f0>", line 160, in _compare_onnx_pytorch_model
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 451, in _compare_onnx_pytorch_model
compare_onnx_pytorch_model_with_input(input_args, input_kwargs)
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 443, in compare_onnx_pytorch_model_with_input
onnx_outs = _run_onnx(onnx_session, onnx_inputs)
File "<@beartype(torch.onnx.verification._run_onnx) at 0x7f2ef0a994c0>", line 16, in _run_onnx
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 172, in _run_onnx
raise ValueError(
ValueError: got too many positional inputs. inputs: [array([], shape=(0, 5, 0), dtype=bool), array(3), array(2), array(2)]. kw_inputs: {}. input names: ['onnx::Unsqueeze_1', 'onnx::Unsqueeze_2', 'onnx::Unsqueeze_3'].
_ TestOnnxModelOutputConsistency_opset9CPU.test_output_match_new_zeros_cpu_bool (inputs='(tensor([ True, True, True, False, True]), (2, 3))', kwargs="{'dtype': torch.bool, 'device': 'cpu'}", opset=9, sample_num=3) _
Traceback (most recent call last):
File "onnx/test_op_consistency.py", line 317, in test_output_match
self.run_test(model, inputs, rtol=rtol, atol=atol)
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 204, in run_test
_run_test(model, tracing_remained_onnx_input_idx)
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 164, in _run_test
return run_model_test(
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 84, in run_model_test
return verification.verify(*args, options=options, **kwargs)
File "<@beartype(torch.onnx.verification.verify) at 0x7f2ef0a3e160>", line 352, in verify
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 890, in verify
_compare_onnx_pytorch_model(
File "<@beartype(torch.onnx.verification._compare_onnx_pytorch_model) at 0x7f2ef0a271f0>", line 160, in _compare_onnx_pytorch_model
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 451, in _compare_onnx_pytorch_model
compare_onnx_pytorch_model_with_input(input_args, input_kwargs)
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 443, in compare_onnx_pytorch_model_with_input
onnx_outs = _run_onnx(onnx_session, onnx_inputs)
File "<@beartype(torch.onnx.verification._run_onnx) at 0x7f2ef0a994c0>", line 16, in _run_onnx
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 172, in _run_onnx
raise ValueError(
ValueError: got too many positional inputs. inputs: [array([ True, True, True, False, True]), array(2), array(3)]. kw_inputs: {}. input names: ['onnx::Unsqueeze_1', 'onnx::Unsqueeze_2'].
_ TestOnnxModelOutputConsistency_opset9CPU.test_output_match_new_zeros_cpu_bool (inputs='(tensor([False, True, False, True, True]), (10,))', kwargs="{'dtype': torch.float64}", opset=9, sample_num=4) _
Traceback (most recent call last):
File "onnx/test_op_consistency.py", line 317, in test_output_match
self.run_test(model, inputs, rtol=rtol, atol=atol)
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 204, in run_test
_run_test(model, tracing_remained_onnx_input_idx)
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 164, in _run_test
return run_model_test(
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 84, in run_model_test
return verification.verify(*args, options=options, **kwargs)
File "<@beartype(torch.onnx.verification.verify) at 0x7f2ef0a3e160>", line 352, in verify
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 890, in verify
_compare_onnx_pytorch_model(
File "<@beartype(torch.onnx.verification._compare_onnx_pytorch_model) at 0x7f2ef0a271f0>", line 160, in _compare_onnx_pytorch_model
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 451, in _compare_onnx_pytorch_model
compare_onnx_pytorch_model_with_input(input_args, input_kwargs)
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 443, in compare_onnx_pytorch_model_with_input
onnx_outs = _run_onnx(onnx_session, onnx_inputs)
File "<@beartype(torch.onnx.verification._run_onnx) at 0x7f2ef0a994c0>", line 16, in _run_onnx
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 172, in _run_onnx
raise ValueError(
ValueError: got too many positional inputs. inputs: [array([False, True, False, True, True]), array(10)]. kw_inputs: {}. input names: ['onnx::Unsqueeze_1'].
_ TestOnnxModelOutputConsistency_opset9CPU.test_output_match_new_zeros_cpu_bool (inputs='(tensor([False, True, True, False, False]), (1, 1, 12))', kwargs="{'device': 'cpu'}", opset=9, sample_num=5) _
Traceback (most recent call last):
File "onnx/test_op_consistency.py", line 317, in test_output_match
self.run_test(model, inputs, rtol=rtol, atol=atol)
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 204, in run_test
_run_test(model, tracing_remained_onnx_input_idx)
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 164, in _run_test
return run_model_test(
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 84, in run_model_test
return verification.verify(*args, options=options, **kwargs)
File "<@beartype(torch.onnx.verification.verify) at 0x7f2ef0a3e160>", line 352, in verify
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 890, in verify
_compare_onnx_pytorch_model(
File "<@beartype(torch.onnx.verification._compare_onnx_pytorch_model) at 0x7f2ef0a271f0>", line 160, in _compare_onnx_pytorch_model
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 451, in _compare_onnx_pytorch_model
compare_onnx_pytorch_model_with_input(input_args, input_kwargs)
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 443, in compare_onnx_pytorch_model_with_input
onnx_outs = _run_onnx(onnx_session, onnx_inputs)
File "<@beartype(torch.onnx.verification._run_onnx) at 0x7f2ef0a994c0>", line 16, in _run_onnx
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 172, in _run_onnx
raise ValueError(
ValueError: got too many positional inputs. inputs: [array([False, True, True, False, False]), array(1), array(1), array(12)]. kw_inputs: {}. input names: ['onnx::Unsqueeze_1', 'onnx::Unsqueeze_2', 'onnx::Unsqueeze_3'].
_ TestOnnxModelOutputConsistency_opset9CPU.test_output_match_new_zeros_cpu_bool (inputs='(tensor([ True, False, True, True, True]), (2, 2, 2))', kwargs="{'dtype': torch.float64, 'device': 'cpu'}", opset=9, sample_num=6) _
Traceback (most recent call last):
File "onnx/test_op_consistency.py", line 317, in test_output_match
self.run_test(model, inputs, rtol=rtol, atol=atol)
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 204, in run_test
_run_test(model, tracing_remained_onnx_input_idx)
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 164, in _run_test
return run_model_test(
File "/var/lib/jenkins/workspace/test/onnx/onnx_test_common.py", line 84, in run_model_test
return verification.verify(*args, options=options, **kwargs)
File "<@beartype(torch.onnx.verification.verify) at 0x7f2ef0a3e160>", line 352, in verify
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 890, in verify
_compare_onnx_pytorch_model(
File "<@beartype(torch.onnx.verification._compare_onnx_pytorch_model) at 0x7f2ef0a271f0>", line 160, in _compare_onnx_pytorch_model
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 451, in _compare_onnx_pytorch_model
compare_onnx_pytorch_model_with_input(input_args, input_kwargs)
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 443, in compare_onnx_pytorch_model_with_input
onnx_outs = _run_onnx(onnx_session, onnx_inputs)
File "<@beartype(torch.onnx.verification._run_onnx) at 0x7f2ef0a994c0>", line 16, in _run_onnx
File "/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/onnx/verification.py", line 172, in _run_onnx
raise ValueError(
ValueError: got too many positional inputs. inputs: [array([ True, False, True, True, True]), array(2), array(2), array(2)]. kw_inputs: {}. input names: ['onnx::Unsqueeze_1', 'onnx::Unsqueeze_2', 'onnx::Unsqueeze_3'].
- generated xml file: /var/lib/jenkins/workspace/test/test-reports/python-pytest/onnx.test_op_consistency/onnx.test_op_consistency-38452ac968537a1e.xml -
=========================== short test summary info ============================
SUBFAIL [0.0000s] onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool - ValueError: got too many positional inputs. inputs: [array(False)]. kw_inputs: {}. input names: [].
SUBFAIL [0.0000s] onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool - ValueError: got too many positional inputs. inputs: [array([[ True, True, False, True, True],
[ True, True, True, True, True],
[False, False, True, False, False],
[False, False, False, True, False],
[ True, True, False, False, True]]), array(2), array(0)]. kw_inputs: {}. input names: ['onnx::Unsqueeze_1', 'onnx::Unsqueeze_2'].
SUBFAIL [0.0000s] onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool - ValueError: got too many positional inputs. inputs: [array([], shape=(0, 5, 0), dtype=bool), array(3), array(2), array(2)]. kw_inputs: {}. input names: ['onnx::Unsqueeze_1', 'onnx::Unsqueeze_2', 'onnx::Unsqueeze_3'].
SUBFAIL [0.0000s] onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool - ValueError: got too many positional inputs. inputs: [array([ True, True, True, False, True]), array(2), array(3)]. kw_inputs: {}. input names: ['onnx::Unsqueeze_1', 'onnx::Unsqueeze_2'].
SUBFAIL [0.0000s] onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool - ValueError: got too many positional inputs. inputs: [array([False, True, False, True, True]), array(10)]. kw_inputs: {}. input names: ['onnx::Unsqueeze_1'].
SUBFAIL [0.0000s] onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool - ValueError: got too many positional inputs. inputs: [array([False, True, True, False, False]), array(1), array(1), array(12)]. kw_inputs: {}. input names: ['onnx::Unsqueeze_1', 'onnx::Unsqueeze_2', 'onnx::Unsqueeze_3'].
/opt/conda/envs/py_3.8/lib/python3.8/site-packages/torch/__init__.py:635: UserWarning: torch.set_default_tensor_type() is deprecated as of PyTorch 2.1, please use torch.set_default_dtype() and torch.set_default_device() as alternatives. (Triggered internally at /var/lib/jenkins/workspace/torch/csrc/tensor/python_tensor.cpp:453.)
_C._set_default_tensor_type(t)
SUBFAIL [0.0000s] onnx/test_op_consistency.py::TestOnnxModelOutputConsistency_opset9CPU::test_output_match_new_zeros_cpu_bool - ValueError: got too many positional inputs. inputs: [array([ True, False, True, True, True]), array(2), array(2), array(2)]. kw_inputs: {}. input names: ['onnx::Unsqueeze_1', 'onnx::Unsqueeze_2', 'onnx::Unsqueeze_3'].
!!!!!!!!!!!!!!!!!!!!!!!!!! stopping after 7 failures !!!!!!!!!!!!!!!!!!!!!!!!!!!
================== 7 failed, 1 passed, 62 deselected in 0.31s ==================
```
I am not very sure. It looks like the exported model was expecting 3 inputs but the test provided 4.
Potentially, you may xfail the bool dtypes with an `xfail(..., dtypes=(torch.bool,))` entry to unblock this.
> I am not very sure. It looks like the exported model was expecting 3 inputs but the test provided 4.
I think I know why. The tests for `aten::new_zeros` are exported without inputs, because all inputs are probably folded into graph constants. To fix that, the tests should run with `dynamic_axes` specified in the `torch.onnx.export` call
https://github.com/pytorch/pytorch/pull/110956 is being merged to fix this. @fxmarty has been added as co-author on the new PR
[model.onnx.zip](https://github.com/pytorch/pytorch/files/12819009/model.onnx.zip)
Adding the failing model exported from `torch.onnx.export` on 2.1 for reference
Hi @justinchuby @BowenBao @thiagocrepaldi, this issue is related to the dtype `indices` is exported to.
Minimal reproduction:
```python
import torch.nn as nn
import torch
import onnxruntime as ort
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.emb = nn.Embedding(50, 64)
def forward(self, x):
inp = x.new_zeros(x.shape)
return self.emb(inp)
model = MyModel()
inp = torch.Tensor([[2, 5, 6], [3, 2, 5]]).to(torch.int64)
torch.onnx.export(model, (inp,), "model.onnx", opset_version=9)
session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
```
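For anyone else debugging this, one way to confirm which element type actually ended up in the exported graph is to inspect the saved model with the `onnx` package (a sketch, assuming the repro above has already written `model.onnx`; the exact node layout can differ between opsets):
```python
import onnx

model = onnx.load("model.onnx")
for node in model.graph.node:
    # The zeros produced for `new_zeros` typically show up as a Constant or
    # ConstantOfShape node; printing it reveals which dtype was baked in.
    if node.op_type in ("Constant", "ConstantOfShape"):
        print(node)
```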
Debugging, it appears that the issue is due to this check, where `dtype` is not None
https://github.com/pytorch/pytorch/blob/e9ebda29d87ce0916ab08c06ab26fd3766a870e5/torch/onnx/symbolic_opset9.py#L3707-L3708
and when calling `zeros`, and in turn its decorator `symbolic_helper.parse_args`, we used to go into this control flow for the argument `dtype`,
https://github.com/pytorch/pytorch/blob/e9ebda29d87ce0916ab08c06ab26fd3766a870e5/torch/onnx/symbolic_helper.py#L82-L83
while in 2.1.0 we go into this control flow
https://github.com/pytorch/pytorch/blob/e9ebda29d87ce0916ab08c06ab26fd3766a870e5/torch/onnx/symbolic_helper.py#L78-L79
Thus, in the `zeros` function, with 2.0.1 we rightfully call `scalar_type = _type_utils.JitScalarType(dtype)`, while in 2.1.0 we call `scalar_type = _type_utils.JitScalarType.FLOAT`.
https://github.com/pytorch/pytorch/blob/e9ebda29d87ce0916ab08c06ab26fd3766a870e5/torch/onnx/symbolic_opset9.py#L3690-L3693
It is unclear to me why `mustBeNone` for this node has changed between 2.0.1 and 2.1, do you have any idea? The node is `15 defined in (%15 : NoneType = prim::Constant(), scope: __main__.MyModel::)` (printed with 2.1.0) with the above example.
Maybe the issue is with torch.jit.trace and not ONNX?
Thank you!
Actually yes, the issue is in the captured `ScriptModule`. Now in 2.1.0 the `dtype` arg of `new_zeros` is captured as `%17 : NoneType = prim::Constant()`, while it used to be captured as `%17 : int = prim::Constant[value=4]() # <tmp 4>+34:11:0`
An easy solution is to replace this check https://github.com/pytorch/pytorch/blob/e9ebda29d87ce0916ab08c06ab26fd3766a870e5/torch/onnx/symbolic_opset9.py#L3707-L3708
by
```python
if dtype.node().mustBeNone() and self_dtype is not None:
dtype = self_dtype
```
Is that fine or is the change in `torch.jit.trace` a bug and the issue should be fixed upstream?
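As a side note, the difference can be observed without going through the ONNX exporter at all, by printing the TorchScript trace directly (a small sketch reusing `MyModel` and `inp` from the repro above):
```python
import torch

# On 2.0.1 the dtype argument of new_zeros shows up as an int prim::Constant
# (value=4 for int64); on 2.1.0 it shows up as a NoneType prim::Constant.
traced = torch.jit.trace(MyModel(), (inp,))
print(traced.graph)
```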
Thanks for discovering the issue and for the detailed analysis! Whether it is a bug in jit can be investigated (from briefly scanning the code it seems fine for dtype to be none). But the change in exporter makes sense to me.
Thank you. I'll do the change for `new_zeros` then, though the change in jit.trace may (or may not) impact other ops as well.
> Actually yes, the issue is in the captured `ScriptModule`. Now in 2.1.0 the `dtype` arg of `new_zeros` is captured as `%17 : NoneType = prim::Constant()`, while it used to be captured as `%17 : int = prim::Constant[value=4]() # <tmp 4>+34:11:0`
>
> An easy solution is to replace this check
>
> https://github.com/pytorch/pytorch/blob/e9ebda29d87ce0916ab08c06ab26fd3766a870e5/torch/onnx/symbolic_opset9.py#L3707-L3708
>
> by
>
> ```python
> if dtype.node().mustBeNone() and self_dtype is not None:
> dtype = self_dtype
> ```
>
> Is that fine or is the change in `torch.jit.trace` a bug and the issue should be fixed upstream?
@ezyang do you know someone that could help us track a potential regression on TorchScript?
@fxmarty I have updated the issue description to capture the root cause you have identified in a previous [comment](https://github.com/pytorch/pytorch/issues/110597#issuecomment-1750488721)
Feel free to update it accordingly. The important thing is to show that the ONNX exporter detected the behavior change, not caused it, so that TorchScript friends can help us here
cc @davidberard98 can you take a look at the priority of this please?
Tentative high pri for now due to regression
I wonder if the difference is caused by dynamic shapes?
Seems unlikely? This is all TS...
The `dtype`, `layout` & `device` arguments of `new_zeros` are being traced as `NoneType`. I'll report back on Friday. Thanks!
was curious where this came from and ran a bisect. Bisect is still running but I'm pretty sure it's from this PR, which basically makes this exact change: https://github.com/pytorch/pytorch/pull/97564.
will take a look at the linked PRs in the next day or two.
@albanD not familiar with priority levels - perhaps you or someone else from core can judge based on the change in the referenced PR?
cc @bdhirsh is your PR expected to impact all forms of tracing this way?
@albanD yep - the purpose of that PR was to avoid burning in the defaults of `*_like` factory functions during tracing. And this burning in previously happened at the python binding layer. So all traces that live below the python layer (make_fx, and also torchscript tracing and friends) will be affected by it.
In theory, the semantics of `aten.ones_like(x)` and `aten.ones_like(x, device='cpu')` are the same. But I should have marked that PR as potentially BC-breaking for backends / consumers of that IR 😕.
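To make the effect concrete, here is a minimal way to look at what a trace below the Python layer records for a `*_like` call (a sketch; per the explanation above, the kwargs shown in the printed graph depend on the PyTorch version):
```python
import torch
from torch.fx.experimental.proxy_tensor import make_fx

def f(x):
    return torch.ones_like(x)

# Before the change the dtype/layout/device defaults were burned in at the
# Python binding layer; afterwards they are recorded as None.
print(make_fx(f)(torch.randn(3)).graph)
```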
> @albanD yep - the purpose of that PR was to avoid burning in the defaults of `*_like` factory functions during tracing. And this burning in previously happened at the python binding layer. So all traces that live below the python layer (make_fx, and also torchscript tracing and friends) will be affected by it.
>
> In theory, the semantics of `aten.ones_like(x)` and `aten.ones_like(x, device='cpu')` are the same. But I should have marked that PR as potentially BC-breaking for backends / consumers of that IR 😕.
Thank you.
Are we considering a change to make this a BC again (maybe for 2.1.1) or the new breaking behavior will stand as is? | 2023-10-20T19:49:44 |
pytorch/pytorch | 111,955 | pytorch__pytorch-111955 | [
"110680"
] | 3788d86e3e5185eda9416f4dd5e0e64db7d531cd | diff --git a/torch/_decomp/decompositions.py b/torch/_decomp/decompositions.py
--- a/torch/_decomp/decompositions.py
+++ b/torch/_decomp/decompositions.py
@@ -3582,9 +3582,9 @@ def matmul(tensor1, tensor2):
and dim_tensor2 == 3
and batch_tensor1[0] != batch_tensor2[0]
):
- if batch_tensor1[0] == 1 and tensor1.requires_grad():
+ if batch_tensor1[0] == 1 and tensor1.requires_grad:
return matmul(tensor1.squeeze(0), tensor2)
- if batch_tensor2[0] == 1 and tensor2.requires_grad():
+ if batch_tensor2[0] == 1 and tensor2.requires_grad:
return matmul(tensor1, tensor2.squeeze(0))
# expand the batch portion (i.e. cut off matrix dimensions and expand rest)
| diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -6137,7 +6137,9 @@ def sample_inputs_matmul(op_info, device, dtype, requires_grad, is_rmatmul=False
((0, 0), (S, 0, 0)),
((S, S, M, M), (S, S, M, S)),
((S, S, M, M), (M,)),
- ((M,), (S, S, M, S)))
+ ((M,), (S, S, M, S)),
+ ((S, S, S), (1, S, S))
+ )
for lhs_shape, rhs_shape in test_cases:
lhs = make_arg(lhs_shape)
rhs = make_arg(rhs_shape)
| failing to compile function with matmul
### 🐛 Describe the bug
When updating the PyTorch version on kornia, [we found an issue with `@`/matmul when trying to compile the center crop 3d function](https://github.com/kornia/kornia/pull/2605/files#r1348152549)
`torch._dynamo.exc.TorchRuntimeError: Failed running call_function <built-in function matmul>(*(FakeTensor(..., size=(4, 4, 4)), FakeTensor(..., size=(1, 4, 4))), **{}):`
- The CI error trace: https://github.com/kornia/kornia/pull/2605/checks#step:5:21835
- The failing line at the end of the trace: https://github.com/kornia/kornia/blob/5ee78d20e127e69d2519431090f696377c310f0d/kornia/geometry/conversions.py#L1149
The compile was working before, but I wasn't able to reduce it to a minimal reproducible example. I also didn't find related issues.
### Error logs
```output
kornia/geometry/transform/crop3d.py:223: in center_crop3d
return crop_by_boxes3d(
kornia/geometry/transform/crop3d.py:301: in crop_by_boxes3d
validate_bbox3d(src_box)
kornia/geometry/transform/crop3d.py:302: in <resume in crop_by_boxes3d>
validate_bbox3d(dst_box)
kornia/geometry/transform/crop3d.py:313: in <resume in crop_by_boxes3d>
bbox = infer_bbox_shape3d(dst_box)
kornia/geometry/transform/crop3d.py:313: in <resume in crop_by_boxes3d>
bbox = infer_bbox_shape3d(dst_box)
kornia/geometry/transform/crop3d.py:314: in <resume in crop_by_boxes3d>
if not ((bbox[0] == bbox[0][0]).all() and (bbox[1] == bbox[1][0]).all() and (bbox[2] == bbox[2][0]).all()):
kornia/geometry/transform/crop3d.py:314: in <resume in crop_by_boxes3d>
if not ((bbox[0] == bbox[0][0]).all() and (bbox[1] == bbox[1][0]).all() and (bbox[2] == bbox[2][0]).all()):
kornia/geometry/transform/crop3d.py:323: in <resume in crop_by_boxes3d>
(int(bbox[0][0].item()), int(bbox[1][0].item()), int(bbox[2][0].item())),
kornia/geometry/transform/crop3d.py:323: in <resume in crop_by_boxes3d>
(int(bbox[0][0].item()), int(bbox[1][0].item()), int(bbox[2][0].item())),
kornia/geometry/transform/crop3d.py:323: in <resume in crop_by_boxes3d>
(int(bbox[0][0].item()), int(bbox[1][0].item()), int(bbox[2][0].item())),
venv/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py:490: in catch_errors
return callback(frame, cache_entry, hooks, frame_state)
venv/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py:641: in _convert_frame
result = inner_convert(frame, cache_size, hooks, frame_state)
venv/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py:133: in _fn
return fn(*args, **kwargs)
venv/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py:389: in _convert_frame_assert
return _compile(
venv/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py:569: in _compile
guarded_code = compile_inner(code, one_graph, hooks, transform)
venv/lib/python3.10/site-packages/torch/_dynamo/utils.py:189: in time_wrapper
r = func(*args, **kwargs)
venv/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py:491: in compile_inner
out_code = transform_code_object(code, transform)
venv/lib/python3.10/site-packages/torch/_dynamo/bytecode_transformation.py:1028: in transform_code_object
transformations(instructions, code_options)
venv/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py:458: in transform
tracer.run()
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:2074: in run
super().run()
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:724: in run
and self.step()
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:688: in step
getattr(self, inst.opname)(inst)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:392: in wrapper
return inner_fn(self, inst)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:1167: in CALL_FUNCTION_KW
self.call_function(fn, args, kwargs)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:562: in call_function
self.push(fn.call_function(self, args, kwargs))
venv/lib/python3.10/site-packages/torch/_dynamo/variables/functions.py:261: in call_function
return super().call_function(tx, args, kwargs)
venv/lib/python3.10/site-packages/torch/_dynamo/variables/functions.py:90: in call_function
return tx.inline_user_function_return(
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:598: in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:2179: in inline_call
return cls.inline_call_(parent, func, args, kwargs)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:2286: in inline_call_
tracer.run()
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:724: in run
and self.step()
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:688: in step
getattr(self, inst.opname)(inst)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:392: in wrapper
return inner_fn(self, inst)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:1167: in CALL_FUNCTION_KW
self.call_function(fn, args, kwargs)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:562: in call_function
self.push(fn.call_function(self, args, kwargs))
venv/lib/python3.10/site-packages/torch/_dynamo/variables/functions.py:261: in call_function
return super().call_function(tx, args, kwargs)
venv/lib/python3.10/site-packages/torch/_dynamo/variables/functions.py:90: in call_function
return tx.inline_user_function_return(
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:598: in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:2179: in inline_call
return cls.inline_call_(parent, func, args, kwargs)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:2286: in inline_call_
tracer.run()
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:724: in run
and self.step()
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:688: in step
getattr(self, inst.opname)(inst)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:392: in wrapper
return inner_fn(self, inst)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:1115: in CALL_FUNCTION
self.call_function(fn, args, {})
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:562: in call_function
self.push(fn.call_function(self, args, kwargs))
venv/lib/python3.10/site-packages/torch/_dynamo/variables/functions.py:261: in call_function
return super().call_function(tx, args, kwargs)
venv/lib/python3.10/site-packages/torch/_dynamo/variables/functions.py:90: in call_function
return tx.inline_user_function_return(
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:598: in inline_user_function_return
result = InliningInstructionTranslator.inline_call(self, fn, args, kwargs)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:2179: in inline_call
return cls.inline_call_(parent, func, args, kwargs)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:2286: in inline_call_
tracer.run()
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:724: in run
and self.step()
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:688: in step
getattr(self, inst.opname)(inst)
venv/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py:168: in impl
self.push(fn_var.call_function(self, self.popn(nargs), {}))
venv/lib/python3.10/site-packages/torch/_dynamo/variables/builtin.py:570: in call_function
return wrap_fx_proxy(tx, proxy, **options)
venv/lib/python3.10/site-packages/torch/_dynamo/variables/builder.py:1187: in wrap_fx_proxy
return wrap_fx_proxy_cls(
venv/lib/python3.10/site-packages/torch/_dynamo/variables/builder.py:1274: in wrap_fx_proxy_cls
example_value = get_fake_value(proxy.node, tx)
venv/lib/python3.10/site-packages/torch/_dynamo/utils.py:1376: in get_fake_value
raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None
venv/lib/python3.10/site-packages/torch/_dynamo/utils.py:1337: in get_fake_value
return wrap_fake_exception(
venv/lib/python3.10/site-packages/torch/_dynamo/utils.py:916: in wrap_fake_exception
return fn()
venv/lib/python3.10/site-packages/torch/_dynamo/utils.py:1338: in <lambda>
lambda: run_node(tx.output, node, args, kwargs, nnmodule)
venv/lib/python3.10/site-packages/torch/_dynamo/utils.py:1410: in run_node
raise RuntimeError(fn_str + str(e)).with_traceback(e.__traceback__) from e
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tracer = <torch._dynamo.output_graph.OutputGraph object at 0x7fa8a002bc10>
node = matmul
args = (FakeTensor(..., size=(4, 4, 4)), FakeTensor(..., size=(1, 4, 4)))
kwargs = {}, nnmodule = None
def run_node(tracer, node, args, kwargs, nnmodule):
"""
Runs a given node, with the given args and kwargs.
Behavior is dicatated by a node's op.
run_node is useful for extracting real values out of nodes.
See get_real_value for more info on common usage.
Note: The tracer arg is only used for 'get_attr' ops
Note: The nnmodule arg is only used for 'call_module' ops
Nodes that are not call_function, call_method, call_module, or get_attr will
raise an AssertionError.
"""
op = node.op
try:
if op == "call_function":
> return node.target(*args, **kwargs)
E torch._dynamo.exc.TorchRuntimeError: Failed running call_function <built-in function matmul>(*(FakeTensor(..., size=(4, 4, 4)), FakeTensor(..., size=(1, 4, 4))), **{}):
E unsupported operand type(s) for @: 'FakeTensor' and 'FakeTensor'
E
E from user code:
E File "/tmp/kornia/kornia/geometry/transform/crop3d.py", line 320, in <resume in crop_by_boxes3d>
E patches: torch.Tensor = crop_by_transform_mat3d(
E File "/tmp/kornia/kornia/geometry/transform/crop3d.py", line 357, in crop_by_transform_mat3d
E patches: torch.Tensor = warp_affine3d(
E File "/tmp/kornia/kornia/geometry/transform/imgwarp.py", line 867, in warp_affine3d
E dst_norm_trans_src_norm: Tensor = normalize_homography3d(M_4x4, size_src, size_out) # Bx4x4
E File "/tmp/kornia/kornia/geometry/conversions.py", line 1149, in normalize_homography3d
E dst_norm_trans_src_norm: Tensor = dst_norm_trans_dst_pix @ (dst_pix_trans_src_pix @ src_pix_trans_src_norm)
```
### Minified repro
_No response_
### Versions
python 3.10.13
torch 2.1.0 (git version: 7bcf7da3a268b435777fe87c7794c382f444e86d)
ubuntu 22.04
cc @ezyang @gchanan @zou3519 @kadeng @msaroufim @wconstab @bdhirsh @anijain2305
| 🤔
How did our tests not catch this? This is a pretty blatant error. I guess our op infos are just missing cases?
https://github.com/pytorch/pytorch/blob/main/torch/_decomp/decompositions.py#L3781
```python
import torch
@torch.compile
def f(a, b):
return torch.matmul(a, b)
f(torch.randn(4, 4, 4), torch.randn(1, 4, 4))
```
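For context on the patch above: `Tensor.requires_grad` is a plain boolean attribute, so the decomposition's `tensor1.requires_grad()` spelling can only blow up once the `(4, 4, 4) @ (1, 4, 4)` branch is actually reached (a minimal illustration):
```python
import torch

t = torch.randn(1, 4, 4)
print(t.requires_grad)  # bool attribute, prints False
# t.requires_grad()     # TypeError: 'bool' object is not callable
```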
cc: @ezyang @lezcano @albanD | 2023-10-24T20:09:48 |
pytorch/pytorch | 112,792 | pytorch__pytorch-112792 | [
"110832"
] | 8a178f153ef5dc6693069ee8cd2ff5ce5992220d | diff --git a/torch/_meta_registrations.py b/torch/_meta_registrations.py
--- a/torch/_meta_registrations.py
+++ b/torch/_meta_registrations.py
@@ -4961,6 +4961,7 @@ def meta__scaled_dot_product_efficient_backward(
num_heads = query.size(1)
max_q = query.size(2)
head_dim = query.size(3)
+ head_dim_v = value.size(3)
max_k = key.size(2)
@@ -4977,7 +4978,7 @@ def meta__scaled_dot_product_efficient_backward(
device=key.device,
)
grad_v = torch.empty_permuted(
- (batch_size, num_heads, max_k, head_dim),
+ (batch_size, num_heads, max_k, head_dim_v),
(0, 2, 1, 3),
dtype=value.dtype,
device=value.device,
diff --git a/torch/_subclasses/fake_utils.py b/torch/_subclasses/fake_utils.py
--- a/torch/_subclasses/fake_utils.py
+++ b/torch/_subclasses/fake_utils.py
@@ -146,6 +146,18 @@ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
r_out, fake_out, check_strides=self.check_strides
)
except Exception as e:
+ if (
+ func is aten._scaled_dot_product_flash_attention.default
+ and idx in (6, 7)
+ and "Devices" in repr(e)
+ ):
+ continue
+ if (
+ func is aten._scaled_dot_product_efficient_attention.default
+ and idx in (2, 3)
+ and "Devices" in repr(e)
+ ):
+ continue
error_message = (
f"{context} mismatched tensor metadata: {e}"
if len(r_flat) == 1
| diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -27,7 +27,7 @@
toleranceOverride, tol)
from torch.testing._internal.common_cuda import (
SM53OrLater, SM60OrLater, SM80OrLater, with_tf32_off, TEST_CUDNN,
- _get_torch_cuda_version, _get_torch_rocm_version, PLATFORM_SUPPORTS_FUSED_SDPA
+ _get_torch_cuda_version, _get_torch_rocm_version,
)
from torch.testing._internal.common_utils import (
make_fullrank_matrices_with_distinct_singular_values,
@@ -13499,11 +13499,6 @@ def reference_flatten(input, start_dim=0, end_dim=-1):
device_type='cpu'),
# OpInfo was implemented with a lambda
DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
- # See [Note] SDPA returns Philox Offset and Seed as tensors that will live on CPU when not in cuda graph capture
- DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_crossref_backward_amp',
- device_type='cuda', dtypes=(torch.float32,), active_if=PLATFORM_SUPPORTS_FUSED_SDPA),
- DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_crossref_backward_no_amp',
- device_type='cuda', dtypes=(torch.float32,), active_if=PLATFORM_SUPPORTS_FUSED_SDPA),
# TODO Need to understand what this is testing and why it doesn't work
DecorateInfo(unittest.skip("Skipped"), 'TestDecomp', 'test_comprehensive'),
DecorateInfo(unittest.skip('output is non-deterministic (when dropout_p > 0)'), 'TestCommon', 'test_compare_cpu'),
| Backpropagation of `torch.compile`d module with scaled_dot_product_attention fails unless q, k, v shapes are the same
### 🐛 Describe the bug
When calling `torch.compile` on a module that uses `nn.functional.scaled_dot_product_attention(q, k, v, ...)`, backward passes do not work unless `q`, `k`, and `v` all have the same shape. This error only happens if CUDA is used; CPU appears to be fine.
```python
import torch
from torch import nn
class BadModule(nn.Module):
def __init__(self):
super().__init__()
self.l = nn.Linear(8, 16, bias=False)
def forward(self, x):
return torch.nn.functional.scaled_dot_product_attention(x, x, self.l(x), is_causal=True)
m = torch.compile(BadModule().to('cuda'))
x = torch.rand(1, 2, 4, 8).to('cuda') # (batch, num_heads, seq_len, embed_dim)
y = m(x) # forward pass
z = y.mean().backward() # fake backward pass
```
resulting error:
```
torch._dynamo.exc.BackendCompilerFailed: backend='inductor' raised:
RuntimeError: Function ScaledDotProductEfficientAttentionBackward0 returned an invalid gradient at index 2 - got [1, 2, 4, 8] but expected shape compatible with [1, 2, 4, 16]
```
It works fine if the call to `torch.compile(...)` is removed or on the CPU backend.
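A possible interim workaround (a sketch, not verified) is to steer SDPA away from the memory-efficient kernel, which is the path touched by the meta-registration fix above; `torch.backends.cuda.sdp_kernel` is the context manager available for that in 2.1:
```python
import torch

# Force the math backend so the efficient-attention backward is never selected.
# Whether this sidesteps the failure depends on which kernel inductor picks.
with torch.backends.cuda.sdp_kernel(
    enable_flash=False, enable_math=True, enable_mem_efficient=False
):
    y = m(x)  # m and x as defined in the repro above
    y.mean().backward()
```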
### Versions
```
PyTorch version: 2.1.0+cu118 Is debug build: False
CUDA used to build PyTorch: 11.8
ROCM used to build PyTorch: N/A
OS: Arch Linux (x86_64)
GCC version: (GCC) 13.1.1 20230429
Clang version: 15.0.7
CMake version: version 3.26.3
Libc version: glibc-2.37
Python version: 3.11.6 | packaged by conda-forge | (main, Oct 3 2023, 10:40:35) [GCC 12.3.0] (64-bit runtime)
Python platform: Linux-5.15.90.1-microsoft-standard-WSL2-x86_64-with-glibc2.37
Is CUDA available: True
CUDA runtime version: 11.8.89
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration: GPU 0: NVIDIA GeForce RTX 3080 Laptop GPU
Nvidia driver version: 516.94
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 48 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 16
On-line CPU(s) list: 0-15
Vendor ID: AuthenticAMD
Model name: AMD Ryzen 9 5900HX with Radeon Graphics
CPU family: 25
Model: 80
Thread(s) per core: 2
Core(s) per socket: 8
Socket(s): 1
Stepping: 0
BogoMIPS: 6587.45
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl tsc_reliable nonstop_tsc cpuid extd_apicid pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy svm cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext perfctr_core ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves clzero xsaveerptr arat npt nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold v_vmsave_vmload umip vaes vpclmulqdq rdpid fsrm
Virtualization: AMD-V
Hypervisor vendor: Microsoft
Virtualization type: full
L1d cache: 256 KiB (8 instances)
L1i cache: 256 KiB (8 instances)
L2 cache: 4 MiB (8 instances)
L3 cache: 16 MiB (1 instance)
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Retbleed: Not affected
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Retpolines, IBPB conditional, IBRS_FW, STIBP conditional, RSB filling, PBRSB-eIBRS Not affected
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Versions of relevant libraries:
[pip3] numpy==1.24.1
[pip3] torch==2.1.0+cu118
[pip3] torchaudio==2.1.0+cu118
[pip3] torchvision==0.16.0+cu118
[pip3] triton==2.1.0
[conda] numpy 1.24.1 pypi_0 pypi
[conda] torch 2.1.0+cu118 pypi_0 pypi
[conda] torchaudio 2.1.0+cu118 pypi_0 pypi
[conda] torchvision 0.16.0+cu118 pypi_0 pypi
[conda] triton 2.1.0 pypi_0 pypi
```
cc @ezyang @albanD @zou3519 @gqchen @pearu @nikitaved @soulitzer @Lezcano @Varal7 @jbschlosser @bhosmer @cpuhrsch @erichan1 @drisspg @mikaylagawarecki @msaroufim @bdhirsh @anijain2305 @chauhang @wconstab
| 2023-11-02T21:53:17 |
|
pytorch/pytorch | 114,309 | pytorch__pytorch-114309 | [
"112997"
] | 193f87857ea61c9b529d2c46dd3da0e089ec38bd | diff --git a/tools/amd_build/build_amd.py b/tools/amd_build/build_amd.py
--- a/tools/amd_build/build_amd.py
+++ b/tools/amd_build/build_amd.py
@@ -90,7 +90,14 @@
"aten/src/ATen/native/nested/cuda/*",
"aten/src/ATen/native/sparse/cuda/*",
"aten/src/ATen/native/quantized/cuda/*",
- "aten/src/ATen/native/transformers/cuda/*",
+ "aten/src/ATen/native/transformers/cuda/attention_backward.cu",
+ "aten/src/ATen/native/transformers/cuda/attention.cu",
+ "aten/src/ATen/native/transformers/cuda/sdp_utils.cpp",
+ "aten/src/ATen/native/transformers/cuda/sdp_utils.h",
+ "aten/src/ATen/native/transformers/cuda/mem_eff_attention/debug_utils.h",
+ "aten/src/ATen/native/transformers/cuda/mem_eff_attention/gemm_kernel_utils.h",
+ "aten/src/ATen/native/transformers/cuda/mem_eff_attention/pytorch_utils.h",
+ "aten/src/ATen/native/transformers/cuda/flash_attn/flash_api.h",
"aten/src/THC/*",
"aten/src/ATen/test/*",
# CMakeLists.txt isn't processed by default, but there are a few
diff --git a/torch/utils/hipify/cuda_to_hip_mappings.py b/torch/utils/hipify/cuda_to_hip_mappings.py
--- a/torch/utils/hipify/cuda_to_hip_mappings.py
+++ b/torch/utils/hipify/cuda_to_hip_mappings.py
@@ -8572,6 +8572,8 @@
C10_MAPPINGS = collections.OrderedDict(
[
("CUDA_VERSION", ("TORCH_HIP_VERSION", API_PYTORCH)),
+ ("CUDA_LAUNCH_BLOCKING=1", ("AMD_SERIALIZE_KERNEL=3", API_C10)),
+ ("CUDA_LAUNCH_BLOCKING", ("AMD_SERIALIZE_KERNEL", API_C10)),
("cuda::compat::", ("hip::compat::", API_C10)),
("c10/cuda/CUDAAlgorithm.h", ("c10/hip/HIPAlgorithm.h", API_C10)),
("c10/cuda/CUDADeviceAssertion.h", ("c10/hip/HIPDeviceAssertion.h", API_C10)),
| diff --git a/test/test_transformers.py b/test/test_transformers.py
--- a/test/test_transformers.py
+++ b/test/test_transformers.py
@@ -20,6 +20,8 @@
from typing import List, Tuple, Optional
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
+ TEST_WITH_ROCM,
+ skipIfRocm,
TEST_FAIRSEQ,
run_tests,
parametrize,
@@ -117,6 +119,18 @@ def query_key_value_clones(query: torch.Tensor, key: torch.Tensor, value: torch.
value_ref = value.clone().detach().to(dtype).requires_grad_(value.requires_grad)
return query_ref, key_ref, value_ref
+def get_platform_specific_sdpa():
+ ret = []
+ if PLATFORM_SUPPORTS_FLASH_ATTENTION:
+ ret.append(SDPBackend.FLASH_ATTENTION)
+ if PLATFORM_SUPPORTS_MEM_EFF_ATTENTION:
+ ret.append(SDPBackend.EFFICIENT_ATTENTION)
+ if not ret:
+ # Add a placeholder, an empty list causes "An empty arg_values was passed to @parametrize"
+ ret.append(SDPBackend.EFFICIENT_ATTENTION)
+ return ret
+
+PLATFORM_SPECIFIC_SDPA = get_platform_specific_sdpa()
def rand_sdpa_tensor(shape: SdpaShape, device: str, dtype: torch.dtype, type: str,
requires_grad: bool = False, packed: bool = False) -> torch.Tensor:
@@ -1212,6 +1226,7 @@ def ones_tensor(*shape):
_ = mha_f(qkv_f, qkv_f, qkv_f, attn_mask=mask, need_weights=False, is_causal=True)
torch.cuda.synchronize()
+ @skipIfRocm # Missing EFFICIENT_ATTENTION
@unittest.skipIf(
not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Platform does not supposrt fused SDPA or pre-SM80 hardware"
)
@@ -1277,9 +1292,7 @@ def test_dispatch_fails_no_backend(self, device):
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Does not support fused scaled dot product attention")
@parametrize(
"kernel",
- [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION]
- if PLATFORM_SUPPORTS_FLASH_ATTENTION
- else [SDPBackend.EFFICIENT_ATTENTION],
+ PLATFORM_SPECIFIC_SDPA,
)
def test_invalid_fused_inputs_dim_3(self, device, kernel: SDPBackend):
with sdp_kernel(**backend_map[kernel]):
@@ -1297,9 +1310,7 @@ def test_invalid_fused_inputs_dim_3(self, device, kernel: SDPBackend):
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Does not support fused scaled dot product attention")
@parametrize(
"kernel",
- [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION]
- if PLATFORM_SUPPORTS_FLASH_ATTENTION
- else [SDPBackend.EFFICIENT_ATTENTION],
+ PLATFORM_SPECIFIC_SDPA,
)
def test_invalid_fused_inputs_broadcast(self, device, kernel: SDPBackend):
with sdp_kernel(**backend_map[kernel]):
@@ -1315,8 +1326,7 @@ def test_invalid_fused_inputs_broadcast(self, device, kernel: SDPBackend):
@onlyCUDA
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Does not support fused scaled dot product attention")
- @parametrize("kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION] if
- PLATFORM_SUPPORTS_FLASH_ATTENTION else [SDPBackend.EFFICIENT_ATTENTION])
+ @parametrize("kernel", PLATFORM_SPECIFIC_SDPA)
def test_invalid_sequence_lengths(self, device, kernel: SDPBackend):
with sdp_kernel(**backend_map[kernel]):
# Passing in a q,k,v with 0 length sequences will error
@@ -1330,8 +1340,7 @@ def test_invalid_sequence_lengths(self, device, kernel: SDPBackend):
@onlyCUDA
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Does not support fused scaled dot product attention")
- @parametrize("kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION] if
- PLATFORM_SUPPORTS_FLASH_ATTENTION else [SDPBackend.EFFICIENT_ATTENTION])
+ @parametrize("kernel", PLATFORM_SPECIFIC_SDPA)
def test_invalid_last_dim_stride(self, device, kernel: SDPBackend):
with sdp_kernel(**backend_map[kernel]):
# Passing in a q,k,v with 0 length sequences will error
@@ -1361,9 +1370,7 @@ def test_invalid_fused_inputs_head_dim(self, device, kernel: SDPBackend):
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Does not support fused scaled dot product attention")
@parametrize(
"kernel",
- [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION]
- if PLATFORM_SUPPORTS_FLASH_ATTENTION
- else [SDPBackend.EFFICIENT_ATTENTION],
+ PLATFORM_SPECIFIC_SDPA,
)
def test_invalid_fused_inputs_invalid_dtype(self, device, kernel: SDPBackend):
with sdp_kernel(**backend_map[kernel]):
@@ -1436,6 +1443,7 @@ def test_flash_autocast_fp32_bfloat16(self, device):
_ = torch.nn.functional.scaled_dot_product_attention(
q, k, v, None, 0.0, False)
+ # Note: do not truncate the list according to platforms. These tests should always raise errors.
@parametrize("kernel", [SDPBackend.MATH, SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION])
def test_invalid_inputs_different_datatypes(self, device, kernel: SDPBackend):
with sdp_kernel(**backend_map[kernel]):
@@ -1467,7 +1475,8 @@ def test_invalid_inputs_1_dimensional_inputs(self, device, kernel: SDPBackend):
self.assertRaises(RuntimeError, lambda: F.scaled_dot_product_attention(query, key, value))
@onlyCUDA
- @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Fused SDPA was not built for this system")
+ @skipIfRocm # Missing EFFICIENT_ATTENTION
+ @unittest.skipIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Fused SDPA was not built for this system")
def test_fused_kernels_nested_broadcasting_error_cases(self, device):
# one of k,v needs to be broadcasted and other has non consistent seq_len dim
rand_nested_tensor = partial(rand_sdpa_tensor, type="nested", device=device, dtype=torch.float32)
@@ -1788,6 +1797,9 @@ def convert_flash_attn_S_to_softmax(self, S, query_padding_mask, key_padding_mas
query_padding_mask: (batch_size, seqlen_q)
key_padding_mask: (batch_size, seqlen_k)
"""
+ if TEST_WITH_ROCM:
+ return S
+
b, h, seqlen_q, seqlen_k = S.shape
warps_n = 4
blocksize_m, blocksize_n = _get_block_size(S.device, head_dim, causal)
@@ -1954,6 +1966,7 @@ def test_scaled_dot_product_attention_fused_kernels_packed(self, device, type: s
self.assertEqual(actual.contiguous(), math_ref.contiguous(), atol=2e-3, rtol=1e-2)
+ @skipIfRocm # Missing nested and EFFICIENT_ATTENTION
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("type", ["dense", "nested"])
@parametrize("fused_kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION] if
@@ -2066,6 +2079,7 @@ def test_sdp_mem_efficient_grad_against_math(self, device, contiguous_inputs: bo
# Cast up and compare
self.assertEqual(qkv.grad, qkv_lp.grad.to(torch.float64), atol=1e-5, rtol=1e-5)
+ @skipIfRocm # Small matrices
@unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Flash Attention was not built for this system")
@parametrize("contiguous_inputs", [True, False])
@parametrize("is_causal", [True, False])
@@ -2118,6 +2132,7 @@ def test_sdp_flash_attention_grad_against_math(self, device, contiguous_inputs:
rtol = 7e-4 if dtype == torch.float16 else 7e-3
self.assertEqual(qkv.grad, qkv_lp.grad.to(torch.float64), atol=atol, rtol=rtol)
+ @skipIfRocm # Missing nested and EFFICIENT_ATTENTION
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Platform does not support fused SDPA")
@parametrize("type", ["dense", "nested"])
def test_fused_sdp_choice(self, device, type: str):
@@ -2464,6 +2479,15 @@ def _get_mem_eff_drop_mask(batch_size, n_heads, q_len, kv_len, p, seed, offset,
def test_flash_attention_vs_math_ref_grads(self, device, batch_size: int, seq_len_q: int, seq_len_k: int,
head_dim: int, is_causal: bool, dropout_p: float, dtype: torch.dtype,
scale: str):
+ if TEST_WITH_ROCM:
+ def is_power_of_2(n):
+ return n & (n - 1) == 0
+ if not is_power_of_2(seq_len_q) or not is_power_of_2(seq_len_k) or not is_power_of_2(head_dim):
+ self.skipTest("Flash attention on ROCM only supports power of two seq_len_q seq_len_k headdim, for now.")
+ if head_dim < 16 or seq_len_q < 16 or seq_len_k < 16:
+ self.skipTest("Flash attention on ROCM only supports power of two seq_len_q, seq_len_k, headdim >= 16, for now.")
+ if head_dim > 128:
+ self.skipTest("Flash attention on ROCM only supports power of two headdim <= 128, for now.")
if isSM86or89Device and head_dim in range(193, 256 + 1):
self.skipTest("Flash attention on sm86 and sm89 for headdim > 192 currently disabled")
@@ -2540,7 +2564,7 @@ def test_flash_attention_vs_math_ref_grads(self, device, batch_size: int, seq_le
out_lp_ref.backward(upstream_grad.to(out_lp_ref.dtype))
# See [Note] Fused Tolerances above
- output_fudge_factor = 3 if head_dim % 8 != 0 else 1
+ output_fudge_factor = 3 if head_dim % 8 != 0 or TEST_WITH_ROCM else 1
output_ref_atol, output_ref_rtol = get_tolerances(out_ref, out_lp_ref, output_fudge_factor)
# TODO: Investigate why grad_q needs larger tolerances
@@ -2559,6 +2583,7 @@ def test_flash_attention_vs_math_ref_grads(self, device, batch_size: int, seq_le
self.assertEqual(value.grad, value_ref.grad.to(value.grad.dtype),
atol=grad_v_ref_atol, rtol=grad_v_ref_rtol)
+ @skipIfRocm # FIXME: "capturing stream has unjoined work"
@unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Does not support SDPA or pre-SM80 hardware")
@parametrize("batch_size", [1, 8])
@parametrize("seq_len_q", [256, 512, 1024])
@@ -2568,7 +2593,7 @@ def test_flash_attention_vs_math_ref_grads(self, device, batch_size: int, seq_le
@parametrize("dropout_p", [0.0, 0.22])
@parametrize("dtype", [torch.float16,])
@parametrize("scale", [None, "l1"])
- @parametrize("fused_kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION])
+ @parametrize("fused_kernel", PLATFORM_SPECIFIC_SDPA)
def test_fused_attention_vs_math_ref_grads_cudagraph(self, device, batch_size: int, seq_len_q: int, seq_len_k: int,
head_dim: int,
is_causal: bool,
@@ -2721,6 +2746,7 @@ def get_dropout_mask(output, fused_kernel, batch_size, n_heads, q_len, kv_len, d
self.assertEqual(value.grad, value_ref.grad.to(value.grad.dtype),
atol=grad_v_ref_atol, rtol=grad_v_ref_rtol)
+ @skipIfRocm # Nested Tensor
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("fused_kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION] if
PLATFORM_SUPPORTS_FLASH_ATTENTION else [SDPBackend.EFFICIENT_ATTENTION])
@@ -2755,6 +2781,7 @@ def test_fused_kernels_seq_len_1_inputs(self, device, fused_kernel):
self.assertEqual(actual.contiguous(), math_ref.contiguous().to(torch.float16), atol=1e-3, rtol=1e-2)
+ @skipIfRocm # Nested tensor
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_ATTENTION, "Fused SDPA was not built for this system")
@parametrize("kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION] if
PLATFORM_SUPPORTS_FLASH_ATTENTION else [SDPBackend.EFFICIENT_ATTENTION])
@@ -2878,6 +2905,7 @@ def test_fused_kernels_nested_broadcasting_query_dense(self, device):
self.assertEqual(actual.contiguous(), math_ref.contiguous(), atol=1e-3, rtol=1e-2)
@onlyCUDA
+ @skipIfRocm # Nested tensor
@unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Does not support SDPA or pre-SM80 hardware")
@parametrize("batch_size", [8, 32])
@parametrize("max_seq_len_q", [32, 256])
@@ -3036,6 +3064,7 @@ def run_test(self, device, compile, make_q, make_kv, attn_bias=None,
torch.testing.assert_close(key.grad, key_prototype.grad, rtol=grad_tolerances.rtol, atol=grad_tolerances.atol)
torch.testing.assert_close(value.grad, value_prototype.grad, rtol=grad_tolerances.rtol, atol=grad_tolerances.atol)
+ @skipIfRocm # No support for the second variant for now
@parametrize("causal_variant", [CausalVariant.UPPER_LEFT, CausalVariant.LOWER_RIGHT])
@parametrize(
"shape",
@@ -3064,6 +3093,7 @@ def test_causal_variants(self, device, causal_variant: CausalVariant, shape: Lis
self.run_test(device, False, make_q_tensor, make_kv_tensor, attn_bias, forw_tol, grad_tol)
+ @skipIfRocm # No support for the second variant for now
@parametrize("causal_variant", [CausalVariant.UPPER_LEFT, CausalVariant.LOWER_RIGHT])
@parametrize(
"shape",
diff --git a/torch/testing/_internal/common_cuda.py b/torch/testing/_internal/common_cuda.py
--- a/torch/testing/_internal/common_cuda.py
+++ b/torch/testing/_internal/common_cuda.py
@@ -6,6 +6,7 @@
from torch.testing._internal.common_utils import LazyVal, TEST_NUMBA, TEST_WITH_ROCM, TEST_CUDA, IS_WINDOWS
import inspect
import contextlib
+import os
CUDA_ALREADY_INITIALIZED_ON_IMPORT = torch.cuda.is_initialized()
@@ -28,7 +29,23 @@
SM80OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0))
SM90OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (9, 0))
-PLATFORM_SUPPORTS_FLASH_ATTENTION: bool = LazyVal(lambda: TEST_CUDA and (not TEST_WITH_ROCM) and (not IS_WINDOWS) and SM80OrLater)
+def evaluate_gfx90a_exact():
+ if not torch.cuda.is_available():
+ return False
+ gcn_arch_name = torch.cuda.get_device_properties('cuda').gcnArchName
+ arch = os.environ.get('PYTORCH_DEBUG_FLASH_ATTENTION_GCN_ARCH_OVERRIDE', gcn_arch_name)
+ return arch == 'gfx90a:sramecc+:xnack-'
+
+GFX90A_Exact = LazyVal(lambda: evaluate_gfx90a_exact())
+
+def evaluate_platform_supports_flash_attention():
+ if TEST_WITH_ROCM:
+ return evaluate_gfx90a_exact()
+ if TEST_CUDA:
+ return not IS_WINDOWS and SM80OrLater
+ return False
+
+PLATFORM_SUPPORTS_FLASH_ATTENTION: bool = LazyVal(lambda: evaluate_platform_supports_flash_attention())
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION: bool = LazyVal(lambda: TEST_CUDA and not TEST_WITH_ROCM)
# This condition always evaluates to PLATFORM_SUPPORTS_MEM_EFF_ATTENTION but for logical clarity we keep it separate
PLATFORM_SUPPORTS_FUSED_ATTENTION: bool = LazyVal(lambda: PLATFORM_SUPPORTS_FLASH_ATTENTION or PLATFORM_SUPPORTS_MEM_EFF_ATTENTION)
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -14261,6 +14261,15 @@ def reference_flatten(input, start_dim=0, end_dim=-1):
device_type='cpu'),
DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace',
device_type='cpu'),
+ # TODO: Do not work even on MI200 because of stride mismatching.
+ DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace',
+ device_type='cuda', dtypes=[torch.float16, torch.bfloat16],
+ active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace',
+ device_type='cuda', dtypes=[torch.float16, torch.bfloat16],
+ active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_crossref_backward_amp',
+ device_type='cuda', active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION),
# When changing input from Tensor to CompositeCompliantTensor, input.requires_grad() changes from true to false
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward',
device_type='cpu'),
| Add support for Flash Attention for AMD/ROCm
### 🚀 The feature, motivation and pitch
Enable support for the Flash Attention and Memory Efficient SDPA kernels on AMD GPUs.
At present, using these gives the warnings below with the latest nightlies (torch==2.2.0.dev20231105+rocm5.6, pytorch-triton-rocm==2.1.0+34f8189eae):
> model.py:187: UserWarning: 1Torch was not compiled with flash attention. (Triggered internally at ../aten/src/ATen/native/transformers/hip/sdp_utils.cpp:253.)
y = F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0)
model.py:187: UserWarning: 1Torch was not compiled with memory efficient attention. (Triggered internally at ../aten/src/ATen/native/transformers/hip/sdp_utils.cpp:291.)
y = F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0)
ROCm already has an implementation of Tri's FA here: https://github.com/ROCmSoftwarePlatform/flash-attention/tree/flash_attention_for_rocm2#amd-gpurocm-support
### Alternatives
Users have to manually install the ROCm version of FA and use that in their code, vs using the native PyTorch APIs.
### Additional context
The ROCM build currently has the FA related flags turned off by default: https://github.com/pytorch/pytorch/blob/main/CMakeLists.txt#L741-L750
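For completeness, a small way to turn the silent fallback into an explicit failure on builds that lack the fused kernels is to disable the math fallback around the call (a sketch; `torch.backends.cuda.sdp_kernel` is the context manager exposed in current releases):
```python
import torch
import torch.nn.functional as F

q = k = v = torch.randn(2, 8, 128, 64, device="cuda", dtype=torch.float16)
# With only the flash backend enabled, a build that does not ship it raises a
# RuntimeError instead of silently falling back to the math implementation.
with torch.backends.cuda.sdp_kernel(
    enable_flash=True, enable_math=False, enable_mem_efficient=False
):
    out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
```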
cc @jeffdaily @sunway513 @jithunnair-amd @pruthvistony @ROCmSupport @dllehr-amd @jataylo @hongxiayang
| 2023-11-21T22:25:39 |
|
pytorch/pytorch | 115,753 | pytorch__pytorch-115753 | [
"114628"
] | 1518578b54bc6881fd7eaff9bc2489b39132c35a | diff --git a/torch/export/exported_program.py b/torch/export/exported_program.py
--- a/torch/export/exported_program.py
+++ b/torch/export/exported_program.py
@@ -466,14 +466,13 @@ def update_arg(old_arg, new_ph):
for inp_dim1, inp_dim2 in self.equality_constraints
]
- state_dict = self.state_dict.copy()
lift_constant_tensor_pass(gm, new_graph_signature)
_replace_sym_size_ops_pass(gm)
exported_program = ExportedProgram(
gm,
gm.graph,
new_graph_signature,
- state_dict,
+ self.state_dict,
new_range_constraints,
new_equality_constraints,
copy.deepcopy(self.module_call_graph),
| diff --git a/test/export/test_export.py b/test/export/test_export.py
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -1454,7 +1454,7 @@ def forward(self, x):
m = M()
with unittest.mock.patch("torch._export.DECOMP_TABLE", None):
ep = export(m, inp)
-
+ state_dict = ep.state_dict
FileCheck().check_count(
"torch.ops.aten.t.default", 1, exactly=True
@@ -1469,6 +1469,7 @@ def forward(self, x):
"torch.ops.aten.t.default", 0, exactly=True
).run(core_aten_ep.graph_module.code)
self.assertTrue(torch.allclose(core_aten_ep(*inp), m(*inp)))
+ self.assertEqual(id(state_dict), id(ep.state_dict))
def test_export_decomps_dynamic(self):
class M(torch.nn.Module):
| `run_decompositions` in `ExportedProgram` doesn't keep the same model state_dict
### 🐛 Describe the bug
After running `run_decompositions`, the exported program is not the same.
https://github.com/pytorch/pytorch/blob/e0d2a24967218d7c39e24f66bb6c4836c9d1d427/torch/export/exported_program.py#L351-L361
However, the `state_dict` is also updated.
https://github.com/pytorch/pytorch/blob/e0d2a24967218d7c39e24f66bb6c4836c9d1d427/torch/export/exported_program.py#L458-L472
Should decomposition happen in-place, instead of creating a whole new one?
### Versions
main branch
cc @avikchaudhuri @gmagogsfm @zhxchen17 @tugsbayasgalan @angelayi @suo @ydwu4 @thiagocrepaldi
| I think running decompositions should create a new exported program. What do you mean by the state dict is updated? Do you happen to have an example code?
> I think running decompositions should create a new exported program. What do you mean by the state dict is updated? Do you happen to have an example code?
I will write a full repro soon, but the issue is that we save the original `torch.export.ExportedProgram` during ONNX export (through `torch.onnx.dynamo_export`) for future use. This is convenient as we don't need to add `ExportedProgram` as input to our APIs for later use when we want to run the ONNX model. We can fetch all initializers and buffers through `ONNXProgram.exported_program.state_dict`, similar to how `ExportedProgram.__call__` does to feed all lifted params/buffers to the model.forward as input
During the ONNX export process, we call `ExportedProgram.run_decompositions` which returns a new `ExportedProgram` with a *copy of the original `state_dict`* (instead of an in-place-updated `state_dict`). The previously saved reference to the user's `ExportedProgram` (and its `state_dict`) therefore no longer matches the one that is used to convert the model to ONNX.
In the code below
```python
class CustomModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer(
"my_buffer", torch.tensor(4.0), persistent=persistent
)
def forward(self, x, b):
output = x + b
(
self.my_buffer.add_(1.0) + 3.0
) # Mutate buffer through in-place addition
return output
inputs = (torch.randn(3, 3), torch.randn(3, 3))
ep = torch.export.export(CustomModule(), args=inputs)
onnx_program = torch.onnx.dynamo_export(ep, *inputs)
```
Ideally, any state_dict change made to `ep` would be immediately perceived by `onnx_program._exported_program` because we share the reference to that object. But as `run_decompositions` creates a new `state_dict`, the `onnx_program._exported_program.state_dict` differs from what is returned by `run_decompositions`.
Assuming `torch.export.ExportedProgram` will be used as a base for other backends to lower to other IRs, having the guarantee that we will always have the same instance of `ExportedProgram` (and its `state_dict`) can ease backends' lives when evaluating the state_dict of the new IR against the ExportedProgram. Makes sense?
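A small check along the lines of the new unit test (a sketch reusing `CustomModule` and `inputs` from the snippet above) makes the expected aliasing explicit:
```python
import torch

ep = torch.export.export(CustomModule(), args=inputs)
state_dict_before = ep.state_dict
decomposed = ep.run_decompositions()
# With the change discussed here, the decomposed program should alias the
# original state_dict instead of carrying a copy of it.
print(decomposed.state_dict is state_dict_before)
```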
Solution: After transformation on the ExportedProgram, we check if the state_dict has been updated and if not, we will just link the old state_dict. In this case, it shouldn't update the state_dict | 2023-12-13T17:30:18 |
pytorch/pytorch | 116,517 | pytorch__pytorch-116517 | [
"114591"
] | be254276d238c41911398e722354e01dc8b676d7 | diff --git a/torch/utils/tensorboard/__init__.py b/torch/utils/tensorboard/__init__.py
--- a/torch/utils/tensorboard/__init__.py
+++ b/torch/utils/tensorboard/__init__.py
@@ -1,5 +1,5 @@
import tensorboard
-from packaging.version import Version
+from torch._vendor.packaging.version import Version
if not hasattr(tensorboard, "__version__") or Version(
tensorboard.__version__
| Missing packaging dependency in torch 2.1.x
### 🐛 Describe the bug
Hi,
[torch.utils.tensorboard requires "packaging"](https://github.com/pytorch/pytorch/blob/fa1ccc34c4f65756bc50c3e3ab135c88b175b18c/torch/utils/tensorboard/__init__.py#L2C1-L3C1) to be installed but that dependency is [missing on torch 2.1.x](https://github.com/pytorch/pytorch/blob/v2.1.2-rc1/requirements.txt).
Here's some example code:
```python
from torch.utils.tensorboard import SummaryWriter
```
The links above point to an RC version of 2.1.2 but this is also the case for 2.1.1. Would it be possible to make a patch release to add the dependency?
### Versions
Python version: 3.9.16 (main, Dec 7 2022, 10:16:11) [Clang 14.0.0 (clang-1400.0.29.202
)] (64-bit runtime)
Python platform: macOS-13.6.1-x86_64-i386-64bit
Is CUDA available: False
CUDA runtime version: No CUDA
CUDA_MODULE_LOADING set to: N/A
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Intel(R) Core(TM) i5-7287U CPU @ 3.30GHz
Versions of relevant libraries:
[pip3] numpy==1.23.5
[pip3] torch==2.1.1
[pip3] torchvision==0.16.1
[conda] Could not collect
| Worth mentioning that the change is already on main, so it should just be missing a release.
@sachahu1 is this a new dependency (that tensorboard has added) or was it always missing?
Please note, that in main this was already reverted in favor of just vendoring `packaging.Version` class, see https://github.com/pytorch/pytorch/pull/114108
To clarify: this does not sound like a regression, i.e. imo it does not qualify for a cherry-pick, but it should be fixed in trunk (if tensorboard is still maintained)
@malfet This looks like it's a torch dependency and is used to check the tensorboard package version. If `packaging` isn't going to be added to the torch dependencies then the code should probably be fixed to remove the missing dependency.
Does seem like using `packaging.Version` for this use-case might be overkill if it isn't used elsewhere? I think removing the dependency from the code might be the preferred option.
Here's the code I'm referring to:
```python
from packaging.version import Version
if not hasattr(tensorboard, "__version__") or Version(
tensorboard.__version__
) < Version("1.15"):
raise ImportError("TensorBoard logging requires TensorBoard version 1.15 or above")
del Version
```
link: [`torch.utils.tensorboard.__init__.py`](https://github.com/pytorch/pytorch/blob/bcfca41a2a8d80ca186e2550e4bca1a52e7873ff/torch/utils/tensorboard/__init__.py#L2-L9) | 2023-12-28T20:12:52 |
|
pytorch/pytorch | 119,315 | pytorch__pytorch-119315 | [
"118269"
] | 8ac9b20d4b090c213799e81acf48a55ea8d437d6 | diff --git a/torch/_torch_docs.py b/torch/_torch_docs.py
--- a/torch/_torch_docs.py
+++ b/torch/_torch_docs.py
@@ -9217,8 +9217,22 @@ def merge_dicts(*dicts):
.. math::
\text{{out}}_{{i}} \sim \mathcal{{N}}(0, 1)
+For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and
+unit variance as
+
+.. math::
+ \text{{out}}_{{i}} \sim \mathcal{{CN}}(0, 1)
+
+This is equivalent to separately sampling the real :math:`(\operatorname{{Re}})` and imaginary
+:math:`(\operatorname{{Im}})` part of :math:`\text{{out}}_i` as
+
+.. math::
+ \operatorname{{Re}}(\text{{out}}_{{i}}) \sim \mathcal{{N}}(0, \frac{{1}}{{2}}),\quad
+ \operatorname{{Im}}(\text{{out}}_{{i}}) \sim \mathcal{{N}}(0, \frac{{1}}{{2}})
+
The shape of the tensor is defined by the variable argument :attr:`size`.
+
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
@@ -9239,6 +9253,8 @@ def merge_dicts(*dicts):
>>> torch.randn(2, 3)
tensor([[ 1.5954, 2.8929, -1.0923],
[ 1.1719, -0.4709, -0.1996]])
+
+.. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
""".format(
**factory_common_args
),
@@ -9250,8 +9266,8 @@ def merge_dicts(*dicts):
randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` that is filled with
-random numbers from a normal distribution with mean 0 and variance 1.
-``torch.randn_like(input)`` is equivalent to
+random numbers from a normal distribution with mean 0 and variance 1. Please refer to :func:`torch.randn` for the
+sampling process of complex dtypes. ``torch.randn_like(input)`` is equivalent to
``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
| Incomplete documentation for `torch.randn` on complex dtypes
### 📚 The doc issue
https://pytorch.org/docs/stable/generated/torch.randn.html does not clearly specify behavior on complex data types `torch.complex64` and `torch.complex128`.
The current notation $\textrm{out}_{i} \sim \mathcal{N} (0, 1)$ may lead to users thinking that both the real and imaginary parts are sampled from a unit Gaussian, but in fact they are each i.i.d. sampled from $\mathcal{N} (0, 1/2)$, leading to the complex signal being (correctly, for a large class of applications) sampled from the complex random normal distribution.
The following basic snippet validates this:
```
import torch
x = torch.randn(1000, 1000, dtype=torch.complex64)
print('Complex signal power using native complex: ', torch.var(x))
print('Real part signal power using native complex: ', torch.var(torch.real(x)))
print('Imag part signal power using native complex: ', torch.var(torch.imag(x)))
```
It will produce:
```
Complex signal power using native complex: tensor(0.9998)
Real part signal power using native complex: tensor(0.5005)
Imag part signal power using native complex: tensor(0.4993)
```
The same holds for `torch.complex128`.
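For readers who want the equivalence spelled out, here is a small sketch (mine, not part of the original report) that builds the same distribution by hand from two half-variance real Gaussians:
```python
import torch

# Sampling a standard complex normal is equivalent to sampling the real and
# imaginary parts independently from N(0, 1/2).
n = 1_000_000
z = torch.randn(n, dtype=torch.complex64)
re = torch.randn(n) * (0.5 ** 0.5)
im = torch.randn(n) * (0.5 ** 0.5)
z_manual = torch.complex(re, im)

print(torch.var(z), torch.var(z_manual))     # both ~1.0
print(torch.var(z.real), torch.var(z.imag))  # both ~0.5
```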
### Suggest a potential alternative/fix
The documentation for `torch.randn` could be updated to include something similar to the following clarifications:
- For complex `dtype`, the tensor is i.i.d. sampled from a complex normal distribution with zero mean and unit variance as: $\textrm{out}_{i} \sim \mathcal{CN} (0, 1) $
- $\mathcal{CN}$ is the standard notation for the complex normal distribution (https://en.wikipedia.org/wiki/Complex_normal_distribution).
cc @svekars @brycebortree @ezyang @anjali411 @dylanbespalko @mruberry @Lezcano @nikitaved
| Thanks for the well researched issue report. Do you think you could send us the doc PR? | 2024-02-06T20:49:10 |
|
pytorch/pytorch | 119,388 | pytorch__pytorch-119388 | [
"114345"
] | 8ac9b20d4b090c213799e81acf48a55ea8d437d6 | diff --git a/torch/_torch_docs.py b/torch/_torch_docs.py
--- a/torch/_torch_docs.py
+++ b/torch/_torch_docs.py
@@ -4428,7 +4428,7 @@ def merge_dicts(*dicts):
Note that either of the following must be true:
1. :attr:`count` is a positive non-zero number, and the total number of bytes
-in the buffer is less than :attr:`offset` plus :attr:`count` times the size
+in the buffer is more than :attr:`offset` plus :attr:`count` times the size
(in bytes) of :attr:`dtype`.
2. :attr:`count` is negative, and the length (number of bytes) of the buffer
| Incorrect line in description of torch.frombuffer() method
### 📚 The doc issue
As per the documentation of the `frombuffer` method of `torch`:
```
torch.frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) → [Tensor](https://pytorch.org/docs/stable/tensors.html#torch.Tensor)
```
As per the documentation, either of the following conditions should be true:
```
1. count is a positive non-zero number, and the total number of bytes in the buffer is less than offset plus count times the size (in bytes) of [dtype](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype).
2. count is negative, and the length (number of bytes) of the buffer subtracted by the offset is a multiple of the size (in bytes) of [dtype](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype).
```
In condition number 1, it seems that it is wrongly written that the `total number of bytes in the buffer is less than offset plus count times the size (in bytes) of dtype`. This should be changed to say that the `total number of bytes in the buffer is **more** than offset plus count times the size (in bytes) of dtype`, so that when we read the buffer there is enough data to cover `offset + count * itemsize(dtype)` bytes, and we don't accidentally end up reading memory slots dedicated to something other than the buffer.
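To make the corrected condition concrete, here is a small illustration (mine, not from the report); the buffer contents, dtype, offset and count are arbitrary choices:
```python
import array
import torch

# 8-byte buffer; reading count=4 int8 elements at offset=2 needs
# offset + count * itemsize = 2 + 4 * 1 = 6 bytes, which the 8-byte buffer covers.
buf = array.array('b', range(8))
t = torch.frombuffer(buf, dtype=torch.int8, count=4, offset=2)
print(t)  # tensor([2, 3, 4, 5], dtype=torch.int8)

# count=7 at offset=2 would need 9 bytes, more than the buffer holds,
# so torch.frombuffer raises an error instead of reading past the buffer.
```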
### Suggest a potential alternative/fix
_No response_
cc @svekars @carljparker @albanD
| Thanks for the report, feel free to send a PR to fix the typo, thanks!
Hi, thanks for your reply. I would love to submit a PR. Please allow me some time though...Thanks
Hello, I see this issue has been inactive and the typo is still present in the [documentation](https://pytorch.org/docs/stable/generated/torch.frombuffer.html).
@mg104 Are you still working on this, or can I fix the typo?
Hi @albanD. Since I've got no reply from the issue author, can I go ahead and submit a PR for this?
Sounds good!
| 2024-02-07T17:26:11 |
|
pytorch/pytorch | 119,804 | pytorch__pytorch-119804 | [
"118837"
] | b9814bc525110890f5f463b5eab4f50ab89d73f2 | diff --git a/torch/distributed/checkpoint/state_dict.py b/torch/distributed/checkpoint/state_dict.py
--- a/torch/distributed/checkpoint/state_dict.py
+++ b/torch/distributed/checkpoint/state_dict.py
@@ -678,25 +678,25 @@ def get_state_dict(
optimizer parameter IDs to the canonical FQNs.
Example:
+ >>> # xdoctest: +SKIP
+ >>> import torch
+ >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
+ >>> from torch.nn.parallel import DistributedDataParallel as DDP
+ >>> from torch.distributed.checkpoint.state_dict import get_state_dict
- import torch
- from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
- from torch.nn.parallel import DistributedDataParallel as DDP
- from torch.distributed.checkpoint.state_dict import get_state_dict
-
- fsdp_model = FSDP(copy.deepcopy(model))
- fsdp_optim = torch.optim.Adam(model.parameters(), lr=1e-3)
- ddp_model = DDP(copy.deepcopy(model))
- ddp_optim = torch.optim.Adam(model.parameters(), lr=1e-3)
+ >>> fsdp_model = FSDP(copy.deepcopy(model))
+ >>> fsdp_optim = torch.optim.Adam(model.parameters(), lr=1e-3)
+ >>> ddp_model = DDP(copy.deepcopy(model))
+ >>> ddp_optim = torch.optim.Adam(model.parameters(), lr=1e-3)
- ddp_state_dict, ddp_optim_state_dict = get_state_dict(ddp_model, ddp_optim)
- fsdp_state_dict, fsdp_optim_state_dict = get_state_dict(fsdp_model, fsdp_optim)
+ >>> ddp_state_dict, ddp_optim_state_dict = get_state_dict(ddp_model, ddp_optim)
+ >>> fsdp_state_dict, fsdp_optim_state_dict = get_state_dict(fsdp_model, fsdp_optim)
- # if we simply call ddp_model.state_dict() and fsdp_model.state_dict(),
- # the asserts will fail.
- assert ddp_state_dict == fsdp_state_dict
- assert ddp_optim_state == fsdp_optim_state_dict
+ >>> # if we simply call ddp_model.state_dict() and fsdp_model.state_dict(),
+ >>> # the asserts will fail.
+ >>> assert ddp_state_dict == fsdp_state_dict
+ >>> assert ddp_optim_state == fsdp_optim_state_dict
Args:
@@ -711,6 +711,8 @@ def get_state_dict(
Returns:
``Tuple`` that contain model state_dict and optimizer state_dict.
+
+ :rtype: typing.Tuple[typing.Dict[str, ValueType], OptimizerStateType]
"""
with gc_context():
| [DCP] Some Distributed state_dict related APIs docstring not formatting correctly in pytorch docs page
### 📚 The doc issue
https://pytorch.org/docs/stable/distributed.checkpoint.html
One example:
The example code and the return type of `get_state_dict` are not showing up correctly. This needs to be fixed by 2.2.1.

### Suggest a potential alternative/fix
_No response_
cc @mrshenli @pritamdamania87 @zhaojuanmao @satgera @rohan-varma @gqchen @aazzolini @osalpekar @jiayisuse @H-Huang @kwen2501 @awgu @penguinwu @fegin @XilunWu @wanchaol @fduwjj @tianyu-l @wconstab @yf225 @LucasLLC
| 2024-02-13T18:06:56 |
||
pytorch/pytorch | 121,250 | pytorch__pytorch-121250 | [
"120722"
] | 6c8c5ad5eaf47a62fafbb4a2747198cbffbf1ff0 | diff --git a/torch/distributed/fsdp/_optim_utils.py b/torch/distributed/fsdp/_optim_utils.py
--- a/torch/distributed/fsdp/_optim_utils.py
+++ b/torch/distributed/fsdp/_optim_utils.py
@@ -2083,10 +2083,5 @@ def _set_optim_use_dtensor(
"DeviceMesh is not compatible with LOCAL_STATE_DICT.",
"Please set state_dict_type to SHARDED_STATE_DICT to get DTensor state_dict.",
)
- elif state_dict_type == StateDictType.FULL_STATE_DICT:
- logger.warning(
- "Found both state_dict_type FULL_STATE_DICT and device_mesh. " # noqa: G004
- "Please set state_dict_type to SHARDED_STATE_DICT to get DTensor state_dict."
- )
else:
state_dict_settings.optim_state_dict_config._use_dtensor = True
diff --git a/torch/distributed/fsdp/_state_dict_utils.py b/torch/distributed/fsdp/_state_dict_utils.py
--- a/torch/distributed/fsdp/_state_dict_utils.py
+++ b/torch/distributed/fsdp/_state_dict_utils.py
@@ -292,11 +292,6 @@ def _full_pre_state_dict_hook(
"""
if getattr(fsdp_state, "_device_mesh", False):
parent_mesh = _mesh_resources.get_parent_mesh(fsdp_state._device_mesh)
- if parent_mesh:
- raise RuntimeError(
- f"Found FSDP's device_mesh {fsdp_state._device_mesh} has a parent device_mesh {parent_mesh}.",
- "We do not support FULL_STATE_DICT for 2D FSDP + TP. Please use FSDP SHARDED_STATE_DICT instead.",
- )
_common_pre_state_dict_hook(module, fsdp_state)
_common_unshard_pre_state_dict_hook(
@@ -804,11 +799,6 @@ def _set_use_dtensor(fsdp_state: _FSDPState) -> None:
"DeviceMesh is not compatible with LOCAL_STATE_DICT.",
"Please set state_dict_type to SHARDED_STATE_DICT to get DTensor state_dict.",
)
- elif state_dict_type == StateDictType.FULL_STATE_DICT:
- logger.warning(
- "Found both state_dict_type FULL_STATE_DICT and device_mesh. " # noqa: G004
- "Please set state_dict_type to SHARDED_STATE_DICT to get DTensor state_dict."
- )
else:
fsdp_state._state_dict_config._use_dtensor = True
| diff --git a/test/distributed/fsdp/test_fsdp_dtensor_state_dict.py b/test/distributed/fsdp/test_fsdp_dtensor_state_dict.py
--- a/test/distributed/fsdp/test_fsdp_dtensor_state_dict.py
+++ b/test/distributed/fsdp/test_fsdp_dtensor_state_dict.py
@@ -313,30 +313,6 @@ def test_raises_warning_or_errors(self):
with FSDP.state_dict_type(model, StateDictType.LOCAL_STATE_DICT):
optim_state_dict = FSDP.optim_state_dict(model, optim)
- with self.assertLogs(
- "torch.distributed.fsdp._state_dict_utils", level="WARNING"
- ) as log:
- with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT):
- state_dict = model.state_dict()
- self.assertEqual(len(log.records), 1)
- self.assertEqual(len(log.output), 1)
- self.assertIn(
- "Found both state_dict_type FULL_STATE_DICT and device_mesh.",
- log.output[0],
- )
-
- with self.assertLogs(
- "torch.distributed.fsdp._optim_utils", level="WARNING"
- ) as log:
- with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT):
- state_dict = FSDP.optim_state_dict(model, optim)
- self.assertEqual(len(log.records), 1)
- self.assertEqual(len(log.output), 1)
- self.assertIn(
- "Found both state_dict_type FULL_STATE_DICT and device_mesh.",
- log.output[0],
- )
-
instantiate_parametrized_tests(TestFSDPWithDeviceMeshAndDTensor)
if __name__ == "__main__":
| Allow Full State Dict with 2D FSDP + TP
### 🐛 Describe the bug
Torch does not allow 2D FSDP + TP to get FULL_STATE_DICT. However, if I remove checks here:
https://github.com/pytorch/pytorch/blob/3f62b05d31d4b29d60874b05adc0e5aedbad3722/torch/distributed/fsdp/_state_dict_utils.py#L303-L310
and warnings here:
https://github.com/pytorch/pytorch/blob/3f62b05d31d4b29d60874b05adc0e5aedbad3722/torch/distributed/fsdp/_state_dict_utils.py#L811-L815
it seems to work fine.
Is there a reason we need these checks? Can we remove them given things seem to work fine?
### Versions
Nightly.
cc @mrshenli @pritamdamania87 @zhaojuanmao @satgera @rohan-varma @gqchen @aazzolini @osalpekar @jiayisuse @H-Huang @kwen2501 @awgu @penguinwu @fegin @XilunWu @wanchaol @fduwjj @wz337 @tianyu-l @wconstab @yf225
| @pytorchbot label "oncall: distributed"
Hey @mvpatel2000. Thanks for raising the issue. Initially we wanted to disable it because the full_state_dict is only a full state dict along the data-parallel dimension. So, to avoid the discrepancy in naming and semantics, we disabled it and let users just use SHARDED_STATE_DICT in the 2D case.
Just to make sure we understand your use case: you want to save the full FSDP state_dict (but still sharded on the TP dimension), right?
cc. @fegin
@wz337 hm... in our case it seems to gather everything (including TP dimension)... Why would it not do that?
> @wz337 hm... in our case it seems to gather everything (including TP dimension)... Why would it not do that?
Hmm. Interesting... We didn't enable the full_state_dict path, nor did we test it before. IIRC, the full_state_dict path currently is not aware of 2D and DTensorExtension.
Hmm. You are saying that without making any change, simply removing the assert gives you the full_state_dict as you expect?
I think DCP's get_state_dict needs to be able to return a full_state_dict if the user wants one. The full_state_dict option at the code pointer is from FSDP, so it might be FSDP-only and not aware of TP?
> Hmm. You are saying without making any change, simply removing the assert gives you the full_state_dict as you expect?
Yep!
We have some model which is FSDP + TPed. We then want to gather the model in order to package it up for easy sharing, e.g. into a Huggingface checkpoint. You can see how we do this here:
https://github.com/mosaicml/llm-foundry/blob/main/llmfoundry/callbacks/hf_checkpointer.py#L230
Currently, we do:
```
use_full_state_dict = not state.is_model_ddp and isinstance(state_dict_model, FSDP)
state_dict_context = fsdp_state_dict_type_context(
original_model, state_dict_type='full') if use_full_state_dict else contextlib.nullcontext()
with state_dict_context:
state_dict = state_dict_model.state_dict()
```
With the new API, we can do:
```
from torch.distributed.checkpoint.state_dict import StateDictOptions, get_model_state_dict
state_dict = get_model_state_dict(
state_dict_model,
options=StateDictOptions(
full_state_dict=True,
cpu_offload=True,
),
)
```
and this seems to work fine! We then create a new model which does not have TP applied, load in the model state dict, and export via Huggingface APIs
Oh ok.
Oh. Ok. Thanks for the code pointer. So to clarify, there are two layers here, FSDP state dict and DCP's distributed state dict (I am referring to `get_model_state_dict`).
When we call the generic `get_model_state_dict` with full_state_dict, it calls into `_gather_state_dict`, which will all_gather the full tensor for you regardless of whether it is 1D or 2D. This is the reason that you are seeing the correct result. https://github.com/pytorch/pytorch/blob/e2ee87d48bd2a70a18920c800da9fd0522be619d/torch/distributed/_state_dict_utils.py#L171
If you use the context manager full_state_dict for FSDP and remove the `RuntimeError`, it would give you 1D DTensor state_dict. So in order to avoid confusion, we disable it in FSDP's full_state_dict originally.
As long as users are using `get_model_state_dict` to do state_dict retrieval, I think I am fine with removing the warning and RuntimeError. @fegin Wdyt?
Ah got it, thanks for the clarification!
It'd be great if we could remove the error/warning then as suggested. It makes our life a LOT easier to easily export models.
Kk. Thanks for reporting the issue. I don't see any issue of removing the RuntimeError and Warning. Just created the PR to let CI run and will get the team to review it and provide feedback if any.
@wz337 there is one more issue though. [Here](https://github.com/pytorch/pytorch/blob/4cf6d1172b25a0a7b773b40cd2401b9a6dad0fc1/torch/distributed/checkpoint/state_dict.py#L399-L401) it only offloads to CPU **AFTER** getting **ALL** of the state_dict. It easily gets OOM if there are huge DTensors in the state_dict. Can we run the following code each time we get a DTensor in the state_dict? I reported this issue [here](https://github.com/pytorch/pytorch/issues/121062).
```
return _gather_state_dict(
    state_dict, cpu_offload=info.cpu_offload, ranks_only=ranks_only
)
```
We had to add the following hook to avoid OOM before calling `get_model_state_dict`:
```
cpu_offload = True

def dtensor_to_tensor_hook(
    module: nn.Module,
    state_dict: Dict[str, Any],
    prefix: str,
    *args: Any,
) -> Dict[str, Any]:
    dtensor_fqns = []
    for fqn in state_dict.keys():
        tensor = state_dict[fqn]
        if isinstance(tensor, DTensor):
            dtensor_fqns.append(fqn)
            tensor = tensor.full_tensor()
            if dist.get_global_rank() == 0:
                if cpu_offload:
                    tensor = tensor.cpu()
                state_dict[fqn] = tensor
    if dist.get_global_rank() != 0:
        for fqn in dtensor_fqns:
            del state_dict[fqn]
    return state_dict

hooks = []
for name, module in state_dict_model.named_modules():
    if isinstance(module, FSDP):
        hooks.append(
            module._register_state_dict_hook(
                dtensor_to_tensor_hook))
``` | 2024-03-05T18:30:08 |
pytorch/pytorch | 123,073 | pytorch__pytorch-123073 | [
"118849"
] | 8602990e3ff349e94820b2b675e0415638f450db | diff --git a/torch/distributed/device_mesh.py b/torch/distributed/device_mesh.py
--- a/torch/distributed/device_mesh.py
+++ b/torch/distributed/device_mesh.py
@@ -60,6 +60,7 @@ class _MeshEnv:
def __init__(self) -> None:
self.mesh_stack: List[DeviceMesh] = []
self.child_to_parent_mapping: Dict[DeviceMesh, DeviceMesh] = {}
+ self.parent_to_child_mapping: Dict[DeviceMesh, Dict[str, DeviceMesh]] = {}
def get_current_mesh(self) -> "DeviceMesh":
if len(self.mesh_stack) == 0:
@@ -69,6 +70,13 @@ def get_current_mesh(self) -> "DeviceMesh":
def create_child_mesh(
self, device_mesh: "DeviceMesh", mesh_dim: int, mesh_dim_name: str
) -> "DeviceMesh":
+ # Directly return the child mesh if it is already created.
+ child_mesh_mappings = self.parent_to_child_mapping.get(device_mesh)
+ if child_mesh_mappings:
+ sub_mesh = child_mesh_mappings.get(mesh_dim_name)
+ if sub_mesh:
+ return sub_mesh
+
# swap the current dim to the last dim then reshape to flatten out other
# dims, so we can just extract the list of ranks which contains cur_rank.
cur_rank = device_mesh.get_rank()
@@ -88,6 +96,9 @@ def create_child_mesh(
res_sub_mesh._dim_group_infos = [device_mesh._dim_group_infos[mesh_dim]] # type: ignore[possibly-undefined]
# Assign the current DeviceMesh as the parent of the child DeviceMesh.
self.child_to_parent_mapping[res_sub_mesh] = device_mesh
+ self.parent_to_child_mapping.setdefault(device_mesh, {})[
+ mesh_dim_name
+ ] = res_sub_mesh
return res_sub_mesh
def get_parent_mesh(self, device_mesh: "DeviceMesh") -> Optional["DeviceMesh"]:
@@ -378,7 +389,6 @@ def __getitem__(self, mesh_dim_name: str) -> "DeviceMesh":
mesh_dim = _mesh_resources.get_mesh_dim_by_name(self, mesh_dim_name)
submesh = _mesh_resources.create_child_mesh(self, mesh_dim, mesh_dim_name)
-
return submesh
def get_group(
| diff --git a/test/distributed/test_device_mesh.py b/test/distributed/test_device_mesh.py
--- a/test/distributed/test_device_mesh.py
+++ b/test/distributed/test_device_mesh.py
@@ -14,6 +14,7 @@
from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh
from torch.distributed.distributed_c10d import (
+ _world,
get_global_rank,
get_world_size,
init_process_group,
@@ -320,6 +321,24 @@ def test_get_item_1d(self):
with self.assertRaisesRegex(RuntimeError, "Invalid mesh_dim_name"):
dp_mesh = mesh["dim0"]
+ @with_comms
+ @run_with_both_funcol_impls
+ def test_cache_and_reuse_submesh_slice_result(self):
+ mesh = init_device_mesh(self.device_type, (2, 4), mesh_dim_names=("dp", "tp"))
+
+ dp_mesh = mesh["dp"]
+ ref_pg_count = _world.group_count
+
+ # When we call the "dp" slice second time, it should not create any new pg.
+ # As we are just using the cached result so the pg count should be the same.
+ dp_mesh_2 = mesh["dp"]
+ self.assertEqual(ref_pg_count, _world.group_count)
+
+ # When we call the "tp" slice, it should create a new pg, as the "tp" slice is called
+ # for the first time.
+ tp_mesh = mesh["tp"]
+ self.assertTrue(_world.group_count > ref_pg_count)
+
@instantiate_parametrized_tests
class TestMeshEnv(DTensorTestBase):
@@ -482,9 +501,11 @@ def test_all_gather_uneven(self):
torch.chunk(big_tensor, device_mesh.size(), dim=shard_dim)
)
unpadded_list = [
- shard_placement._unpad_tensor(big_tensor_chunks[i], pad_sizes[i])
- if pad_sizes[i] > 0
- else big_tensor_chunks[i]
+ (
+ shard_placement._unpad_tensor(big_tensor_chunks[i], pad_sizes[i])
+ if pad_sizes[i] > 0
+ else big_tensor_chunks[i]
+ )
for i, big_tensor in enumerate(big_tensor_chunks)
]
all_gathered_tensor = torch.cat(unpadded_list, dim=shard_dim)
| [DeviceMesh] we should cache and reuse submesh slicing results
### 🚀 The feature, motivation and pitch
As titled, things like `device_mesh["dp"]` should be cached and reused, instead of creating a new one every time we call it.
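For context, a minimal sketch of the slicing pattern in question (illustrative only; it assumes a job launched with 8 ranks via torchrun and a CUDA device per rank):
```python
from torch.distributed.device_mesh import init_device_mesh

mesh_2d = init_device_mesh("cuda", (2, 4), mesh_dim_names=("dp", "tp"))

dp_mesh = mesh_2d["dp"]
dp_mesh_again = mesh_2d["dp"]
# Today each slice call builds a fresh child mesh (and its process groups);
# with caching, the second call should return the already-created submesh.
```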
### Alternatives
_No response_
### Additional context
_No response_
cc @mrshenli @pritamdamania87 @zhaojuanmao @satgera @rohan-varma @gqchen @aazzolini @osalpekar @jiayisuse @H-Huang @kwen2501 @awgu @penguinwu @fegin @XilunWu @fduwjj @wz337 @tianyu-l @wconstab @yf225
| cc @wz337 | 2024-04-01T05:44:15 |
pytorch/pytorch | 126,559 | pytorch__pytorch-126559 | [
"124546"
] | ee68b41571287aaecf4216f752fb592496fea49e | diff --git a/torch/distributed/checkpoint/state_dict.py b/torch/distributed/checkpoint/state_dict.py
--- a/torch/distributed/checkpoint/state_dict.py
+++ b/torch/distributed/checkpoint/state_dict.py
@@ -152,8 +152,11 @@ def _get_fqns(
Returns:
The canonical FQNs based on the model traversal.
"""
+
+ # Remove the checkpoint prefix, if it exists.
+ name = name.replace(_CHECKPOINT_PREFIX, "")
if "." not in name:
- return {name.replace(_CHECKPOINT_PREFIX, "")}
+ return {name}
obj_names = name.split(".")
fqn_obj_names = []
@@ -170,8 +173,6 @@ def _get_fqns(
flat_param = getattr(curr_obj, FLAT_PARAM)
if prefix:
prefix = f"{prefix}."
- # FSDP already handles removal of checkpoint prefix, so we can return
- # directly
return {f"{prefix}{fqn}" for fqn in flat_param._fqns}
curr_obj = getattr(curr_obj, FSDP_WRAPPED_MODULE)
if curr_obj_name != FSDP_WRAPPED_MODULE:
| Not loading optimizer state separately from checkpoint causes errors with FQNs
### 🐛 Describe the bug
With distributed checkpointing + FSDP, and with `use_orig_params = False` and activation checkpointing, when loading the optimizer state through the `torch.distributed.checkpoint.load()` function, resuming from checkpoint throws the following error:
```
│ /usr/lib/python3/dist-packages/torch/distributed/checkpoint/state_dict.py:83 │
│ 4 in set_optimizer_state_dict │
│ │
│ 831 │ │ info = _verify_options(model, optimizers, optim_only=True, op │
│ 832 │ │ │
│ 833 │ │ _verify_state_dict({}, optim_state_dict, info) │
│ ❱ 834 │ │ _load_optim_state_dict(model, optimizers, optim_state_dict, i │
│ 835 │
│ 836 │
│ 837 def set_state_dict( │
│ │
│ /usr/lib/python3/dist-packages/torch/distributed/checkpoint/state_dict.py:55 │
│ 6 in _load_optim_state_dict │
│ │
│ 553 │ │ return │
│ 554 │ │
│ 555 │ for optim in optimizers: │
│ ❱ 556 │ │ optim_state_dict = _split_optim_state_dict(model, optim, stat │
│ 557 │ │ if info.fsdp_modules: │
│ 558 │ │ │ with info.fsdp_context(): │
│ 559 │ │ │ │ optim_state_dict = FSDP.optim_state_dict_to_load( │
│ │
│ /usr/lib/python3/dist-packages/torch/distributed/checkpoint/state_dict.py:52 │
│ 6 in _split_optim_state_dict │
│ │
│ 523 │ │ │ │ assert isinstance(params, list) │
│ 524 │ │ │ │ params.append(fqn) │
│ 525 │ │ │ │ if param.requires_grad: │
│ ❱ 526 │ │ │ │ │ state[fqn] = cast(DictValueType, optim_state_dict │
│ 527 │ │ │ │ for loaded_param_group in cast(ListDictValueType, opt │
│ 528 │ │ │ │ │ params = loaded_param_group[PARAMS] │
│ 529 │ │ │ │ │ assert isinstance(params, list) │
╰──────────────────────────────────────────────────────────────────────────────╯
KeyError: 'model.transformer.blocks.0._flat_param'
```
Deeper investigation reveals that for some reason, the FQNs of the parameters with activation checkpointing turned on do not correspond with those in the state itself. Turning activation checkpointing on makes the FQNs for FSDP-wrapped modules the `flat_params`, while without activation checkpointing, the FQNs correspond to the original parameter names.
I'm not sure what the right behavior with FQNs here is, given that `use_orig_params = False`, but **calling `load_sharded_optimizer_state_dict` to load the optimizer state separately addresses the issue**. Additionally, **turning activation checkpointing off also addresses the issue**.
In the [docs](https://pytorch.org/tutorials/recipes/distributed_checkpoint_recipe.html#loading) for distributed checkpointing there's an example where the optimizer is not loaded separately, but in [this comment](https://github.com/pytorch/pytorch/blob/f9fce110af428a600e597291a0ab80e43dc39e93/torch/distributed/checkpoint/optimizer.py#L215) in the repo, the recommendation with FSDP is to load the optimizer state separately.
Could this discrepancy be addressed, and if it is a bug with torch, could it be addressed? I'm very confused what role activation checkpointing is playing here as well. Thanks!
### Versions
```
PyTorch version: 2.3.0.dev20231215+cu121
Is debug build: False
CUDA used to build PyTorch: 12.1
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.6 LTS (x86_64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0
Clang version: Could not collect
CMake version: version 3.26.3
Libc version: glibc-2.31
Python version: 3.10.13 (main, Aug 25 2023, 13:20:03) [GCC 9.4.0] (64-bit runtime)
Python platform: Linux-5.19.17-coreweave-x86_64-with-glibc2.31
Is CUDA available: True
CUDA runtime version: 12.1.105
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration:
GPU 0: NVIDIA H100 80GB HBM3
GPU 1: NVIDIA H100 80GB HBM3
GPU 2: NVIDIA H100 80GB HBM3
GPU 3: NVIDIA H100 80GB HBM3
GPU 4: NVIDIA H100 80GB HBM3
GPU 5: NVIDIA H100 80GB HBM3
GPU 6: NVIDIA H100 80GB HBM3
GPU 7: NVIDIA H100 80GB HBM3
Nvidia driver version: 535.154.05
cuDNN version: Probably one of the following:
/usr/lib/x86_64-linux-gnu/libcudnn.so.8.9.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.9.0
/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.9.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.9.0
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.9.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.9.0
/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.9.0
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
Address sizes: 46 bits physical, 57 bits virtual
CPU(s): 128
On-line CPU(s) list: 0-127
Thread(s) per core: 2
Core(s) per socket: 32
Socket(s): 2
NUMA node(s): 2
Vendor ID: GenuineIntel
CPU family: 6
Model: 143
Model name: Intel(R) Xeon(R) Platinum 8462Y+
Stepping: 8
Frequency boost: enabled
CPU MHz: 2801.000
CPU max MHz: 2801.0000
CPU min MHz: 800.0000
BogoMIPS: 5600.00
Virtualization: VT-x
L1d cache: 3 MiB
L1i cache: 2 MiB
L2 cache: 128 MiB
L3 cache: 120 MiB
NUMA node0 CPU(s): 0-31,64-95
NUMA node1 CPU(s): 32-63,96-127
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Retbleed: Not affected
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb cat_l3 cat_l2 cdp_l3 invpcid_single cdp_l2 ssbd mba ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid cqm rdt_a avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb intel_pt avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local split_lock_detect avx_vnni avx512_bf16 wbnoinvd dtherm ida arat pln pts hfi avx512vbmi umip pku ospke waitpkg avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg tme avx512_vpopcntdq la57 rdpid bus_lock_detect cldemote movdiri movdir64b enqcmd fsrm md_clear serialize tsxldtrk pconfig arch_lbr ibt amx_bf16 avx512_fp16 amx_tile amx_int8 flush_l1d arch_capabilities
Versions of relevant libraries:
[pip3] numpy==1.24.1
[pip3] onnx==1.14.0
[pip3] onnxruntime==1.15.1
[pip3] pytorch-ranger==0.1.1
[pip3] pytorch-triton==2.2.0+e28a256d71
[pip3] torch==2.3.0.dev20231215+cu121
[pip3] torch-optimizer==0.3.0
[pip3] torchmetrics==1.0.3
[pip3] torchvision==0.18.0.dev20231215+cu121
[pip3] triton==2.2.0
[pip3] triton_pre_mlir==2.0.0
[conda] Could not collect
```
cc @mrshenli @pritamdamania87 @zhaojuanmao @satgera @rohan-varma @gqchen @aazzolini @osalpekar @jiayisuse @H-Huang @kwen2501 @awgu @penguinwu @fegin @XilunWu @wanchaol @fduwjj @wz337 @tianyu-l @wconstab @yf225 @chauhang @d4l3k
| @pytorchbot label "oncall: distributed"
Maybe @wz337 or @fegin?
This is still an issue in the latest torch nightly, just tested it.
Just submitted [this PR](https://github.com/pytorch/pytorch/pull/124698) that should resolve the issue. More info in the PR description. | 2024-05-17T18:31:35 |
|
pytorch/pytorch | 127,219 | pytorch__pytorch-127219 | [
"122792"
] | 81b88543f0077e183e3394b31126a3834f6a712c | diff --git a/torch/distributed/checkpoint/state_dict.py b/torch/distributed/checkpoint/state_dict.py
--- a/torch/distributed/checkpoint/state_dict.py
+++ b/torch/distributed/checkpoint/state_dict.py
@@ -215,6 +215,8 @@ def recurse(module: nn.Module, curr_fqn: str) -> Generator:
for name, obj in chain(
module.named_buffers(recurse=False), module.named_parameters(recurse=False)
):
+ if name in module._non_persistent_buffers_set:
+ continue
new_fqn = f"{curr_fqn}{name}"
yield new_fqn, obj
| diff --git a/test/distributed/checkpoint/test_state_dict.py b/test/distributed/checkpoint/test_state_dict.py
--- a/test/distributed/checkpoint/test_state_dict.py
+++ b/test/distributed/checkpoint/test_state_dict.py
@@ -556,6 +556,17 @@ def _test_activation_ckpt_fqns_fsdp1(self, use_orig_params: bool) -> None:
self.assertEqual(original_keys, new_keys)
+ @with_comms
+ @skip_if_lt_x_gpu(1)
+ def test_non_persistent_buffers(self) -> None:
+ model = CompositeParamModel(device=torch.device("cuda"))
+ model.register_buffer(
+ "dont_save_me", torch.rand(100, device="cuda"), persistent=False
+ )
+ ddp_model = DDP(copy.deepcopy(model))
+ set_model_state_dict(ddp_model, get_model_state_dict(ddp_model))
+ self.assertEqual(model.state_dict(), get_model_state_dict(ddp_model))
+
@with_comms
@skip_if_lt_x_gpu(2)
def test_fsdp_root_not_initialized(self) -> None:
| [DCP] `set_model_state_dict` errors on compiled module with non-persistent buffer
```
"""
torchrun --standalone --nproc_per_node=2 repro_dcp_compile.py
"""
import os
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.distributed.checkpoint.state_dict import get_model_state_dict, set_model_state_dict
class Model(nn.Module):
def __init__(self):
super().__init__()
self.lin1 = nn.Linear(4, 4)
self.lin2 = nn.Linear(4, 4)
self.register_buffer("buf", torch.randn((4,)), persistent=False)
self.weight = nn.Parameter(torch.randn((4, 4)))
if __name__ == "__main__":
dist.init_process_group(backend="nccl")
gpu_id = int(os.environ["LOCAL_RANK"])
device = f"cuda:{gpu_id}"
torch.cuda.set_device(device)
model = Model()
model = torch.compile(model)
sharded_sd = get_model_state_dict(model)
set_model_state_dict(model, sharded_sd)
```
```
[rank0]: Traceback (most recent call last):
[rank0]: File "/data/users/andgu/pytorch/repro_dcp_compile.py", line 36, in <module>
[rank0]: set_model_state_dict(model, sharded_sd)
[rank0]: File "/data/users/andgu/pytorch/torch/distributed/checkpoint/state_dict.py", line 853, in set_model_state_dict
[rank0]: return _load_model_state_dict(model, model_state_dict, info)
[rank0]: File "/data/users/andgu/pytorch/torch/distributed/checkpoint/state_dict.py", line 416, in _load_model_state_dict
[rank0]: state_dict[fqn_with_prefix] = state_dict.pop(fqn)
[rank0]: KeyError: 'buf'
```
`set_model_state_dict` calls into `_load_model_state_dict`, which iterates over `named_buffers()`. For a compiled module, `fqns` and `fqns_with_prefix` always mismatch, so `_load_model_state_dict` will try to reassign from the FQN without prefix to the one with prefix. However, this does not account for non-persistent buffers not existing in the state dict.
One solution could be just to continue `if fqn not in state_dict`.
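A small standalone illustration (mine, not from the issue) of the underlying mismatch — non-persistent buffers appear in `named_buffers()` but not in `state_dict()`:
```python
import torch
import torch.nn as nn

m = nn.Module()
m.register_buffer("persistent_buf", torch.zeros(2))
m.register_buffer("transient_buf", torch.zeros(2), persistent=False)

print([name for name, _ in m.named_buffers()])  # ['persistent_buf', 'transient_buf']
print(list(m.state_dict().keys()))              # ['persistent_buf']
```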
cc @LucasLLC
| 2024-05-27T09:01:45 |
|
encode/django-rest-framework | 326 | encode__django-rest-framework-326 | [
"316"
] | cef379db065711bd2f1b0805d28a56f7a80cef37 | diff --git a/rest_framework/runtests/runtests.py b/rest_framework/runtests/runtests.py
--- a/rest_framework/runtests/runtests.py
+++ b/rest_framework/runtests/runtests.py
@@ -32,7 +32,7 @@ def main():
else:
print usage()
sys.exit(1)
- failures = test_runner.run_tests(['rest_framework' + test_case])
+ failures = test_runner.run_tests(['tests' + test_case])
sys.exit(failures)
diff --git a/rest_framework/runtests/settings.py b/rest_framework/runtests/settings.py
--- a/rest_framework/runtests/settings.py
+++ b/rest_framework/runtests/settings.py
@@ -91,6 +91,7 @@
# 'django.contrib.admindocs',
'rest_framework',
'rest_framework.authtoken',
+ 'rest_framework.tests'
)
STATIC_URL = '/static/'
@@ -100,14 +101,6 @@
if django.VERSION < (1, 3):
INSTALLED_APPS += ('staticfiles',)
-# OAuth support is optional, so we only test oauth if it's installed.
-try:
- import oauth_provider
-except ImportError:
- pass
-else:
- INSTALLED_APPS += ('oauth_provider',)
-
# If we're running on the Jenkins server we want to archive the coverage reports as XML.
import os
if os.environ.get('HUDSON_URL', None):
| diff --git a/rest_framework/tests/__init__.py b/rest_framework/tests/__init__.py
--- a/rest_framework/tests/__init__.py
+++ b/rest_framework/tests/__init__.py
@@ -1,13 +0,0 @@
-"""
-Force import of all modules in this package in order to get the standard test
-runner to pick up the tests. Yowzers.
-"""
-import os
-
-modules = [filename.rsplit('.', 1)[0]
- for filename in os.listdir(os.path.dirname(__file__))
- if filename.endswith('.py') and not filename.startswith('_')]
-__test__ = dict()
-
-for module in modules:
- exec("from rest_framework.tests.%s import *" % module)
diff --git a/rest_framework/tests/models.py b/rest_framework/tests/models.py
--- a/rest_framework/tests/models.py
+++ b/rest_framework/tests/models.py
@@ -40,7 +40,7 @@ class RESTFrameworkModel(models.Model):
Base for test models that sets app_label, so they play nicely.
"""
class Meta:
- app_label = 'rest_framework'
+ app_label = 'tests'
abstract = True
diff --git a/rest_framework/tests/tests.py b/rest_framework/tests/tests.py
new file mode 100644
--- /dev/null
+++ b/rest_framework/tests/tests.py
@@ -0,0 +1,13 @@
+"""
+Force import of all modules in this package in order to get the standard test
+runner to pick up the tests. Yowzers.
+"""
+import os
+
+modules = [filename.rsplit('.', 1)[0]
+ for filename in os.listdir(os.path.dirname(__file__))
+ if filename.endswith('.py') and not filename.startswith('_')]
+__test__ = dict()
+
+for module in modules:
+ exec("from rest_framework.tests.%s import *" % module)
| Tests should not rely on error messages being in English
djangorestframework.tests.validators.TestFormValidation.test_modelform_validation_failed_due_to_field_error_returns_appropriate_message
djangorestframework.tests.validators.TestFormValidation.test_modelform_validation_failed_due_to_multiple_errors_returns_appropriate_message
djangorestframework.tests.validators.TestFormValidation.test_modelform_validation_failed_due_to_no_content_returns_appropriate_message
djangorestframework.tests.validators.TestFormValidation.test_validation_failed_due_to_field_error_returns_appropriate_message
djangorestframework.tests.validators.TestFormValidation.test_validation_failed_due_to_multiple_errors_returns_appropriate_message
djangorestframework.tests.validators.TestFormValidation.test_validation_failed_due_to_no_content_returns_appropriate_message
Traceback (most recent call last):
File "/var/lib/jenkins/jobs/S3G/workspace/venv/lib/python2.6/site-packages/djangorestframework/tests/validators.py", line 263, in test_modelform_validation_failed_due_to_field_error_returns_appropriate_message
self.validation_failed_due_to_field_error_returns_appropriate_message(validator)
File "/var/lib/jenkins/jobs/S3G/workspace/venv/lib/python2.6/site-packages/djangorestframework/tests/validators.py", line 168, in validation_failed_due_to_field_error_returns_appropriate_message
self.assertEqual(exc.response.raw_content, {'field_errors': {'qwerty': ['This field is required.']}})
AssertionError: {u'field_errors': {'qwerty': [u'Ce champ est obligatoire.']}} != {'field_errors': {'qwerty': ['This field is required.']}}
- {u'field_errors': {'qwerty': [u'Ce champ est obligatoire.']}}
- {'field_errors': {'qwerty': ['This field is required.']}}
Please pardon my French ;)
I think the deal here is that all test cases inside REST framework ought to inherit from a single base test case which ensures that any test settings are set up appropriately.
Any pull requests towards this aim would be much appreciated.
Same root cause as #185.
In the meantime I guess that the fix would be to make sure your test settings are appropriate for testing REST framework.
Note that even eg. some Django contrib apps include similar tests-not-fully-sandboxed issues, so this is a bit of a awkward area.
| 2012-10-27T17:45:02 |
encode/django-rest-framework | 366 | encode__django-rest-framework-366 | [
"346"
] | 3e0319389a4e7714e0658bd38cb1e9fb01cf662d | diff --git a/rest_framework/response.py b/rest_framework/response.py
--- a/rest_framework/response.py
+++ b/rest_framework/response.py
@@ -45,3 +45,13 @@ def status_text(self):
# TODO: Deprecate and use a template tag instead
# TODO: Status code text for RFC 6585 status codes
return STATUS_CODE_TEXT.get(self.status_code, '')
+
+ def __getstate__(self):
+ """
+ Remove attributes from the response that shouldn't be cached
+ """
+ state = super(Response, self).__getstate__()
+ for key in ('accepted_renderer', 'renderer_context', 'data'):
+ if key in state:
+ del state[key]
+ return state
diff --git a/rest_framework/runtests/settings.py b/rest_framework/runtests/settings.py
--- a/rest_framework/runtests/settings.py
+++ b/rest_framework/runtests/settings.py
@@ -21,6 +21,12 @@
}
}
+CACHES = {
+ 'default': {
+ 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
+ }
+}
+
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
| diff --git a/rest_framework/tests/renderers.py b/rest_framework/tests/renderers.py
--- a/rest_framework/tests/renderers.py
+++ b/rest_framework/tests/renderers.py
@@ -1,6 +1,8 @@
+import pickle
import re
from django.conf.urls.defaults import patterns, url, include
+from django.core.cache import cache
from django.test import TestCase
from django.test.client import RequestFactory
@@ -83,6 +85,7 @@ def get(self, request, **kwargs):
urlpatterns = patterns('',
url(r'^.*\.(?P<format>.+)$', MockView.as_view(renderer_classes=[RendererA, RendererB])),
url(r'^$', MockView.as_view(renderer_classes=[RendererA, RendererB])),
+ url(r'^cache$', MockGETView.as_view()),
url(r'^jsonp/jsonrenderer$', MockGETView.as_view(renderer_classes=[JSONRenderer, JSONPRenderer])),
url(r'^jsonp/nojsonrenderer$', MockGETView.as_view(renderer_classes=[JSONPRenderer])),
url(r'^html$', HTMLView.as_view()),
@@ -416,3 +419,89 @@ def assertXMLContains(self, xml, string):
self.assertTrue(xml.startswith('<?xml version="1.0" encoding="utf-8"?>\n<root>'))
self.assertTrue(xml.endswith('</root>'))
self.assertTrue(string in xml, '%r not in %r' % (string, xml))
+
+
+# Tests for caching issue, #346
+class CacheRenderTest(TestCase):
+ """
+ Tests specific to caching responses
+ """
+
+ urls = 'rest_framework.tests.renderers'
+
+ cache_key = 'just_a_cache_key'
+
+ @classmethod
+ def _get_pickling_errors(cls, obj, seen=None):
+ """ Return any errors that would be raised if `obj' is pickled
+ Courtesy of koffie @ http://stackoverflow.com/a/7218986/109897
+ """
+ if seen == None:
+ seen = []
+ try:
+ state = obj.__getstate__()
+ except AttributeError:
+ return
+ if state == None:
+ return
+ if isinstance(state,tuple):
+ if not isinstance(state[0],dict):
+ state=state[1]
+ else:
+ state=state[0].update(state[1])
+ result = {}
+ for i in state:
+ try:
+ pickle.dumps(state[i],protocol=2)
+ except pickle.PicklingError:
+ if not state[i] in seen:
+ seen.append(state[i])
+ result[i] = cls._get_pickling_errors(state[i],seen)
+ return result
+
+ def http_resp(self, http_method, url):
+ """
+ Simple wrapper for Client http requests
+ Removes the `client' and `request' attributes from as they are
+ added by django.test.client.Client and not part of caching
+ responses outside of tests.
+ """
+ method = getattr(self.client, http_method)
+ resp = method(url)
+ del resp.client, resp.request
+ return resp
+
+ def test_obj_pickling(self):
+ """
+ Test that responses are properly pickled
+ """
+ resp = self.http_resp('get', '/cache')
+
+ # Make sure that no pickling errors occurred
+ self.assertEqual(self._get_pickling_errors(resp), {})
+
+ # Unfortunately LocMem backend doesn't raise PickleErrors but returns
+ # None instead.
+ cache.set(self.cache_key, resp)
+ self.assertTrue(cache.get(self.cache_key) is not None)
+
+ def test_head_caching(self):
+ """
+ Test caching of HEAD requests
+ """
+ resp = self.http_resp('head', '/cache')
+ cache.set(self.cache_key, resp)
+
+ cached_resp = cache.get(self.cache_key)
+ self.assertIsInstance(cached_resp, Response)
+
+ def test_get_caching(self):
+ """
+ Test caching of GET requests
+ """
+ resp = self.http_resp('get', '/cache')
+ cache.set(self.cache_key, resp)
+
+ cached_resp = cache.get(self.cache_key)
+ self.assertIsInstance(cached_resp, Response)
+ self.assertEqual(cached_resp.content, resp.content)
| Caching in DRF?
Awesome work on v2, I'm really liking what I've seen so far. Big thumbs up!
One problem arose when I was migrating from tastypie to DRF. I use Django's cache framework to cache everything on the site I'm developing (it's very read-heavy), but when I try to access the API I get `TypeError: can't pickle _Input objects`. It works just fine with caching disabled.
It should be noted that I use nginx + uwsgi instead of the internal dev server.
Here's the traceback:
```
Traceback (most recent call last):
File ".../lib/python2.7/site-packages/django/core/handlers/wsgi.py", line 241, in __call__
response = self.get_response(request)
File ".../lib/python2.7/site-packages/django/core/handlers/base.py", line 192, in get_response
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
File ".../lib/python2.7/site-packages/django/core/handlers/base.py", line 221, in handle_uncaught_exception
return debug.technical_500_response(request, *exc_info)
File ".../lib/python2.7/site-packages/django/core/handlers/base.py", line 188, in get_response
response = middleware_method(request, response)
File ".../lib/python2.7/site-packages/django/middleware/cache.py", line 112, in process_response
lambda r: self.cache.set(cache_key, r, timeout)
File ".../lib/python2.7/site-packages/django/template/response.py", line 91, in add_post_render_callback
callback(self)
File ".../lib/python2.7/site-packages/django/middleware/cache.py", line 112, in <lambda>
lambda r: self.cache.set(cache_key, r, timeout)
File ".../lib/python2.7/site-packages/redis_cache/cache.py", line 218, in set
result = self._set(key, pickle.dumps(value), int(timeout), client, _add_only)
File "/usr/local/Cellar/python/2.7.3/Frameworks/Python.framework/Versions/2.7/lib/python2.7/copy_reg.py", line 70, in _reduce_ex
raise TypeError, "can't pickle %s objects" % base.__name__
TypeError: can't pickle _Input objects
```
`_Input` object is of type `uwsgi._Input`
| Wow, that's an interesting one.
So Django's caching layer adds a post_render_callback to the response which is used to cache the response once it's been rendered. So far so good. The problem is that it uses pickle to do so, which is blowing up for Response objects (where it would pass for HttpResponse objects).
I've not looked at the pickle API enough to know exactly how this needs to work, but seems like we need to implement `__getstate__` on Response to do something other than the default behavior.
I reckon we can probably get this sorted, just needs a bit of digging to figure out how to make Response objects properly pickle-able.
First step would be to throw together some failing tests.
I believe the fix would be to ensure that `Response.__getstate__` calls the superclass, then pops off .accepted_renderer / .renderer_context / .data from the dict before returning (since those bits of state may not be picklable.)
That should work okay, because the restored, unpickled Response will include the required `.content`, it just won't include the intermediate state it used along the way to get there.
There's probably a slightly nicer solution that _does_ correctly restore all the state, but I think this should get the job done and cover all sensible use-cases.
Sounds great. I was just about to write some tests for this, but maybe you're already on your way with a fix?
@jmagnusson No, not started on anything. Be mighty pleased if you could tackle it, or provide tests.
Okay, I'll see what I can do. Hopefully I will get some time to do it later this week :-)
| 2012-11-04T19:55:03 |
encode/django-rest-framework | 396 | encode__django-rest-framework-396 | [
"380"
] | da1aa5542cff3295d3a53821d4afe02911094181 | diff --git a/rest_framework/fields.py b/rest_framework/fields.py
--- a/rest_framework/fields.py
+++ b/rest_framework/fields.py
@@ -522,7 +522,10 @@ def to_native(self, obj):
view_name = self.view_name
request = self.context.get('request', None)
format = self.format or self.context.get('format', None)
- kwargs = {self.pk_url_kwarg: obj.pk}
+ pk = getattr(obj, 'pk', None)
+ if pk is None:
+ return
+ kwargs = {self.pk_url_kwarg: pk}
try:
return reverse(view_name, kwargs=kwargs, request=request, format=format)
except:
| diff --git a/rest_framework/tests/hyperlinkedserializers.py b/rest_framework/tests/hyperlinkedserializers.py
--- a/rest_framework/tests/hyperlinkedserializers.py
+++ b/rest_framework/tests/hyperlinkedserializers.py
@@ -2,7 +2,7 @@
from django.test import TestCase
from django.test.client import RequestFactory
from rest_framework import generics, status, serializers
-from rest_framework.tests.models import Anchor, BasicModel, ManyToManyModel, BlogPost, BlogPostComment, Album, Photo
+from rest_framework.tests.models import Anchor, BasicModel, ManyToManyModel, BlogPost, BlogPostComment, Album, Photo, OptionalRelationModel
factory = RequestFactory()
@@ -67,6 +67,11 @@ class AlbumDetail(generics.RetrieveAPIView):
model = Album
+class OptionalRelationDetail(generics.RetrieveAPIView):
+ model = OptionalRelationModel
+ model_serializer_class = serializers.HyperlinkedModelSerializer
+
+
urlpatterns = patterns('',
url(r'^basic/$', BasicList.as_view(), name='basicmodel-list'),
url(r'^basic/(?P<pk>\d+)/$', BasicDetail.as_view(), name='basicmodel-detail'),
@@ -76,7 +81,8 @@ class AlbumDetail(generics.RetrieveAPIView):
url(r'^posts/(?P<pk>\d+)/$', BlogPostDetail.as_view(), name='blogpost-detail'),
url(r'^comments/$', BlogPostCommentListCreate.as_view(), name='blogpostcomment-list'),
url(r'^albums/(?P<title>\w[\w-]*)/$', AlbumDetail.as_view(), name='album-detail'),
- url(r'^photos/$', PhotoListCreate.as_view(), name='photo-list')
+ url(r'^photos/$', PhotoListCreate.as_view(), name='photo-list'),
+ url(r'^optionalrelation/(?P<pk>\d+)/$', OptionalRelationDetail.as_view(), name='optionalrelationmodel-detail'),
)
@@ -211,3 +217,26 @@ def test_create_photo(self):
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(self.post.photo_set.count(), 1)
self.assertEqual(self.post.photo_set.all()[0].description, 'A test photo')
+
+
+class TestOptionalRelationHyperlinkedView(TestCase):
+ urls = 'rest_framework.tests.hyperlinkedserializers'
+
+ def setUp(self):
+ """
+ Create 1 OptionalRelationModel intances.
+ """
+ OptionalRelationModel().save()
+ self.objects = OptionalRelationModel.objects
+ self.detail_view = OptionalRelationDetail.as_view()
+ self.data = {"url": "http://testserver/optionalrelation/1/", "other": None}
+
+ def test_get_detail_view(self):
+ """
+ GET requests to RetrieveAPIView with optional relations should return None
+ for non existing relations.
+ """
+ request = factory.get('/optionalrelationmodel-detail/1')
+ response = self.detail_view(request, pk=1).render()
+ self.assertEquals(response.status_code, status.HTTP_200_OK)
+ self.assertEquals(response.data, self.data)
diff --git a/rest_framework/tests/models.py b/rest_framework/tests/models.py
--- a/rest_framework/tests/models.py
+++ b/rest_framework/tests/models.py
@@ -149,3 +149,8 @@ def info(self):
# Model for issue #324
class BlankFieldModel(RESTFrameworkModel):
title = models.CharField(max_length=100, blank=True)
+
+
+# Model for issue #380
+class OptionalRelationModel(RESTFrameworkModel):
+ other = models.ForeignKey('OptionalRelationModel', blank=True, null=True)
| HyperlinkedRelatedField breaks for Null related field values
If a model's related field allows nulls and has a null value, HyperlinkedRelatedField throws an error.
File "... /python2.7/site-packages/rest_framework/fields.py", line 521, in to_native
kwargs = {self.pk_url_kwarg: obj.pk}
AttributeError: 'NoneType' object has no attribute 'pk'
It appears that it is the null value that is sent as `obj`. In HyperlinkedRelatedField.to_native, if obj is None, shouldn't it just return None so the returned data is null?
| +1. Yes, that'd be sensible.
I think this may be related to bug #384...
| 2012-11-10T11:27:13 |
encode/django-rest-framework | 422 | encode__django-rest-framework-422 | [
"421"
] | 9973cf329a2133a900256b53236348ef3c870842 | diff --git a/rest_framework/fields.py b/rest_framework/fields.py
--- a/rest_framework/fields.py
+++ b/rest_framework/fields.py
@@ -700,6 +700,23 @@ def from_native(self, value):
return smart_unicode(value)
+class URLField(CharField):
+ type_name = 'URLField'
+
+ def __init__(self, **kwargs):
+ kwargs['max_length'] = kwargs.get('max_length', 200)
+ kwargs['validators'] = [validators.URLValidator()]
+ super(URLField, self).__init__(**kwargs)
+
+
+class SlugField(CharField):
+ type_name = 'SlugField'
+
+ def __init__(self, *args, **kwargs):
+ kwargs['max_length'] = kwargs.get('max_length', 50)
+ super(SlugField, self).__init__(*args, **kwargs)
+
+
class ChoiceField(WritableField):
type_name = 'ChoiceField'
widget = widgets.Select
diff --git a/rest_framework/serializers.py b/rest_framework/serializers.py
--- a/rest_framework/serializers.py
+++ b/rest_framework/serializers.py
@@ -427,6 +427,10 @@ def get_field(self, model_field):
kwargs['choices'] = model_field.flatchoices
return ChoiceField(**kwargs)
+ max_length = getattr(model_field, 'max_length', None)
+ if max_length:
+ kwargs['max_length'] = max_length
+
field_mapping = {
models.FloatField: FloatField,
models.IntegerField: IntegerField,
@@ -437,6 +441,8 @@ def get_field(self, model_field):
models.DateField: DateField,
models.EmailField: EmailField,
models.CharField: CharField,
+ models.URLField: URLField,
+ models.SlugField: SlugField,
models.TextField: CharField,
models.CommaSeparatedIntegerField: CharField,
models.BooleanField: BooleanField,
| diff --git a/rest_framework/tests/serializer.py b/rest_framework/tests/serializer.py
--- a/rest_framework/tests/serializer.py
+++ b/rest_framework/tests/serializer.py
@@ -239,6 +239,14 @@ def test_null_is_true_fields(self):
self.assertEquals(serializer.is_valid(), True)
self.assertEquals(serializer.errors, {})
+ def test_modelserializer_max_length_exceeded(self):
+ data = {
+ 'title': 'x' * 201,
+ }
+ serializer = ActionItemSerializer(data=data)
+ self.assertEquals(serializer.is_valid(), False)
+ self.assertEquals(serializer.errors, {'title': [u'Ensure this value has at most 200 characters (it has 201).']})
+
class MetadataTests(TestCase):
def test_empty(self):
| ModelSerializers should respect the max_length attribute of a Model Field
The current implementation causes a DatabaseError because the passed value is too large.
| 2012-11-16T21:48:43 |
|
encode/django-rest-framework | 438 | encode__django-rest-framework-438 | [
"376"
] | b0bad35ef0972ec26ff808d81b1f43f16683898d | diff --git a/rest_framework/fields.py b/rest_framework/fields.py
--- a/rest_framework/fields.py
+++ b/rest_framework/fields.py
@@ -54,6 +54,8 @@ def initialize(self, parent, field_name):
self.parent = parent
self.root = parent.root or parent
self.context = self.root.context
+ if self.root.partial:
+ self.required = False
def field_from_native(self, data, files, field_name, into):
"""
diff --git a/rest_framework/serializers.py b/rest_framework/serializers.py
--- a/rest_framework/serializers.py
+++ b/rest_framework/serializers.py
@@ -91,12 +91,13 @@ class Meta(object):
_options_class = SerializerOptions
_dict_class = SortedDictWithMetadata # Set to unsorted dict for backwards compatibility with unsorted implementations.
- def __init__(self, instance=None, data=None, files=None, context=None, **kwargs):
+ def __init__(self, instance=None, data=None, files=None, context=None, partial=False, **kwargs):
super(BaseSerializer, self).__init__(**kwargs)
self.opts = self._options_class(self.Meta)
self.fields = copy.deepcopy(self.base_fields)
self.parent = None
self.root = None
+ self.partial = partial
self.context = context or {}
| diff --git a/rest_framework/tests/serializer.py b/rest_framework/tests/serializer.py
--- a/rest_framework/tests/serializer.py
+++ b/rest_framework/tests/serializer.py
@@ -115,6 +115,18 @@ def test_update(self):
self.assertTrue(serializer.object is expected)
self.assertEquals(serializer.data['sub_comment'], 'And Merry Christmas!')
+ def test_partial_update(self):
+ msg = 'Merry New Year!'
+ partial_data = {'content': msg}
+ serializer = CommentSerializer(self.comment, data=partial_data)
+ self.assertEquals(serializer.is_valid(), False)
+ serializer = CommentSerializer(self.comment, data=partial_data, partial=True)
+ expected = self.comment
+ self.assertEqual(serializer.is_valid(), True)
+ self.assertEquals(serializer.object, expected)
+ self.assertTrue(serializer.object is expected)
+ self.assertEquals(serializer.data['content'], msg)
+
def test_model_fields_as_expected(self):
"""
Make sure that the fields returned are the same as defined
| Add `partial=True` argument to not require serializer fields on update
This is a feature request.
Would it be possible to ignore the required flag on fields during deserialization for the purpose of an update? It seems like marking fields as required makes a lot of sense for creating a new model, but not as much sense when simply updating an existing model.
**Edit**: Tweaked title slightly - @tomchristie
| Proper usage of the `PUT` method would require the fields to be included.
What you're suggesting is the expected behavior of the `PATCH` method.
I'm going to close this ticket as a dup of #333.
If you strongly require this behavior for `PUT` I'd still consider this a duplicate - the right thing to do would be to get #333 implemented for `PATCH` support in REST framework and then override your views to instead call `.patch` from your `.put` methods.
Make sense?
Your reasoning and argument make perfect sense. But I was actually wanting to suggest a change at the serializer level. I was thinking it would be nice to be able to do:
``` python
# data is partial data (i.e. missing some required fields)
serializer = MySerializer(obj, data, partial=True) # doesn't cause validation errors
serializer = MySerializer(obj, data) # still causes validation errors
serializer = MySerializer(data=data) # still causes validation errors
```
Probably not a huge win over the following:
``` python
data = MySerializer(obj).data
data.update(partial_data)
serializer = MySerializer(obj, data)
```
@maspwr - Yup, your `partial=True` flag is _exactly_ the serializer API change I was hoping for in order to support PATCH. Anyway you're right - it makes sense to have that as a separate ticket (and precursor to PATCH implementation.)
I think it'd be easy enough - the .required flag on serializer fields could simply be ignored if partial is set to `True` on the serializer. I'd happily accept a pull req for that.
OK, great! I'll look into it then.
Great! Lemme know if there's anything I can do to help.
Note that you can get to the top level serializer instance from the child field by accessing `self.root`, so you'll probably be checking something like `self.root.partial`.
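A rough sketch of that check, mirroring what the patch above ends up doing in `Field.initialize()` rather than proposing a new design:
```python
# rest_framework/fields.py (sketch of the field-level check)
def initialize(self, parent, field_name):
    self.parent = parent
    self.root = parent.root or parent
    self.context = self.root.context
    if self.root.partial:
        # partial serializers never treat individual fields as required
        self.required = False
```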
| 2012-11-20T19:04:03 |
encode/django-rest-framework | 451 | encode__django-rest-framework-451 | [
"431"
] | 6a5f4f2a90ab19a8586a9d762c9b2618e8db5c30 | diff --git a/rest_framework/serializers.py b/rest_framework/serializers.py
--- a/rest_framework/serializers.py
+++ b/rest_framework/serializers.py
@@ -118,6 +118,17 @@ def get_default_fields(self):
"""
return {}
+ def get_excluded_fieldnames(self):
+ """
+ Returns the fieldnames that should not be validated.
+ """
+ excluded_fields = list(self.opts.exclude)
+ for field in self.fields.keys() + self.get_default_fields().keys():
+ if self.opts.fields:
+ if field not in self.opts.fields + self.opts.exclude:
+ excluded_fields.append(field)
+ return excluded_fields
+
def get_fields(self):
"""
Returns the complete set of fields for the object as a dict.
@@ -217,10 +228,17 @@ def perform_validation(self, attrs):
except ValidationError as err:
self._errors[field_name] = self._errors.get(field_name, []) + list(err.messages)
- try:
- attrs = self.validate(attrs)
- except ValidationError as err:
- self._errors['non_field_errors'] = err.messages
+ # We don't run .validate() because field-validation failed and thus `attrs` may not be complete.
+ # which in turn can cause inconsistent validation errors.
+ if not self._errors:
+ try:
+ attrs = self.validate(attrs)
+ except ValidationError as err:
+ if hasattr(err, 'message_dict'):
+ for field_name, error_messages in err.message_dict.items():
+ self._errors[field_name] = self._errors.get(field_name, []) + list(error_messages)
+ elif hasattr(err, 'messages'):
+ self._errors['non_field_errors'] = err.messages
return attrs
@@ -432,10 +450,6 @@ def get_field(self, model_field):
kwargs['choices'] = model_field.flatchoices
return ChoiceField(**kwargs)
- max_length = getattr(model_field, 'max_length', None)
- if max_length:
- kwargs['max_length'] = max_length
-
field_mapping = {
models.FloatField: FloatField,
models.IntegerField: IntegerField,
@@ -459,6 +473,16 @@ def get_field(self, model_field):
except KeyError:
return ModelField(model_field=model_field, **kwargs)
+ def validate(self, attrs):
+ copied_attrs = copy.deepcopy(attrs)
+ restored_object = self.restore_object(copied_attrs, instance=getattr(self, 'object', None))
+ self.perform_model_validation(restored_object)
+ return attrs
+
+ def perform_model_validation(self, restored_object):
+ # Call Django's full_clean() which in turn calls: Model.clean_fields(), Model.clean(), Model.validat_unique()
+ restored_object.full_clean(exclude=list(self.get_excluded_fieldnames()))
+
def restore_object(self, attrs, instance=None):
"""
Restore the model instance.
| diff --git a/rest_framework/tests/models.py b/rest_framework/tests/models.py
--- a/rest_framework/tests/models.py
+++ b/rest_framework/tests/models.py
@@ -61,7 +61,7 @@ class BasicModel(RESTFrameworkModel):
class SlugBasedModel(RESTFrameworkModel):
text = models.CharField(max_length=100)
- slug = models.SlugField(max_length=32)
+ slug = models.SlugField(max_length=32, blank=True)
class DefaultValueModel(RESTFrameworkModel):
@@ -160,7 +160,7 @@ class Photo(RESTFrameworkModel):
# Model for issue #324
class BlankFieldModel(RESTFrameworkModel):
- title = models.CharField(max_length=100, blank=True)
+ title = models.CharField(max_length=100, blank=True, null=True)
# Model for issue #380
diff --git a/rest_framework/tests/serializer.py b/rest_framework/tests/serializer.py
--- a/rest_framework/tests/serializer.py
+++ b/rest_framework/tests/serializer.py
@@ -1,7 +1,7 @@
import datetime
from django.test import TestCase
from rest_framework import serializers
-from rest_framework.tests.models import (ActionItem, Anchor, BasicModel,
+from rest_framework.tests.models import (Album, ActionItem, Anchor, BasicModel,
BlankFieldModel, BlogPost, Book, CallableDefaultValueModel, DefaultValueModel,
ManyToManyModel, Person, ReadOnlyManyToManyModel)
@@ -48,7 +48,7 @@ class Meta:
class ActionItemSerializer(serializers.ModelSerializer):
-
+
class Meta:
model = ActionItem
@@ -62,6 +62,12 @@ class Meta:
read_only_fields = ('age',)
+class AlbumsSerializer(serializers.ModelSerializer):
+
+ class Meta:
+ model = Album
+
+
class BasicTests(TestCase):
def setUp(self):
self.comment = Comment(
@@ -169,7 +175,7 @@ def setUp(self):
'content': 'x' * 1001,
'created': datetime.datetime(2012, 1, 1)
}
- self.actionitem = ActionItem('Some to do item',
+ self.actionitem = ActionItem(title='Some to do item',
)
def test_create(self):
@@ -276,6 +282,17 @@ def test_default_modelfield_max_length_exceeded(self):
self.assertEquals(serializer.is_valid(), False)
self.assertEquals(serializer.errors, {'info': [u'Ensure this value has at most 12 characters (it has 13).']})
+ def test_validate_unique(self):
+ """
+ Just check if serializers.ModelSerializer.perform_model_validation() handles unique checks via .full_clean()
+ """
+ serializer = AlbumsSerializer(data={'title': 'a'})
+ serializer.is_valid()
+ serializer.save()
+ second_serializer = AlbumsSerializer(data={'title': 'a'})
+ self.assertFalse(second_serializer.is_valid())
+ self.assertEqual(second_serializer.errors, {'title': [u'Album with this Title already exists.']})
+
class RegexValidationTest(TestCase):
def test_create_failed(self):
| ModelSerializers should have ._post_clean()-like behavior
Currently, the coupling between ModelFields and serializer Fields is somewhat brittle.
As @tomchristie suggested, we should actually be mimicking the [post_clean()](https://github.com/django/django/blob/master/django/forms/models.py#L307) behavior from Django's ModelForms.
That way there would be no need to couple every possible ModelField to a serializer Field because all the necessary validation logic is already present on the model.
[This bit](https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/serializers.py#L434) where max_length is added to the kwargs could then also be dropped.
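A condensed sketch of the `validate()` hook this patch adds to `ModelSerializer`, shown only to illustrate the `_post_clean()`-style flow (names follow the diff above):
```python
import copy

# ModelSerializer (sketch of the model-level validation hook)
def validate(self, attrs):
    copied_attrs = copy.deepcopy(attrs)
    restored_object = self.restore_object(copied_attrs,
                                          instance=getattr(self, 'object', None))
    # Django's full_clean() runs clean_fields(), clean() and validate_unique(),
    # so per-field rules such as max_length no longer need explicit mapping.
    restored_object.full_clean(exclude=list(self.get_excluded_fieldnames()))
    return attrs
```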
| 2012-11-26T22:41:50 |
|
encode/django-rest-framework | 462 | encode__django-rest-framework-462 | [
"461"
] | 8d485da483c2a5cc0713a65ef30606966c082327 | diff --git a/rest_framework/serializers.py b/rest_framework/serializers.py
--- a/rest_framework/serializers.py
+++ b/rest_framework/serializers.py
@@ -272,10 +272,18 @@ def field_to_native(self, obj, field_name):
Override default so that we can apply ModelSerializer as a nested
field to relationships.
"""
- obj = getattr(obj, self.source or field_name)
- if is_simple_callable(obj):
- obj = obj()
+ if self.source:
+ value = obj
+ for component in self.source.split('.'):
+ value = getattr(value, component)
+ if is_simple_callable(value):
+ value = value()
+ obj = value
+ else:
+ value = getattr(obj, field_name)
+ if is_simple_callable(value):
+ obj = value()
# If the object has an "all" method, assume it's a relationship
if is_simple_callable(getattr(obj, 'all', None)):
| diff --git a/rest_framework/tests/models.py b/rest_framework/tests/models.py
--- a/rest_framework/tests/models.py
+++ b/rest_framework/tests/models.py
@@ -124,8 +124,21 @@ class ActionItem(RESTFrameworkModel):
# Models for reverse relations
+class Person(RESTFrameworkModel):
+ name = models.CharField(max_length=10)
+ age = models.IntegerField(null=True, blank=True)
+
+ @property
+ def info(self):
+ return {
+ 'name': self.name,
+ 'age': self.age,
+ }
+
+
class BlogPost(RESTFrameworkModel):
title = models.CharField(max_length=100)
+ writer = models.ForeignKey(Person, null=True, blank=True)
def get_first_comment(self):
return self.blogpostcomment_set.all()[0]
@@ -145,18 +158,6 @@ class Photo(RESTFrameworkModel):
album = models.ForeignKey(Album)
-class Person(RESTFrameworkModel):
- name = models.CharField(max_length=10)
- age = models.IntegerField(null=True, blank=True)
-
- @property
- def info(self):
- return {
- 'name': self.name,
- 'age': self.age,
- }
-
-
# Model for issue #324
class BlankFieldModel(RESTFrameworkModel):
title = models.CharField(max_length=100, blank=True)
diff --git a/rest_framework/tests/serializer.py b/rest_framework/tests/serializer.py
--- a/rest_framework/tests/serializer.py
+++ b/rest_framework/tests/serializer.py
@@ -560,6 +560,47 @@ class BlogPostSerializer(serializers.Serializer):
self.assertEqual(serializer.data, expected)
+class RelatedTraversalTest(TestCase):
+ def test_nested_traversal(self):
+ user = Person.objects.create(name="django")
+ post = BlogPost.objects.create(title="Test blog post", writer=user)
+ post.blogpostcomment_set.create(text="I love this blog post")
+
+ from rest_framework.tests.models import BlogPostComment
+
+ class PersonSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = Person
+ fields = ("name", "age")
+
+ class BlogPostCommentSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = BlogPostComment
+ fields = ("text", "post_owner")
+
+ text = serializers.CharField()
+ post_owner = PersonSerializer(source='blog_post.writer')
+
+ class BlogPostSerializer(serializers.Serializer):
+ title = serializers.CharField()
+ comments = BlogPostCommentSerializer(source='blogpostcomment_set')
+
+ serializer = BlogPostSerializer(instance=post)
+
+ expected = {
+ 'title': u'Test blog post',
+ 'comments': [{
+ 'text': u'I love this blog post',
+ 'post_owner': {
+ "name": u"django",
+ "age": None
+ }
+ }]
+ }
+
+ self.assertEqual(serializer.data, expected)
+
+
class SerializerMethodFieldTests(TestCase):
def setUp(self):
| ModelSerializer does not traverse through related object using '.' notation
In follow up to this thread:
https://groups.google.com/forum/?fromgroups=#!topic/django-rest-framework/H5C758KGy0I
Fixing this should allow more flexibility when defining nested serializers.
Going to try to submit a fix + test for this.
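The kind of declaration this should make possible, adapted from the test added in the patch (`BlogPostComment` is assumed to have a `blog_post` foreign key whose target has a `writer` relation):
```python
class BlogPostCommentSerializer(serializers.ModelSerializer):
    text = serializers.CharField()
    # dotted source traverses blog_post, then writer, on the related objects
    post_owner = PersonSerializer(source='blog_post.writer')

    class Meta:
        model = BlogPostComment
        fields = ("text", "post_owner")
```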
| Great, thanks Pavel.
| 2012-11-29T23:42:15 |
encode/django-rest-framework | 470 | encode__django-rest-framework-470 | [
"469"
] | 3e3ede71d2f4826fa1d07523705dd53ab2cba29a | diff --git a/rest_framework/fields.py b/rest_framework/fields.py
--- a/rest_framework/fields.py
+++ b/rest_framework/fields.py
@@ -817,6 +817,7 @@ def __deepcopy__(self, memo):
class RegexField(CharField):
type_name = 'RegexField'
+ form_field_class = forms.RegexField
def __init__(self, regex, max_length=None, min_length=None, *args, **kwargs):
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
diff --git a/rest_framework/renderers.py b/rest_framework/renderers.py
--- a/rest_framework/renderers.py
+++ b/rest_framework/renderers.py
@@ -320,6 +320,9 @@ def serializer_to_form_fields(self, serializer):
if getattr(v, 'choices', None) is not None:
kwargs['choices'] = v.choices
+ if getattr(v, 'regex', None) is not None:
+ kwargs['regex'] = v.regex
+
if getattr(v, 'widget', None):
widget = copy.deepcopy(v.widget)
kwargs['widget'] = widget
| RegexField Browsable API bug
Hi Tom,
## Issue
When trying to view an API view which utilises a serializer with a `RegexField` the browsable api view explodes with a `TypeError`. (I have attached my traceback after this message). I have traced it to the `renderers.py` file line 351 on the 2.1.6 release. The issue is that the code is trying to instantiate an instance of `django.forms.RegexField` without passing it the regex argument.
The `kwargs` passed to the instantiation call are:
```
{'label': 'code', 'required': False, 'widget': <django.forms.widgets.TextInput object at 0x1057bcb90>}
```
The field is mapping correctly to the `django.forms.RegexField` but because regex is not passed as an argument to the class, django whines that it's missing some arguments. To re-create this issue, create a standard serializer with a `RegexField`, create a standard `CreateAPIView`, map the view to a URL and then access it.
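A minimal reproduction along those lines (the serializer, view and regex here are hypothetical, not taken from the report):
```python
from rest_framework import serializers, generics

class CodeSerializer(serializers.Serializer):
    code = serializers.RegexField(regex=r'^[a-z0-9]+$', required=False)

class CodeCreateView(generics.CreateAPIView):
    serializer_class = CodeSerializer

# Opening this view in the browsable API should raise:
#   TypeError: __init__() takes at least 2 arguments (1 given)
```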
## Fix
It looks like we need some extra code in that method. This fixes the issue for me locally at least:
```
if getattr(v, 'regex', None) is not None:
    kwargs['regex'] = v.regex
```
Cheers,
Rob
## Traceback
```
Request Method: GET
Request URL: http://localhost:8000/drf/xxxxx/
Traceback:
File "/Users/robcharlwood/Dev/Envs/ism-env/src/eggs/Django-1.4.2-py2.7.egg/django/core/handlers/base.py" in get_response
136. response = response.render()
File "/Users/robcharlwood/Dev/Envs/ism-env/src/eggs/Django-1.4.2-py2.7.egg/django/template/response.py" in render
104. self._set_content(self.rendered_content)
File "/Users/robcharlwood/Dev/Envs/ism-env/src/eggs/djangorestframework-2.1.6-py2.7.egg/rest_framework/response.py" in rendered_content
42. return renderer.render(self.data, media_type, context)
File "/Users/robcharlwood/Dev/Envs/ism-env/src/eggs/djangorestframework-2.1.6-py2.7.egg/rest_framework/renderers.py" in render
453. post_form = self.get_form(view, 'POST', request)
File "/Users/robcharlwood/Dev/Envs/ism-env/src/eggs/djangorestframework-2.1.6-py2.7.egg/rest_framework/renderers.py" in get_form
379. fields = self.serializer_to_form_fields(serializer)
File "/Users/robcharlwood/Dev/Envs/ism-env/src/eggs/djangorestframework-2.1.6-py2.7.egg/rest_framework/renderers.py" in serializer_to_form_fields
353. fields[k] = field_mapping[v.__class__](**kwargs)
Exception Type: TypeError at /drf/xxxxx/
Exception Value: __init__() takes at least 2 arguments (1 given)
```
 | yep. you are totally right. missed that, because when writing the `RegexField`, there was no entry in the `field_mapping` dict. so that caused the field to raise a KeyError and to use a `CharField`, which works totally fine.
your fix will solve that problem (tested it). but i think it's not necessary to use this mapping:
`serializers.RegexField: forms.RegexField`
because the validation will still be inside the validator. it's just a representational mapping.
`forms.RegexField` brings no additional value - so we will be fine with that:
`serializers.RegexField: forms.CharField`
--> works correctly and won't cause any trouble.
additionally.. your solution might be more future-oriented, in case form validation is added at some point?
| 2012-12-04T08:41:57 |
|
encode/django-rest-framework | 491 | encode__django-rest-framework-491 | [
"490"
] | ff01ae3571298b9da67f9b9583f0cb264676ed2b | diff --git a/rest_framework/serializers.py b/rest_framework/serializers.py
--- a/rest_framework/serializers.py
+++ b/rest_framework/serializers.py
@@ -132,9 +132,9 @@ def get_excluded_fieldnames(self):
Returns the fieldnames that should not be validated.
"""
excluded_fields = list(self.opts.exclude)
- for field in self.fields.keys() + self.get_default_fields().keys():
- if self.opts.fields:
- if field not in self.opts.fields + self.opts.exclude:
+ if self.opts.fields:
+ for field in self.fields.keys() + self.get_default_fields().keys():
+ if field not in list(self.opts.fields) + excluded_fields:
excluded_fields.append(field)
return excluded_fields
| diff --git a/rest_framework/tests/serializer.py b/rest_framework/tests/serializer.py
--- a/rest_framework/tests/serializer.py
+++ b/rest_framework/tests/serializer.py
@@ -66,6 +66,7 @@ class AlbumsSerializer(serializers.ModelSerializer):
class Meta:
model = Album
+ fields = ['title'] # lists are also valid options
class BasicTests(TestCase):
@@ -282,9 +283,11 @@ def test_default_modelfield_max_length_exceeded(self):
self.assertEquals(serializer.is_valid(), False)
self.assertEquals(serializer.errors, {'info': [u'Ensure this value has at most 12 characters (it has 13).']})
+
+class ModelValidationTests(TestCase):
def test_validate_unique(self):
"""
- Just check if serializers.ModelSerializer.perform_model_validation() handles unique checks via .full_clean()
+ Just check if serializers.ModelSerializer handles unique checks via .full_clean()
"""
serializer = AlbumsSerializer(data={'title': 'a'})
serializer.is_valid()
| POSTing to framework throws: can only concatenate list (not "tuple") to list
In the method:
```
def get_excluded_fieldnames(self):
    """
    Returns the fieldnames that should not be validated.
    """
    excluded_fields = list(self.opts.exclude)
    for field in self.fields.keys() + self.get_default_fields().keys():
        if self.opts.fields:
            if field not in self.opts.fields + self.opts.exclude:
                excluded_fields.append(field)
    return excluded_fields
```
If no `exclude` is defined on the ModelSerializer but `fields` is, the code tries to add a tuple to a list and throws an error. Either `self.opts.exclude` should be changed to the `excluded_fields` list created earlier, or the defaults for `self.opts.fields` and `self.opts.exclude` should be changed to lists instead of tuples.
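For reference, the underlying Python behaviour that produces the error in the title:
```python
>>> ['slug'] + ('title', 'text')
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: can only concatenate list (not "tuple") to list
>>> ['slug'] + list(('title', 'text'))   # casting first avoids the error
['slug', 'title', 'text']
```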
| Hi @supercodepoet,
are you perhaps declaring fields as a list on your serializer, like so?:
```
class Meta:
fields = ['my_fields']
```
Yes, I am declaring fields in my Meta
As a list or as a tuple?
As a list
You should be using a tuple.
ok, is this noted in the docs? I must have missed it
In all the examples, the fields options are declared as tuples indeed.
@markotibold You should be able to declare them using either. I simply use tuples because I prefer it by convention.
The + bit of code should cast everything to lists before doing the append.
Yeah, I would say usually in any place I see tuple or list I could use either. The self.opts.exclude list is converted to a list in the first line of the code so that + should use self.opts.exclude
Turns out Django mostly allows lists as well, e.g. with ModelForms. My bad, sorry @supercodepoet.
Pull req coming up.
No problem, just glad I could help. haha, guess I should have submitted a Pull Request myself, sorry
| 2012-12-10T22:11:44 |
encode/django-rest-framework | 510 | encode__django-rest-framework-510 | [
"509",
"509"
] | 1d24d1fc5928d32372e700907aa71cf887b16ba9 | diff --git a/rest_framework/fields.py b/rest_framework/fields.py
--- a/rest_framework/fields.py
+++ b/rest_framework/fields.py
@@ -794,7 +794,7 @@ def valid_value(self, value):
if value == smart_unicode(k2):
return True
else:
- if value == smart_unicode(k):
+ if value == smart_unicode(k) or value == k:
return True
return False
| diff --git a/rest_framework/tests/models.py b/rest_framework/tests/models.py
--- a/rest_framework/tests/models.py
+++ b/rest_framework/tests/models.py
@@ -51,6 +51,10 @@ class Meta:
abstract = True
+class HasPositiveIntegerAsChoice(RESTFrameworkModel):
+ some_choices = ((1,'A'),(2,'B'),(3,'C'))
+ some_integer = models.PositiveIntegerField(choices=some_choices)
+
class Anchor(RESTFrameworkModel):
text = models.CharField(max_length=100, default='anchor')
diff --git a/rest_framework/tests/serializer.py b/rest_framework/tests/serializer.py
--- a/rest_framework/tests/serializer.py
+++ b/rest_framework/tests/serializer.py
@@ -2,7 +2,7 @@
import pickle
from django.test import TestCase
from rest_framework import serializers
-from rest_framework.tests.models import (Album, ActionItem, Anchor, BasicModel,
+from rest_framework.tests.models import (HasPositiveIntegerAsChoice, Album, ActionItem, Anchor, BasicModel,
BlankFieldModel, BlogPost, Book, CallableDefaultValueModel, DefaultValueModel,
ManyToManyModel, Person, ReadOnlyManyToManyModel, Photo)
@@ -69,6 +69,11 @@ class Meta:
model = Album
fields = ['title'] # lists are also valid options
+class PositiveIntegerAsChoiceSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = HasPositiveIntegerAsChoice
+ fields = ['some_integer']
+
class BasicTests(TestCase):
def setUp(self):
@@ -285,6 +290,12 @@ def test_default_modelfield_max_length_exceeded(self):
self.assertEquals(serializer.errors, {'info': [u'Ensure this value has at most 12 characters (it has 13).']})
+class PositiveIntegerAsChoiceTests(TestCase):
+ def test_positive_integer_in_json_is_correctly_parsed(self):
+ data = {'some_integer':1}
+ serializer = PositiveIntegerAsChoiceSerializer(data=data)
+ self.assertEquals(serializer.is_valid(), True)
+
class ModelValidationTests(TestCase):
def test_validate_unique(self):
"""
| Bug : JSON integer won't match integer in a ChoiceField
I have a Model with :
```
PENDING = 1
COMPLETE = 2
CANCELLED = 3
STATUS = (
    (PENDING, 'Pending'),
    (COMPLETE, 'Complete'),
    (CANCELLED, 'Cancelled'),
)
(...)
status = models.PositiveIntegerField(default=COMPLETE, choices=STATUS)
```
And when I perform a PUT (update) on that model (using the default ModelSerializer) with the following JSON :
```
{"id":8,"status":3,"t_type":1,"description":"Transaction example"}
```
I get the following error message :
```
"status" : "Select a valid choice. 3 is not one of the available choices."
```
Which it clearly is.
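The fix in this patch compares the submitted value against the raw choice key as well as its string form; roughly, `ChoiceField.valid_value()` ends up looking like this (a sketch based on the diff above, with `smart_unicode` coming from `django.utils.encoding`):
```python
def valid_value(self, value):
    for k, v in self.choices:
        if isinstance(v, (list, tuple)):
            # grouped choices: (label, ((key, value), ...))
            for k2, v2 in v:
                if value == smart_unicode(k2):
                    return True
        else:
            # match either the string form or the raw value,
            # so the JSON integer 3 matches the integer choice key 3
            if value == smart_unicode(k) or value == k:
                return True
    return False
```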
| 2012-12-15T15:56:09 |
|
encode/django-rest-framework | 515 | encode__django-rest-framework-515 | [
"514"
] | 70714c234630cd205ed88686ece3b594f387a48f | diff --git a/rest_framework/serializers.py b/rest_framework/serializers.py
--- a/rest_framework/serializers.py
+++ b/rest_framework/serializers.py
@@ -456,7 +456,7 @@ def get_field(self, model_field):
kwargs['blank'] = model_field.blank
- if model_field.null:
+ if model_field.null or model_field.blank:
kwargs['required'] = False
if model_field.has_default():
| diff --git a/rest_framework/tests/models.py b/rest_framework/tests/models.py
--- a/rest_framework/tests/models.py
+++ b/rest_framework/tests/models.py
@@ -160,7 +160,7 @@ class Photo(RESTFrameworkModel):
# Model for issue #324
class BlankFieldModel(RESTFrameworkModel):
- title = models.CharField(max_length=100, blank=True, null=True)
+ title = models.CharField(max_length=100, blank=True, null=False)
# Model for issue #380
diff --git a/rest_framework/tests/serializer.py b/rest_framework/tests/serializer.py
--- a/rest_framework/tests/serializer.py
+++ b/rest_framework/tests/serializer.py
@@ -704,6 +704,10 @@ def test_create_model_not_blank_field(self):
serializer = self.not_blank_model_serializer_class(data=self.data)
self.assertEquals(serializer.is_valid(), False)
+ def test_create_model_null_field(self):
+ serializer = self.model_serializer_class(data={})
+ self.assertEquals(serializer.is_valid(), True)
+
#test for issue #460
class SerializerPickleTests(TestCase):
| ModelSerializer validation and blank=True
When None values are passed to the ModelSerializer validation, for a field that has `blank=True` set, but not `null=True`, the serializer will return "This field is required."
For example, if the field is:
```
title = models.CharField(max_length=100, blank=True, null=False)
```
If no value is passed to the API view - validation will fail.
Django suggests setting non empty string fields as `blank=True, null=False`:
> Note that empty string values will always get stored as empty strings, not as NULL. Only use null=True for non-string fields such as integers, booleans and dates...
https://docs.djangoproject.com/en/dev/ref/models/fields/#django.db.models.Field.null
This issue is similar to #336
Will submit a pull-request with a failing test case soon.
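A sketch of the one-line change this implies in `ModelSerializer.get_field()`, mirroring the diff above:
```python
# rest_framework/serializers.py (sketch)
if model_field.blank:
    kwargs['blank'] = model_field.blank

if model_field.null or model_field.blank:
    # a field the model allows to be blank shouldn't be required by the serializer
    kwargs['required'] = False
```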
| 2012-12-17T14:52:43 |
|
encode/django-rest-framework | 541 | encode__django-rest-framework-541 | [
"532"
] | 161432d78c29dd607fb9b16957aaa00010027fcd | diff --git a/rest_framework/fields.py b/rest_framework/fields.py
--- a/rest_framework/fields.py
+++ b/rest_framework/fields.py
@@ -189,8 +189,10 @@ def field_from_native(self, data, files, field_name, into):
else:
native = data[field_name]
except KeyError:
- if self.default is not None:
+ if self.default is not None and not self.root.partial:
native = self.default
+ # partial serializers shouldn't set the default field to avoid
+ # overriding the previously set value
else:
if self.required:
raise ValidationError(self.error_messages['required'])
| diff --git a/rest_framework/tests/models.py b/rest_framework/tests/models.py
--- a/rest_framework/tests/models.py
+++ b/rest_framework/tests/models.py
@@ -71,6 +71,7 @@ class SlugBasedModel(RESTFrameworkModel):
class DefaultValueModel(RESTFrameworkModel):
text = models.CharField(default='foobar', max_length=100)
+ extra = models.CharField(blank=True, null=True, max_length=100)
class CallableDefaultValueModel(RESTFrameworkModel):
diff --git a/rest_framework/tests/serializer.py b/rest_framework/tests/serializer.py
--- a/rest_framework/tests/serializer.py
+++ b/rest_framework/tests/serializer.py
@@ -340,7 +340,6 @@ class Meta:
self.assertTrue(photo_serializer.save())
-
class RegexValidationTest(TestCase):
def test_create_failed(self):
serializer = BookSerializer(data={'isbn': '1234567890'})
@@ -551,6 +550,21 @@ def test_create_overriding_default(self):
self.assertEquals(instance.pk, 1)
self.assertEquals(instance.text, 'overridden')
+ def test_partial_update_default(self):
+ """ Regression test for issue #532 """
+ data = {'text': 'overridden'}
+ serializer = self.serializer_class(data=data, partial=True)
+ self.assertEquals(serializer.is_valid(), True)
+ instance = serializer.save()
+
+ data = {'extra': 'extra_value'}
+ serializer = self.serializer_class(instance=instance, data=data, partial=True)
+ self.assertEquals(serializer.is_valid(), True)
+ instance = serializer.save()
+
+ self.assertEquals(instance.extra, 'extra_value')
+ self.assertEquals(instance.text, 'overridden')
+
class CallableDefaultValueTests(TestCase):
def setUp(self):
| Partial serializer and "default" values
If a model has a field with a default value set, and it's updated by a partial serializer with the field excluded from the data - the previously saved value will be overridden by the default.
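The expected behaviour, based on the regression test in this patch (`DefaultValueSerializer` is a hypothetical `ModelSerializer` over a model with `text = models.CharField(default='foobar')` and a nullable `extra` field):
```python
# first partial update sets text
serializer = DefaultValueSerializer(data={'text': 'overridden'}, partial=True)
assert serializer.is_valid()
instance = serializer.save()

# second partial update touches only 'extra'
serializer = DefaultValueSerializer(instance=instance,
                                    data={'extra': 'extra_value'}, partial=True)
assert serializer.is_valid()
instance = serializer.save()

assert instance.extra == 'extra_value'
assert instance.text == 'overridden'   # must not fall back to the default 'foobar'
```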
| 2013-01-02T13:58:53 |
|
encode/django-rest-framework | 545 | encode__django-rest-framework-545 | [
"542"
] | 6da9cd5429b9f480a65d27b93c2938b927f72a5b | diff --git a/rest_framework/fields.py b/rest_framework/fields.py
--- a/rest_framework/fields.py
+++ b/rest_framework/fields.py
@@ -181,6 +181,7 @@ def field_from_native(self, data, files, field_name, into):
try:
if self._use_files:
+ files = files or {}
native = files[field_name]
else:
native = data[field_name]
| diff --git a/rest_framework/tests/files.py b/rest_framework/tests/files.py
--- a/rest_framework/tests/files.py
+++ b/rest_framework/tests/files.py
@@ -25,7 +25,6 @@ def restore_object(self, attrs, instance=None):
class FileSerializerTests(TestCase):
-
def test_create(self):
now = datetime.datetime.now()
file = StringIO.StringIO('stuff')
@@ -37,3 +36,16 @@ def test_create(self):
self.assertEquals(serializer.object.created, uploaded_file.created)
self.assertEquals(serializer.object.file, uploaded_file.file)
self.assertFalse(serializer.object is uploaded_file)
+
+ def test_creation_failure(self):
+ """
+ Passing files=None should result in an ValidationError
+
+ Regression test for:
+ https://github.com/tomchristie/django-rest-framework/issues/542
+ """
+ now = datetime.datetime.now()
+
+ serializer = UploadedFileSerializer(data={'created': now})
+ self.assertFalse(serializer.is_valid())
+ self.assertIn('file', serializer.errors)
| Error in FileField validation with files=None
In FileField validation, if serializer.files is None, a TypeError will be raised.
This happens when parsing anything except `multipart/form-data` (which can be used if the file is not required).
Traceback:
```
File "***/rest_framework/views.py", line 363, in dispatch
response = self.handle_exception(exc)
File "***/rest_framework/views.py", line 360, in dispatch
response = handler(request, *args, **kwargs)
File "***/rest_framework/generics.py", line 172, in put
return self.update(request, *args, **kwargs)
File "***/rest_framework/mixins.py", line 94, in update
if serializer.is_valid():
File "***/rest_framework/serializers.py", line 328, in is_valid
return not self.errors
File "***/rest_framework/serializers.py", line 322, in errors
obj = self.from_native(self.init_data, self.init_files)
File "***/rest_framework/serializers.py", line 283, in from_native
attrs = self.restore_fields(data, files)
File "***/rest_framework/serializers.py", line 214, in restore_fields
field.field_from_native(data, files, field_name, reverted_data)
File "***/rest_framework/fields.py", line 184, in field_from_native
native = files[field_name]
TypeError: 'NoneType' object has no attribute '__getitem__'
```
For example, if the content-type is `application/x-www-form-urlencoded`, FormParser() will set the `files` to None:
https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/parsers.py#L98
Then the field validator tries to find the field in `files`:
https://github.com/tomchristie/django-rest-framework/blob/2.1.14/rest_framework/fields.py#L183
 | Not sure what would be a better way to fix it - to typecheck `files` before calling `files[field_name]`, or to make all parsers return an empty MultiValueDict for data regardless of its presence...
Not passing a files argument to a serializer is valid, so I'd say do `files = files or {}` prior to https://github.com/tomchristie/django-rest-framework/blob/2.1.14/rest_framework/fields.py#L183
I've given you the commit bit, so once this has a test, fix and a note in release notes, you're free to perform the merge yourself. (Tho please do so via a pull req and wait for travis tests as usual)
Cheers! :)
Wow, thanks... :)
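The agreed fix, sketched in the context of `Field.field_from_native()` to match the diff above:
```python
# Field.field_from_native(), sketch of the fix
if self._use_files:
    files = files or {}           # parsers may hand us files=None
    native = files[field_name]    # a missing file now falls into the existing
                                  # KeyError handling (required / default)
else:
    native = data[field_name]
```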
| 2013-01-02T20:11:45 |
encode/django-rest-framework | 663 | encode__django-rest-framework-663 | [
"570"
] | 618606888ab34418998d1abfe4668804038ff22f | diff --git a/rest_framework/renderers.py b/rest_framework/renderers.py
--- a/rest_framework/renderers.py
+++ b/rest_framework/renderers.py
@@ -345,12 +345,11 @@ def get_form(self, view, method, request):
if not self.show_form_for_method(view, method, request, obj):
return
- if method == 'DELETE' or method == 'OPTIONS':
+ if method in ('DELETE', 'OPTIONS'):
return True # Don't actually need to return a form
if not getattr(view, 'get_serializer', None) or not parsers.FormParser in view.parser_classes:
- media_types = [parser.media_type for parser in view.parser_classes]
- return self.get_generic_content_form(media_types)
+ return
serializer = view.get_serializer(instance=obj)
fields = self.serializer_to_form_fields(serializer)
@@ -422,14 +421,17 @@ def render(self, data, accepted_media_type=None, renderer_context=None):
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
+ media_types = [parser.media_type for parser in view.parser_classes]
renderer = self.get_default_renderer(view)
content = self.get_content(renderer, data, accepted_media_type, renderer_context)
put_form = self.get_form(view, 'PUT', request)
post_form = self.get_form(view, 'POST', request)
+ patch_form = self.get_form(view, 'PATCH', request)
delete_form = self.get_form(view, 'DELETE', request)
options_form = self.get_form(view, 'OPTIONS', request)
+ generic_content_form = self.get_generic_content_form(media_types)
name = self.get_name(view)
description = self.get_description(view)
@@ -449,8 +451,10 @@ def render(self, data, accepted_media_type=None, renderer_context=None):
'available_formats': [renderer.format for renderer in view.renderer_classes],
'put_form': put_form,
'post_form': post_form,
+ 'patch_form': patch_form,
'delete_form': delete_form,
'options_form': options_form,
+ 'generic_content_form': generic_content_form,
'api_settings': api_settings
})
| diff --git a/rest_framework/tests/renderers.py b/rest_framework/tests/renderers.py
--- a/rest_framework/tests/renderers.py
+++ b/rest_framework/tests/renderers.py
@@ -112,6 +112,9 @@ def post(self, request):
def put(self, request):
return Response()
+ def patch(self, request):
+ return Response()
+
class DocumentingRendererTests(TestCase):
def test_only_permitted_forms_are_displayed(self):
@@ -120,6 +123,7 @@ def test_only_permitted_forms_are_displayed(self):
response = view(request).render()
self.assertNotContains(response, '>POST<')
self.assertContains(response, '>PUT<')
+ self.assertContains(response, '>PATCH<')
class RendererEndToEndTests(TestCase):
diff --git a/rest_framework/tests/utils.py b/rest_framework/tests/utils.py
--- a/rest_framework/tests/utils.py
+++ b/rest_framework/tests/utils.py
@@ -1,10 +1,10 @@
from __future__ import unicode_literals
-from django.test.client import RequestFactory, FakePayload
+from django.test.client import FakePayload, Client as _Client, RequestFactory as _RequestFactory
from django.test.client import MULTIPART_CONTENT
from rest_framework.compat import urlparse
-class RequestFactory(RequestFactory):
+class RequestFactory(_RequestFactory):
def __init__(self, **defaults):
super(RequestFactory, self).__init__(**defaults)
@@ -26,3 +26,15 @@ def patch(self, path, data={}, content_type=MULTIPART_CONTENT,
}
r.update(extra)
return self.request(**r)
+
+
+class Client(_Client, RequestFactory):
+ def patch(self, path, data={}, content_type=MULTIPART_CONTENT,
+ follow=False, **extra):
+ """
+ Send a resource to the server using PATCH.
+ """
+ response = super(Client, self).patch(path, data=data, content_type=content_type, **extra)
+ if follow:
+ response = self._handle_redirects(response, **extra)
+ return response
| Add patch interface to the Browsable API
Currently it seems only POST/PUT/GET are supported.
Expected behaviour: the same form as for POST/PUT should appear, perhaps not pre-filled.
 | Closing as superseded by #591.
| 2013-02-15T10:42:00 |
encode/django-rest-framework | 670 | encode__django-rest-framework-670 | [
"570"
] | 47a4f0863d08e4b839ea3bbd7308ecc0f995b7d9 | diff --git a/rest_framework/renderers.py b/rest_framework/renderers.py
--- a/rest_framework/renderers.py
+++ b/rest_framework/renderers.py
@@ -345,12 +345,11 @@ def get_form(self, view, method, request):
if not self.show_form_for_method(view, method, request, obj):
return
- if method == 'DELETE' or method == 'OPTIONS':
+ if method in ('DELETE', 'OPTIONS'):
return True # Don't actually need to return a form
if not getattr(view, 'get_serializer', None) or not parsers.FormParser in view.parser_classes:
- media_types = [parser.media_type for parser in view.parser_classes]
- return self.get_generic_content_form(media_types)
+ return
serializer = view.get_serializer(instance=obj)
fields = self.serializer_to_form_fields(serializer)
@@ -362,7 +361,7 @@ def get_form(self, view, method, request):
form_instance = OnTheFlyForm(data)
return form_instance
- def get_generic_content_form(self, media_types):
+ def get_raw_data_form(self, view, method, request, media_types):
"""
Returns a form that allows for arbitrary content types to be tunneled
via standard HTML forms.
@@ -375,6 +374,11 @@ def get_generic_content_form(self, media_types):
and api_settings.FORM_CONTENTTYPE_OVERRIDE):
return None
+ # Check permissions
+ obj = getattr(view, 'object', None)
+ if not self.show_form_for_method(view, method, request, obj):
+ return
+
content_type_field = api_settings.FORM_CONTENTTYPE_OVERRIDE
content_field = api_settings.FORM_CONTENT_OVERRIDE
choices = [(media_type, media_type) for media_type in media_types]
@@ -386,7 +390,7 @@ def __init__(self):
super(GenericContentForm, self).__init__()
self.fields[content_type_field] = forms.ChoiceField(
- label='Content Type',
+ label='Media type',
choices=choices,
initial=initial
)
@@ -422,15 +426,22 @@ def render(self, data, accepted_media_type=None, renderer_context=None):
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
+ media_types = [parser.media_type for parser in view.parser_classes]
renderer = self.get_default_renderer(view)
content = self.get_content(renderer, data, accepted_media_type, renderer_context)
put_form = self.get_form(view, 'PUT', request)
post_form = self.get_form(view, 'POST', request)
+ patch_form = self.get_form(view, 'PATCH', request)
delete_form = self.get_form(view, 'DELETE', request)
options_form = self.get_form(view, 'OPTIONS', request)
+ raw_data_put_form = self.get_raw_data_form(view, 'PUT', request, media_types)
+ raw_data_post_form = self.get_raw_data_form(view, 'POST', request, media_types)
+ raw_data_patch_form = self.get_raw_data_form(view, 'PATCH', request, media_types)
+ raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form
+
name = self.get_name(view)
description = self.get_description(view)
breadcrumb_list = get_breadcrumbs(request.path)
@@ -447,10 +458,18 @@ def render(self, data, accepted_media_type=None, renderer_context=None):
'breadcrumblist': breadcrumb_list,
'allowed_methods': view.allowed_methods,
'available_formats': [renderer.format for renderer in view.renderer_classes],
+
'put_form': put_form,
'post_form': post_form,
+ 'patch_form': patch_form,
'delete_form': delete_form,
'options_form': options_form,
+
+ 'raw_data_put_form': raw_data_put_form,
+ 'raw_data_post_form': raw_data_post_form,
+ 'raw_data_patch_form': raw_data_patch_form,
+ 'raw_data_put_or_patch_form': raw_data_put_or_patch_form,
+
'api_settings': api_settings
})
| diff --git a/rest_framework/tests/renderers.py b/rest_framework/tests/renderers.py
--- a/rest_framework/tests/renderers.py
+++ b/rest_framework/tests/renderers.py
@@ -112,6 +112,9 @@ def post(self, request):
def put(self, request):
return Response()
+ def patch(self, request):
+ return Response()
+
class DocumentingRendererTests(TestCase):
def test_only_permitted_forms_are_displayed(self):
@@ -120,6 +123,7 @@ def test_only_permitted_forms_are_displayed(self):
response = view(request).render()
self.assertNotContains(response, '>POST<')
self.assertContains(response, '>PUT<')
+ self.assertContains(response, '>PATCH<')
class RendererEndToEndTests(TestCase):
diff --git a/rest_framework/tests/utils.py b/rest_framework/tests/utils.py
--- a/rest_framework/tests/utils.py
+++ b/rest_framework/tests/utils.py
@@ -1,10 +1,10 @@
from __future__ import unicode_literals
-from django.test.client import RequestFactory, FakePayload
+from django.test.client import FakePayload, Client as _Client, RequestFactory as _RequestFactory
from django.test.client import MULTIPART_CONTENT
from rest_framework.compat import urlparse
-class RequestFactory(RequestFactory):
+class RequestFactory(_RequestFactory):
def __init__(self, **defaults):
super(RequestFactory, self).__init__(**defaults)
@@ -26,3 +26,15 @@ def patch(self, path, data={}, content_type=MULTIPART_CONTENT,
}
r.update(extra)
return self.request(**r)
+
+
+class Client(_Client, RequestFactory):
+ def patch(self, path, data={}, content_type=MULTIPART_CONTENT,
+ follow=False, **extra):
+ """
+ Send a resource to the server using PATCH.
+ """
+ response = super(Client, self).patch(path, data=data, content_type=content_type, **extra)
+ if follow:
+ response = self._handle_redirects(response, **extra)
+ return response
| Add patch interface to the Browsable API
Currently it seems only POST/PUT/GET are supported.
Expected behaviour: the same form as for POST/PUT should appear, perhaps not pre-filled.
 | Closing as superseded by #591.
| 2013-02-22T08:41:51 |
encode/django-rest-framework | 708 | encode__django-rest-framework-708 | [
"707"
] | 751064a6fda8adeab409d63d06dc4a39be1c159f | diff --git a/rest_framework/fields.py b/rest_framework/fields.py
--- a/rest_framework/fields.py
+++ b/rest_framework/fields.py
@@ -534,6 +534,8 @@ def from_native(self, value):
raise ValidationError(msg)
def to_native(self, value):
+ if value is None:
+ return None
if isinstance(value, datetime.datetime):
value = value.date()
if self.format.lower() == ISO_8601:
@@ -599,6 +601,8 @@ def from_native(self, value):
raise ValidationError(msg)
def to_native(self, value):
+ if value is None:
+ return None
if self.format.lower() == ISO_8601:
return value.isoformat()
return value.strftime(self.format)
| diff --git a/rest_framework/tests/fields.py b/rest_framework/tests/fields.py
--- a/rest_framework/tests/fields.py
+++ b/rest_framework/tests/fields.py
@@ -171,6 +171,13 @@ def test_to_native_custom_format(self):
self.assertEqual('1984 - 07.31', result_1)
+ def test_to_native_none(self):
+ """
+ Make sure from_native() returns None on None param.
+ """
+ f = serializers.DateField(required=False)
+ self.assertEqual(None, f.to_native(None))
+
class DateTimeFieldTest(TestCase):
"""
@@ -303,6 +310,13 @@ def test_to_native_custom_format(self):
self.assertEqual('1984 - 04:31', result_3)
self.assertEqual('1984 - 04:31', result_4)
+ def test_to_native_none(self):
+ """
+ Make sure from_native() returns None on None param.
+ """
+ f = serializers.DateTimeField(required=False)
+ self.assertEqual(None, f.to_native(None))
+
class TimeFieldTest(TestCase):
"""
| 2.2.2 Breaks Date Fields
Couldn't this have been moved to 2.3? Seems like a major enough change to not expect breakage from v2.2.1 to v2.2.2 (thank you continuous integration tests!).
```
return JSONRenderer().render(self.get_serializer_class()(obj).data)
vim +389 .env/local/lib/python2.7/site-packages/rest_framework/serializers.py # data
self._data = self.to_native(obj)
vim +279 .env/local/lib/python2.7/site-packages/rest_framework/serializers.py # to_native
value = field.field_to_native(obj, field_name)
vim +151 .env/local/lib/python2.7/site-packages/rest_framework/fields.py # field_to_native
return self.to_native(value)
vim +540 .env/local/lib/python2.7/site-packages/rest_framework/fields.py # to_native
return value.isoformat()
AttributeError: 'NoneType' object has no attribute 'isoformat'
```
| I'm still looking to identify the exact cause of this error and create a reproducible test case, but wanted to bring attention since 2.2.2 just hit PyPI and broke things for me.
It seems to be that my model has an optional DateField(null=True, blank=True) and the field serializer wasn't expecting that option.
In 'to_native(value)' we should start with:
```
if value is None:
    return None
```
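The expected behaviour once that guard is in place, matching the regression tests added in this patch:
```python
from rest_framework import serializers

f = serializers.DateField(required=False)
assert f.to_native(None) is None        # previously: AttributeError on .isoformat()

f = serializers.DateTimeField(required=False)
assert f.to_native(None) is None
```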
| 2013-03-06T23:18:10 |
encode/django-rest-framework | 722 | encode__django-rest-framework-722 | [
"720"
] | 239758e034a9418aac3be1bab992679babc461e6 | diff --git a/rest_framework/relations.py b/rest_framework/relations.py
--- a/rest_framework/relations.py
+++ b/rest_framework/relations.py
@@ -235,7 +235,6 @@ def field_to_native(self, obj, field_name):
pk = getattr(obj, self.source or field_name).pk
except ObjectDoesNotExist:
return None
- return self.to_native(obj.pk)
# Forward relationship
return self.to_native(pk)
| diff --git a/rest_framework/tests/relations_pk.py b/rest_framework/tests/relations_pk.py
--- a/rest_framework/tests/relations_pk.py
+++ b/rest_framework/tests/relations_pk.py
@@ -407,14 +407,14 @@ def setUp(self):
target.save()
new_target = OneToOneTarget(name='target-2')
new_target.save()
- source = NullableOneToOneSource(name='source-1', target=target)
+ source = NullableOneToOneSource(name='source-1', target=new_target)
source.save()
def test_reverse_foreign_key_retrieve_with_null(self):
queryset = OneToOneTarget.objects.all()
serializer = NullableOneToOneTargetSerializer(queryset, many=True)
expected = [
- {'id': 1, 'name': 'target-1', 'nullable_source': 1},
- {'id': 2, 'name': 'target-2', 'nullable_source': None},
+ {'id': 1, 'name': 'target-1', 'nullable_source': None},
+ {'id': 2, 'name': 'target-2', 'nullable_source': 1},
]
self.assertEqual(serializer.data, expected)
| PrimaryKeyRelatedField with OneToOneField serializes wrong object's id
```
class A(Model):
    pass

class B(Model):
    a = OneToOneField('A')

class ASerializer(Serializer):
    b_id = PrimaryKeyRelatedField(source='b', null=True)
```
Now when an `A` is serialized, it will not have `B`'s `id`, but its own. I believe this is due to [this erroneous line in PrimaryKeyRelatedField](https://github.com/tomchristie/django-rest-framework/blob/018298deb89628b39e1caeceecb414c1e27310da/rest_framework/relations.py#L238). Once I remove that line, the correct value for `b_id` will be serialized.
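A sketch of the relevant part of `PrimaryKeyRelatedField.field_to_native()` once that line is removed, following the diff above:
```python
try:
    # take the pk of the related object, not of obj itself
    pk = getattr(obj, self.source or field_name).pk
except ObjectDoesNotExist:
    return None
# the stray `return self.to_native(obj.pk)` is gone, so the related pk is serialized
return self.to_native(pk)
```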
| Yup this looks incorrect to me too.
I'll try submitting a PR for this
| 2013-03-10T20:04:32 |
encode/django-rest-framework | 767 | encode__django-rest-framework-767 | [
"759"
] | 9b56616750bc769a3a5172f8f7603153c9335685 | diff --git a/rest_framework/authentication.py b/rest_framework/authentication.py
--- a/rest_framework/authentication.py
+++ b/rest_framework/authentication.py
@@ -2,14 +2,16 @@
Provides a set of pluggable authentication policies.
"""
from __future__ import unicode_literals
+import base64
+from datetime import datetime
+
from django.contrib.auth import authenticate
from django.core.exceptions import ImproperlyConfigured
from rest_framework import exceptions, HTTP_HEADER_ENCODING
from rest_framework.compat import CsrfViewMiddleware
from rest_framework.compat import oauth, oauth_provider, oauth_provider_store
-from rest_framework.compat import oauth2_provider, oauth2_provider_forms, oauth2_provider_backends
+from rest_framework.compat import oauth2_provider, oauth2_provider_forms
from rest_framework.authtoken.models import Token
-import base64
def get_authorization_header(request):
@@ -315,21 +317,15 @@ def authenticate_credentials(self, request, access_token):
Authenticate the request, given the access token.
"""
- # Authenticate the client
- oauth2_client_form = oauth2_provider_forms.ClientAuthForm(request.REQUEST)
- if not oauth2_client_form.is_valid():
- raise exceptions.AuthenticationFailed('Client could not be validated')
- client = oauth2_client_form.cleaned_data.get('client')
-
- # Retrieve the `OAuth2AccessToken` instance from the access_token
- auth_backend = oauth2_provider_backends.AccessTokenBackend()
- token = auth_backend.authenticate(access_token, client)
- if token is None:
+ try:
+ token = oauth2_provider.models.AccessToken.objects.select_related('user')
+ # TODO: Change to timezone aware datetime when oauth2_provider add
+ # support to it.
+ token = token.get(token=access_token, expires__gt=datetime.now())
+ except oauth2_provider.models.AccessToken.DoesNotExist:
raise exceptions.AuthenticationFailed('Invalid token')
- user = token.user
-
- if not user.is_active:
+ if not token.user.is_active:
msg = 'User inactive or deleted: %s' % user.username
raise exceptions.AuthenticationFailed(msg)
diff --git a/rest_framework/compat.py b/rest_framework/compat.py
--- a/rest_framework/compat.py
+++ b/rest_framework/compat.py
@@ -445,14 +445,12 @@ def apply_markdown(text):
# OAuth 2 support is optional
try:
import provider.oauth2 as oauth2_provider
- from provider.oauth2 import backends as oauth2_provider_backends
from provider.oauth2 import models as oauth2_provider_models
from provider.oauth2 import forms as oauth2_provider_forms
from provider import scope as oauth2_provider_scope
from provider import constants as oauth2_constants
except ImportError:
oauth2_provider = None
- oauth2_provider_backends = None
oauth2_provider_models = None
oauth2_provider_forms = None
oauth2_provider_scope = None
| diff --git a/rest_framework/tests/authentication.py b/rest_framework/tests/authentication.py
--- a/rest_framework/tests/authentication.py
+++ b/rest_framework/tests/authentication.py
@@ -466,17 +466,13 @@ def setUp(self):
def _create_authorization_header(self, token=None):
return "Bearer {0}".format(token or self.access_token.token)
- def _client_credentials_params(self):
- return {'client_id': self.CLIENT_ID, 'client_secret': self.CLIENT_SECRET}
-
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
def test_get_form_with_wrong_authorization_header_token_type_failing(self):
"""Ensure that a wrong token type lead to the correct HTTP error status code"""
auth = "Wrong token-type-obsviously"
response = self.csrf_client.get('/oauth2-test/', {}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
- params = self._client_credentials_params()
- response = self.csrf_client.get('/oauth2-test/', params, HTTP_AUTHORIZATION=auth)
+ response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
@@ -485,8 +481,7 @@ def test_get_form_with_wrong_authorization_header_token_format_failing(self):
auth = "Bearer wrong token format"
response = self.csrf_client.get('/oauth2-test/', {}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
- params = self._client_credentials_params()
- response = self.csrf_client.get('/oauth2-test/', params, HTTP_AUTHORIZATION=auth)
+ response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
@@ -495,33 +490,21 @@ def test_get_form_with_wrong_authorization_header_token_failing(self):
auth = "Bearer wrong-token"
response = self.csrf_client.get('/oauth2-test/', {}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
- params = self._client_credentials_params()
- response = self.csrf_client.get('/oauth2-test/', params, HTTP_AUTHORIZATION=auth)
- self.assertEqual(response.status_code, 401)
-
- @unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
- def test_get_form_with_wrong_client_data_failing_auth(self):
- """Ensure GETing form over OAuth with incorrect client credentials fails"""
- auth = self._create_authorization_header()
- params = self._client_credentials_params()
- params['client_id'] += 'a'
- response = self.csrf_client.get('/oauth2-test/', params, HTTP_AUTHORIZATION=auth)
+ response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
def test_get_form_passing_auth(self):
"""Ensure GETing form over OAuth with correct client credentials succeed"""
auth = self._create_authorization_header()
- params = self._client_credentials_params()
- response = self.csrf_client.get('/oauth2-test/', params, HTTP_AUTHORIZATION=auth)
+ response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
def test_post_form_passing_auth(self):
"""Ensure POSTing form over OAuth with correct credentials passes and does not require CSRF"""
auth = self._create_authorization_header()
- params = self._client_credentials_params()
- response = self.csrf_client.post('/oauth2-test/', params, HTTP_AUTHORIZATION=auth)
+ response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
@@ -529,16 +512,14 @@ def test_post_form_token_removed_failing_auth(self):
"""Ensure POSTing when there is no OAuth access token in db fails"""
self.access_token.delete()
auth = self._create_authorization_header()
- params = self._client_credentials_params()
- response = self.csrf_client.post('/oauth2-test/', params, HTTP_AUTHORIZATION=auth)
+ response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
def test_post_form_with_refresh_token_failing_auth(self):
"""Ensure POSTing with refresh token instead of access token fails"""
auth = self._create_authorization_header(token=self.refresh_token.token)
- params = self._client_credentials_params()
- response = self.csrf_client.post('/oauth2-test/', params, HTTP_AUTHORIZATION=auth)
+ response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
@@ -547,8 +528,7 @@ def test_post_form_with_expired_access_token_failing_auth(self):
self.access_token.expires = datetime.datetime.now() - datetime.timedelta(seconds=10) # 10 seconds late
self.access_token.save()
auth = self._create_authorization_header()
- params = self._client_credentials_params()
- response = self.csrf_client.post('/oauth2-test/', params, HTTP_AUTHORIZATION=auth)
+ response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
self.assertIn('Invalid token', response.content)
@@ -559,10 +539,9 @@ def test_post_form_with_invalid_scope_failing_auth(self):
read_only_access_token.scope = oauth2_provider_scope.SCOPE_NAME_DICT['read']
read_only_access_token.save()
auth = self._create_authorization_header(token=read_only_access_token.token)
- params = self._client_credentials_params()
- response = self.csrf_client.get('/oauth2-with-scope-test/', params, HTTP_AUTHORIZATION=auth)
+ response = self.csrf_client.get('/oauth2-with-scope-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
- response = self.csrf_client.post('/oauth2-with-scope-test/', params, HTTP_AUTHORIZATION=auth)
+ response = self.csrf_client.post('/oauth2-with-scope-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@unittest.skipUnless(oauth2_provider, 'django-oauth2-provider not installed')
@@ -572,6 +551,5 @@ def test_post_form_with_valid_scope_passing_auth(self):
read_write_access_token.scope = oauth2_provider_scope.SCOPE_NAME_DICT['write']
read_write_access_token.save()
auth = self._create_authorization_header(token=read_write_access_token.token)
- params = self._client_credentials_params()
- response = self.csrf_client.post('/oauth2-with-scope-test/', params, HTTP_AUTHORIZATION=auth)
+ response = self.csrf_client.post('/oauth2-with-scope-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
| [oauth2] client_id and client_secret shouldn't be needed in all requests
The docs[0] says:
> The only thing needed to make the OAuth2Authentication class work is to insert the access_token you've received in the Authorization request header.
But authenticate_credentials[1] always checks for client_id and client_secret, and uses the user from them to check the token.
If client_id and client_secret are not provided, the form is invalid because they are required.
The client_id and client_secret should only be required when requesting the token, right?
[0] http://django-rest-framework.org/api-guide/authentication.html
[1] https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/authentication.py#L320
| @dulaccc one for you I think? :p
Btw. I haven't actually used OAuth2 myself yet, so I'm going to be a terrible person to help resolve this ticket.
May be worth reading through [this thread](https://groups.google.com/forum/?fromgroups=#!topic/django-rest-framework/5pB7WIFlDXE) related to implementing OAuth2 to see if that helps at all, and consider raising this question on the mailing list.
The only benefit of checking on all requests is that it lets us be sure the submitted access_token is being used with the right client.
But I think you are right @fernandogrd, the client credentials are only needed in case of an access_token request, or a refresh_token request. When [accessing protected resources](https://tools.ietf.org/html/rfc6749#section-7.1) there is indeed no paragraph talking about client credentials.
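To make that concrete, here is a rough, hypothetical sketch of token-only validation (not the library's implementation); it assumes django-oauth2-provider's `AccessToken` model plus DRF's `get_authorization_header` helper:
``` python
from django.utils import timezone
from provider.oauth2.models import AccessToken  # assumed django-oauth2-provider model
from rest_framework import authentication, exceptions


class TokenOnlyOAuth2Authentication(authentication.BaseAuthentication):
    """Illustrative only: validate the bearer token without client credentials."""

    def authenticate(self, request):
        auth = authentication.get_authorization_header(request).split()
        if len(auth) != 2 or auth[0].lower() != b'bearer':
            return None  # Not an OAuth2 bearer request; let other authenticators run.
        try:
            token = AccessToken.objects.get(token=auth[1].decode(),
                                            expires__gt=timezone.now())
        except AccessToken.DoesNotExist:
            raise exceptions.AuthenticationFailed('Invalid or expired token')
        return (token.user, token)
```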
Also, should the token be unique? I find it weird that oauth2-provider only has a token + client authentication backend.
Yes, that makes sense. And like you said, it's weird: the [AccessToken backend](https://github.com/caffeinehit/django-oauth2-provider/blob/master/provider/oauth2/backends.py#L64-L74) checks for a database match on the `token` and `client` parameters, as if the two combined were the primary key...
With regard to that, we probably need to write our own backend.
It seems it uses shortuuid for generating tokens, so it is probably unique enough, but enforcing uniqueness in the database is probably a good idea.
I'm working on a patch
| 2013-03-28T00:16:17 |
encode/django-rest-framework | 804 | encode__django-rest-framework-804 | [
"803"
] | 5d357a9b0807311b97de1e999be588f36fcd5b2f | diff --git a/rest_framework/compat.py b/rest_framework/compat.py
--- a/rest_framework/compat.py
+++ b/rest_framework/compat.py
@@ -6,6 +6,7 @@
from __future__ import unicode_literals
import django
+from django.core.exceptions import ImproperlyConfigured
# Try to import six from Django, fallback to included `six`.
try:
@@ -473,7 +474,7 @@ def apply_markdown(text):
try:
import oauth_provider
from oauth_provider.store import store as oauth_provider_store
-except ImportError:
+except (ImportError, ImproperlyConfigured):
oauth_provider = None
oauth_provider_store = None
| Module "oauth_provider.store.db" does not define an oauth store named "ModelStore"
After upgrading from 2.2.3 to 2.2.7, the following exception started occuring:
```
Traceback (most recent call last):
File "/home/danilo/.virtualenvs/studentenportal/lib/python2.7/site-packages/django/core/handlers/base.py", line 92, in get_response
response = middleware_method(request)
File "/home/danilo/.virtualenvs/studentenportal/lib/python2.7/site-packages/django/middleware/common.py", line 69, in process_request
if (not urlresolvers.is_valid_path(request.path_info, urlconf) and
File "/home/danilo/.virtualenvs/studentenportal/lib/python2.7/site-packages/django/core/urlresolvers.py", line 551, in is_valid_path
resolve(path, urlconf)
File "/home/danilo/.virtualenvs/studentenportal/lib/python2.7/site-packages/django/core/urlresolvers.py", line 440, in resolve
return get_resolver(urlconf).resolve(path)
File "/home/danilo/.virtualenvs/studentenportal/lib/python2.7/site-packages/django/core/urlresolvers.py", line 319, in resolve
for pattern in self.url_patterns:
File "/home/danilo/.virtualenvs/studentenportal/lib/python2.7/site-packages/django/core/urlresolvers.py", line 347, in url_patterns
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
File "/home/danilo/.virtualenvs/studentenportal/lib/python2.7/site-packages/django/core/urlresolvers.py", line 342, in urlconf_module
self._urlconf_module = import_module(self.urlconf_name)
File "/home/danilo/.virtualenvs/studentenportal/lib/python2.7/site-packages/django/utils/importlib.py", line 35, in import_module
__import__(name)
File "/home/danilo/Projects/studentenportal2/../studentenportal2/urls.py", line 8, in <module>
from apps.api import urls as api_urls
File "/home/danilo/Projects/studentenportal2/../studentenportal2/apps/api/urls.py", line 4, in <module>
from rest_framework.urlpatterns import format_suffix_patterns
File "/home/danilo/.virtualenvs/studentenportal/lib/python2.7/site-packages/rest_framework/urlpatterns.py", line 3, in <module>
from rest_framework.compat import url, include
File "/home/danilo/.virtualenvs/studentenportal/lib/python2.7/site-packages/rest_framework/compat.py", line 475, in <module>
from oauth_provider.store import store as oauth_provider_store
File "/home/danilo/.virtualenvs/studentenportal/lib/python2.7/site-packages/oauth_provider/store/__init__.py", line 177, in <module>
store = get_store()
File "/home/danilo/.virtualenvs/studentenportal/lib/python2.7/site-packages/oauth_provider/store/__init__.py", line 172, in get_store
raise ImproperlyConfigured('Module "%s" does not define an oauth store named "%s"' % (module, attr))
ImproperlyConfigured: Module "oauth_provider.store.db" does not define an oauth store named "ModelStore"
```
I have not enabled OAuth. Grepping for oauth in my repository does not return any results.
| Oh, I had oauth2 and django-oauth-plus installed, but didn't enable them in my code. After uninstalling them, it works again.
Just because I have those packages installed, you can't assume that I've also enabled and configured them :)
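The eventual fix (shown in the compat diff above) is simply to treat an installed-but-unconfigured package the same as a missing one:
``` python
from django.core.exceptions import ImproperlyConfigured

try:
    import oauth_provider
    from oauth_provider.store import store as oauth_provider_store
except (ImportError, ImproperlyConfigured):
    # django-oauth-plus raises ImproperlyConfigured at import time when it is
    # installed but not set up, so fall back to "not available" in that case too.
    oauth_provider = None
    oauth_provider_store = None
```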
| 2013-04-30T21:27:31 |
|
encode/django-rest-framework | 857 | encode__django-rest-framework-857 | [
"675"
] | ea9a3d88bce5507af753a79b259e6bd8e53a9059 | diff --git a/rest_framework/relations.py b/rest_framework/relations.py
--- a/rest_framework/relations.py
+++ b/rest_framework/relations.py
@@ -8,6 +8,7 @@
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.urlresolvers import resolve, get_script_prefix, NoReverseMatch
from django import forms
+from django.db.models.fields import BLANK_CHOICE_DASH
from django.forms import widgets
from django.forms.models import ModelChoiceIterator
from django.utils.translation import ugettext_lazy as _
@@ -47,7 +48,7 @@ def __init__(self, *args, **kwargs):
DeprecationWarning, stacklevel=2)
kwargs['required'] = not kwargs.pop('null')
- self.queryset = kwargs.pop('queryset', None)
+ queryset = kwargs.pop('queryset', None)
self.many = kwargs.pop('many', self.many)
if self.many:
self.widget = self.many_widget
@@ -56,6 +57,11 @@ def __init__(self, *args, **kwargs):
kwargs['read_only'] = kwargs.pop('read_only', self.read_only)
super(RelatedField, self).__init__(*args, **kwargs)
+ if not self.required:
+ self.empty_label = BLANK_CHOICE_DASH[0][1]
+
+ self.queryset = queryset
+
def initialize(self, parent, field_name):
super(RelatedField, self).initialize(parent, field_name)
if self.queryset is None and not self.read_only:
| diff --git a/rest_framework/tests/serializer.py b/rest_framework/tests/serializer.py
--- a/rest_framework/tests/serializer.py
+++ b/rest_framework/tests/serializer.py
@@ -1139,6 +1139,63 @@ def test_choices_blank_true_with_default(self):
)
+# Regression tests for #675
+class Ticket(models.Model):
+ assigned = models.ForeignKey(
+ Person, related_name='assigned_tickets')
+ reviewer = models.ForeignKey(
+ Person, blank=True, null=True, related_name='reviewed_tickets')
+
+
+class SerializerRelatedChoicesTest(TestCase):
+
+ def setUp(self):
+ super(SerializerRelatedChoicesTest, self).setUp()
+
+ class RelatedChoicesSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = Ticket
+ fields = ('assigned', 'reviewer')
+
+ self.related_fields_serializer = RelatedChoicesSerializer
+
+ def test_empty_queryset_required(self):
+ serializer = self.related_fields_serializer()
+ self.assertEqual(serializer.fields['assigned'].queryset.count(), 0)
+ self.assertEqual(
+ [x for x in serializer.fields['assigned'].widget.choices],
+ []
+ )
+
+ def test_empty_queryset_not_required(self):
+ serializer = self.related_fields_serializer()
+ self.assertEqual(serializer.fields['reviewer'].queryset.count(), 0)
+ self.assertEqual(
+ [x for x in serializer.fields['reviewer'].widget.choices],
+ [('', '---------')]
+ )
+
+ def test_with_some_persons_required(self):
+ Person.objects.create(name="Lionel Messi")
+ Person.objects.create(name="Xavi Hernandez")
+ serializer = self.related_fields_serializer()
+ self.assertEqual(serializer.fields['assigned'].queryset.count(), 2)
+ self.assertEqual(
+ [x for x in serializer.fields['assigned'].widget.choices],
+ [(1, 'Person object - 1'), (2, 'Person object - 2')]
+ )
+
+ def test_with_some_persons_not_required(self):
+ Person.objects.create(name="Lionel Messi")
+ Person.objects.create(name="Xavi Hernandez")
+ serializer = self.related_fields_serializer()
+ self.assertEqual(serializer.fields['reviewer'].queryset.count(), 2)
+ self.assertEqual(
+ [x for x in serializer.fields['reviewer'].widget.choices],
+ [('', '---------'), (1, 'Person object - 1'), (2, 'Person object - 2')]
+ )
+
+
class DepthTest(TestCase):
def test_implicit_nesting(self):
| `RelatedField`s with required=False need to create form_field_class with empty_labels
Let's say I'm working on an endpoint to represent `Photo`s, which have a `CopyrightHolder`. (Meaning, Photo has a ForeignKey with `null=True` for the CopyrightHolder.)
My serializers would be like this:
``` python
class PhotoSerializer(serializers.ModelSerializer):
copyright_holder = serializers.HyperlinkedRelatedField(view_name='holder-detail', required=False)
class Meta:
model = models.Photo
```
I believe that validation works correctly, but unless I'm missing something, the generated form does not allow me to edit a photo without setting a copyright holder.
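For context, a minimal sketch of the models implied by this example (the names are illustrative, not taken from a real project):
``` python
from django.db import models


class CopyrightHolder(models.Model):
    name = models.CharField(max_length=100)


class Photo(models.Model):
    image = models.ImageField(upload_to='photos')
    # null=True/blank=True is what should make the serializer field optional,
    # so the browsable API form needs to offer an empty choice for it.
    copyright_holder = models.ForeignKey(CopyrightHolder, null=True, blank=True)
```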
| Just to clarify, this relates just to the browsable API, right? (Or am I misunderstanding and it also applies to submitting via JSON?)
> unless I'm missing something, the generated form does not allow me to edit a photo without setting a copyright holder.
No I don't think you're missing anything, seems valid to me.
Correct. I'm talking about the browsable API.
Cool, all sounds sensible. Take a look in BrowsableAPIRenderer - shouldn't be too hard to resolve. If it's problematic, shout and I will try to take a look.
On 25 Feb 2013, at 19:22, Raphael Lullis [email protected] wrote:
> Correct. I'm talking about the browsable API.
>
> —
> Reply to this email directly or view it on GitHub.
Believe this is identical to #725.
As I solved #725, I'm taking this one as well
After inspecting a bit, it's the same issue but with `ChoiceField` with `RelatedField`.
| 2013-05-18T13:02:43 |
encode/django-rest-framework | 859 | encode__django-rest-framework-859 | [
"765"
] | a0e3c44c99a61a6dc878308bdf0890fbb10c41e4 | diff --git a/rest_framework/serializers.py b/rest_framework/serializers.py
--- a/rest_framework/serializers.py
+++ b/rest_framework/serializers.py
@@ -378,23 +378,27 @@ def field_from_native(self, data, files, field_name, into):
# Set the serializer object if it exists
obj = getattr(self.parent.object, field_name) if self.parent.object else None
- if value in (None, ''):
- into[(self.source or field_name)] = None
+ if self.source == '*':
+ if value:
+ into.update(value)
else:
- kwargs = {
- 'instance': obj,
- 'data': value,
- 'context': self.context,
- 'partial': self.partial,
- 'many': self.many
- }
- serializer = self.__class__(**kwargs)
-
- if serializer.is_valid():
- into[self.source or field_name] = serializer.object
+ if value in (None, ''):
+ into[(self.source or field_name)] = None
else:
- # Propagate errors up to our parent
- raise NestedValidationError(serializer.errors)
+ kwargs = {
+ 'instance': obj,
+ 'data': value,
+ 'context': self.context,
+ 'partial': self.partial,
+ 'many': self.many
+ }
+ serializer = self.__class__(**kwargs)
+
+ if serializer.is_valid():
+ into[self.source or field_name] = serializer.object
+ else:
+ # Propagate errors up to our parent
+ raise NestedValidationError(serializer.errors)
def get_identity(self, data):
"""
| diff --git a/rest_framework/tests/serializer.py b/rest_framework/tests/serializer.py
--- a/rest_framework/tests/serializer.py
+++ b/rest_framework/tests/serializer.py
@@ -78,6 +78,17 @@ class Meta:
read_only_fields = ('age',)
+class NestedSerializer(serializers.Serializer):
+ info = serializers.Field()
+
+
+class ModelSerializerWithNestedSerializer(serializers.ModelSerializer):
+ nested = NestedSerializer(source='*')
+
+ class Meta:
+ model = Person
+
+
class PersonSerializerInvalidReadOnly(serializers.ModelSerializer):
"""
Testing for #652.
@@ -369,6 +380,17 @@ def test_missing_model_field_exception_msg(self):
except:
self.fail('Wrong exception type thrown.')
+ def test_writable_star_source_on_nested_serializer(self):
+ """
+ Assert that a nested serializer instantiated with source='*' correctly
+ expands the data into the outer serializer.
+ """
+ serializer = ModelSerializerWithNestedSerializer(data={
+ 'name': 'marko',
+ 'nested': {'info': 'hi'}},
+ )
+ self.assertEqual(serializer.is_valid(), True)
+
class CustomValidationTests(TestCase):
class CommentSerializerWithFieldValidator(CommentSerializer):
| "TypeError: '*' is an invalid keyword argument for this function" in nested serializer
I have a nested serializer that looks like:
``` python
class FooSerializer(serializers.Serializer):
field_1 = serializers.Field('method1')
field_2 = serializers.Field('method2')
```
And I use it like this:
``` python
class MyMainSerializer(serializers.ModelSerializer):
foo = FooSerializer(source='*')
```
And testing it against master, through a CreateView, I get:
``` python
TypeError: '*' is an invalid keyword argument for this function
```
By debugging I found the error is raised here:
https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/mixins.py#L45
I checked out earlier commits with git, and it seems to begin happening after this commit:
https://github.com/tomchristie/django-rest-framework/commit/9bf7c9b714713f7b2fe84074cfd05a8bc3ef4022#L2L212
I couldn't reproduce it from the Python shell. Any ideas?
| I'll try to reproduce this bug.
Maybe it's a silly question, but why use `source='*'`?
Related to #694 - May be good for someone to try picking that one off first, then tackle this.
Let me add more context to clarify the issue. First, a use case that was not what motivated me to open it.
Suppose I have a Place model [0] and for some reason™ I want some of its fields to be exposed in the API as nested.
My first try for this was using a ModelSerializer [1], which worked well for read, but for write it gives me the TypeError above.
[0] https://bitbucket.org/fernandogrd/foo/src/d4ea5d999f37fe35fde255e64a90c2fbb09898be/places/models.py?at=default
[1] https://bitbucket.org/fernandogrd/foo/src/d4ea5d999f37/places/serializers.py?at=default#cl-7
It may seem obvious to get some kind of error in that case, since the docs say it is a read-only serializer. Which brings me to the thing that made me create the issue.
My serializer was similar to the example app (it has fields from the root serializer's model, and some user model fields), but it was in fact read-only. I guess it fails in newer versions because I didn't make it explicitly read_only (I couldn't reproduce it when I opened the issue, so I also guess the error was popping up in some of my POST tests).
Today I ended up with [2], a WritableField, that seems to work well for write and read.
[2] https://bitbucket.org/fernandogrd/foo/src/d4ea5d999f37/places/serializers.py?at=default#cl-23
In the end, I think I like the [1] style... it seems very logical to me, so it may be a good use case for writable nested serializers when source='*'.
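A minimal sketch of that "[1] style", assuming a flat `Place` model that stores `name`, `lat` and `lng` directly (the names are made up for illustration):
``` python
from rest_framework import serializers


class CoordsSerializer(serializers.Serializer):
    lat = serializers.FloatField()
    lng = serializers.FloatField()


class PlaceSerializer(serializers.ModelSerializer):
    # On read, source='*' hands the whole Place instance to the nested
    # serializer; with this patch, on write the nested dict is expanded
    # back into the outer serializer's attrs.
    coords = CoordsSerializer(source='*')

    class Meta:
        model = Place  # assumed flat model with name/lat/lng fields
        fields = ('id', 'name', 'coords')
```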
I'm looking into this now
| 2013-05-18T13:25:48 |
encode/django-rest-framework | 865 | encode__django-rest-framework-865 | [
"747"
] | 3f47eb7a77fcc735782dd1bf8e8e053e26417ea1 | diff --git a/rest_framework/compat.py b/rest_framework/compat.py
--- a/rest_framework/compat.py
+++ b/rest_framework/compat.py
@@ -495,3 +495,16 @@ def apply_markdown(text):
oauth2_provider_forms = None
oauth2_provider_scope = None
oauth2_constants = None
+
+# Handle lazy strings
+from django.utils.functional import Promise
+
+if six.PY3:
+ def is_non_str_iterable(obj):
+ if (isinstance(obj, str) or
+ (isinstance(obj, Promise) and obj._delegate_text)):
+ return False
+ return hasattr(obj, '__iter__')
+else:
+ def is_non_str_iterable(obj):
+ return hasattr(obj, '__iter__')
diff --git a/rest_framework/fields.py b/rest_framework/fields.py
--- a/rest_framework/fields.py
+++ b/rest_framework/fields.py
@@ -26,7 +26,7 @@
from rest_framework.compat import timezone, parse_date, parse_datetime, parse_time
from rest_framework.compat import BytesIO
from rest_framework.compat import six
-from rest_framework.compat import smart_text
+from rest_framework.compat import smart_text, force_text, is_non_str_iterable
from rest_framework.settings import api_settings
@@ -45,7 +45,6 @@ def is_simple_callable(obj):
len_defaults = len(defaults) if defaults else 0
return len_args <= len_defaults
-
def get_component(obj, attr_name):
"""
Given an object, and an attribute name,
@@ -169,7 +168,8 @@ def to_native(self, value):
if is_protected_type(value):
return value
- elif hasattr(value, '__iter__') and not isinstance(value, (dict, six.string_types)):
+ elif (is_non_str_iterable(value) and
+ not isinstance(value, (dict, six.string_types))):
return [self.to_native(item) for item in value]
elif isinstance(value, dict):
# Make sure we preserve field ordering, if it exists
@@ -177,7 +177,7 @@ def to_native(self, value):
for key, val in value.items():
ret[key] = self.to_native(val)
return ret
- return smart_text(value)
+ return force_text(value)
def attributes(self):
"""
| diff --git a/rest_framework/tests/serializer.py b/rest_framework/tests/serializer.py
--- a/rest_framework/tests/serializer.py
+++ b/rest_framework/tests/serializer.py
@@ -1,8 +1,9 @@
from __future__ import unicode_literals
from django.db import models
from django.db.models.fields import BLANK_CHOICE_DASH
-from django.utils.datastructures import MultiValueDict
from django.test import TestCase
+from django.utils.datastructures import MultiValueDict
+from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.tests.models import (HasPositiveIntegerAsChoice, Album, ActionItem, Anchor, BasicModel,
BlankFieldModel, BlogPost, BlogPostComment, Book, CallableDefaultValueModel, DefaultValueModel,
@@ -1323,6 +1324,34 @@ def test_errors_return_as_list(self):
self.assertEqual(serializer.errors, expected)
+# test for issue 747
+
+
+class LazyStringModel(object):
+ def __init__(self, lazystring):
+ self.lazystring = lazystring
+
+
+class LazyStringSerializer(serializers.Serializer):
+ lazystring = serializers.Field()
+
+ def restore_object(self, attrs, instance=None):
+ if instance is not None:
+ instance.lazystring = attrs.get('lazystring', instance.lazystring)
+ return instance
+ return LazyStringModel(**attrs)
+
+
+class LazyStringsTestCase(TestCase):
+ def setUp(self):
+ self.model = LazyStringModel(lazystring=_('lazystring'))
+
+ def test_lazy_strings_are_translated(self):
+ serializer = LazyStringSerializer(self.model)
+ self.assertEqual(type(serializer.data['lazystring']),
+ type('lazystring'))
+
+
class AttributeMappingOnAutogeneratedFieldsTests(TestCase):
def setUp(self):
| Make sure lazy translatable strings get translated by the serializer fields
Right now, if a lazy translatable string is rendered by a serializer field, everything dies with a `<django.utils.functional.__proxy__ object at 0x7fac6c074290> is not JSON serializable` error
Tom suggested this could be achieved by changing the code here: https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/fields.py#L168
Of course we will need some tests to ensure nothing else is broken. I will try to provide some tests.
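A quick illustration of the difference, using Django's standard encoding helpers (names as in Django 1.5+):
``` python
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text, force_text

label = _('lazystring')

# smart_text() deliberately leaves lazy translation proxies unevaluated,
# which is what the JSON renderer then chokes on.
print(type(smart_text(label)))   # django.utils.functional.__proxy__

# force_text() resolves the proxy to a real string, so it serializes cleanly.
print(type(force_text(label)))   # plain text type
```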
| 2013-05-18T15:30:52 |
|
encode/django-rest-framework | 922 | encode__django-rest-framework-922 | [
"921"
] | 75e3cbc903f265e88ac28c6d35ec4ba8e76fed3b | diff --git a/rest_framework/routers.py b/rest_framework/routers.py
--- a/rest_framework/routers.py
+++ b/rest_framework/routers.py
@@ -215,6 +215,7 @@ class DefaultRouter(SimpleRouter):
"""
include_root_view = True
include_format_suffixes = True
+ root_view_name = 'api-root'
def get_api_root_view(self):
"""
@@ -244,7 +245,7 @@ def get_urls(self):
urls = []
if self.include_root_view:
- root_url = url(r'^$', self.get_api_root_view(), name='api-root')
+ root_url = url(r'^$', self.get_api_root_view(), name=self.root_view_name)
urls.append(root_url)
default_urls = super(DefaultRouter, self).get_urls()
diff --git a/rest_framework/views.py b/rest_framework/views.py
--- a/rest_framework/views.py
+++ b/rest_framework/views.py
@@ -304,10 +304,10 @@ def dispatch(self, request, *args, **kwargs):
`.dispatch()` is pretty much the same as Django's regular dispatch,
but with extra hooks for startup, finalize, and exception handling.
"""
- request = self.initialize_request(request, *args, **kwargs)
- self.request = request
self.args = args
self.kwargs = kwargs
+ request = self.initialize_request(request, *args, **kwargs)
+ self.request = request
self.headers = self.default_response_headers # deprecate?
try:
| diff --git a/rest_framework/tests/test_routers.py b/rest_framework/tests/test_routers.py
--- a/rest_framework/tests/test_routers.py
+++ b/rest_framework/tests/test_routers.py
@@ -6,7 +6,7 @@
from rest_framework.compat import include, patterns, url
from rest_framework.decorators import link, action
from rest_framework.response import Response
-from rest_framework.routers import SimpleRouter
+from rest_framework.routers import SimpleRouter, DefaultRouter
factory = RequestFactory()
@@ -148,3 +148,17 @@ def test_urls_can_have_trailing_slash_removed(self):
expected = ['^notes$', '^notes/(?P<pk>[^/]+)$']
for idx in range(len(expected)):
self.assertEqual(expected[idx], self.urls[idx].regex.pattern)
+
+class TestNameableRoot(TestCase):
+ def setUp(self):
+ class NoteViewSet(viewsets.ModelViewSet):
+ model = RouterTestModel
+ self.router = DefaultRouter()
+ self.router.root_view_name = 'nameable-root'
+ self.router.register(r'notes', NoteViewSet)
+ self.urls = self.router.urls
+
+ def test_router_has_custom_name(self):
+ expected = 'nameable-root'
+ self.assertEqual(expected, self.urls[0].name)
+
| FileUploadParser never gets the filename from the URL because get_parser_context is called before kwargs are set
get_parser_context always returns empty args and kwargs because it depends on self.args and self.kwargs being set, and dispatch calls initialize_request before setting those variables.
Either get_parser_context needs to be passed args and kwargs, or dispatch needs to set args and kwargs before invoking initialize_request.
Additionally, FileUploadParser is fine returning None as a filename, but the standard upload handlers do not create a file in that circumstance, so request.FILES['file'] never gets populated.
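For reference, a sketch of the kind of endpoint that hits this, where the filename is expected to come from a URL kwarg (the URL pattern and names are illustrative):
``` python
from rest_framework.views import APIView
from rest_framework.parsers import FileUploadParser
from rest_framework.response import Response


class FileUploadView(APIView):
    parser_classes = (FileUploadParser,)

    # Assumes a URL pattern capturing `filename`, e.g. r'^upload/(?P<filename>[^/]+)$'.
    def put(self, request, filename, format=None):
        # FileUploadParser looks the filename up in parser_context['kwargs'],
        # which stays empty unless dispatch() sets self.kwargs before
        # building the Request object.
        uploaded = request.FILES['file']
        return Response({'received': uploaded.name, 'size': uploaded.size})
```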
| 2013-06-08T02:28:19 |
|
encode/django-rest-framework | 953 | encode__django-rest-framework-953 | [
"940"
] | af2fdc03a6f4cafe6e2f19b2adcf59c8918088f2 | diff --git a/rest_framework/routers.py b/rest_framework/routers.py
--- a/rest_framework/routers.py
+++ b/rest_framework/routers.py
@@ -15,7 +15,9 @@
"""
from __future__ import unicode_literals
+import itertools
from collections import namedtuple
+from django.core.exceptions import ImproperlyConfigured
from rest_framework import views
from rest_framework.compat import patterns, url
from rest_framework.response import Response
@@ -38,6 +40,13 @@ def replace_methodname(format_string, methodname):
return ret
+def flatten(list_of_lists):
+ """
+ Takes an iterable of iterables, returns a single iterable containing all items
+ """
+ return itertools.chain(*list_of_lists)
+
+
class BaseRouter(object):
def __init__(self):
self.registry = []
@@ -130,12 +139,17 @@ def get_routes(self, viewset):
Returns a list of the Route namedtuple.
"""
+ known_actions = flatten([route.mapping.values() for route in self.routes])
+
# Determine any `@action` or `@link` decorated methods on the viewset
dynamic_routes = []
for methodname in dir(viewset):
attr = getattr(viewset, methodname)
httpmethods = getattr(attr, 'bind_to_methods', None)
if httpmethods:
+ if methodname in known_actions:
+ raise ImproperlyConfigured('Cannot use @action or @link decorator on '
+ 'method "%s" as it is an existing route' % methodname)
httpmethods = [method.lower() for method in httpmethods]
dynamic_routes.append((httpmethods, methodname))
| diff --git a/rest_framework/tests/test_routers.py b/rest_framework/tests/test_routers.py
--- a/rest_framework/tests/test_routers.py
+++ b/rest_framework/tests/test_routers.py
@@ -2,6 +2,7 @@
from django.db import models
from django.test import TestCase
from django.test.client import RequestFactory
+from django.core.exceptions import ImproperlyConfigured
from rest_framework import serializers, viewsets, permissions
from rest_framework.compat import include, patterns, url
from rest_framework.decorators import link, action
@@ -191,3 +192,24 @@ def test_action_kwargs(self):
response.data,
{'permission_classes': [permissions.AllowAny]}
)
+
+class TestActionAppliedToExistingRoute(TestCase):
+ """
+ Ensure `@action` decorator raises an except when applied
+ to an existing route
+ """
+
+ def test_exception_raised_when_action_applied_to_existing_route(self):
+ class TestViewSet(viewsets.ModelViewSet):
+
+ @action()
+ def retrieve(self, request, *args, **kwargs):
+ return Response({
+ 'hello': 'world'
+ })
+
+ self.router = SimpleRouter()
+ self.router.register(r'test', TestViewSet, base_name='test')
+
+ with self.assertRaises(ImproperlyConfigured):
+ self.router.urls
| Keyword arguments to @action lost.
When attempting to use `@action` to override `permission_classes` on a per-method basis (as the documentation [suggests](http://django-rest-framework.org/api-guide/viewsets.html#marking-extra-methods-for-routing) is possible), it appears that the additional keyword arguments are lost.
Test case:
```
class BookViewSet(mixins.RetrieveModelMixin, mixins.UpdateModelMixin,
                  viewsets.GenericViewSet):
    queryset = Book.objects.all()
    serializer_class = BookSerializer
    permission_classes = []

    @action(permission_classes=[AuthorBookset])
    def update(self, request, *args, **kwargs):
        return super(BookViewSet, self).update(request, *args, **kwargs)
```
This happens because `APIView.get_permissions()` uses `self.permission_classes` when creating permission instances. `@action` decorates a method on a subclass of `APIView`, and [assigns](https://github.com/tomchristie/django-rest-framework/blob/ae2887ffc41b1e05d6706f51b00266efccad7a58/rest_framework/decorators.py#L127) those additional keyword arguments as a property of the decorated method. Since they are applied to the method and not used to update the class, `APIView.get_permissions()` never sees them and they are lost.
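For contrast, a sketch of the usage the decorator is designed for in this release: an extra route whose kwargs are applied when the router builds that route's view (`Book` and `AuthorBookset` are just the names from the example above):
``` python
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response


class BookViewSet(viewsets.ModelViewSet):
    model = Book  # as in the example above

    # If registered under r'books', this is routed as POST /books/{pk}/archive/
    # with its own permission_classes; the standard PUT route is left untouched.
    @action(permission_classes=[AuthorBookset])
    def archive(self, request, pk=None):
        return Response({'status': 'archived'})
```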
| CC @andymckay
Note: Please ignore the shitty commit history on the reference commit - that's me being braindead :-/
This is now tested for [here](https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/tests/test_routers.py#L167) and works as expected.
I didn't notice when this ticket was filed that the method you're referring to is named `update`.
The `@action` decorator is intended to create _new_ routes, not to modify the existing standard routes.
I'm slightly confused about exactly what you'd expect to happen in the example you've used - an additional route such as `/book/123/update/`, or modifying the existing standard PUT on `/book/123/`?
I'm leaving this ticket open for the moment, because this probably either needs better documentation, or the example you've given should raise an assertion error, or both.
Ah, it seemed to me that `@action` could be used to modify a standard route by, for example, allowing a `Permission` class to be selectively applied to a single route. Seems like a common-enough use case: allow all users to retrieve an object, but only allow owners of that object to update or delete it. [From the docs](http://django-rest-framework.org/api-guide/viewsets.html#marking-extra-methods-for-routing), `@action` seemed to be the way to get that done (though admittedly, the name doesn't imply that).
Though definitely my mistake, the docs would likely benefit from a little clarification.
| 2013-06-26T22:05:47 |
encode/django-rest-framework | 1,010 | encode__django-rest-framework-1010 | [
"1008"
] | 0fa9866848238ed355461a619e5aa9a148403f5f | diff --git a/rest_framework/serializers.py b/rest_framework/serializers.py
--- a/rest_framework/serializers.py
+++ b/rest_framework/serializers.py
@@ -690,7 +690,7 @@ def get_default_fields(self):
assert field_name in ret, \
"Noexistant field '%s' specified in `read_only_fields` " \
"on serializer '%s'." % \
- (self.__class__.__name__, field_name)
+ (field_name, self.__class__.__name__)
ret[field_name].read_only = True
return ret
| Mini correction in assert in serializers
Hi, I've just noticed a small problem in the get_default_fields method of ModelSerializer, at line 694:
```
assert field_name in ret, \
"Noexistant field '%s' specified in `read_only_fields` " \
"on serializer '%s'." % \
(self.__class__.__name__, field_name)
```
As you can see right away, the arguments are inverted, which leads to rather funny error messages :-)
| 2013-07-26T15:04:55 |
||
encode/django-rest-framework | 1,038 | encode__django-rest-framework-1038 | [
"1037"
] | 999056cde1c6355d5ca036f109b35b41cb9d47cc | diff --git a/rest_framework/views.py b/rest_framework/views.py
--- a/rest_framework/views.py
+++ b/rest_framework/views.py
@@ -269,7 +269,7 @@ def handle_exception(self, exc):
Handle any exception that occurs, by returning an appropriate response,
or re-raising the error.
"""
- if isinstance(exc, exceptions.Throttled):
+ if isinstance(exc, exceptions.Throttled) and exc.wait is not None:
# Throttle wait header
self.headers['X-Throttle-Wait-Seconds'] = '%d' % exc.wait
| diff --git a/rest_framework/tests/test_throttling.py b/rest_framework/tests/test_throttling.py
--- a/rest_framework/tests/test_throttling.py
+++ b/rest_framework/tests/test_throttling.py
@@ -7,7 +7,7 @@
from django.core.cache import cache
from rest_framework.test import APIRequestFactory
from rest_framework.views import APIView
-from rest_framework.throttling import UserRateThrottle, ScopedRateThrottle
+from rest_framework.throttling import BaseThrottle, UserRateThrottle, ScopedRateThrottle
from rest_framework.response import Response
@@ -21,6 +21,14 @@ class User3MinRateThrottle(UserRateThrottle):
scope = 'minutes'
+class NonTimeThrottle(BaseThrottle):
+ def allow_request(self, request, view):
+ if not hasattr(self.__class__, 'called'):
+ self.__class__.called = True
+ return True
+ return False
+
+
class MockView(APIView):
throttle_classes = (User3SecRateThrottle,)
@@ -35,6 +43,13 @@ def get(self, request):
return Response('foo')
+class MockView_NonTimeThrottling(APIView):
+ throttle_classes = (NonTimeThrottle,)
+
+ def get(self, request):
+ return Response('foo')
+
+
class ThrottlingTests(TestCase):
def setUp(self):
"""
@@ -140,6 +155,22 @@ def test_next_rate_remains_constant_if_followed(self):
(80, None)
))
+ def test_non_time_throttle(self):
+ """
+ Ensure for second based throttles.
+ """
+ request = self.factory.get('/')
+
+ self.assertFalse(hasattr(MockView_NonTimeThrottling.throttle_classes[0], 'called'))
+
+ response = MockView_NonTimeThrottling.as_view()(request)
+ self.assertFalse('X-Throttle-Wait-Seconds' in response)
+
+ self.assertTrue(MockView_NonTimeThrottling.throttle_classes[0].called)
+
+ response = MockView_NonTimeThrottling.as_view()(request)
+ self.assertFalse('X-Throttle-Wait-Seconds' in response)
+
class ScopedRateThrottleTests(TestCase):
"""
| Views handling Throttled exception with wait == None should not attempt to set Throttle wait header
Defining a non-time-based throttle, in which wait() returns None, causes APIView.handle_exception(exc) to fail when setting the X-Throttle-Wait-Seconds header, since it uses '%d' % exc.wait.
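A minimal sketch of such a throttle, similar in spirit to the `NonTimeThrottle` added in the tests above; note that `BaseThrottle.wait()` already returns `None` by default:
``` python
from rest_framework.throttling import BaseThrottle


class FirstRequestOnlyThrottle(BaseThrottle):
    """Illustrative throttle with no time component, so wait() stays None."""

    allowed = True

    def allow_request(self, request, view):
        if FirstRequestOnlyThrottle.allowed:
            FirstRequestOnlyThrottle.allowed = False
            return True
        return False
```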
| 2013-08-13T20:34:24 |
|
encode/django-rest-framework | 1,078 | encode__django-rest-framework-1078 | [
"1072"
] | f8101114d1ec13e296cb393b43b0ebd9618fa997 | diff --git a/rest_framework/relations.py b/rest_framework/relations.py
--- a/rest_framework/relations.py
+++ b/rest_framework/relations.py
@@ -264,7 +264,7 @@ def field_to_native(self, obj, field_name):
# RelatedObject (reverse relationship)
try:
pk = getattr(obj, self.source or field_name).pk
- except ObjectDoesNotExist:
+ except (ObjectDoesNotExist, AttributeError):
return None
# Forward relationship
| diff --git a/rest_framework/tests/test_relations_pk.py b/rest_framework/tests/test_relations_pk.py
--- a/rest_framework/tests/test_relations_pk.py
+++ b/rest_framework/tests/test_relations_pk.py
@@ -283,6 +283,15 @@ def test_foreign_key_update_with_invalid_null(self):
self.assertFalse(serializer.is_valid())
self.assertEqual(serializer.errors, {'target': ['This field is required.']})
+ def test_foreign_key_with_empty(self):
+ """
+ Regression test for #1072
+
+ https://github.com/tomchristie/django-rest-framework/issues/1072
+ """
+ serializer = NullableForeignKeySourceSerializer()
+ self.assertEqual(serializer.data['target'], None)
+
class PKNullableForeignKeyTests(TestCase):
def setUp(self):
| Browsable API bug (in master)
The browsable API breaks on the list view when the queryset is empty.
Git bisect says it was introduced in commit 10d386e.
Traceback:
```
'NoneType' object has no attribute 'company'
File "[...]/django/core/handlers/base.py" in get_response
140. response = response.render()
File "[...]/django/template/response.py" in render
105. self.content = self.rendered_content
File "[...]/rest_framework/response.py" in rendered_content
59. ret = renderer.render(self.data, media_type, context)
File "[...]/rest_framework/renderers.py" in render
540. post_form = self._get_rendered_html_form(view, 'POST', request)
File "[...]/rest_framework/renderers.py" in _get_rendered_html_form
437. return self.get_rendered_html_form(view, method, request)
File "[...]/rest_framework/renderers.py" in get_rendered_html_form
459. data = serializer.data
File "[...]/rest_framework/serializers.py" in data
506. self._data = self.to_native(obj)
File "[...]/rest_framework/serializers.py" in to_native
309. value = field.field_to_native(obj, field_name)
File "[...]/rest_framework/relations.py" in field_to_native
266. pk = getattr(obj, self.source or field_name).pk
```
| A quick and dirty fix would be adding:
``` python
if not obj:
    return None
```
in PrimaryKeyRelatedField.field_to_native(), but I'm not sure it covers all cases...
@yprez - Can't replicate this, can you give me a failing example?
Yeah, I'll try replicating this with a simpler case.
| 2013-08-31T14:20:14 |
encode/django-rest-framework | 1,092 | encode__django-rest-framework-1092 | [
"907"
] | 916d8ab37da2f0c4412507710649ba0f352f29bb | diff --git a/rest_framework/settings.py b/rest_framework/settings.py
--- a/rest_framework/settings.py
+++ b/rest_framework/settings.py
@@ -77,6 +77,9 @@
'VIEW_NAME_FUNCTION': 'rest_framework.views.get_view_name',
'VIEW_DESCRIPTION_FUNCTION': 'rest_framework.views.get_view_description',
+ # Exception handling
+ 'EXCEPTION_HANDLER': 'rest_framework.views.exception_handler',
+
# Testing
'TEST_REQUEST_RENDERER_CLASSES': (
'rest_framework.renderers.MultiPartRenderer',
@@ -125,6 +128,7 @@
'DEFAULT_MODEL_SERIALIZER_CLASS',
'DEFAULT_PAGINATION_SERIALIZER_CLASS',
'DEFAULT_FILTER_BACKENDS',
+ 'EXCEPTION_HANDLER',
'FILTER_BACKEND',
'TEST_REQUEST_RENDERER_CLASSES',
'UNAUTHENTICATED_USER',
diff --git a/rest_framework/views.py b/rest_framework/views.py
--- a/rest_framework/views.py
+++ b/rest_framework/views.py
@@ -361,7 +361,7 @@ def handle_exception(self, exc):
else:
exc.status_code = status.HTTP_403_FORBIDDEN
- response = exception_handler(exc)
+ response = self.settings.EXCEPTION_HANDLER(exc)
if response is None:
raise
| diff --git a/rest_framework/tests/test_views.py b/rest_framework/tests/test_views.py
--- a/rest_framework/tests/test_views.py
+++ b/rest_framework/tests/test_views.py
@@ -32,6 +32,16 @@ def basic_view(request):
return {'method': 'PATCH', 'data': request.DATA}
+class ErrorView(APIView):
+ def get(self, request, *args, **kwargs):
+ raise Exception
+
+
+@api_view(['GET'])
+def error_view(request):
+ raise Exception
+
+
def sanitise_json_error(error_dict):
"""
Exact contents of JSON error messages depend on the installed version
@@ -99,3 +109,34 @@ def test_400_parse_error_tunneled_content(self):
}
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(sanitise_json_error(response.data), expected)
+
+
+class TestCustomExceptionHandler(TestCase):
+ def setUp(self):
+ self.DEFAULT_HANDLER = api_settings.EXCEPTION_HANDLER
+
+ def exception_handler(exc):
+ return Response('Error!', status=status.HTTP_400_BAD_REQUEST)
+
+ api_settings.EXCEPTION_HANDLER = exception_handler
+
+ def tearDown(self):
+ api_settings.EXCEPTION_HANDLER = self.DEFAULT_HANDLER
+
+ def test_class_based_view_exception_handler(self):
+ view = ErrorView.as_view()
+
+ request = factory.get('/', content_type='application/json')
+ response = view(request)
+ expected = 'Error!'
+ self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
+ self.assertEqual(response.data, expected)
+
+ def test_function_based_view_exception_handler(self):
+ view = error_view
+
+ request = factory.get('/', content_type='application/json')
+ response = view(request)
+ expected = 'Error!'
+ self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
+ self.assertEqual(response.data, expected)
| Customizable exception handler
Please take a look.
I did not touch docs yet. Also, in Python 3 there is some weird test failure caused by catch_warnings()
Main ideas:
1. Deprecate returning a Response with status_code >= 400. An exception should be raised instead;
2. Make exception handler customizable via settings
| Okay. A lot of this looks good, but I'm not really happy with raising exceptions instead of returning responses.
That's too big of a change for me to be comfortable with, and it just doesn't feel right.
I'd accept something along these lines if it strictly only dealt with existing exceptions, although I realize that might not do 100% of what you're looking for in your use case. If you really need to modify 400 error responses and you don't want to override the existing views you might need to look into doing that in middleware or something similar.
No, middleware is a bad idea. In this case it's better to write a couple of custom mixins and assemble custom views in my project.
I think the only thing you might consider rejecting in this code is raising a warning when the user tries to return a non-4xx error code. I agree that this is too much of a restriction.
I needed something similar here — to add a custom error code. I ended up creating a mixin that overrides finalise_response.
Roughly: check the response, update it accordingly, call the super implementation — It seemed to work okay.
On 4 Jun 2013, at 21:31, Tom Christie [email protected] wrote:
> Okay. A lot of this looks good, but I'm not really happy with raising exceptions instead of returning responses.
> That's too big of a change for me to be comfortable with, and it just doesn't feel right.
> I'd accept something along these lines if it strictly only dealt with existing exceptions, although I realize that might not do 100% of what you're looking for in your use case. If you really need to modify 400 error responses and you don't want to override the existing views you might need to look into doing that in middleware or something similar.
>
> —
> Reply to this email directly or view it on GitHub.
Yeah, agree that mixin approach is nicer than middleware.
Either way, the point about not wanting to change the current 400 responses into exceptions still stands - that just doesn't feel right to me.
Ok will remote that and fix tests...
See the following thread... https://groups.google.com/d/msg/django-rest-framework/ndgmIU8kfOY/M_tMmAiPhp8J
This is now easier, as we just need to switch the current hardcoded exception handler function out into a setting... https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/views.py#L364
Marked for attention at DjangoCon. My previous comment and links in there should provide sufficient context to get going on this, essentially just switch out the hardcoded exception handler function into a configurable setting.
Hi Tom,
Sorry I was too busy these months, and could not dedicate time to this PR...
@avsd - totally fine, nothing to apologise for :)
| 2013-09-06T17:03:08 |
encode/django-rest-framework | 1,109 | encode__django-rest-framework-1109 | [
"1105"
] | 068e4a18f490cff77364ecddd47a614ba3167013 | diff --git a/rest_framework/renderers.py b/rest_framework/renderers.py
--- a/rest_framework/renderers.py
+++ b/rest_framework/renderers.py
@@ -564,67 +564,65 @@ def get_description(self, view):
def get_breadcrumbs(self, request):
return get_breadcrumbs(request.path)
- def render(self, data, accepted_media_type=None, renderer_context=None):
+ def get_context(self, data, accepted_media_type, renderer_context):
"""
- Render the HTML for the browsable API representation.
+ Returns the context used to render.
"""
- self.accepted_media_type = accepted_media_type or ''
- self.renderer_context = renderer_context or {}
-
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
renderer = self.get_default_renderer(view)
- content = self.get_content(renderer, data, accepted_media_type, renderer_context)
-
- put_form = self.get_rendered_html_form(view, 'PUT', request)
- post_form = self.get_rendered_html_form(view, 'POST', request)
- patch_form = self.get_rendered_html_form(view, 'PATCH', request)
- delete_form = self.get_rendered_html_form(view, 'DELETE', request)
- options_form = self.get_rendered_html_form(view, 'OPTIONS', request)
raw_data_put_form = self.get_raw_data_form(view, 'PUT', request)
- raw_data_post_form = self.get_raw_data_form(view, 'POST', request)
raw_data_patch_form = self.get_raw_data_form(view, 'PATCH', request)
raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form
- name = self.get_name(view)
- description = self.get_description(view)
- breadcrumb_list = self.get_breadcrumbs(request)
-
- template = loader.get_template(self.template)
- context = RequestContext(request, {
- 'content': content,
+ context = {
+ 'content': self.get_content(renderer, data, accepted_media_type, renderer_context),
'view': view,
'request': request,
'response': response,
- 'description': description,
- 'name': name,
+ 'description': self.get_description(view),
+ 'name': self.get_name(view),
'version': VERSION,
- 'breadcrumblist': breadcrumb_list,
+ 'breadcrumblist': self.get_breadcrumbs(request),
'allowed_methods': view.allowed_methods,
'available_formats': [renderer.format for renderer in view.renderer_classes],
- 'put_form': put_form,
- 'post_form': post_form,
- 'patch_form': patch_form,
- 'delete_form': delete_form,
- 'options_form': options_form,
+ 'put_form': self.get_rendered_html_form(view, 'PUT', request),
+ 'post_form': self.get_rendered_html_form(view, 'POST', request),
+ 'patch_form': self.get_rendered_html_form(view, 'PATCH', request),
+ 'delete_form': self.get_rendered_html_form(view, 'DELETE', request),
+ 'options_form': self.get_rendered_html_form(view, 'OPTIONS', request),
'raw_data_put_form': raw_data_put_form,
- 'raw_data_post_form': raw_data_post_form,
+ 'raw_data_post_form': self.get_raw_data_form(view, 'POST', request),
'raw_data_patch_form': raw_data_patch_form,
'raw_data_put_or_patch_form': raw_data_put_or_patch_form,
+ 'display_edit_forms': bool(response.status_code != 403),
+
'api_settings': api_settings
- })
+ }
+ return context
+ def render(self, data, accepted_media_type=None, renderer_context=None):
+ """
+ Render the HTML for the browsable API representation.
+ """
+ self.accepted_media_type = accepted_media_type or ''
+ self.renderer_context = renderer_context or {}
+
+ template = loader.get_template(self.template)
+ context = self.get_context(data, accepted_media_type, renderer_context)
+ context = RequestContext(renderer_context['request'], context)
ret = template.render(context)
# Munge DELETE Response code to allow us to return content
# (Do this *after* we've rendered the template so that we include
# the normal deletion response code in the output)
+ response = renderer_context['response']
if response.status_code == status.HTTP_204_NO_CONTENT:
response.status_code = status.HTTP_200_OK
| Decouple POST/create (API browser) form from GET/list (API) permissions.
I would like to use `DjangoModelPermissions` to restrict API access in general, and then explicitly allow access by anonymous users to POST/create and GET/retrieve (but not GET/list). For GET/retrieve, an additional URL argument will be verified so that clients can only GET/retrieve objects for which they know the URL.
I think I can allow access to POST/create by subclassing `DjangoModelPermissions.has_permission()` and returning `True` if `request.method == "POST"`. But, I can't test this using the API browser because when it returns a 403 forbidden response to my GET/list API request, it also does not show the POST/create form even though I do have POST/create permission.
This is incorrectly denying me the ability to submit a POST/create request via the API browser because a GET/list request is denied.
Also, on a somewhat related note, I don't think there is any (at least no easy) way to allow GET/retrieve but deny GET/list? I'm not using ViewSets, just generic CBVs. I think that if I did use ViewSets, an `action` attribute would be set on the view.
Could (and should) this also be set on generic CBVs, so that `has_permission()` can check `view.action` as well as `request.method` in determining whether or not to authorize a request?
This inconsistency makes ViewSets less of a convenience and more of a requirement if you need to customise permissions in this way.
| The relevant line in code appears to be #125 in `rest_framework/base.html`, where it has `{% if response.status_code != 403 %}`.
This hides the form that would enable us to submit a POST/create request (which I have permission to do) in the API browser because an API response of 403 was returned for GET/list (which I have intentionally denied).
I'm not sure about supporting this. The behaviour is as it is in order to avoid leaking information about object existence, or anything else that might accidentally be inferred from the inclusion of those forms. If you look into the git history for that particular line you might be able to track down exactly when and why it was introduced.
I'm somewhat inclined to close this as being simply not a supported style of using the browsable API. Typically in this case you'd want list to return a _filtered_ list of any objects the user can operate on. Of course if you want to modify the behavior in your own app you still can, by customising the browsable API template (and omitting that line)
(Oops, hit send early)
here... http://django-rest-framework.org/topics/browsable-api.html#customizing
> Could (and should) this also be set on generic CBVs, so that has_permission() can check view.action as well as request.method in determining whether or not to authorize a request?
Nope. action is specifically a viewset concept. In regular views you're making a request with a particular method, but there's no specific action, just a view that's being called. You can do something similar by inspecting the URL kwargs that are being passed through eg to differentiate between a list view (eg. no pk being passed in) and a detail view (eg. a pk or other kwarg being passed in the URL kwargs)
I looked through the history for that file (took a while - a few renames to follow), and there doesn't appear to be any discussion or justification around this change.
https://github.com/tomchristie/django-rest-framework/commit/0b18b58c170aae0c2a93a608997e557fdcefe826
Could you elaborate on how exactly exposing a `POST/create` form in the API browser could leak information about object existence? The form should only be exposed when the client is authorised to make `POST/create` requests.
To be clear, I am not suggesting that the API response for the `GET/list` request (which is on the same page) should be allowed just because `POST/create` is allowed. I don't really see a reason why these can't be decoupled?
The "<Model> List" page in the API browser should display a 403 forbidden response to the `GET/list` request (if GET/list is not allowed), but it should still display an empty form (if `POST/create` is allowed) so that users a) know they have permission to POST/create; and b) can actually submit a test POST/create request via the API browser. These two permission checks for two different endpoints should be separate from each other, even though they are displayed on the same page in the API browser.
Currently, anyone using the API browser when `GET/list` is denied but `POST/create` is allowed will have to know in advance that `POST/create` is allowed (even though the API browser says not), and will have to manually construct and submit requests in order to test their API.
I tried your suggested method of allowing `GET/list` (just so I could use the `POST/create` form in the API browser) and then filtering the queryset with `.none()` (because I don't actually want to allow `GET/list`). But this has a knock-on effect of breaking `GET/retrieve` (because the queryset has no objects). It's also forcing me to open up more permissions than I should have to, for API endpoints that I'm not interested in using.
I don't want anonymous users to be able to `GET/list` all available objects, but as these are anonymous users, I can't filter the set of objects that they can operate on for `GET/retrieve`. Instead, I limit access to `GET/retrieve` specific objects by using a PK and a salted hash as URL arguments, that they must know in advance (e.g. after they `POST/create` an object, they will be told the PK and hash or the complete URL to `GET/retrieve`.
Customising the API browser template as you suggested won't be easy. The docs say I should create `rest_framework/api.html` and extend `rest_framework/base.html`, then override the blocks that I want to change. Unfortunately the entire content section (including the form) is not wrapped in a block. Even if there was a block around the form section specifically, I'd still have to copy and paste nearly 100 lines of HTML just to remove that one line so that the form would display.
Please reconsider :) The API browser should accurately expose authorised endpoints and facilitate the creation of POST/PUT/PATCH requests.
One example might be if a project is making permission checks inside the view logic for some reason. The user could receive a 403 response, but checking the permission classes would indicate that PUT and POST are allowed. PUT forms would then end up displaying instance data on the 403 pages.
I'm not saying that your request isn't at all sensible - it does make sense, but I'm not convinced it's worth the trade off in potentially exposing data unexpectedly. The browsable API isn't imposing any particularly awkward constraints here - you need to be able to make a permitted GET request to the endpoint or we won't expose any extra information.
May be happy to reconsider if it's getting re-raised by other folks but I'd suggest that you consider either customizing the browsable API so it works for your specific use case, or using a more typical style of data representation (eg have your list view expose the filtered set of objects that the user has view permission)
It's often said of the Django Admin that "the Admin is not your App". I think the same holds true of DRF's Browsable API.
The Browsable API serves as an awesome prototyping tool — but it has necessary limitations: it's too much of a job to extend it to serve as a general-purpose web application for all use cases.
If people want to customise it for their own app then cool — go ahead, and share the results by all means — but, just as in customising the Django Admin, it's often going to be better to build the app you want rather than trying to push it too far out of shape.
I completely agree that the API browser is not your app. I am not trying to make it my application, and I only use the Django Admin for basic raw access to the database for the same reason.
I am only using the API browser to inspect, navigate and test the boundaries and permissions of my API during development. The only customisation I am making here is assigning different permissions for various endpoints.
This is why I think it would be most beneficial for the API browser to accurately reflect the actual permissions defined in the API. So that we can be confident that what we see during development is what clients will see in production.
On the subject of leaking data, there's no danger that displaying the POST/create will leak anything, right? So we could at least still show the POST/create form even if the GET/list endpoint returns 403 forbidden, because no GET/list data is needed to construct a POST/create request.
As for PUT, a normal API client would first have to GET/retrieve the data, alter it, and then PUT/update it. So the API browser should not assume that PUT/update permission also grants GET/retrieve permission, and should only display (or only pre-populate with initial data) the PUT/update form if the GET/retrieve operation on the same page was actually successful.
This should cover off the use-case where permissions are being checked inside the actual view. No data will be leaked and displayed in the PUT/update form if GET/retrieve is denied in the view, and both POST/create and PUT/update can return 403 denied in the view when the form is submitted.
If the answer remains "don't do that - you want something non-standard, you're free to customise", I would at least like to make that customisation a little easier. Perhaps a hook somewhere in the view code where I can set a context variable in certain situations that will dictate whether or not the form is displayed, instead of a blank denial when the GET/list request is denied? Or at the very least, a block around the form section in `rest_framework/base.html` so that I can copy and paste 81 of 83 lines of code :)
Unfortunately, with anonymous users I don't think I can adopt a typical "filtered list of objects this user can operate on" workflow. This particular app is for anonymous users to submit entries for online promotions. There is no authentication. There is no need and it's too risky to have anonymous users listing entries that might belong to other users, but I can give users a URL (with a PK and salted hash as URL arguments) that they can use to lookup an entry they just created.
Adding customisation hooks is not a bad idea in principle. @mrmachine: If you have time to sketch out the required changes and put them in a pull request we'd have something concrete to base discussion around. (Any pull request doesn't need to be perfect to begin — it can be "How about something like this?")
> On the subject of leaking data, there's no danger that displaying the POST/create will leak anything, right?
Even with the POST form you may be exposing information such as which URLs are valid.
Having the 403 check in there just seems safer. It would be seriously bad news if we accidentally opened up API information on some live installations somewhere because we'd not fully thought some type of request or edge case through.
> Adding customisation hooks is not a bad idea in principle.
Refactoring BrowsableAPIRenderer to use a `.get_context()` method would make sense here. We could drop the 403 check into an 'include_forms' context key, and you'd simply be able to subclass BrowsableAPIRenderer, overriding `.get_context()` if you wanted to modify it. I think there's also some cleanup that could be done along the way, by just populating the context dictionary directly, rather than by setting a bunch of attributes that are then immediately passed into the dictionary and never reused elsewhere.
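With the `get_context()` refactor in the patch above, that customisation becomes a small renderer subclass instead of a copied template; roughly:
``` python
from rest_framework.renderers import BrowsableAPIRenderer


class AlwaysShowFormsRenderer(BrowsableAPIRenderer):
    def get_context(self, data, accepted_media_type, renderer_context):
        context = super(AlwaysShowFormsRenderer, self).get_context(
            data, accepted_media_type, renderer_context)
        # Opt back in to showing the edit forms even on 403 responses.
        context['display_edit_forms'] = True
        return context
```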
Hope I've explained adequately here! :)
| 2013-09-13T13:16:12 |
|
encode/django-rest-framework | 1,117 | encode__django-rest-framework-1117 | [
"1116"
] | eb0a98ad4b031be5742b5d257b847404c2e5249b | diff --git a/rest_framework/serializers.py b/rest_framework/serializers.py
--- a/rest_framework/serializers.py
+++ b/rest_framework/serializers.py
@@ -518,6 +518,9 @@ def save(self, **kwargs):
"""
Save the deserialized object and return it.
"""
+ # Clear cached _data, which may be invalidated by `save()`
+ self._data = None
+
if isinstance(self.object, list):
[self.save_object(item, **kwargs) for item in self.object]
| diff --git a/rest_framework/tests/test_serializer.py b/rest_framework/tests/test_serializer.py
--- a/rest_framework/tests/test_serializer.py
+++ b/rest_framework/tests/test_serializer.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.db.models.fields import BLANK_CHOICE_DASH
@@ -136,6 +137,7 @@ def setUp(self):
'Happy new year!',
datetime.datetime(2012, 1, 1)
)
+ self.actionitem = ActionItem(title='Some to do item',)
self.data = {
'email': '[email protected]',
'content': 'Happy new year!',
@@ -264,6 +266,20 @@ def test_invalid_read_only_fields(self):
"""
self.assertRaises(AssertionError, PersonSerializerInvalidReadOnly, [])
+ def test_serializer_data_is_cleared_on_save(self):
+ """
+ Check _data attribute is cleared on `save()`
+
+ Regression test for #1116
+ — id field is not populated if `data` is accessed prior to `save()`
+ """
+ serializer = ActionItemSerializer(self.actionitem)
+ self.assertIsNone(serializer.data.get('id',None), 'New instance. `id` should not be set.')
+ serializer.save()
+ self.assertIsNotNone(serializer.data.get('id',None), 'Model is saved. `id` should be set.')
+
+
+
class DictStyleSerializer(serializers.Serializer):
"""
| model serializer's 'id' field returns null
re-posted from stackoverflow (http://stackoverflow.com/questions/18886171/django-rest-framework-serializer-datas-id-field-returns-null/18895869?noredirect=1#18895869)
Here's a failing test case
**models.py**
```
class SomeModel(models.Model):
field = models.CharField(max_length=100)
```
**serializers.py**
```
class SomeModelSerializer(serializers.ModelSerializer):
class Meta:
model = SomeModel
fields = ('id', 'field',)
```
The error happens in the view, where serializer.data['id'] is null:
**views.py**
```
... within an user-defined subclass of APIView ...
serializer = SomeModelSerializer(data=request.DATA)
if serializer.is_valid():
some_dict = {'hello': serializer.data['field']}
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else: <do something else>
```
| Ok. I'll look at this.
@fangsterr if you could zip up a demo project with this code that'd speed me up. ;-)
@fangsterr I realise there's not enough info here. Please post a full version of the APIView subclass. Thanks
The reason for this will be that you're accessing `serializer.data` before calling `serializer.save()` - that'll mean it's being populated with a serialized representation of the instance, before it's been saved, so `.id` will be `None`.
Just put your `some_dict` statement after the `save()`.
Not sure if we should consider this a bug, a documentation bug, or a non-issue.
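A minimal sketch of that reordering, using the view fragment from the report above; only the position of the `serializer.data` access changes:
```python
serializer = SomeModelSerializer(data=request.DATA)
if serializer.is_valid():
    serializer.save()
    # Read .data only after save(), so the representation includes the
    # database-assigned id.
    some_dict = {'hello': serializer.data['field']}
    return Response(serializer.data, status=status.HTTP_201_CREATED)
```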
Of course!
I'm inclined to say non-issue — unless it's the sort of thing that comes up a lot — in which case Docs.
---
With this view it's easily seen:
```
@api_view(['GET', 'POST'])
def model_list(request):
if request.method == 'GET':
our_models = SomeModel.objects.all()
serializer = SomeModelSerializer(our_models, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = SomeModelSerializer(data=request.DATA)
if serializer.is_valid():
some_dict = {'field': serializer.data['field']}
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
```
The view works as expected though (i.e. the items are written to the DB etc).
In fact I think perhaps not.
It would be easy enough to clear the _data attribute on save()... Perhaps we should do that. (It's an edge case but the cost in the normal run of things is basically nil.)
@tomchristie I'm happy to write a test for that and make the change if you like.
> clear the _data attribute on save()
Yeah seems reasonable. As it happens you'd still be incurring extra cost with this style, by forcing the serialization to run twice, but at least there'd be no functional issues with the code then.
On 20 Sep 2013, at 10:47, Tom Christie [email protected] wrote:
> As it happens you'd still be incurring extra cost with this style, by forcing the serialization to run twice
Yeah — but if you're going to do funny things then that's up to you. ;-)
I meant there'd be no performance cost for "normal use" — where I'm defining that as not accessing the data both before and after save.
I'll put a pull request together for this.
| 2013-09-20T12:13:06 |
encode/django-rest-framework | 1,181 | encode__django-rest-framework-1181 | [
"1179"
] | bdf8b532180f465c035fca5da121ab1de8a057e1 | diff --git a/rest_framework/utils/encoders.py b/rest_framework/utils/encoders.py
--- a/rest_framework/utils/encoders.py
+++ b/rest_framework/utils/encoders.py
@@ -89,6 +89,9 @@ def represent_mapping(self, tag, mapping, flow_style=None):
node.flow_style = best_style
return node
+ SafeDumper.add_representer(decimal.Decimal,
+ SafeDumper.represent_decimal)
+
SafeDumper.add_representer(SortedDict,
yaml.representer.SafeRepresenter.represent_dict)
SafeDumper.add_representer(DictWithMetadata,
| diff --git a/rest_framework/tests/test_renderers.py b/rest_framework/tests/test_renderers.py
--- a/rest_framework/tests/test_renderers.py
+++ b/rest_framework/tests/test_renderers.py
@@ -328,7 +328,7 @@ def test_with_callback(self):
class YAMLRendererTests(TestCase):
"""
- Tests specific to the JSON Renderer
+ Tests specific to the YAML Renderer
"""
def test_render(self):
@@ -354,6 +354,17 @@ def test_render_and_parse(self):
data = parser.parse(StringIO(content))
self.assertEqual(obj, data)
+ def test_render_decimal(self):
+ """
+ Test YAML decimal rendering.
+ """
+ renderer = YAMLRenderer()
+ content = renderer.render({'field': Decimal('111.2')}, 'application/yaml')
+ self.assertYAMLContains(content, "field: '111.2'")
+
+ def assertYAMLContains(self, content, string):
+ self.assertTrue(string in content, '%r not in %r' % (string, content))
+
class XMLRendererTestCase(TestCase):
"""
| RepresenterError with DecimalField and YAMLRenderer
A RepresenterError occurs when trying to render a DecimalField with YAMLRenderer
```
RepresenterError: cannot represent an object: 55.378051
```
Adding
```
SafeDumper.add_representer(decimal.Decimal, SafeDumper.represent_decimal)
```
just after the declaration of SafeDumper (utils/encoders.py) seems to fix the problem.
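For anyone wanting to see the mechanism in isolation, here is a standalone PyYAML sketch of the same idea; the in-tree fix reuses the existing `SafeDumper.represent_decimal` method in `rest_framework/utils/encoders.py` rather than a hand-written function like this one:
```python
import decimal
import yaml

def represent_decimal(dumper, value):
    # Emit Decimal values as YAML strings so precision is preserved.
    return dumper.represent_scalar('tag:yaml.org,2002:str', str(value))

yaml.SafeDumper.add_representer(decimal.Decimal, represent_decimal)

print(yaml.dump({'field': decimal.Decimal('55.378051')}, Dumper=yaml.SafeDumper))
# prints something like: field: '55.378051'
```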
| Gotcha. Pull request with that fix would be very welcome :)
| 2013-10-16T23:23:06 |
encode/django-rest-framework | 1,240 | encode__django-rest-framework-1240 | [
"1035"
] | ca2bd616d989f78d00641231e645198a6c95caa0 | diff --git a/rest_framework/mixins.py b/rest_framework/mixins.py
--- a/rest_framework/mixins.py
+++ b/rest_framework/mixins.py
@@ -6,6 +6,7 @@
"""
from __future__ import unicode_literals
+from django.core.exceptions import ValidationError
from django.http import Http404
from rest_framework import status
from rest_framework.response import Response
@@ -127,7 +128,12 @@ def update(self, request, *args, **kwargs):
files=request.FILES, partial=partial)
if serializer.is_valid():
- self.pre_save(serializer.object)
+ try:
+ self.pre_save(serializer.object)
+ except ValidationError as err:
+ # full_clean on model instance may be called in pre_save, so we
+ # have to handle eventual errors.
+ return Response(err.message_dict, status=status.HTTP_400_BAD_REQUEST)
self.object = serializer.save(**save_kwargs)
self.post_save(self.object, created=created)
return Response(serializer.data, status=success_status_code)
| diff --git a/rest_framework/tests/test_generics.py b/rest_framework/tests/test_generics.py
--- a/rest_framework/tests/test_generics.py
+++ b/rest_framework/tests/test_generics.py
@@ -23,6 +23,10 @@ class InstanceView(generics.RetrieveUpdateDestroyAPIView):
"""
model = BasicModel
+ def get_queryset(self):
+ queryset = super(InstanceView, self).get_queryset()
+ return queryset.exclude(text='filtered out')
+
class SlugSerializer(serializers.ModelSerializer):
slug = serializers.Field() # read only
@@ -160,10 +164,10 @@ def setUp(self):
"""
Create 3 BasicModel intances.
"""
- items = ['foo', 'bar', 'baz']
+ items = ['foo', 'bar', 'baz', 'filtered out']
for item in items:
BasicModel(text=item).save()
- self.objects = BasicModel.objects
+ self.objects = BasicModel.objects.exclude(text='filtered out')
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
@@ -352,6 +356,17 @@ def test_put_to_deleted_instance(self):
updated = self.objects.get(id=1)
self.assertEqual(updated.text, 'foobar')
+ def test_put_to_filtered_out_instance(self):
+ """
+ PUT requests to an URL of instance which is filtered out should not be
+ able to create new objects.
+ """
+ data = {'text': 'foo'}
+ filtered_out_pk = BasicModel.objects.filter(text='filtered out')[0].pk
+ request = factory.put('/{0}'.format(filtered_out_pk), data, format='json')
+ response = self.view(request, pk=filtered_out_pk).render()
+ self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
+
def test_put_as_create_on_id_based_url(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should create an object
| PUT request falls back to creating object if it has been filtered out
If one makes a `PUT` request to an URL which is already occupied by an existing object, but this object has been previously filtered out, 404 exception will be raised by `get_object` function (which is correct). This exception will however be catched by [get_object_or_none](https://github.com/tomchristie/django-rest-framework/blob/999056cde1c6355d5ca036f109b35b41cb9d47cc/rest_framework/mixins.py#L141) function which will return `None` to indicate that new object can be created. Therefore an attempt to create the object will be performed which will often result in `IntegrityError` and 500 status code being returned.
This doesn’t seem to be an easy issue to fix. `get_queryset` could be used to get the unfiltered queryset and check whether such an object already exists, but… overriding `get_queryset` is a documented method of filtering, so it can’t be relied on to return the unfiltered queryset. Also I’m not sure whether we should return 403 or 404 in this case.
| Interesting, yeah. I guess there's a simpler version of this problem, where the `queryset` attribute is exposing a subset of the actual instances, and a PUT request that would recreate one of the instances that does exist, but isn't in that queryset then fails. I guess we ought to return some type of 4xx response. 409 might be reasonable if it's okay for the client to know that a conflicting object exists.
You had any further thoughts on what would be sensible behaviour here or how to address this?
I agree with the reasonableness of the HTTP 409. The client needs to be told that the request cannot be completed due to some sort of conflict. The only other status that may make sense is a 403 but I think that could lead to confusion based on the assumption that 403s are generally permission issues.
Nice, I met exactly the same issue when moving my views to viewsets today and was wondering what to do.
Actually `ValidationError` is raised instead of `IntegrityError`, because [an additional call of `full_clean` is performed in `UpdateModelMixin` after setting the primary key](https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/mixins.py#L174). This is generally a good thing, but this exception isn’t caught, which results in a 500 status code, which is bad. So one solution would be to refactor the code to catch this exception. This will give us a 400 status code, which may be good enough.
I’ve created a failing test draft (works, but needs to be written better): https://github.com/KrzysiekJ/django-rest-framework/commit/d21669250e5842b07b4d038c925f1ef878207e78
I don’t see an easy way to hide the information that some object exists under an URL associated with a specific primary key, if that primary key is the only cause of problems. With the (slow) arrival of composite primary keys, which may contain concrete data, to Django, this may be considered a security issue, but… in such a case 403 should be raised earlier to indicate that a user cannot make a PUT request to a particular URL (if he is allowed to make such a request, then an information leak also seems unavoidable, but it shouldn’t be that harmful). So this is actually a separate issue.
I use get_queryset to limit a user's visibility to only his profile. In this case, a PUT request to another user's pk would ideally return a 404—same as a GET request to an object outside the queryset. If that doesn't make sense, then DELETE's behavior of returning 403 would be my second pick.
If anyone else encounters this in the interim, you can override `update` as a workaround:
``` python
from django.contrib.auth.models import User
from django.http import Http404
from rest_framework import status, viewsets
from rest_framework.response import Response


class UserViewSet(viewsets.ModelViewSet):
    model = User

    def get_queryset(self):
        return User.objects.filter(pk=self.request.user.pk)

    def update(self, request, pk=None):
        # Return 404 instead of falling back to create when the object
        # has been filtered out of the queryset.
        try:
            self.get_object()
        except Http404:
            return Response(
                {'detail': 'Not found'}, status=status.HTTP_404_NOT_FOUND)
        return super(UserViewSet, self).update(request, pk)
```
| 2013-11-19T14:47:10 |
encode/django-rest-framework | 1,258 | encode__django-rest-framework-1258 | [
"1227"
] | 01040b077c16f69101249282b62506f08ebff651 | diff --git a/rest_framework/generics.py b/rest_framework/generics.py
--- a/rest_framework/generics.py
+++ b/rest_framework/generics.py
@@ -344,6 +344,18 @@ def post_save(self, obj, created=False):
"""
pass
+ def pre_delete(self, obj):
+ """
+ Placeholder method for calling before deleting an object.
+ """
+ pass
+
+ def post_delete(self, obj):
+ """
+ Placeholder method for calling after saving an object.
+ """
+ pass
+
def metadata(self, request):
"""
Return a dictionary of metadata about the view.
diff --git a/rest_framework/mixins.py b/rest_framework/mixins.py
--- a/rest_framework/mixins.py
+++ b/rest_framework/mixins.py
@@ -192,5 +192,7 @@ class DestroyModelMixin(object):
"""
def destroy(self, request, *args, **kwargs):
obj = self.get_object()
+ self.pre_delete(obj)
obj.delete()
+ self.post_delete(obj)
return Response(status=status.HTTP_204_NO_CONTENT)
| Add pre_delete and post_delete hooks to DestroyModelMixin
Like the `pre_save` and `post_save` hooks in `UpdateModelMixin` and `CreateModelMixin`.
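A minimal sketch of how such hooks might be used once available; the model, field and view names here are hypothetical, for illustration only:
```python
from rest_framework import generics
from rest_framework.exceptions import PermissionDenied

from myapp.models import Snippet  # hypothetical model


class SnippetDetail(generics.RetrieveUpdateDestroyAPIView):
    model = Snippet

    def pre_delete(self, obj):
        # Runs just before obj.delete(); raising here aborts the deletion.
        if getattr(obj, 'published', False):
            raise PermissionDenied('Published snippets cannot be deleted.')

    def post_delete(self, obj):
        # Runs after the row is gone; a natural place for cleanup or logging.
        pass
```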
| Yeah I guess I'd accept a PR for this, so long as it included them in the documentation.
| 2013-12-03T00:08:53 |
|
encode/django-rest-framework | 1,265 | encode__django-rest-framework-1265 | [
"1196"
] | 9f1918e41e1b8dcfa621b00788bab865f2fc31aa | diff --git a/rest_framework/response.py b/rest_framework/response.py
--- a/rest_framework/response.py
+++ b/rest_framework/response.py
@@ -61,6 +61,10 @@ def rendered_content(self):
assert charset, 'renderer returned unicode, and did not specify ' \
'a charset value.'
return bytes(ret.encode(charset))
+
+ if not ret:
+ del self['Content-Type']
+
return ret
@property
| diff --git a/rest_framework/tests/test_renderers.py b/rest_framework/tests/test_renderers.py
--- a/rest_framework/tests/test_renderers.py
+++ b/rest_framework/tests/test_renderers.py
@@ -64,11 +64,16 @@ def get(self, request, **kwargs):
class MockGETView(APIView):
-
def get(self, request, **kwargs):
return Response({'foo': ['bar', 'baz']})
+class EmptyGETView(APIView):
+ renderer_classes = (JSONRenderer,)
+
+ def get(self, request, **kwargs):
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
class HTMLView(APIView):
renderer_classes = (BrowsableAPIRenderer, )
@@ -90,6 +95,7 @@ def get(self, request, **kwargs):
url(r'^jsonp/nojsonrenderer$', MockGETView.as_view(renderer_classes=[JSONPRenderer])),
url(r'^html$', HTMLView.as_view()),
url(r'^html1$', HTMLView1.as_view()),
+ url(r'^empty$', EmptyGETView.as_view()),
url(r'^api', include('rest_framework.urls', namespace='rest_framework'))
)
@@ -219,6 +225,16 @@ def test_specified_renderer_is_used_on_format_query_with_matching_accept(self):
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
+ def test_204_no_content_responses_have_no_content_type_set(self):
+ """
+ Regression test for #1196
+
+ https://github.com/tomchristie/django-rest-framework/issues/1196
+ """
+ resp = self.client.get('/empty')
+ self.assertEqual(resp.get('Content-Type', None), None)
+ self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
+
_flat_repr = '{"foo": ["bar", "baz"]}'
_indented_repr = '{\n "foo": [\n "bar",\n "baz"\n ]\n}'
| HTTP 204 has a Content-Type header
I've noticed I'm getting WSGI warnings from my DRF app due to:
```
WSGIWarning: Content-Type header found in a 204 response, which not return content.
```
Is this the intended behaviour of DRF, or would you be interested in a patch to change the Content-Type setting for 204 responses?
| > would you be interested in a patch to change the Content-Type setting for 204 responses?
That'd be reasonable yeah. It's possible that it might be difficult though - I'm not sure but Django's response machinery might end up providing a default one if it's not added?
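For what it's worth, Django's response objects do allow a header to be removed after the fact, which is essentially what the patch above does once the rendered content turns out to be empty. A small standalone sketch:
```python
from django.http import HttpResponse

response = HttpResponse(status=204)
response['Content-Type'] = 'application/json'

# Drop the header when there is no body for it to describe.
if not response.content:
    del response['Content-Type']

assert 'Content-Type' not in response
```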
| 2013-12-03T16:55:35 |
encode/django-rest-framework | 1,280 | encode__django-rest-framework-1280 | [
"1205"
] | 785a42cd5aee9e96f9b780ff144fa13c16189748 | diff --git a/rest_framework/serializers.py b/rest_framework/serializers.py
--- a/rest_framework/serializers.py
+++ b/rest_framework/serializers.py
@@ -412,7 +412,13 @@ def field_from_native(self, data, files, field_name, into):
# Set the serializer object if it exists
obj = get_component(self.parent.object, self.source or field_name) if self.parent.object else None
- obj = obj.all() if is_simple_callable(getattr(obj, 'all', None)) else obj
+
+ # If we have a model manager or similar object then we need
+ # to iterate through each instance.
+ if (self.many and
+ not hasattr(obj, '__iter__') and
+ is_simple_callable(getattr(obj, 'all', None))):
+ obj = obj.all()
if self.source == '*':
if value:
| Stop breaking non-model usage in serializer
45d4622f090f8d81a04b4d3e888017419676bbc0 added a weird workaround for a problem that has nothing to do with the `BaseSerializer` but is tailor made for the Django ORM.
I'm trying to use DRF with elasticutils' search querysets and been wondering why the serializer always returns ALL results, instead of the ones I told the paginator to return. The `all` method returns all objects, no matter what filter or query has been done before.
As to how to fix this, I would propose that this code needs to live in the `ModelSerializer.field_to_native` method not in its parent class. That would help non-Django ORM uses greatly.
| Amusingly, I wrote the code that's causing this bug.. and I ran into it myself a few weeks ago. If you try to serialize a queryset that's been returned from a cache, the `all()` call will force the queryset to be cloned, which will cause a database query - making the cached version pointless.
Any suggestions for a better approach would be welcomed!
Not sure if that's the way to go; one test is not passing sadly. I'd be happy to also fix that, but I'm a bit uncertain why it fails. @tomchristie Any idea?
@jezdez Been thinking about this and reckon the sensible way to resolve this is not to remove `.all()` handling from the serializer class, but just tweak _when_ it gets used. Simply moving it into ModelSerializer would introduce a really subtle difference between serializers and model serializers. At the moment the difference is fairly simple to explain:
- ModelSerializers get fields automatically generated.
- ModelSerializers have automatic restore object and save behaviour.
I think we should change the behavior so that `.all()` only gets used if:
- `many=True` has been explicitly set.
- The attribute does not have an `__iter__` but does have an `all()`
Note that this approach would also fix @j4mie's issue, as querysets would simply be iterated over, whereas model managers would (necessarily) have `all()` applied.
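Expressed as code, that check is roughly the following (it mirrors the conditional in the patch at the top of this entry, which uses `is_simple_callable` rather than a plain `callable`):
```python
def should_call_all(field, obj):
    # Only fall back to .all() for many=True fields whose value is a
    # manager-like object rather than something already iterable, so lists,
    # cached querysets and non-ORM queryset-likes are left untouched.
    return (
        field.many and
        not hasattr(obj, '__iter__') and
        callable(getattr(obj, 'all', None))
    )
```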
@tomchristie That would make sense to me, indeed!
| 2013-12-10T08:47:11 |
|
encode/django-rest-framework | 1,291 | encode__django-rest-framework-1291 | [
"1101"
] | ca244ad614e2f6fb4fef1dc9987be996d2624303 | diff --git a/rest_framework/fields.py b/rest_framework/fields.py
--- a/rest_framework/fields.py
+++ b/rest_framework/fields.py
@@ -428,7 +428,7 @@ class BooleanField(WritableField):
def field_from_native(self, data, files, field_name, into):
# HTML checkboxes do not explicitly represent unchecked as `False`
# we deal with that here...
- if isinstance(data, QueryDict):
+ if isinstance(data, QueryDict) and self.default is None:
self.default = False
return super(BooleanField, self).field_from_native(
| diff --git a/rest_framework/tests/test_serializer.py b/rest_framework/tests/test_serializer.py
--- a/rest_framework/tests/test_serializer.py
+++ b/rest_framework/tests/test_serializer.py
@@ -1743,3 +1743,42 @@ def test_missing_fields(self):
'b_renamed': None,
}
)
+
+
+class DefaultTrueBooleanModel(models.Model):
+ cat = models.BooleanField(default=True)
+ dog = models.BooleanField(default=False)
+
+
+class SerializerDefaultTrueBoolean(TestCase):
+
+ def setUp(self):
+ super(SerializerDefaultTrueBoolean, self).setUp()
+
+ class DefaultTrueBooleanSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = DefaultTrueBooleanModel
+ fields = ('cat', 'dog')
+
+ self.default_true_boolean_serializer = DefaultTrueBooleanSerializer
+
+ def test_enabled_as_false(self):
+ serializer = self.default_true_boolean_serializer(data={'cat': False,
+ 'dog': False})
+ self.assertEqual(serializer.is_valid(), True)
+ self.assertEqual(serializer.data['cat'], False)
+ self.assertEqual(serializer.data['dog'], False)
+
+ def test_enabled_as_true(self):
+ serializer = self.default_true_boolean_serializer(data={'cat': True,
+ 'dog': True})
+ self.assertEqual(serializer.is_valid(), True)
+ self.assertEqual(serializer.data['cat'], True)
+ self.assertEqual(serializer.data['dog'], True)
+
+ def test_enabled_partial(self):
+ serializer = self.default_true_boolean_serializer(data={'cat': False},
+ partial=True)
+ self.assertEqual(serializer.is_valid(), True)
+ self.assertEqual(serializer.data['cat'], False)
+ self.assertEqual(serializer.data['dog'], False)
| Model defaults ignored on empty text field or empty boolean field.
My tables have boolean fields like the one below.
```
enabled = models.BooleanField(default=True)
```
and save it using a serializer in a view class without passing the enabled field in the request.
```
serializer = SomeSerializer(data=request.DATA)
if serializer.is_valid():
object = serializer.save()
```
the result is `object.enabled == False`.
I've updated the framework to version 2.3.8 then the issue started occurring. Everything works fine in 2.3.7.
Affected versions >= 2.3.8
| This problem also shows up in tests and when opening the auto-generated browsable API page, in the raw data section:
Content: {
"is_active": false,
"username": "",
"email": "",
"name": "" }
But in the model, is_active has default=True.
However, if the POST request is sent via curl or another client, everything works properly and is_active is set to True.
This problem hasn't been fixed even in 2.3.9.
I can't upgrade from 2.3.7 because of this; it breaks numerous test cases.
Is there any open pull request associated with this issue?
First starting point would be coding a failing test case.
I have never submitted a patch or anything before, but would this be a suitable failing test case? Would it work with /tests/test_serializer.py?
```
class DefaultTrueBooleanModel(models.Model):
    enabled = models.BooleanField(default=True)


class SerializerDefaultTrueBoolean(TestCase):
    def setUp(self):
        super(SerializerDefaultTrueBoolean, self).setUp()

        class DefaultTrueBooleanSerializer(serializers.ModelSerializer):
            class Meta:
                model = DefaultTrueBooleanModel
                fields = ('enabled',)

        self.default_true_boolean_serializer = DefaultTrueBooleanSerializer

    def test_blank_input(self):
        serializer = self.default_true_boolean_serializer()
        self.assertEqual(serializer.data['enabled'], True)

    def test_enabled_as_false(self):
        serializer = self.default_true_boolean_serializer({'enabled': False})
        self.assertEqual(serializer.data['enabled'], False)

    def test_enabled_as_true(self):
        serializer = self.default_true_boolean_serializer({'enabled': True})
        self.assertEqual(serializer.data['enabled'], True)
```
| 2013-12-13T18:33:02 |
encode/django-rest-framework | 1,460 | encode__django-rest-framework-1460 | [
"1434"
] | 2090f452b224e60853b40e73d1d0e9aad58cd24a | diff --git a/rest_framework/filters.py b/rest_framework/filters.py
--- a/rest_framework/filters.py
+++ b/rest_framework/filters.py
@@ -6,6 +6,7 @@
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from rest_framework.compat import django_filters, six, guardian, get_model_name
+from rest_framework.settings import api_settings
from functools import reduce
import operator
@@ -69,7 +70,8 @@ def filter_queryset(self, request, queryset, view):
class SearchFilter(BaseFilterBackend):
- search_param = 'search' # The URL query parameter used for the search.
+ # The URL query parameter used for the search.
+ search_param = api_settings.SEARCH_PARAM
def get_search_terms(self, request):
"""
@@ -107,7 +109,8 @@ def filter_queryset(self, request, queryset, view):
class OrderingFilter(BaseFilterBackend):
- ordering_param = 'ordering' # The URL query parameter used for the ordering.
+ # The URL query parameter used for the ordering.
+ ordering_param = api_settings.ORDERING_PARAM
ordering_fields = None
def get_ordering(self, request):
diff --git a/rest_framework/settings.py b/rest_framework/settings.py
--- a/rest_framework/settings.py
+++ b/rest_framework/settings.py
@@ -69,6 +69,10 @@
'PAGINATE_BY_PARAM': None,
'MAX_PAGINATE_BY': None,
+ # Filtering
+ 'SEARCH_PARAM': 'search',
+ 'ORDERING_PARAM': 'ordering',
+
# Authentication
'UNAUTHENTICATED_USER': 'django.contrib.auth.models.AnonymousUser',
'UNAUTHENTICATED_TOKEN': None,
| diff --git a/rest_framework/tests/test_filters.py b/rest_framework/tests/test_filters.py
--- a/rest_framework/tests/test_filters.py
+++ b/rest_framework/tests/test_filters.py
@@ -7,9 +7,11 @@
from django.utils import unittest
from rest_framework import generics, serializers, status, filters
from rest_framework.compat import django_filters, patterns, url
+from rest_framework.settings import api_settings
from rest_framework.test import APIRequestFactory
from rest_framework.tests.models import BasicModel
from .models import FilterableItem
+from .utils import temporary_setting
factory = APIRequestFactory()
@@ -363,6 +365,24 @@ class SearchListView(generics.ListAPIView):
]
)
+ def test_search_with_nonstandard_search_param(self):
+ with temporary_setting('SEARCH_PARAM', 'query', module=filters):
+ class SearchListView(generics.ListAPIView):
+ model = SearchFilterModel
+ filter_backends = (filters.SearchFilter,)
+ search_fields = ('title', 'text')
+
+ view = SearchListView.as_view()
+ request = factory.get('/', {'query': 'b'})
+ response = view(request)
+ self.assertEqual(
+ response.data,
+ [
+ {'id': 1, 'title': 'z', 'text': 'abc'},
+ {'id': 2, 'title': 'zz', 'text': 'bcd'}
+ ]
+ )
+
class OrdringFilterModel(models.Model):
title = models.CharField(max_length=20)
@@ -520,6 +540,26 @@ class OrderingListView(generics.ListAPIView):
]
)
+ def test_ordering_with_nonstandard_ordering_param(self):
+ with temporary_setting('ORDERING_PARAM', 'order', filters):
+ class OrderingListView(generics.ListAPIView):
+ model = OrdringFilterModel
+ filter_backends = (filters.OrderingFilter,)
+ ordering = ('title',)
+ ordering_fields = ('text',)
+
+ view = OrderingListView.as_view()
+ request = factory.get('/', {'order': 'text'})
+ response = view(request)
+ self.assertEqual(
+ response.data,
+ [
+ {'id': 1, 'title': 'zyx', 'text': 'abc'},
+ {'id': 2, 'title': 'yxw', 'text': 'bcd'},
+ {'id': 3, 'title': 'xwv', 'text': 'cde'},
+ ]
+ )
+
class SensitiveOrderingFilterModel(models.Model):
username = models.CharField(max_length=20)
@@ -618,4 +658,4 @@ class OrderingListView(generics.ListAPIView):
{'id': 2, username_field: 'userB'}, # PassC
{'id': 3, username_field: 'userC'}, # PassA
]
- )
\ No newline at end of file
+ )
diff --git a/rest_framework/tests/utils.py b/rest_framework/tests/utils.py
new file mode 100644
--- /dev/null
+++ b/rest_framework/tests/utils.py
@@ -0,0 +1,25 @@
+from contextlib import contextmanager
+from rest_framework.compat import six
+from rest_framework.settings import api_settings
+
+
+@contextmanager
+def temporary_setting(setting, value, module=None):
+ """
+ Temporarily change value of setting for test.
+
+ Optionally reload given module, useful when module uses value of setting on
+ import.
+ """
+ original_value = getattr(api_settings, setting)
+ setattr(api_settings, setting, value)
+
+ if module is not None:
+ six.moves.reload_module(module)
+
+ yield
+
+ setattr(api_settings, setting, original_value)
+
+ if module is not None:
+ six.moves.reload_module(module)
| ORDERING_PARAM and SEARCH_PARAM
Hello,
please add the search_param and ordering_param property values to the settings file, so they can be configured in the same way as PAGINATE_BY_PARAM.
[filters.py]
```
class SearchFilter(BaseFilterBackend):
    search_param = 'search'
    ...

class OrderingFilter(BaseFilterBackend):
    ordering_param = 'ordering'
    ...
```
Thank you
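With the settings introduced in the patch above, the configuration would look something like this (the parameter values are just examples):
```python
REST_FRAMEWORK = {
    'SEARCH_PARAM': 'q',       # e.g. /items/?q=term
    'ORDERING_PARAM': 'sort',  # e.g. /items/?sort=-created
}
```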
| Fancy submitting a pull request for it? :smile:
OK, I'll try it :) ...
| 2014-03-06T21:41:23 |
encode/django-rest-framework | 1,511 | encode__django-rest-framework-1511 | [
"1506"
] | 60ac3d7a76c4132d97d7e8047e25be253018280e | diff --git a/rest_framework/fields.py b/rest_framework/fields.py
--- a/rest_framework/fields.py
+++ b/rest_framework/fields.py
@@ -164,7 +164,7 @@ def initialize(self, parent, field_name):
Called to set up a field prior to field_to_native or field_from_native.
parent - The parent serializer.
- model_field - The model field this field corresponds to, if one exists.
+ field_name - The name of the field being initialized.
"""
self.parent = parent
self.root = parent.root or parent
| Possible error in Docstring,
In the `Field` class, the [docstring](https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/fields.py#L167) for the `initialize` method seems to have a mistake.
The comment refers to a `model_field` parameter, but the parameters in the function are called `parent` and `field_name`.
| Yep. Looks like it should be `field_name`. @maurodec fancy doing a pull request correcting that?
| 2014-04-07T21:29:47 |
|
encode/django-rest-framework | 1,515 | encode__django-rest-framework-1515 | [
"1408"
] | b0ba8d61ecf3c74470fa6ac019caff1fd4ca1be6 | diff --git a/rest_framework/relations.py b/rest_framework/relations.py
--- a/rest_framework/relations.py
+++ b/rest_framework/relations.py
@@ -59,6 +59,8 @@ def __init__(self, *args, **kwargs):
super(RelatedField, self).__init__(*args, **kwargs)
if not self.required:
+ # Accessed in ModelChoiceIterator django/forms/models.py:1034
+ # If set adds empty choice.
self.empty_label = BLANK_CHOICE_DASH[0][1]
self.queryset = queryset
| diff --git a/rest_framework/tests/test_relations.py b/rest_framework/tests/test_relations.py
--- a/rest_framework/tests/test_relations.py
+++ b/rest_framework/tests/test_relations.py
@@ -2,8 +2,10 @@
General tests for relational fields.
"""
from __future__ import unicode_literals
+from django import get_version
from django.db import models
from django.test import TestCase
+from django.utils import unittest
from rest_framework import serializers
from rest_framework.tests.models import BlogPost
@@ -118,3 +120,25 @@ class Meta:
(serializers.ModelSerializer,), attrs)
with self.assertRaises(AttributeError):
TestSerializer(data={'name': 'foo'})
+
[email protected](get_version() < '1.6.0', 'Upstream behaviour changed in v1.6')
+class RelatedFieldChoicesTests(TestCase):
+ """
+ Tests for #1408 "Web browseable API doesn't have blank option on drop down list box"
+ https://github.com/tomchristie/django-rest-framework/issues/1408
+ """
+ def test_blank_option_is_added_to_choice_if_required_equals_false(self):
+ """
+
+ """
+ post = BlogPost(title="Checking blank option is added")
+ post.save()
+
+ queryset = BlogPost.objects.all()
+ field = serializers.RelatedField(required=False, queryset=queryset)
+
+ choice_count = BlogPost.objects.count()
+ widget_count = len(field.widget.choices)
+
+ self.assertEqual(widget_count, choice_count + 1, 'BLANK_CHOICE_DASH option should have been added')
+
| Web browseable API doesn't have blank option on drop down list box
A model has a foreign key attribute which has been set to blank=True and null=True, and a serializer field for it with the arguments read_only=False and required=False.
When viewed with the web browsable API we get a drop-down box which automatically selects the first available foreign key. Testing with an independent REST POST application it was determined that the model was happy to accept a null value for the attribute.
Perhaps the correct user experience would be to have a null option in the drop-down box?

| Yep. That sounds reasonable. A Django Forms ChoiceField adds (and in the absence of a default, selects) an extra null option.
Sounds correct to me too yup.
Just to be thorough, perhaps it should only do this if the attribute is blank=True and null=True in the model. I am not sure how the behaviour should change depending on the serializer options.
Shouldn't depend on the model field, but rather on whether required=False is set on the serializer (which in the case of model serializers gets set automatically based on the model fields, so same end result).
Awesome, I figured that was the case, thanks for clarifying!
This should already be handled: when a `ChoiceField` is initialised, an extra empty field is prepended to the list of choices if the field is not required: https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/fields.py#L512-L517
@keyCutter: which version of DRF did you see this with?
Perhaps that init code for ChoiceField also needs to be replicated for serializer relation fields?
djangorestframework-2.2.6
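For reference, the behaviour under discussion reduced to a snippet; this mirrors what `ChoiceField` does on init and what the patch enables for related fields via `empty_label`:
```python
from django.db.models.fields import BLANK_CHOICE_DASH  # [("", "---------")]

def choices_for_field(choices, required):
    # Prepend an empty option so "no selection" is representable in the form.
    return list(choices) if required else BLANK_CHOICE_DASH + list(choices)

assert choices_for_field([(1, 'foo')], required=False)[0] == ('', '---------')
```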
| 2014-04-09T13:58:27 |
encode/django-rest-framework | 1,518 | encode__django-rest-framework-1518 | [
"1517"
] | c93ddf1750267f9145cbea556deb412fa5b7123e | diff --git a/rest_framework/templatetags/rest_framework.py b/rest_framework/templatetags/rest_framework.py
--- a/rest_framework/templatetags/rest_framework.py
+++ b/rest_framework/templatetags/rest_framework.py
@@ -180,7 +180,7 @@ def add_class(value, css_class):
# Bunch of stuff cloned from urlize
-TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)', '"', "'"]
+TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)', '"', "']", "'}", "'"]
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('<', '>'),
('"', '"'), ("'", "'")]
word_split_re = re.compile(r'(\s+)')
| diff --git a/rest_framework/tests/test_urlizer.py b/rest_framework/tests/test_urlizer.py
new file mode 100644
--- /dev/null
+++ b/rest_framework/tests/test_urlizer.py
@@ -0,0 +1,38 @@
+from __future__ import unicode_literals
+from django.test import TestCase
+from rest_framework.templatetags.rest_framework import urlize_quoted_links
+import sys
+
+
+class URLizerTests(TestCase):
+ """
+ Test if both JSON and YAML URLs are transformed into links well
+ """
+ def _urlize_dict_check(self, data):
+ """
+ For all items in dict test assert that the value is urlized key
+ """
+ for original, urlized in data.items():
+ assert urlize_quoted_links(original, nofollow=False) == urlized
+
+ def test_json_with_url(self):
+ """
+ Test if JSON URLs are transformed into links well
+ """
+ data = {}
+ data['"url": "http://api/users/1/", '] = \
+ '"url": "<a href="http://api/users/1/">http://api/users/1/</a>", '
+ data['"foo_set": [\n "http://api/foos/1/"\n], '] = \
+ '"foo_set": [\n "<a href="http://api/foos/1/">http://api/foos/1/</a>"\n], '
+ self._urlize_dict_check(data)
+
+ def test_yaml_with_url(self):
+ """
+ Test if YAML URLs are transformed into links well
+ """
+ data = {}
+ data['''{users: 'http://api/users/'}'''] = \
+ '''{users: '<a href="http://api/users/">http://api/users/</a>'}'''
+ data['''foo_set: ['http://api/foos/1/']'''] = \
+ '''foo_set: ['<a href="http://api/foos/1/">http://api/foos/1/</a>']'''
+ self._urlize_dict_check(data)
| YAMLRenderer with BrowsableAPIRenderer produces bad links
Considering the following yaml output:
```
{daps: 'http://127.0.0.1:8000/api/daps/', metadaps: 'http://127.0.0.1:8000/api/metadaps/', users: 'http://127.0.0.1:8000/api/users/'}
```
While the first two URLs works fine, the last one is interpreted as follows:
```
...users: '<a href="http://127.0.0.1:8000/api/users/'%7D" rel="nofollow">http://127.0.0.1:8000/api/users/'}</a>
```
Instead of:
```
...users: '<a href="http://127.0.0.1:8000/api/users/" rel="nofollow">http://127.0.0.1:8000/api/users/</a>'}
```
(The ending quote and brackets are in the link.) Happens basically on any last URL in a bracket, except sometimes the bracket is in the link and sometimes only the `'` sign.
```
dap_set: ['http://127.0.0.1:8000/api/daps/2/']
```
Goes to:
```
dap_set: ['<a href="http://127.0.0.1:8000/api/daps/2/'" rel="nofollow">http://127.0.0.1:8000/api/daps/2/'</a>]
```
Instead of:
```
dap_set: ['<a href="http://127.0.0.1:8000/api/daps/2/" rel="nofollow">http://127.0.0.1:8000/api/daps/2/</a>']
```
Happens on djangorestframework-2.3.13 with the following settings:
```
REST_FRAMEWORK = {
# ...
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.YAMLRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
# ...
}
```
| BTW it makes the browsable API almost unbrowsable.
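The root cause is the urlizer's trailing-punctuation handling: a candidate URL has known trailing sequences stripped before being wrapped in an anchor, and the YAML flow-style endings `'}` and `']` were not in that list. A rough illustration of the idea, not the actual Django/DRF implementation:
```python
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)', '"', "']", "'}", "'"]

def strip_trailing(word):
    # Peel one known trailing sequence off a candidate URL, if present.
    for punct in TRAILING_PUNCTUATION:
        if word.endswith(punct):
            return word[:-len(punct)], punct
    return word, ''

assert strip_trailing("http://api/users/'}") == ("http://api/users/", "'}")
```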
| 2014-04-09T21:36:05 |
encode/django-rest-framework | 1,520 | encode__django-rest-framework-1520 | [
"1519"
] | b0ba8d61ecf3c74470fa6ac019caff1fd4ca1be6 | diff --git a/rest_framework/renderers.py b/rest_framework/renderers.py
--- a/rest_framework/renderers.py
+++ b/rest_framework/renderers.py
@@ -193,6 +193,7 @@ class YAMLRenderer(BaseRenderer):
format = 'yaml'
encoder = encoders.SafeDumper
charset = 'utf-8'
+ ensure_ascii = True
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
@@ -203,7 +204,15 @@ def render(self, data, accepted_media_type=None, renderer_context=None):
if data is None:
return ''
- return yaml.dump(data, stream=None, encoding=self.charset, Dumper=self.encoder)
+ return yaml.dump(data, stream=None, encoding=self.charset, Dumper=self.encoder, allow_unicode=not self.ensure_ascii)
+
+
+class UnicodeYAMLRenderer(YAMLRenderer):
+ """
+ Renderer which serializes to YAML.
+ Does *not* apply character escaping for non-ascii characters.
+ """
+ ensure_ascii = False
class TemplateHTMLRenderer(BaseRenderer):
| diff --git a/rest_framework/tests/test_renderers.py b/rest_framework/tests/test_renderers.py
--- a/rest_framework/tests/test_renderers.py
+++ b/rest_framework/tests/test_renderers.py
@@ -12,7 +12,7 @@
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.renderers import BaseRenderer, JSONRenderer, YAMLRenderer, \
- XMLRenderer, JSONPRenderer, BrowsableAPIRenderer, UnicodeJSONRenderer
+ XMLRenderer, JSONPRenderer, BrowsableAPIRenderer, UnicodeJSONRenderer, UnicodeYAMLRenderer
from rest_framework.parsers import YAMLParser, XMLParser
from rest_framework.settings import api_settings
from rest_framework.test import APIRequestFactory
@@ -467,6 +467,17 @@ def assertYAMLContains(self, content, string):
self.assertTrue(string in content, '%r not in %r' % (string, content))
+ class UnicodeYAMLRendererTests(TestCase):
+ """
+ Tests specific for the Unicode YAML Renderer
+ """
+ def test_proper_encoding(self):
+ obj = {'countries': ['United Kingdom', 'France', 'España']}
+ renderer = UnicodeYAMLRenderer()
+ content = renderer.render(obj, 'application/yaml')
+ self.assertEqual(content.strip(), 'countries: [United Kingdom, France, España]'.encode('utf-8'))
+
+
class XMLRendererTestCase(TestCase):
"""
Tests specific to the XML Renderer
| YAML cripples unicode
While a CharField is containing no unicode (i.e. non-ASCII), I see it in YAML API nicely:
```
full_name: Miro Hroncok
```
However, if it contains non-ASCII chars, I get:
```
full_name: "Miro Hron\u010Dok"
```
While the following would be much nicer (and fairly loadable by PyYAML):
```
full_name: Miro Hrončok
```
I have a fix in mind.
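The relevant PyYAML knob is `allow_unicode`; a sketch of the likely change, not the committed code:
```python
import yaml

data = {'full_name': u'Miro Hron\u010dok'}

# Default behaviour: non-ASCII characters come out escaped, e.g. "Miro Hron\u010Dok".
escaped = yaml.dump(data, encoding='utf-8', Dumper=yaml.SafeDumper)

# allow_unicode=True keeps them as real characters in the UTF-8 output.
readable = yaml.dump(data, encoding='utf-8', Dumper=yaml.SafeDumper,
                     allow_unicode=True)
```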
| 2014-04-09T23:35:48 |
|
encode/django-rest-framework | 1,763 | encode__django-rest-framework-1763 | [
"1106"
] | 38a0e3e6278db96660c89bfcb3e660704c068ff5 | diff --git a/rest_framework/request.py b/rest_framework/request.py
--- a/rest_framework/request.py
+++ b/rest_framework/request.py
@@ -42,12 +42,16 @@ def __init__(self, view, request, method):
self.view = view
self.request = request
self.method = method
+ self.action = getattr(view, 'action', None)
def __enter__(self):
self.view.request = clone_request(self.request, self.method)
+ action_map = getattr(self.view, 'action_map', {})
+ self.view.action = action_map.get(self.method.lower())
return self.view.request
def __exit__(self, *args, **kwarg):
+ self.view.action = self.action
self.view.request = self.request
| API browser checks some custom permissions inconsistently compared to real API endpoints.
The API browser makes use of an `override_method` context manager to change the request method on a view so that it can check permissions for different endpoints (than the current request) and determine whether or not a form should be created.
My permissions are `GET/list` (deny) and `POST/create` (allow) on one URL, and `GET/retrieve` (allow), `PUT/update` (deny) and `PATCH/partial_update` (deny) on another. I want anonymous users to be able to create objects and retrieve specific objects, but not list or update objects.
All of these action methods are defined on one `ModelViewSet`, which is mapped to two URLs (one maps `GET` to `list` and the other maps `GET` to `retrieve`). I have a custom permission (applied to the whole viewset) that checks `view.action` and allows `create` and `retrieve` actions but denies `list`, `update` and `partial_update` actions for anonymous users.
I think this should work for real clients making real requests against the API. But the API browser gets confused on the list page because it displays the results for `GET/list` **and** a form for `POST/create` endpoints on the same page.
The API browser uses `override_method` to change the request method from `GET` to `POST` when checking if it should render a form, but it does not change the action from `list` to `create`.
I suppose in this particular case I could check a combination of request method and action. E.g. use `view.action` to differentiate between `GET/list` and `GET/retrieve` and use `request.method` for everything else?
But it feels wrong that the API browser doesn't behave consistently with the real API when checking permissions for various endpoints. If `view.action` is there to be tested when implementing custom permissions, then it should be consistently overridden along with `request.method` when the API browser checks for permissions.
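For context, the custom permission involved is along these lines; the class name and the exact action list are illustrative rather than the reporter's code:
```python
from rest_framework.permissions import BasePermission

class AnonCreateRetrieveOnly(BasePermission):
    """Allow anonymous users to create and retrieve, but not list or update."""

    def has_permission(self, request, view):
        if request.user and request.user.is_authenticated():
            return True
        # Relies on view.action, which override_method did not update.
        return getattr(view, 'action', None) in ('create', 'retrieve')
```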
| Whilst the specifics are different, this seems essentially a Duplicate of #1105.
By default an form is shown where it would not be in this particular use-case if permissions were checked. The important thing is that the permissions would be honoured on the submitted request. Users are free to customise the Browseable API if it doesn't serve their needs.
Actually although there's some similarity I'd be more minded to accept a pull request on this than on #1105 - it probably would be a good idea to temporarily set the `.action` on the view in the same way that we temporarily set the request on the view.
If @mrmachine is interested in taking that on then the right approach would be to modify the context manager to also temporarily set `.action` on the view _if_ the `action_map` attribute exists. Of course it'd also need a test that uses a custom permission which uses the `.action` to determine if permission should be allowed.
| 2014-08-16T22:47:24 |
|
encode/django-rest-framework | 1,771 | encode__django-rest-framework-1771 | [
"1718"
] | 8244c7cc33e8d8078529dd0a9a3bdc2ce3a817fc | diff --git a/rest_framework/request.py b/rest_framework/request.py
--- a/rest_framework/request.py
+++ b/rest_framework/request.py
@@ -280,8 +280,8 @@ def _load_method_and_content_type(self):
self._method = self._request.method
# Allow X-HTTP-METHOD-OVERRIDE header
- self._method = self.META.get('HTTP_X_HTTP_METHOD_OVERRIDE',
- self._method)
+ if 'HTTP_X_HTTP_METHOD_OVERRIDE' in self.META:
+ self._method = self.META['HTTP_X_HTTP_METHOD_OVERRIDE'].upper()
def _load_stream(self):
"""
| `X-Http-Method-Override` and non-uppercase `request.method`
Found a minor problem.
According to the documentation, http://www.django-rest-framework.org/api-guide/requests#method:
```
request.method returns the uppercased string representation of the request's HTTP method.
```
From https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/request.py#L283:
```
# Allow X-HTTP-METHOD-OVERRIDE header
self._method = self.META.get('HTTP_X_HTTP_METHOD_OVERRIDE',
self._method)
```
That means if the X-Http-Method-Override header was set to `"Patch"` instead of `"PATCH"`, the `request.method` will no longer be fully uppercased, causing problems when comparing method names in the views code checking for permissions, e.g. `if request.method == 'PATCH'`.
So the documentation and the code aren't fully congruent. Changing the line to:
```
self._method = self.META.get('HTTP_X_HTTP_METHOD_OVERRIDE',
self._method).upper()
```
would fix it, but it would prevent using custom case-sensitive method names. I'm not sure that's a sensible idea anyway but it's a limitation. Alternatively, the documentation could be corrected to:
```
request.method returns the uppercased string representation of the request's HTTP method, except when the method is overridden using the header 'X-Http-Method-Override'.
```
I encountered this problem since the javascript REST client (restangular) I was using was assigning lower cased method names to the X-Http-Method-Override header.
Love Django REST framework, keep up the good work!
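A quick way to see the behaviour with DRF's test utilities; after the `.upper()` change in the patch above, the assertion holds even for a mixed-case header value:
```python
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory

factory = APIRequestFactory()
django_request = factory.post('/', HTTP_X_HTTP_METHOD_OVERRIDE='Patch')

request = Request(django_request)
assert request.method == 'PATCH'  # uppercased regardless of the header's case
```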
| > custom case-sensitive method names
Don't want to support them. Go wild with the uppercasing. :space_invader: :sparkles:
| 2014-08-18T14:14:45 |
|
encode/django-rest-framework | 1,772 | encode__django-rest-framework-1772 | [
"1583"
] | 8244c7cc33e8d8078529dd0a9a3bdc2ce3a817fc | diff --git a/rest_framework/generics.py b/rest_framework/generics.py
--- a/rest_framework/generics.py
+++ b/rest_framework/generics.py
@@ -189,7 +189,13 @@ def get_filter_backends(self):
"""
Returns the list of filter backends that this view requires.
"""
- filter_backends = self.filter_backends or []
+ if self.filter_backends is None:
+ filter_backends = []
+ else:
+ # Note that we are returning a *copy* of the class attribute,
+ # so that it is safe for the view to mutate it if needed.
+ filter_backends = list(self.filter_backends)
+
if not filter_backends and self.filter_backend:
warnings.warn(
'The `filter_backend` attribute and `FILTER_BACKEND` setting '
@@ -199,6 +205,7 @@ def get_filter_backends(self):
PendingDeprecationWarning, stacklevel=2
)
filter_backends = [self.filter_backend]
+
return filter_backends
| Modify filters in `get_filter_backends` changes all default filters
If you alter the returned list in an overridden `GenericAPIView.get_filter_backends` then it gets modified for all viewsets that use the default value.
When you modify that value without copying it then you actually modify the `GenericAPIView.filter_backends` list.
| Yup - we should probably be returning a copy of `self.filter_backends` rather than returning the class attribute directly.
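A stripped-down illustration of the aliasing problem, with the view reduced to a plain class:
```python
class MyView(object):
    filter_backends = ['SearchFilter']           # shared class attribute

    def get_filter_backends(self):
        return self.filter_backends or []        # returns the same list object

view = MyView()
view.get_filter_backends().append('OrderingFilter')

# The class-level default has now been mutated for every other view as well.
assert MyView.filter_backends == ['SearchFilter', 'OrderingFilter']
```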
| 2014-08-18T14:34:51 |
|
encode/django-rest-framework | 1,784 | encode__django-rest-framework-1784 | [
"1782"
] | 59b47eac14778767a17e56bd8adc0610417f2878 | diff --git a/rest_framework/generics.py b/rest_framework/generics.py
--- a/rest_framework/generics.py
+++ b/rest_framework/generics.py
@@ -252,6 +252,12 @@ def get_serializer_class(self):
if serializer_class is not None:
return serializer_class
+ warnings.warn(
+ 'The `.model` attribute on view classes is now deprecated in favor '
+ 'of the more explicit `serializer_class` and `queryset` attributes.',
+ DeprecationWarning, stacklevel=2
+ )
+
assert self.model is not None, \
"'%s' should either include a 'serializer_class' attribute, " \
"or use the 'model' attribute as a shortcut for " \
@@ -282,6 +288,11 @@ def get_queryset(self):
return self.queryset._clone()
if self.model is not None:
+ warnings.warn(
+ 'The `.model` attribute on view classes is now deprecated in favor '
+ 'of the more explicit `serializer_class` and `queryset` attributes.',
+ DeprecationWarning, stacklevel=2
+ )
return self.model._default_manager.all()
error_format = "'%s' must define 'queryset' or 'model'"
diff --git a/rest_framework/permissions.py b/rest_framework/permissions.py
--- a/rest_framework/permissions.py
+++ b/rest_framework/permissions.py
@@ -108,6 +108,9 @@ def get_required_permissions(self, method, model_cls):
return [perm % kwargs for perm in self.perms_map[method]]
def has_permission(self, request, view):
+ # Note that `.model` attribute on views is deprecated, although we
+ # enforce the deprecation on the view `get_serializer_class()` and
+ # `get_queryset()` methods, rather than here.
model_cls = getattr(view, 'model', None)
queryset = getattr(view, 'queryset', None)
diff --git a/rest_framework/routers.py b/rest_framework/routers.py
--- a/rest_framework/routers.py
+++ b/rest_framework/routers.py
@@ -128,6 +128,9 @@ def get_default_base_name(self, viewset):
If `base_name` is not specified, attempt to automatically determine
it from the viewset.
"""
+ # Note that `.model` attribute on views is deprecated, although we
+ # enforce the deprecation on the view `get_serializer_class()` and
+ # `get_queryset()` methods, rather than here.
model_cls = getattr(viewset, 'model', None)
queryset = getattr(viewset, 'queryset', None)
if model_cls is None and queryset is not None:
@@ -135,7 +138,7 @@ def get_default_base_name(self, viewset):
assert model_cls, '`base_name` argument not specified, and could ' \
'not automatically determine the name from the viewset, as ' \
- 'it does not have a `.model` or `.queryset` attribute.'
+ 'it does not have a `.queryset` attribute.'
return model_cls._meta.object_name.lower()
| Consider deprecation of `.model` attribute on views.
The `.model` attribute on views is a shortcut for `.serializer_class` and/or `.queryset`.
Its usage is currently discouraged in favor of the more explicit style, but we should probably deprecate it entirely. It creates undesirable implicit behaviour, and is a potential point of confusion.
Removing it would create a simpler very explicit style in the generic views that's feels like a further step in the right direction to me.
This would also allow us to remove the `DEFAULT_MODEL_SERIALIZER_CLASS` setting.
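The explicit style that the deprecation pushes towards looks like this; the model and serializer names are illustrative:
```python
from rest_framework import generics

from myapp.models import Account                  # hypothetical
from myapp.serializers import AccountSerializer   # hypothetical


class AccountList(generics.ListCreateAPIView):
    # Instead of `model = Account`, spell out both concerns explicitly.
    queryset = Account.objects.all()
    serializer_class = AccountSerializer
```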
| To be more clear - I think it's problematic because it's unclear to the user _exactly_ what behaviour they should expect if `.model` is set. The `.queryset` and `.serializer_class` have a very specific set of concerns (object lookup and validation/serialisation respectively)
Marking for 2.4 consideration.
Note that doing this would force us to change the homepage/README example to use an explicit serializer, and fix a long outstanding bug that the example contains, as noted in #1249.
| 2014-08-20T15:25:44 |
|
encode/django-rest-framework | 1,818 | encode__django-rest-framework-1818 | [
"1807"
] | b40525d8e6b3b4839c61053bc1613843d5b8b63d | diff --git a/rest_framework/serializers.py b/rest_framework/serializers.py
--- a/rest_framework/serializers.py
+++ b/rest_framework/serializers.py
@@ -625,6 +625,20 @@ def __init__(self, meta):
self.write_only_fields = getattr(meta, 'write_only_fields', ())
+def _get_class_mapping(mapping, obj):
+ """
+ Takes a dictionary with classes as keys, and an object.
+ Traverses the object's inheritance hierarchy in method
+ resolution order, and returns the first matching value
+ from the dictionary or None.
+
+ """
+ return next(
+ (mapping[cls] for cls in inspect.getmro(obj.__class__) if cls in mapping),
+ None
+ )
+
+
class ModelSerializer(Serializer):
"""
A serializer that deals with model instances and querysets.
@@ -899,15 +913,17 @@ def get_field(self, model_field):
models.URLField: ['max_length'],
}
- if model_field.__class__ in attribute_dict:
- attributes = attribute_dict[model_field.__class__]
+ attributes = _get_class_mapping(attribute_dict, model_field)
+ if attributes:
for attribute in attributes:
kwargs.update({attribute: getattr(model_field, attribute)})
- try:
- return self.field_mapping[model_field.__class__](**kwargs)
- except KeyError:
- return ModelField(model_field=model_field, **kwargs)
+ serializer_field_class = _get_class_mapping(
+ self.field_mapping, model_field)
+
+ if serializer_field_class:
+ return serializer_field_class(**kwargs)
+ return ModelField(model_field=model_field, **kwargs)
def get_validation_exclusions(self, instance=None):
"""
| Custom model fields may map to serializer fields incorrectly.
I just upgraded to 2.4.0 and I stumbled upon an issue regarding field serialization.
I have a custom field on a model that is a subclass of a Django **CharField**, and when I instantiate the ModelSerializer based on this model, I encounter an error running this code (in the [_get_field_](https://github.com/tomchristie/django-rest-framework/blob/2.4.0/rest_framework/serializers.py#L851) method of **ModelSerializer**):
``` python
if model_field.null and \
issubclass(model_field.__class__, (models.CharField, models.TextField)):
kwargs['allow_none'] = True
attribute_dict = {
models.CharField: ['max_length'],
models.CommaSeparatedIntegerField: ['max_length'],
models.DecimalField: ['max_digits', 'decimal_places'],
models.EmailField: ['max_length'],
models.FileField: ['max_length'],
models.ImageField: ['max_length'],
models.SlugField: ['max_length'],
models.URLField: ['max_length'],
}
if model_field.__class__ in attribute_dict:
attributes = attribute_dict[model_field.__class__]
for attribute in attributes:
kwargs.update({attribute: getattr(model_field, attribute)})
try:
return self.field_mapping[model_field.__class__](**kwargs)
except KeyError:
return ModelField(model_field=model_field, **kwargs)
```
My field is a subclass of **CharField** so the kwargs do get an _allow_none_ key, but since its class is not in the _field_mapping_, I get the KeyError exception, and since the ModelField constructor doesn't accept an _allow_none_ argument, I'm screwed.
I haven't submitted any code since I don't know which direction to take (yet) but some comments about these suggestions might help:
- Should the framework handle this case?
If it does:
- Since IMO the test if the field is a subclass of a **Charfield** was meant for **SlugField**, **EmailField**, etc., maybe adding a membership test on the attribute_dict would prevent the _allow_none_ key to be inserted?
- Or, when the **KeyError** exception is thrown, using the same subclass test, we could instantiate a **CharField** rather than a **ModelField**.
Cheers
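The fix in the patch above resolves the mapping by walking the field's class hierarchy; the core of it boils down to this:
```python
import inspect

def get_class_mapping(mapping, obj):
    # Walk the MRO so a custom subclass of e.g. models.CharField still
    # resolves to the entry registered for CharField.
    for cls in inspect.getmro(obj.__class__):
        if cls in mapping:
            return mapping[cls]
    return None
```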
| 2014-09-01T09:21:54 |
||
encode/django-rest-framework | 1,820 | encode__django-rest-framework-1820 | [
"1738"
] | b40525d8e6b3b4839c61053bc1613843d5b8b63d | diff --git a/rest_framework/templatetags/rest_framework.py b/rest_framework/templatetags/rest_framework.py
--- a/rest_framework/templatetags/rest_framework.py
+++ b/rest_framework/templatetags/rest_framework.py
@@ -41,22 +41,31 @@ def optional_login(request):
except NoReverseMatch:
return ''
- snippet = "<a href='%s?next=%s'>Log in</a>" % (login_url, escape(request.path))
+ snippet = "<li><a href='{href}?next={next}'>Log in</a></li>".format(href=login_url, next=escape(request.path))
return snippet
@register.simple_tag
-def optional_logout(request):
+def optional_logout(request, user):
"""
Include a logout snippet if REST framework's logout view is in the URLconf.
"""
try:
logout_url = reverse('rest_framework:logout')
except NoReverseMatch:
- return ''
-
- snippet = "<a href='%s?next=%s'>Log out</a>" % (logout_url, escape(request.path))
- return snippet
+ return '<li class="navbar-text">{user}</li>'.format(user=user)
+
+ snippet = """<li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown">
+ {user}
+ <b class="caret"></b>
+ </a>
+ <ul class="dropdown-menu">
+ <li><a href='{href}?next={next}'>Log out</a></li>
+ </ul>
+ </li>"""
+
+ return snippet.format(user=user, href=logout_url, next=escape(request.path))
@register.simple_tag
| diff --git a/tests/browsable_api/__init__.py b/tests/browsable_api/__init__.py
new file mode 100644
diff --git a/tests/browsable_api/auth_urls.py b/tests/browsable_api/auth_urls.py
new file mode 100644
--- /dev/null
+++ b/tests/browsable_api/auth_urls.py
@@ -0,0 +1,10 @@
+from __future__ import unicode_literals
+from django.conf.urls import patterns, url, include
+
+from .views import MockView
+
+urlpatterns = patterns(
+ '',
+ (r'^$', MockView.as_view()),
+ url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
+)
diff --git a/tests/browsable_api/no_auth_urls.py b/tests/browsable_api/no_auth_urls.py
new file mode 100644
--- /dev/null
+++ b/tests/browsable_api/no_auth_urls.py
@@ -0,0 +1,9 @@
+from __future__ import unicode_literals
+from django.conf.urls import patterns
+
+from .views import MockView
+
+urlpatterns = patterns(
+ '',
+ (r'^$', MockView.as_view()),
+)
diff --git a/tests/browsable_api/test_browsable_api.py b/tests/browsable_api/test_browsable_api.py
new file mode 100644
--- /dev/null
+++ b/tests/browsable_api/test_browsable_api.py
@@ -0,0 +1,65 @@
+from __future__ import unicode_literals
+from django.contrib.auth.models import User
+from django.test import TestCase
+
+from rest_framework.test import APIClient
+
+
+class DropdownWithAuthTests(TestCase):
+ """Tests correct dropdown behaviour with Auth views enabled."""
+
+ urls = 'tests.browsable_api.auth_urls'
+
+ def setUp(self):
+ self.client = APIClient(enforce_csrf_checks=True)
+ self.username = 'john'
+ self.email = '[email protected]'
+ self.password = 'password'
+ self.user = User.objects.create_user(self.username, self.email, self.password)
+
+ def tearDown(self):
+ self.client.logout()
+
+ def test_name_shown_when_logged_in(self):
+ self.client.login(username=self.username, password=self.password)
+ response = self.client.get('/')
+ self.assertContains(response, 'john')
+
+ def test_logout_shown_when_logged_in(self):
+ self.client.login(username=self.username, password=self.password)
+ response = self.client.get('/')
+ self.assertContains(response, '>Log out<')
+
+ def test_login_shown_when_logged_out(self):
+ response = self.client.get('/')
+ self.assertContains(response, '>Log in<')
+
+
+class NoDropdownWithoutAuthTests(TestCase):
+ """Tests correct dropdown behaviour with Auth views NOT enabled."""
+
+ urls = 'tests.browsable_api.no_auth_urls'
+
+ def setUp(self):
+ self.client = APIClient(enforce_csrf_checks=True)
+ self.username = 'john'
+ self.email = '[email protected]'
+ self.password = 'password'
+ self.user = User.objects.create_user(self.username, self.email, self.password)
+
+ def tearDown(self):
+ self.client.logout()
+
+ def test_name_shown_when_logged_in(self):
+ self.client.login(username=self.username, password=self.password)
+ response = self.client.get('/')
+ self.assertContains(response, 'john')
+
+ def test_dropdown_not_shown_when_logged_in(self):
+ self.client.login(username=self.username, password=self.password)
+ response = self.client.get('/')
+ self.assertNotContains(response, '<li class="dropdown">')
+
+ def test_dropdown_not_shown_when_logged_out(self):
+ response = self.client.get('/')
+ self.assertNotContains(response, '<li class="dropdown">')
diff --git a/tests/browsable_api/views.py b/tests/browsable_api/views.py
new file mode 100644
--- /dev/null
+++ b/tests/browsable_api/views.py
@@ -0,0 +1,15 @@
+from __future__ import unicode_literals
+
+from rest_framework.views import APIView
+from rest_framework import authentication
+from rest_framework import renderers
+from rest_framework.response import Response
+
+
+class MockView(APIView):
+
+ authentication_classes = (authentication.SessionAuthentication,)
+ renderer_classes = (renderers.BrowsableAPIRenderer,)
+
+ def get(self, request):
+ return Response({'a': 1, 'b': 2, 'c': 3})
| If the login view is not registered, the browsable API displays an empty dropdown control.
Using with Django 1.7c2

| I assume you've not included the login and logout views.
Have you included this in your URLconf?...
```
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
```
From the installation instructions [here](http://www.django-rest-framework.org/#installation)
I'll go ahead and reopen the ticket if that's not the issue.
I have a feeling this is coming from following the pre-release "Lightweight Django". We use the browsable API but rely on the browser to hold the Basic Auth credentials rather than use the `SessionAuthentication`. Unfortunately the UI doesn't degrade well for this case. It looks like there is/should be a dropdown but there isn't.
To be clear I'm not saying this needs to be fixed in DRF. We'll add a note that this dropdown shouldn't be expected.
Actually yes, it should degrade more nicely.
@mlavin: Looking forward to taking a look at "Lightweight Django" btw - just holding off ATM as I'm tempted to wait a purchase the hardcopy when it's ready :) (And one of these days we might wanna catch up on your DEP for request parsing)
:smile: yes the DEP is waiting on the book but one day I'll get back to it.
Thanks @mlavin for pointing that out; yes indeed, it's from your book. It confuses me why there would be a dropdown if it doesn't have any content. As I'm new to DRF, I tried to debug but I couldn't find any solution.
BTW, excited for the next content release...
If the login views are not registered, we shouldn't style the username as a dropdown with empty login link.
DjangoCon guidance - ensure that the browsable API displays sensibly if the login/logout links are not present. The username should still display if authenticated, but should not present the dropdown control. Include a screenshot with your pull request.
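For reference, a rough Python sketch of the degraded behaviour described above, mirroring the logic of the `optional_logout` change in the patch (the function name here is illustrative, not the actual template tag):

``` python
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.html import escape


def logout_snippet(request, user):
    # If the logout view isn't registered, show the plain username
    # instead of an empty dropdown.
    try:
        logout_url = reverse('rest_framework:logout')
    except NoReverseMatch:
        return '<li class="navbar-text">{user}</li>'.format(user=user)
    return "<li><a href='{href}?next={next}'>Log out</a></li>".format(
        href=logout_url, next=escape(request.path))
```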
| 2014-09-02T12:56:44 |
encode/django-rest-framework | 1,836 | encode__django-rest-framework-1836 | [
"1432"
] | 2b47c6b700be530605bd30f0afe7214ea376bd13 | diff --git a/rest_framework/filters.py b/rest_framework/filters.py
--- a/rest_framework/filters.py
+++ b/rest_framework/filters.py
@@ -56,7 +56,6 @@ class AutoFilterSet(self.default_filter_set):
class Meta:
model = queryset.model
fields = filter_fields
- order_by = True
return AutoFilterSet
return None
| diff --git a/tests/test_filters.py b/tests/test_filters.py
--- a/tests/test_filters.py
+++ b/tests/test_filters.py
@@ -408,16 +408,61 @@ class SearchListView(generics.ListAPIView):
)
-class OrdringFilterModel(models.Model):
+class OrderingFilterModel(models.Model):
title = models.CharField(max_length=20)
text = models.CharField(max_length=100)
class OrderingFilterRelatedModel(models.Model):
- related_object = models.ForeignKey(OrdringFilterModel,
+ related_object = models.ForeignKey(OrderingFilterModel,
related_name="relateds")
+class DjangoFilterOrderingModel(models.Model):
+ date = models.DateField()
+ text = models.CharField(max_length=10)
+
+ class Meta:
+ ordering = ['-date']
+
+
+class DjangoFilterOrderingTests(TestCase):
+ def setUp(self):
+ data = [{
+ 'date': datetime.date(2012, 10, 8),
+ 'text': 'abc'
+ }, {
+ 'date': datetime.date(2013, 10, 8),
+ 'text': 'bcd'
+ }, {
+ 'date': datetime.date(2014, 10, 8),
+ 'text': 'cde'
+ }]
+
+ for d in data:
+ DjangoFilterOrderingModel.objects.create(**d)
+
+ def test_default_ordering(self):
+ class DjangoFilterOrderingView(generics.ListAPIView):
+ model = DjangoFilterOrderingModel
+ filter_backends = (filters.DjangoFilterBackend,)
+ filter_fields = ['text']
+ ordering = ('-date',)
+
+ view = DjangoFilterOrderingView.as_view()
+ request = factory.get('/')
+ response = view(request)
+
+ self.assertEqual(
+ response.data,
+ [
+ {'id': 3, 'date': datetime.date(2014, 10, 8), 'text': 'cde'},
+ {'id': 2, 'date': datetime.date(2013, 10, 8), 'text': 'bcd'},
+ {'id': 1, 'date': datetime.date(2012, 10, 8), 'text': 'abc'}
+ ]
+ )
+
+
class OrderingFilterTests(TestCase):
def setUp(self):
# Sequence of title/text is:
@@ -436,11 +481,11 @@ def setUp(self):
chr(idx + ord('b')) +
chr(idx + ord('c'))
)
- OrdringFilterModel(title=title, text=text).save()
+ OrderingFilterModel(title=title, text=text).save()
def test_ordering(self):
class OrderingListView(generics.ListAPIView):
- model = OrdringFilterModel
+ model = OrderingFilterModel
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
@@ -459,7 +504,7 @@ class OrderingListView(generics.ListAPIView):
def test_reverse_ordering(self):
class OrderingListView(generics.ListAPIView):
- model = OrdringFilterModel
+ model = OrderingFilterModel
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
@@ -478,7 +523,7 @@ class OrderingListView(generics.ListAPIView):
def test_incorrectfield_ordering(self):
class OrderingListView(generics.ListAPIView):
- model = OrdringFilterModel
+ model = OrderingFilterModel
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
@@ -497,7 +542,7 @@ class OrderingListView(generics.ListAPIView):
def test_default_ordering(self):
class OrderingListView(generics.ListAPIView):
- model = OrdringFilterModel
+ model = OrderingFilterModel
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
oredering_fields = ('text',)
@@ -516,7 +561,7 @@ class OrderingListView(generics.ListAPIView):
def test_default_ordering_using_string(self):
class OrderingListView(generics.ListAPIView):
- model = OrdringFilterModel
+ model = OrderingFilterModel
filter_backends = (filters.OrderingFilter,)
ordering = 'title'
ordering_fields = ('text',)
@@ -536,7 +581,7 @@ class OrderingListView(generics.ListAPIView):
def test_ordering_by_aggregate_field(self):
# create some related models to aggregate order by
num_objs = [2, 5, 3]
- for obj, num_relateds in zip(OrdringFilterModel.objects.all(),
+ for obj, num_relateds in zip(OrderingFilterModel.objects.all(),
num_objs):
for _ in range(num_relateds):
new_related = OrderingFilterRelatedModel(
@@ -545,11 +590,11 @@ def test_ordering_by_aggregate_field(self):
new_related.save()
class OrderingListView(generics.ListAPIView):
- model = OrdringFilterModel
+ model = OrderingFilterModel
filter_backends = (filters.OrderingFilter,)
ordering = 'title'
ordering_fields = '__all__'
- queryset = OrdringFilterModel.objects.all().annotate(
+ queryset = OrderingFilterModel.objects.all().annotate(
models.Count("relateds"))
view = OrderingListView.as_view()
@@ -567,7 +612,7 @@ class OrderingListView(generics.ListAPIView):
def test_ordering_with_nonstandard_ordering_param(self):
with temporary_setting('ORDERING_PARAM', 'order', filters):
class OrderingListView(generics.ListAPIView):
- model = OrdringFilterModel
+ model = OrderingFilterModel
filter_backends = (filters.OrderingFilter,)
ordering = ('title',)
ordering_fields = ('text',)
| Adding the `DjangoFilterBackend` filter changes queryset ordering.
Tried posting on StackOverflow with no reply (http://stackoverflow.com/questions/21848095/adding-filtering-changes-ordering) => decided to open bug here
I have a ModelViewSet that I want to add filtering to. My simple model looks like
```
class Article(models.Model):
    date = models.DateField()
language = models.CharField(max_length=10)
class Meta:
ordering = ['-date']
```
And the ModelViewSet (read only):
```
class ArticleViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Article.objects.all()
serializer_class = ArticleSerializer
```
Articles on the API are now ordered by date descending, as I would expect. Now I wish to allow filtering on language. I've set the filter backend to `DjangoFilterBackend` in settings.py. My updated ModelViewSet now looks like:
```
class ArticleViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Article.objects.all()
serializer_class = ArticleSerializer
filter_fields = ['language']
```
This changes the ordering to language ASC. Adding `order_by('-date')` to queryset does not change anything. Adding `ordering = ('-date', )` does not change anything. => How do I specify both filtering and ordering (or simply use default ordering while allowing filtering)?
**EDIT:**
Current functionality seems to come from AutoFilterSet created in Rest Framework by default:
https://github.com/tomchristie/django-rest-framework/blob/822eb39599b248c68573c3095639a831ab6df99a/rest_framework/filters.py#L53
... where `order_by=True`, and the handling of this in django-filter's `get_ordering_field` here: https://github.com/alex/django-filter/blob/d88b98dd2b70551deb9c128b209fcf783b325acc/django_filters/filterset.py#L325
=> Seems I have to create a FilterSet class:
```
class LanguageFilter(django_filters.FilterSet):
class Meta:
model = Article
fields = ['language']
order_by = model()._meta.ordering
class ArticleViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Article.objects.all()
serializer_class = ArticleSerializer
filter_class = LanguageFilter
```
Does this look correct? Seems a bit "much"/verbose/counter-intuitive to retain default ordering.
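
For comparison, a less verbose sketch that keeps the default ordering by combining the two backends (assuming django-filter is installed and DRF's `OrderingFilter` is acceptable here; `Article` and `ArticleSerializer` are the classes shown above):

``` python
from rest_framework import filters, viewsets


class ArticleViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = Article.objects.all().order_by('-date')
    serializer_class = ArticleSerializer
    filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter)
    filter_fields = ['language']
    ordering = ('-date',)  # applied by OrderingFilter when no ?ordering= is given
```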
| If simply adding DjangoFilterBackend quietly & unexpectedly changes the default ordering then I think we can prob treat that as a bug, yup.
Just ran into this behaviour and found it odd. How about making `order_by` in `AutoFilterSet` default to `filter_fields` if no `model()._meta.ordering` is set? I think that should work. Thoughts?
Update: Or maybe just appending both iterables together.
As far as I can see `order_by` on the `AutoFilterSet` should simply _not_ be set.
There's no documentation against `DjangoFilterBackend` noting that behavior.
I imagine that it's just a left-over from when Django REST framework didn't support pluggable filters and `OrderingFilter` didn't exist.
Of course we would want to call it out loudly if we remove it. It's possible that some users are relying on the undocumented behavior there.
@tomchristie any specific way you want to tackle it? Anyway I can help?
Well, start with a PR removing it and check if any of the existing test cases break I guess.
+1
@jpadilla Okay so that looks great - next thing is to properly describe the change in a way that's easy for users to understand so that we have something we can add to the release notes. What does `order_by` on the `FilterSet` mean and do, how does the change impact the ordering, and how do users switch back to the old behavior if they need to?
If someone wants to pull that text together it could just be as a comment on this thread.
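As a rough starting point for that text: with the change, `order_by` is no longer forced onto the auto-generated `FilterSet`, so the queryset's own ordering (or `OrderingFilter`) applies. Anyone relying on the old filter-controlled ordering can opt back in explicitly with something like the following sketch (behaviour depends on the installed django-filter version; `Article` is the model from the issue above):

``` python
import django_filters


class ArticleFilter(django_filters.FilterSet):
    class Meta:
        model = Article
        fields = ['language']
        order_by = True  # re-enables the ordering query parameter on the filter

# then set `filter_class = ArticleFilter` on the view instead of `filter_fields`
```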
DjangoCon sprint guidance - this already has a pull request, but needs release notes and test, [see my comment above from Apr 9](https://github.com/tomchristie/django-rest-framework/issues/1432#issuecomment-40005139).
I'm picking this one up again
| 2014-09-05T18:37:51 |
encode/django-rest-framework | 1,838 | encode__django-rest-framework-1838 | [
"1531"
] | f4e02446f99cef42f18f57a2712c435a84451868 | diff --git a/rest_framework/parsers.py b/rest_framework/parsers.py
--- a/rest_framework/parsers.py
+++ b/rest_framework/parsers.py
@@ -11,7 +11,7 @@
from django.http.multipartparser import MultiPartParser as DjangoMultiPartParser
from django.http.multipartparser import MultiPartParserError, parse_header, ChunkIter
from django.utils import six
-from rest_framework.compat import etree, yaml, force_text
+from rest_framework.compat import etree, yaml, force_text, urlparse
from rest_framework.exceptions import ParseError
from rest_framework import renderers
import json
@@ -290,6 +290,22 @@ def get_filename(self, stream, media_type, parser_context):
try:
meta = parser_context['request'].META
disposition = parse_header(meta['HTTP_CONTENT_DISPOSITION'].encode('utf-8'))
- return force_text(disposition[1]['filename'])
+ filename_parm = disposition[1]
+ if 'filename*' in filename_parm:
+ return self.get_encoded_filename(filename_parm)
+ return force_text(filename_parm['filename'])
except (AttributeError, KeyError):
pass
+
+ def get_encoded_filename(self, filename_parm):
+ """
+ Handle encoded filenames per RFC6266. See also:
+ http://tools.ietf.org/html/rfc2231#section-4
+ """
+ encoded_filename = force_text(filename_parm['filename*'])
+ try:
+ charset, lang, filename = encoded_filename.split('\'', 2)
+ filename = urlparse.unquote(filename)
+ except (ValueError, LookupError):
+ filename = force_text(filename_parm['filename'])
+ return filename
| diff --git a/tests/test_parsers.py b/tests/test_parsers.py
--- a/tests/test_parsers.py
+++ b/tests/test_parsers.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
from __future__ import unicode_literals
from rest_framework.compat import StringIO
from django import forms
@@ -113,3 +115,25 @@ def test_get_filename(self):
parser = FileUploadParser()
filename = parser.get_filename(self.stream, None, self.parser_context)
self.assertEqual(filename, 'file.txt')
+
+ def test_get_encoded_filename(self):
+ parser = FileUploadParser()
+
+ self.__replace_content_disposition('inline; filename*=utf-8\'\'ÀĥƦ.txt')
+ filename = parser.get_filename(self.stream, None, self.parser_context)
+ self.assertEqual(filename, 'ÀĥƦ.txt')
+
+ self.__replace_content_disposition('inline; filename=fallback.txt; filename*=utf-8\'\'ÀĥƦ.txt')
+ filename = parser.get_filename(self.stream, None, self.parser_context)
+ self.assertEqual(filename, 'ÀĥƦ.txt')
+
+ self.__replace_content_disposition('inline; filename=fallback.txt; filename*=utf-8\'en-us\'ÀĥƦ.txt')
+ filename = parser.get_filename(self.stream, None, self.parser_context)
+ self.assertEqual(filename, 'ÀĥƦ.txt')
+
+ self.__replace_content_disposition('inline; filename=fallback.txt; filename*=utf-8--ÀĥƦ.txt')
+ filename = parser.get_filename(self.stream, None, self.parser_context)
+ self.assertEqual(filename, 'fallback.txt')
+
+ def __replace_content_disposition(self, disposition):
+ self.parser_context['request'].META['HTTP_CONTENT_DISPOSITION'] = disposition
| Support encoded filename as per rfc6266 in FileUploadParser.
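For context, a small illustrative sketch (not DRF code) of what an RFC 6266 / RFC 2231 encoded `Content-Disposition` value looks like and how the decoding in the patch behaves; the filename is made up:

``` python
try:
    from urllib.parse import unquote   # Python 3
except ImportError:
    from urllib import unquote         # Python 2

content_disposition = "inline; filename*=utf-8''%C3%80%C4%A5%C6%A6.txt"
value = content_disposition.split('filename*=', 1)[1]
charset, lang, quoted_name = value.split("'", 2)
print(charset, lang, unquote(quoted_name))  # utf-8, '', ÀĥƦ.txt
```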
| 2014-09-05T22:41:27 |
|
encode/django-rest-framework | 1,841 | encode__django-rest-framework-1841 | [
"1713"
] | f4e02446f99cef42f18f57a2712c435a84451868 | diff --git a/rest_framework/utils/formatting.py b/rest_framework/utils/formatting.py
--- a/rest_framework/utils/formatting.py
+++ b/rest_framework/utils/formatting.py
@@ -2,11 +2,12 @@
Utility functions to return a formatted name and description for a given view.
"""
from __future__ import unicode_literals
+import re
from django.utils.html import escape
from django.utils.safestring import mark_safe
-from rest_framework.compat import apply_markdown
-import re
+
+from rest_framework.compat import apply_markdown, force_text
def remove_trailing_string(content, trailing):
@@ -28,6 +29,7 @@ def dedent(content):
as it fails to dedent multiline docstrings that include
unindented text on the initial line.
"""
+ content = force_text(content)
whitespace_counts = [len(line) - len(line.lstrip(' '))
for line in content.splitlines()[1:] if line.lstrip()]
| diff --git a/tests/test_description.py b/tests/test_description.py
--- a/tests/test_description.py
+++ b/tests/test_description.py
@@ -98,6 +98,30 @@ class MockView(APIView):
pass
self.assertEqual(MockView().get_view_description(), '')
+ def test_view_description_can_be_promise(self):
+ """
+ Ensure a view may have a docstring that is actually a lazily evaluated
+ class that can be converted to a string.
+
+ See: https://github.com/tomchristie/django-rest-framework/issues/1708
+ """
+ # use a mock object instead of gettext_lazy to ensure that we can't end
+ # up with a test case string in our l10n catalog
+ class MockLazyStr(object):
+ def __init__(self, string):
+ self.s = string
+
+ def __str__(self):
+ return self.s
+
+ def __unicode__(self):
+ return self.s
+
+ class MockView(APIView):
+ __doc__ = MockLazyStr("a gettext string")
+
+ self.assertEqual(MockView().get_view_description(), 'a gettext string')
+
def test_markdown(self):
"""
Ensure markdown to HTML works as expected.
| Ensure docstrings are coerced into a string, in order to support translated strings.
Closes #1708
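The use case this protects, roughly (a sketch; the view name is made up):

``` python
from django.utils.translation import ugettext_lazy as _
from rest_framework.views import APIView
from rest_framework.response import Response


class GreetingView(APIView):
    # A lazily translated description instead of a plain-string docstring.
    __doc__ = _("Returns a friendly greeting.")

    def get(self, request):
        return Response({'greeting': 'hello'})
```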
| 2014-09-05T23:04:37 |
|
encode/django-rest-framework | 1,844 | encode__django-rest-framework-1844 | [
"1533",
"1533"
] | e8fac28d8848dce62a31879e07300842bd1755bd | diff --git a/rest_framework/fields.py b/rest_framework/fields.py
--- a/rest_framework/fields.py
+++ b/rest_framework/fields.py
@@ -563,7 +563,7 @@ def valid_value(self, value):
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
- if value == smart_text(k2):
+ if value == smart_text(k2) or value == k2:
return True
else:
if value == smart_text(k) or value == k:
| diff --git a/tests/test_validation.py b/tests/test_validation.py
--- a/tests/test_validation.py
+++ b/tests/test_validation.py
@@ -1,5 +1,6 @@
from __future__ import unicode_literals
from django.core.validators import MaxValueValidator
+from django.core.exceptions import ValidationError
from django.db import models
from django.test import TestCase
from rest_framework import generics, serializers, status
@@ -146,3 +147,42 @@ def test_max_value_validation_fail(self):
response = view(request, pk=obj.pk).render()
self.assertEqual(response.content, b'{"number_value": ["Ensure this value is less than or equal to 100."]}')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
+
+
+class TestChoiceFieldChoicesValidate(TestCase):
+ CHOICES = [
+ (0, 'Small'),
+ (1, 'Medium'),
+ (2, 'Large'),
+ ]
+
+ CHOICES_NESTED = [
+ ('Category', (
+ (1, 'First'),
+ (2, 'Second'),
+ (3, 'Third'),
+ )),
+ (4, 'Fourth'),
+ ]
+
+ def test_choices(self):
+ """
+ Make sure a value for choices works as expected.
+ """
+ f = serializers.ChoiceField(choices=self.CHOICES)
+ value = self.CHOICES[0][0]
+ try:
+ f.validate(value)
+ except ValidationError:
+ self.fail("Value %s does not validate" % str(value))
+
+ def test_nested_choices(self):
+ """
+ Make sure a nested value for choices works as expected.
+ """
+ f = serializers.ChoiceField(choices=self.CHOICES_NESTED)
+ value = self.CHOICES_NESTED[0][1][0][0]
+ try:
+ f.validate(value)
+ except ValidationError:
+ self.fail("Value %s does not validate" % str(value))
| Support grouped choices.
**Update from @tomchristie**
**Closing off some related tickets and rolling them into this one...**
- [ ] Include core support.
- [ ] Render in browsable API. - #1636
- [ ] Display as metadata in response to `OPTIONS` requests. - #3101
---
One of our models has a ChoiceField with the following choices:
```
TYPE_CHOICES = (
('Default', (
(1, 'Option 1'),
(2, 'Option 2'),
(3, 'Option 3'))),
(4, 'Option 4'))
```
Now in the serializer, if I have something like _self.field.from_native(3)_, we get a ValidationError. Upon inspection of the DRF code, we found that the logic of the valid_value method in ChoiceField is incorrect if you have nested choice sets with integer keys.
```
def valid_value(self, value):
"""
Check to see if the provided value is a valid choice.
"""
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == smart_text(k2):
return True
else:
if value == smart_text(k) or value == k:
return True
return False
```
Line _if value == smart_text(k2):_ should really be: _if value == smart_text(k2) or value == k2:_
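A minimal reproduction of the report above (a sketch against the DRF 2.x API, matching the new test case):

``` python
from rest_framework import serializers

TYPE_CHOICES = (
    ('Default', (
        (1, 'Option 1'),
        (2, 'Option 2'),
        (3, 'Option 3'))),
    (4, 'Option 4'),
)

field = serializers.ChoiceField(choices=TYPE_CHOICES)
field.validate(3)  # raised ValidationError before the `value == k2` comparison was added
```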
| Yup, that seems valid to me.
DjangoCon sprint guidance - check out Django's docs on grouped choices. Demonstrate to yourself that the browsable API in REST framework does not currently support them. Make a pull request with the fix, including a screenshot of before and after. Ensure that `OPTIONS` requests continue to function identically before and after for endpoints that include choice fields.
I'll take a look at solving this at the DjangoCon Sprints today/tomorrow.
Ace.
Tom,
Thanks for the guidance on this. However I don't believe this issue affects the browsable API because integer values are always set as strings in the <option> tag. As a result of this, they are POSTed as strings, and appear as such in the request QueryDict. For this reason, the condition `value == smart_text(k2)` actually evaluates as `True` because smart_text() effectively typecasts `k2` as a string, matching the string sent by the browser and causing validation to succeed in this case.
However, using curl the issue is easily repeatable, e.g.: `curl -H "Content-Type: application/json" -d '{"this_is_an_integer_field": 1 }' http://127.0.0.1:8000/api/v1/widgets/` causes validation to fail unexpectedly as reported by @ameyc.
For this reason on the pull request I'll just include a targeted unit test that demonstrates the main failure case and succeeds with the change. Looking at the test suite structure I'm planning to add it to test_validation.py. Good for you?
Chris
Sounds okay.
| 2014-09-06T21:21:25 |
encode/django-rest-framework | 1,963 | encode__django-rest-framework-1963 | [
"1907"
] | 0951523300c627cbc52c992427069aac562ab70a | diff --git a/rest_framework/serializers.py b/rest_framework/serializers.py
--- a/rest_framework/serializers.py
+++ b/rest_framework/serializers.py
@@ -22,6 +22,7 @@
from django.forms import widgets
from django.utils import six
from django.utils.datastructures import SortedDict
+from django.utils.functional import cached_property
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.settings import api_settings
@@ -197,7 +198,6 @@ def __init__(self, instance=None, data=None, files=None,
self.init_data = data
self.init_files = files
self.object = instance
- self.fields = self.get_fields()
self._data = None
self._files = None
@@ -212,6 +212,10 @@ def __init__(self, instance=None, data=None, files=None,
#####
# Methods to determine which fields to use when (de)serializing objects.
+ @cached_property
+ def fields(self):
+ return self.get_fields()
+
def get_default_fields(self):
"""
Return the complete set of default fields for the object, as a dict.
| diff --git a/tests/test_relations.py b/tests/test_relations.py
--- a/tests/test_relations.py
+++ b/tests/test_relations.py
@@ -102,7 +102,7 @@ class ClassWithQuerysetMethod(object):
self.assertEqual(value, ['BlogPost object'])
# Regression for #1129
- def test_exception_for_incorect_fk(self):
+ def test_exception_for_incorrect_fk(self):
"""
Check that the exception message are correct if the source field
doesn't exist.
@@ -123,8 +123,9 @@ class Meta:
(serializers.ModelSerializer,),
attrs
)
+ serializer = TestSerializer(data={'name': 'foo'})
with self.assertRaises(AttributeError):
- TestSerializer(data={'name': 'foo'})
+ serializer.fields
@unittest.skipIf(get_version() < '1.6.0', 'Upstream behaviour changed in v1.6')
diff --git a/tests/test_serializer.py b/tests/test_serializer.py
--- a/tests/test_serializer.py
+++ b/tests/test_serializer.py
@@ -327,7 +327,9 @@ def test_invalid_read_only_fields(self):
"""
Regression test for #652.
"""
- self.assertRaises(AssertionError, PersonSerializerInvalidReadOnly, [])
+ serializer = PersonSerializerInvalidReadOnly()
+ with self.assertRaises(AssertionError):
+ serializer.fields
def test_serializer_data_is_cleared_on_save(self):
"""
| using serializer as field fails at import time (e.g. with unit tests) with django 1.7
I'm using DRF 2.4.2 and due to some custom methods / representations, I'm writing some unit tests for my serializers using `py.test` and `pytest-django`, which all worked very well.
I wanted to change a related field (which just showed the ID) to the serialized representation, so I used an existing serializer as a field (as explained in the [docs](http://www.django-rest-framework.org/api-guide/relations#nested-relationships)).
Running tests using py.test (with pytest-django) will blow up with the giant (sanitized) stack trace shown at the bottom of this post.
Consider the documentation example:
```
class TrackSerializer(serializers.ModelSerializer):
class Meta:
model = Track
fields = ('order', 'title')
class AlbumSerializer(serializers.ModelSerializer):
tracks = TrackSerializer(many=True)
class Meta:
model = Album
fields = ('album_name', 'artist', 'tracks')
```
and this key part of the stack trace:
```
env/lib/python2.7/site-packages/rest_framework/fields.py:146: in __init__
self.help_text = strip_multiple_choice_msg(smart_text(help_text))
env/lib/python2.7/site-packages/rest_framework/fields.py:116: in strip_multiple_choice_msg
multiple_choice_msg = force_text(multiple_choice_msg)
```
The short version based on my investigation:
- the instantiation of the `TrackSerializer` will happen when this is imported
- when the `TrackSerializer` is instantiated, it creates all the fields from the model and sets up help text for them (I assume for the html form).
- the help text is using django's force_text method which (check the rest of the trace) will use the "underscore" translation service
- this doesn't happen using `runserver`, which I assume has something to do with the import timing of loading a WSGI app.
Where I'm at:
- I'm not sure if this works using django test runner
- I can work around it in my code, so not blocking
- I'm not sure what the action is, especially if it works on django test runner
- I was hoping that I could avoid initialization of the serializer by setting the value to the class instead of an instance, but no luck; that may still be a conceptually simple/OK solution
- this may all get thrown out in a pending major release, since I know that serializers were a big focus of the upcoming development calendar
```
_______________________________________________ ERROR collecting tests/test_serializers.py _______________________________________________
my/app.py: in <module>
class MySerializer(serializers.ModelSerializer):
hass/endpoint/serializers.py:35: in EndpointSerializer
related = RelatedSerialzer()
env/lib/python2.7/site-packages/rest_framework/serializers.py:200: in __init__
self.fields = self.get_fields()
env/lib/python2.7/site-packages/rest_framework/serializers.py:236: in get_fields
default_fields = self.get_default_fields()
env/lib/python2.7/site-packages/rest_framework/serializers.py:691: in get_default_fields
serializer_pk_field = self.get_pk_field(pk_field)
env/lib/python2.7/site-packages/rest_framework/serializers.py:818: in get_pk_field
return self.get_field(model_field)
env/lib/python2.7/site-packages/rest_framework/serializers.py:925: in get_field
return serializer_field_class(**kwargs)
env/lib/python2.7/site-packages/rest_framework/fields.py:468: in __init__
super(CharField, self).__init__(*args, **kwargs)
env/lib/python2.7/site-packages/rest_framework/fields.py:272: in __init__
super(WritableField, self).__init__(source=source, label=label, help_text=help_text)
env/lib/python2.7/site-packages/rest_framework/fields.py:146: in __init__
self.help_text = strip_multiple_choice_msg(smart_text(help_text))
env/lib/python2.7/site-packages/rest_framework/fields.py:116: in strip_multiple_choice_msg
multiple_choice_msg = force_text(multiple_choice_msg)
env/lib/python2.7/site-packages/django/utils/encoding.py:85: in force_text
s = six.text_type(s)
env/lib/python2.7/site-packages/django/utils/functional.py:144: in __text_cast
return func(*self.__args, **self.__kw)
env/lib/python2.7/site-packages/django/utils/translation/__init__.py:83: in ugettext
return _trans.ugettext(message)
env/lib/python2.7/site-packages/django/utils/translation/trans_real.py:325: in ugettext
return do_translate(message, 'ugettext')
env/lib/python2.7/site-packages/django/utils/translation/trans_real.py:306: in do_translate
_default = translation(settings.LANGUAGE_CODE)
env/lib/python2.7/site-packages/django/utils/translation/trans_real.py:209: in translation
default_translation = _fetch(settings.LANGUAGE_CODE)
env/lib/python2.7/site-packages/django/utils/translation/trans_real.py:189: in _fetch
"The translation infrastructure cannot be initialized before the "
E AppRegistryNotReady: The translation infrastructure cannot be initialized before the apps registry is ready. Check that you don't make non-lazy gettext calls at import time.
```
| I have this same issue but it's happening when I'm starting my wsgi server instead.
Tests are running fine, and the app is also running fine when run with `runserver`.
@Geekfish Is your case also tracing back to the translation infrastructure, or are you seeing something that's related to `AppRegistryNotReady` some other ways?
edit: didn't realize I clarified that I was using `runserver` in the OP, editing in another clarification about the unit test framework I was using
@tomchristie
The stack trace is very similar:
```
Traceback (most recent call last):
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/django/core/handlers/wsgi.py", line 187, in __call__
response = self.get_response(request)
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/django/core/handlers/base.py", line 199, in get_response
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/django/core/handlers/base.py", line 239, in handle_uncaught_exception
if resolver.urlconf_module is None:
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/django/core/urlresolvers.py", line 361, in urlconf_module
self._urlconf_module = import_module(self.urlconf_name)
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/var/www/tt/api/builds/my_project/staging/urls.py", line 6, in <module>
from my_project.data.legacy_views import (
File "/var/www/tt/api/builds/my_project/staging/my_project/data/legacy_views.py", line 12, in <module>
from my_project.data.serializers import SomeObjectSerializer
File "/var/www/tt/api/builds/my_project/staging/my_project/data/serializers.py", line 10, in <module>
class SomeOtherObject(BaseSerializer):
File "/var/www/tt/api/builds/my_project/staging/my_project/data/serializers.py", line 12, in SomeOtherObject
identifier = fields.CharField(help_text='Unique string identifier.')
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/rest_framework/fields.py", line 470, in __init__
super(CharField, self).__init__(*args, **kwargs)
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/rest_framework/fields.py", line 275, in __init__
super(WritableField, self).__init__(source=source, label=label, help_text=help_text)
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/rest_framework/fields.py", line 142, in __init__
self.help_text = strip_multiple_choice_msg(smart_text(help_text))
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/rest_framework/fields.py", line 112, in strip_multiple_choice_msg
multiple_choice_msg = force_text(multiple_choice_msg)
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/django/utils/encoding.py", line 85, in force_text
s = six.text_type(s)
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/django/utils/functional.py", line 144, in __text_cast
return func(*self.__args, **self.__kw)
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/django/utils/translation/__init__.py", line 83, in ugettext
return _trans.ugettext(message)
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/django/utils/translation/trans_real.py", line 325, in ugettext
return do_translate(message, 'ugettext')
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/django/utils/translation/trans_real.py", line 306, in do_translate
_default = translation(settings.LANGUAGE_CODE)
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/django/utils/translation/trans_real.py", line 209, in translation
default_translation = _fetch(settings.LANGUAGE_CODE)
File "/var/www/tt/api/virtualenvs/my_project/staging/local/lib/python2.7/site-packages/django/utils/translation/trans_real.py", line 189, in _fetch
"The translation infrastructure cannot be initialized before the "
django.core.exceptions.AppRegistryNotReady: The translation infrastructure cannot be initialized before the apps registry is ready. Check that you don't make non-lazy gettext calls at import time.
```
Actually I should clarify, the error doesn't happen on startup, it happens when receiving a request.
Are there any ideas on how to proceed with this one? I think I understand why this is happening, but I'm not sure what the best way to fix it is.
Would checking for the django version before deciding whether to apply the fix in `strip_multiple_choice_msg` work?
Thanks for taking on this issue, I appreciate it :)
@Geekfish Does the linked commit fix this for you? I'm curious because (although I think it's a good change nonetheless) it doesn't fix the underlying issue for me - I think this goes deeper.
When I comment out the call to `strip_multiple_choice_msg`, my traceback just changes to this:
```
coachhub/tests/converse/test_viewmodels.py:3: in <module>
from coachhub.converse.viewmodels import ViewThread
coachhub/converse/viewmodels.py:3: in <module>
from coachhub.api.serializers import MessageSerializer
coachhub/api/serializers.py:108: in <module>
class CoachProfileSerializer(Serializer):
coachhub/api/serializers.py:109: in CoachProfileSerializer
user = UserSerializer(required=True)
../../../.venvs/coachhub/lib/python2.7/site-packages/rest_framework/serializers.py:200: in __init__
self.fields = self.get_fields()
../../../.venvs/coachhub/lib/python2.7/site-packages/rest_framework/serializers.py:236: in get_fields
default_fields = self.get_default_fields()
../../../.venvs/coachhub/lib/python2.7/site-packages/rest_framework/serializers.py:1095: in get_default_fields
fields = super(HyperlinkedModelSerializer, self).get_default_fields()
../../../.venvs/coachhub/lib/python2.7/site-packages/rest_framework/serializers.py:749: in get_default_fields
reverse_rels = opts.get_all_related_objects()
../../../.venvs/coachhub/lib/python2.7/site-packages/django/db/models/options.py:498: in get_all_related_objects
include_proxy_eq=include_proxy_eq)]
../../../.venvs/coachhub/lib/python2.7/site-packages/django/db/models/options.py:510: in get_all_related_objects_with_model
self._fill_related_objects_cache()
../../../.venvs/coachhub/lib/python2.7/site-packages/django/db/models/options.py:533: in _fill_related_objects_cache
for klass in self.apps.get_models(include_auto_created=True):
../../../.venvs/coachhub/lib/python2.7/site-packages/django/utils/lru_cache.py:101: in wrapper
result = user_function(*args, **kwds)
../../../.venvs/coachhub/lib/python2.7/site-packages/django/apps/registry.py:168: in get_models
self.check_models_ready()
../../../.venvs/coachhub/lib/python2.7/site-packages/django/apps/registry.py:131: in check_models_ready
raise AppRegistryNotReady("Models aren't loaded yet.")
E AppRegistryNotReady: Models aren't loaded yet.
```
Rather than hitting the problem when it tries to load translations, it just hits a bit later when it tries to load all related objects for a model. (This is with a `ModelSerializer`).
Basically the issue is that a `ModelSerializer` does a lot of model introspection when it's instantiated, introspection that really shouldn't be done at import time when not all models are loaded yet, and that simply isn't allowed at import time in Django 1.7. I think fixing this may require non-trivial rethinking of how serializers-as-fields work. Maybe something to be considered in the 3.0 serializer rewrite?
(Note: you can work around this by just being careful about where you import your serializers module; you end up doing a lot of imports-within-functions instead of imports at module level.)
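A sketch of that imports-within-functions workaround (the module and serializer names are made up for illustration):

``` python
def thread_to_representation(thread):
    # Deferring the import keeps ModelSerializer's model introspection out of
    # module import time, when the app registry may not be ready yet.
    from myapp.serializers import MessageSerializer

    return MessageSerializer(thread.messages.all(), many=True).data
```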
:-|
@carljm I've since run into this in a second project, and importing inside functions is my current work-around.
The cost comes with more friction in adding particular types of tests, but at least I haven't personally seen this new project blow up on requests despite using a serializer as a field (using 2.4.3).
In general it doesn't cause a problem on actual requests, as long as you don't import your serializers from a `models.py` or anything imported from a `models.py` (in other words, a place that Django actively imports for you at setup time.) Usually that's not the case - your serializers are imported from views or somewhere else that only gets imported on-demand.
| 2014-10-16T18:19:35 |
encode/django-rest-framework | 2,077 | encode__django-rest-framework-2077 | [
"2013"
] | 4e035184384db8ed1227fdcb1dad2ea6ddb1cf68 | diff --git a/rest_framework/serializers.py b/rest_framework/serializers.py
--- a/rest_framework/serializers.py
+++ b/rest_framework/serializers.py
@@ -34,6 +34,7 @@
)
import copy
import inspect
+import sys
import warnings
# Note: We do the following so that users of the framework can use this style:
@@ -593,7 +594,18 @@ def create(self, validated_attrs):
if relation_info.to_many and (field_name in validated_attrs):
many_to_many[field_name] = validated_attrs.pop(field_name)
- instance = ModelClass.objects.create(**validated_attrs)
+ try:
+ instance = ModelClass.objects.create(**validated_attrs)
+ except TypeError as exc:
+ msg = (
+ 'The mentioned argument might be a field on the serializer '
+ 'that is not part of the model. You need to override the '
+ 'create() method in your ModelSerializer subclass to support '
+ 'this.')
+ six.reraise(
+ type(exc),
+ type(exc)(str(exc) + '. ' + msg),
+ sys.exc_info()[2])
# Save many-to-many relationships after the instance is created.
if many_to_many:
| diff --git a/tests/test_model_serializer.py b/tests/test_model_serializer.py
--- a/tests/test_model_serializer.py
+++ b/tests/test_model_serializer.py
@@ -10,6 +10,7 @@
from django.db import models
from django.test import TestCase
from rest_framework import serializers
+import pytest
def dedent(blocktext):
@@ -26,6 +27,10 @@ class CustomField(models.Field):
pass
+class OneFieldModel(models.Model):
+ char_field = models.CharField(max_length=100)
+
+
class RegularFieldsModel(models.Model):
"""
A model class for testing regular flat fields.
@@ -68,6 +73,29 @@ class FieldOptionsModel(models.Model):
choices_field = models.CharField(max_length=100, choices=COLOR_CHOICES)
+class TestModelSerializer(TestCase):
+ def test_create_method(self):
+ class TestSerializer(serializers.ModelSerializer):
+ non_model_field = serializers.CharField()
+
+ class Meta:
+ model = OneFieldModel
+ fields = ('char_field', 'non_model_field')
+
+ serializer = TestSerializer(data={
+ 'char_field': 'foo',
+ 'non_model_field': 'bar',
+ })
+ serializer.is_valid()
+ with pytest.raises(TypeError):
+ serializer.save()
+
+ try:
+ serializer.save()
+ except TypeError as exc:
+ assert 'ModelSerializer' in str(exc)
+
+
class TestRegularFieldMappings(TestCase):
def test_regular_fields(self):
"""
| More helpful error message when default `.create` fails.
If the default `.create` method on `ModelSerializer` fails with `TypeError` then it'll typically be because it's passed an incorrect parameter to the `MyModel.objects.create()` method. Eg.
```
TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
```
We should highlight this type of issue to users, and make it more obvious & easy to resolve.
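Once the clearer error points someone here, the usual resolution is to override `.create()` and strip the serializer-only fields first. A sketch (the `confirm_password` field is a made-up example of a non-model field; the argument name follows the patch above):

``` python
from django.contrib.auth.models import User
from rest_framework import serializers


class SignupSerializer(serializers.ModelSerializer):
    confirm_password = serializers.CharField(write_only=True)

    class Meta:
        model = User
        fields = ('username', 'password', 'confirm_password')

    def create(self, validated_attrs):
        validated_attrs.pop('confirm_password')  # not a model field
        return User.objects.create(**validated_attrs)
```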
| Example case where this can happen... user creates a non-model field on the `ModelSerializer` class.
| 2014-11-15T14:30:36 |