Dataset schema (column, dtype, value range):

| column | dtype | values / lengths |
|---|---|---|
| repo | stringclasses | 856 values |
| pull_number | int64 | 3–127k |
| instance_id | stringlengths | 12–58 |
| issue_numbers | sequencelengths | 1–5 |
| base_commit | stringlengths | 40–40 |
| patch | stringlengths | 67–1.54M |
| test_patch | stringlengths | 0–107M |
| problem_statement | stringlengths | 3–307k |
| hints_text | stringlengths | 0–908k |
| created_at | timestamp[s] | |
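The rows below can be consumed programmatically. A minimal sketch using the `datasets` library; the dataset identifier is a placeholder and not taken from this dump:

```python
# Minimal sketch of iterating rows with the columns listed above.
# The dataset identifier is hypothetical, not taken from this document.
from datasets import load_dataset

ds = load_dataset("some-org/pysyft-pr-instances", split="train")  # placeholder id

for row in ds:
    # Each row pairs PR metadata with the gold patch and its test patch.
    print(row["instance_id"], row["repo"], row["pull_number"], row["created_at"])
    # `patch` and `test_patch` are unified diffs stored as plain strings.
    assert row["patch"].startswith("diff --git")
```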
OpenMined/PySyft
5,345
OpenMined__PySyft-5345
[ "5341" ]
9a0ff1a3e9da7830b57419ea0955a6c6417a096b
diff --git a/src/syft/lib/python/__init__.py b/src/syft/lib/python/__init__.py --- a/src/syft/lib/python/__init__.py +++ b/src/syft/lib/python/__init__.py @@ -277,12 +277,12 @@ def create_python_ast(client: Optional[AbstractNodeClient] = None) -> Globals: ("syft.lib.python.Dict.fromkeys", "syft.lib.python.Dict"), # Rename get to dict_get because of conflict ("syft.lib.python.Dict.dict_get", "syft.lib.python.Any"), - ("syft.lib.python.Dict.items", "syft.lib.python.List"), - ("syft.lib.python.Dict.keys", "syft.lib.python.List"), + ("syft.lib.python.Dict.items", "syft.lib.python.Iterator"), + ("syft.lib.python.Dict.keys", "syft.lib.python.Iterator"), ("syft.lib.python.Dict.pop", "syft.lib.python.Any"), ("syft.lib.python.Dict.popitem", "syft.lib.python.Tuple"), ("syft.lib.python.Dict.setdefault", "syft.lib.python.Any"), - ("syft.lib.python.Dict.values", "syft.lib.python.List"), + ("syft.lib.python.Dict.values", "syft.lib.python.Iterator"), # Int methods - subject to further change ("syft.lib.python.Int.__add__", "syft.lib.python.Int"), ("syft.lib.python.Int.__truediv__", "syft.lib.python.Float"), @@ -446,6 +446,7 @@ def create_python_ast(client: Optional[AbstractNodeClient] = None) -> Globals: "syft.lib.python.collections.OrderedDict.__le__", "syft.lib.python.Bool", ), + ("syft.lib.python.collections.OrderedDict.__iter__", "syft.lib.python.Any"), ("syft.lib.python.collections.OrderedDict.__len__", "syft.lib.python.Int"), ( "syft.lib.python.collections.OrderedDict.__lt__", @@ -471,8 +472,8 @@ def create_python_ast(client: Optional[AbstractNodeClient] = None) -> Globals: "syft.lib.python.collections.OrderedDict.fromkeys", "syft.lib.python.collections.OrderedDict", ), - ("syft.lib.python.collections.OrderedDict.items", "syft.lib.python.List"), - ("syft.lib.python.collections.OrderedDict.keys", "syft.lib.python.List"), + ("syft.lib.python.collections.OrderedDict.items", "syft.lib.python.Iterator"), + ("syft.lib.python.collections.OrderedDict.keys", "syft.lib.python.Iterator"), ( "syft.lib.python.collections.OrderedDict.move_to_end", "syft.lib.python._SyNone", @@ -489,7 +490,7 @@ def create_python_ast(client: Optional[AbstractNodeClient] = None) -> Globals: ), ( "syft.lib.python.collections.OrderedDict.values", - "syft.lib.python.List", + "syft.lib.python.Iterator", ), ("syft.lib.python.collections.OrderedDict.items", "syft.lib.python.List"), ( diff --git a/src/syft/lib/python/collections/ordered_dict.py b/src/syft/lib/python/collections/ordered_dict.py --- a/src/syft/lib/python/collections/ordered_dict.py +++ b/src/syft/lib/python/collections/ordered_dict.py @@ -1,5 +1,8 @@ # stdlib from collections import OrderedDict as PyOrderedDict +from collections.abc import ItemsView +from collections.abc import KeysView +from collections.abc import ValuesView from typing import Any from typing import Optional @@ -11,9 +14,11 @@ from .... 
import serialize from ....core.common.serde.serializable import bind_protobuf from ....core.common.uid import UID +from ....logger import traceback_and_raise from ....proto.lib.python.collections.ordered_dict_pb2 import ( OrderedDict as OrderedDict_PB, ) +from ..iterator import Iterator from ..primitive_factory import PrimitiveFactory from ..primitive_factory import isprimitive from ..primitive_interface import PyPrimitive @@ -59,6 +64,9 @@ def __getitem__(self, other: Any) -> SyPrimitiveRet: # we can have torch.Tensor and other types return res + def __iter__(self, max_len: Optional[int] = None) -> Iterator: + return Iterator(super().__iter__(), max_len=max_len) + def __len__(self) -> SyPrimitiveRet: res = super().__len__() return PrimitiveFactory.generate_primitive(value=res) @@ -102,13 +110,11 @@ def dict_get(self, other: Any) -> Any: # we can have torch.Tensor and other types return res - def items(self) -> SyPrimitiveRet: - res = list(super().items()) - return PrimitiveFactory.generate_primitive(value=res) + def items(self, max_len: Optional[int] = None) -> Iterator: # type: ignore + return Iterator(ItemsView(self), max_len=max_len) - def keys(self) -> SyPrimitiveRet: - res = list(super().keys()) - return PrimitiveFactory.generate_primitive(value=res) + def keys(self, max_len: Optional[int] = None) -> Iterator: # type: ignore + return Iterator(KeysView(self), max_len=max_len) def move_to_end(self, other: Any, last: Any = True) -> Any: res = super().move_to_end(other, last) @@ -130,9 +136,15 @@ def update(self, *args, **kwds: Any) -> SyPrimitiveRet: # type: ignore res = super().update(*args, **kwds) return PrimitiveFactory.generate_primitive(value=res) - def values(self) -> SyPrimitiveRet: - res = list(super().values()) - return PrimitiveFactory.generate_primitive(value=res) + def values(self, *args: Any, max_len: Optional[int] = None) -> Iterator: # type: ignore + # this is what the super type does and there is a test in dict_test.py + # test_values which checks for this so we could disable the test or + # keep this workaround + if len(args) > 0: + traceback_and_raise( + TypeError("values() takes 1 positional argument but 2 were given") + ) + return Iterator(ValuesView(self), max_len=max_len) def _object2proto(self) -> OrderedDict_PB: id_ = serialize(obj=self.id)
diff --git a/tests/syft/lib/python/collections/ordered_dict/ordered_dict_sanity_test.py b/tests/syft/lib/python/collections/ordered_dict/ordered_dict_sanity_test.py --- a/tests/syft/lib/python/collections/ordered_dict/ordered_dict_sanity_test.py +++ b/tests/syft/lib/python/collections/ordered_dict/ordered_dict_sanity_test.py @@ -21,6 +21,7 @@ # syft absolute from syft.core.common.uid import UID +from syft.lib.python import SyNone from syft.lib.python.collections import OrderedDict as SyOrderedDict @@ -209,9 +210,9 @@ def test_iterators(): assertEqual(list(od.values()), [t[1] for t in pairs]) assertEqual(list(od.items()), pairs) assertEqual(list(reversed(od)), [t[0] for t in reversed(pairs)]) - assertEqual(list(reversed(od.keys())), [t[0] for t in reversed(pairs)]) - assertEqual(list(reversed(od.values())), [t[1] for t in reversed(pairs)]) - assertEqual(list(reversed(od.items())), list(reversed(pairs))) + assertEqual(list(reversed(list(od.keys()))), [t[0] for t in reversed(pairs)]) + assertEqual(list(reversed(list(od.values()))), [t[1] for t in reversed(pairs)]) + assertEqual(list(reversed(list(od.items()))), list(reversed(pairs))) def test_detect_deletion_during_iteration(): @@ -248,9 +249,9 @@ def test_iterators_empty(): assertEqual(list(od.values()), empty) assertEqual(list(od.items()), empty) assertEqual(list(reversed(od)), empty) - assertEqual(list(reversed(od.keys())), empty) - assertEqual(list(reversed(od.values())), empty) - assertEqual(list(reversed(od.items())), empty) + assertEqual(list(reversed(list(od.keys()))), empty) + assertEqual(list(reversed(list(od.values()))), empty) + assertEqual(list(reversed(list(od.items()))), empty) def test_popitem(): @@ -406,7 +407,8 @@ def test_repr_recursive(): od = OrderedDict.FromKeys("abc") od["x"] = od assertEqual( - repr(od), "OrderedDict([('a', None), ('b', None), ('c', None), ('x', ...)])" + repr(od), + f"OrderedDict([('a', {repr(SyNone)}), ('b', {repr(SyNone)}), ('c', {repr(SyNone)}), ('x', ...)])", ) @@ -508,8 +510,8 @@ def test_views(): # See http://bugs.python.org/issue24286 s = "the quick brown fox jumped over a lazy dog yesterday before dawn".split() od = OrderedDict.FromKeys(s) - assertEqual(od.keys(), list(dict(od).keys())) - assertEqual(od.items(), list(dict(od).items())) + assertEqual(list(od.keys()), list(dict(od).keys())) + assertEqual(list(od.items()), list(dict(od).items())) def test_override_update(): @@ -612,11 +614,11 @@ def __hash__(self): od[key] = i # These should not crash. 
- with pytest.raises(KeyError): + with pytest.raises(RuntimeError): list(od.values()) - with pytest.raises(KeyError): + with pytest.raises(RuntimeError): list(od.items()) - with pytest.raises(KeyError): + with pytest.raises(RuntimeError): repr(od) with pytest.raises(KeyError): od.copy() @@ -675,7 +677,7 @@ def test_dict_delitem(): od["spam"] = 1 od["ham"] = 2 dict.__delitem__(od, "spam") - with pytest.raises(KeyError): + with pytest.raises(RuntimeError): repr(od) @@ -694,7 +696,7 @@ def test_dict_pop(): od["spam"] = 1 od["ham"] = 2 dict.pop(od, "spam") - with pytest.raises(KeyError): + with pytest.raises(RuntimeError): repr(od) @@ -704,7 +706,7 @@ def test_dict_popitem(): od["spam"] = 1 od["ham"] = 2 dict.popitem(od) - with pytest.raises(KeyError): + with pytest.raises(RuntimeError): repr(od) diff --git a/tests/syft/lib/python/collections/ordered_dict/ordered_dict_serde_test.py b/tests/syft/lib/python/collections/ordered_dict/ordered_dict_serde_test.py --- a/tests/syft/lib/python/collections/ordered_dict/ordered_dict_serde_test.py +++ b/tests/syft/lib/python/collections/ordered_dict/ordered_dict_serde_test.py @@ -2,6 +2,7 @@ from collections import OrderedDict as PyOrderectDict # third party +import pytest import torch as th # syft absolute @@ -69,3 +70,19 @@ def test_list_send() -> None: res = ptr.get() for res_el, original_el in zip(res, syft_list): assert res_el == original_el + + [email protected]("method_name", ["items", "keys", "values"]) +def test_iterator_methods(method_name: str) -> None: + alice = sy.VirtualMachine(name="alice") + alice_client = alice.get_root_client() + + d = OrderedDict({"#1": 1, "#2": 2}) + dptr = d.send(alice_client) + + itemsptr = getattr(dptr, method_name)() + assert type(itemsptr).__name__ == "IteratorPointer" + + for itemptr, local_item in zip(itemsptr, getattr(d, method_name)()): + get_item = itemptr.get() + assert get_item == local_item diff --git a/tests/syft/lib/python/collections/ordered_dict/pointer_test.py b/tests/syft/lib/python/collections/ordered_dict/pointer_test.py --- a/tests/syft/lib/python/collections/ordered_dict/pointer_test.py +++ b/tests/syft/lib/python/collections/ordered_dict/pointer_test.py @@ -111,6 +111,10 @@ def test_pointer_objectives(test_objects, func): if func in ["items", "values", "keys"]: py_res = list(py_res) + sy_res = list(sy_res) assert py_res == sy_res - assert sy_res == remote_sy_res + + # TODO: support `.get` for IteratorPointer objects + if func not in ("items", "keys", "values"): + assert sy_res == remote_sy_res diff --git a/tests/syft/lib/python/dict/dict_serde_test.py b/tests/syft/lib/python/dict/dict_serde_test.py --- a/tests/syft/lib/python/dict/dict_serde_test.py +++ b/tests/syft/lib/python/dict/dict_serde_test.py @@ -2,6 +2,7 @@ from collections import UserDict # third party +import pytest import torch as th # syft absolute @@ -75,3 +76,19 @@ def test_list_send() -> None: res = ptr.get() for res_el, original_el in zip(res, syft_list): assert res_el == original_el + + [email protected]("method_name", ["items", "keys", "values"]) +def test_iterator_methods(method_name: str) -> None: + alice = sy.VirtualMachine(name="alice") + alice_client = alice.get_root_client() + + d = Dict({"#1": 1, "#2": 2}) + dptr = d.send(alice_client) + + itemsptr = getattr(dptr, method_name)() + assert type(itemsptr).__name__ == "IteratorPointer" + + for itemptr, local_item in zip(itemsptr, getattr(d, method_name)()): + get_item = itemptr.get() + assert get_item == local_item
DictPointer.items() can't be iterated

## Description
I got an error when trying to iterate over `DictPointer.items()`, while I can iterate over a `ListPointer`.

## How to Reproduce
```python
# on DO side
duet.requests.add_handler(action="accept")

# on DS side
d = sy.lib.python.Dict({"1": 1, "2": 2})
dptr = d.send(duet)
itemsptr = dptr.items()  # you get a ListPointer
itemsptr.__len__()       # got error
for e in itemsptr:       # got error too
    print(e)
```

## Expected Behavior
A clear and concise description of what you expected to happen.

## Screenshots
If applicable, add screenshots to help explain your problem.

## System Information
- OS: [e.g. iOS]
- OS Version: [e.g. 22]
- Language Version: [e.g. Python 3.7, Node 10.18.1]
- Package Manager Version: [e.g. Conda 4.6.1, NPM 6.14.1]
- Browser (if applicable): [e.g. Google Chrome]
- Browser Version (if applicable): [e.g. 81.0.4044.138]

## Additional Context
Add any other context about the problem here.
@tudorcebere Did we end up supporting Dicts in the `Iterator`? Seems like you can get `.len()` on the `dptr` but not on the `itemsptr` interestingly.
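With the patch above, `items()`, `keys()` and `values()` on a sent Dict return an `IteratorPointer` instead of a `ListPointer`. A minimal sketch of the resulting usage, mirroring the `test_iterator_methods` test added in the test patch:

```python
import syft as sy

alice = sy.VirtualMachine(name="alice")
client = alice.get_root_client()

d = sy.lib.python.Dict({"#1": 1, "#2": 2})
dptr = d.send(client)

itemsptr = dptr.items()  # an IteratorPointer after the fix, not a ListPointer
for itemptr, local_item in zip(itemsptr, d.items()):
    # each remote element comes back as a pointer that can be downloaded with .get()
    assert itemptr.get() == local_item
```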
2021-03-23T08:36:23
OpenMined/PySyft
5,383
OpenMined__PySyft-5383
[ "5368" ]
6bb8d8d00276064c32fa8fb3161791f380f8ca1f
diff --git a/src/syft/lib/torchvision/allowlist.py b/src/syft/lib/torchvision/allowlist.py --- a/src/syft/lib/torchvision/allowlist.py +++ b/src/syft/lib/torchvision/allowlist.py @@ -2,10 +2,8 @@ from typing import Dict from typing import Union -# TODO: Refactor out all the test data. -# Issue: https://github.com/OpenMined/PySyft/issues/5325 - -allowlist: Dict[str, Union[str, Dict[str, str]]] = {} # (path: str, return_type:type) +# (path: str, return_type:type) +allowlist: Dict[str, Union[str, Dict[str, str]]] = {} allowlist["torchvision.__version__"] = "syft.lib.python.String" # MNIST @@ -23,288 +21,158 @@ # Datasets -allowlist["torchvision.datasets.MNIST"] = { - "return_type": "torchvision.datasets.MNIST", - "test_parameters": "('../data', download=False,)", -} - +allowlist["torchvision.datasets.MNIST"] = "torchvision.datasets.MNIST" allowlist["torchvision.datasets.MNIST.__len__"] = "syft.lib.python.Int" - -allowlist["torchvision.datasets.CelebA"] = { - "return_type": "torchvision.datasets.CelebA", - "test_parameters": "('../data')", -} +allowlist["torchvision.datasets.CelebA"] = "torchvision.datasets.CelebA" allowlist["torchvision.datasets.CelebA.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.CIFAR10"] = { - "return_type": "torchvision.datasets.CIFAR10", - "test_parameters": "('../data',)", -} +allowlist["torchvision.datasets.CIFAR10"] = "torchvision.datasets.CIFAR10" allowlist["torchvision.datasets.CIFAR10.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.CIFAR100"] = { - "return_type": "torchvision.datasets.CIFAR100", - "test_parameters": "('../data',)", -} +allowlist["torchvision.datasets.CIFAR100"] = "torchvision.datasets.CIFAR100" allowlist["torchvision.datasets.CIFAR10.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.Cityscapes"] = { - "return_type": "torchvision.datasets.Cityscapes", - "test_parameters": "('../data',)", -} +allowlist["torchvision.datasets.Cityscapes"] = "torchvision.datasets.Cityscapes" allowlist["torchvision.datasets.Cityscapes.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.CocoCaptions"] = { - "return_type": "torchvision.datasets.CocoCaptions", - "test_parameters": "('../data','../data/captions.txt')", -} +allowlist["torchvision.datasets.CocoCaptions"] = "torchvision.datasets.CocoCaptions" allowlist["torchvision.datasets.CocoCaptions.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.CocoDetection"] = { - "return_type": "torchvision.datasets.CocoDetection", - "test_parameters": "('../data', '../data/captions.txt')", -} +allowlist["torchvision.datasets.CocoDetection"] = "torchvision.datasets.CocoDetection" allowlist["torchvision.datasets.CocoDetection.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.EMNIST"] = { - "return_type": "torchvision.datasets.EMNIST", - "test_parameters": "('../data',split = \"mnist\")", -} +allowlist["torchvision.datasets.EMNIST"] = "torchvision.datasets.EMNIST" allowlist["torchvision.datasets.EMNIST.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.FakeData"] = { - "return_type": "torchvision.datasets.FakeData", - "test_parameters": "('../data', )", -} +allowlist["torchvision.datasets.FakeData"] = "torchvision.datasets.FakeData" allowlist["torchvision.datasets.FakeData.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.FashionMNIST"] = { - "return_type": "torchvision.datasets.FashionMNIST", - "test_parameters": "('../data',)", -} +allowlist["torchvision.datasets.FashionMNIST"] = 
"torchvision.datasets.FashionMNIST" allowlist["torchvision.datasets.FashionMNIST.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.Flickr8k"] = { - "return_type": "torchvision.datasets.Flickr8k", - "test_parameters": "('../data', '../data/annfile.txt')", -} +allowlist["torchvision.datasets.Flickr8k"] = "torchvision.datasets.Flickr8k" allowlist["torchvision.datasets.Flickr8k.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.Flickr30k"] = { - "return_type": "torchvision.datasets.Flickr30k", - "test_parameters": "('../data', '../data/annfile.txt')", -} +allowlist["torchvision.datasets.Flickr30k"] = "torchvision.datasets.Flickr30k" allowlist["torchvision.datasets.Flickr30k.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.HMDB51"] = { - "return_type": "torchvision.datasets.HMDB51", - "test_parameters": "('../data', '../data/annfile.txt', 20, )", -} +allowlist["torchvision.datasets.HMDB51"] = "torchvision.datasets.HMDB51" allowlist["torchvision.datasets.HMDB51.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.ImageNet"] = { - "return_type": "torchvision.datasets.ImageNet", - "test_parameters": "('../data',)", -} +allowlist["torchvision.datasets.ImageNet"] = "torchvision.datasets.ImageNet" allowlist["torchvision.datasets.ImageNet.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.Kinetics400"] = { - "return_type": "torchvision.datasets.Kinetics400", - "test_parameters": "('../data', 20)", -} +allowlist["torchvision.datasets.Kinetics400"] = "torchvision.datasets.Kinetics400" allowlist["torchvision.datasets.Kinetics400.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.KMNIST"] = { - "return_type": "torchvision.datasets.KMNIST", - "test_parameters": "('../data', )", -} +allowlist["torchvision.datasets.KMNIST"] = "torchvision.datasets.KMNIST" allowlist["torchvision.datasets.KMNIST.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.LSUN"] = { - "return_type": "torchvision.datasets.LSUN", - "test_parameters": "('../data', )", -} +allowlist["torchvision.datasets.LSUN"] = "torchvision.datasets.LSUN" allowlist["torchvision.datasets.LSUN.__len__"] = "syft.lib.python.Int" allowlist["torchvision.datasets.Omniglot"] = { "return_type": "torchvision.datasets.Omniglot", "min_version": "0.8.0", - "test_parameters": "('../data', )", } allowlist["torchvision.datasets.Omniglot.__len__"] = { "return_type": "syft.lib.python.Int", "min_version": "0.8.0", } -allowlist["torchvision.datasets.PhotoTour"] = { - "return_type": "torchvision.datasets.PhotoTour", - "test_parameters": "('../data', name = 'data')", -} +allowlist["torchvision.datasets.PhotoTour"] = "torchvision.datasets.PhotoTour" allowlist["torchvision.datasets.PhotoTour.__len__"] = "syft.lib.python.Int" allowlist["torchvision.datasets.Places365"] = { "return_type": "torchvision.datasets.Places365", "min_version": "0.8.0", - "test_parameters": "('../data',)", } allowlist["torchvision.datasets.Places365.__len__"] = { "return_type": "syft.lib.python.Int", "min_version": "0.8.0", } -allowlist["torchvision.datasets.QMNIST"] = { - "return_type": "torchvision.datasets.QMNIST", - "test_parameters": "('../data',)", -} +allowlist["torchvision.datasets.QMNIST"] = "torchvision.datasets.QMNIST" allowlist["torchvision.datasets.QMNIST.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.SBDataset"] = { - "return_type": "torchvision.datasets.SBDataset", - "test_parameters": "('../data',)", -} +allowlist["torchvision.datasets.SBDataset"] = 
"torchvision.datasets.SBDataset" allowlist["torchvision.datasets.SBDataset.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.SBU"] = { - "return_type": "torchvision.datasets.SBU", - "test_parameters": "('../data', download = False)", -} +allowlist["torchvision.datasets.SBU"] = "torchvision.datasets.SBU" allowlist["torchvision.datasets.SBU.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.STL10"] = { - "return_type": "torchvision.datasets.STL10", - "test_parameters": "('../data',)", -} +allowlist["torchvision.datasets.STL10"] = "torchvision.datasets.STL10" allowlist["torchvision.datasets.STL10.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.SVHN"] = { - "return_type": "torchvision.datasets.SVHN", - "test_parameters": "('../data',)", -} +allowlist["torchvision.datasets.SVHN"] = "torchvision.datasets.SVHN" allowlist["torchvision.datasets.SVHN.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.UCF101"] = { - "return_type": "torchvision.datasets.UCF101", - "test_parameters": "('../data', frames_per_clip = 20, annotation_path = '../data/annfile.txt')", -} +allowlist["torchvision.datasets.UCF101"] = "torchvision.datasets.UCF101" allowlist["torchvision.datasets.UCF101.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.USPS"] = { - "return_type": "torchvision.datasets.USPS", - "test_parameters": "('../data',)", -} +allowlist["torchvision.datasets.USPS"] = "torchvision.datasets.USPS" allowlist["torchvision.datasets.USPS.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.VOCSegmentation"] = { - "return_type": "torchvision.datasets.VOCSegmentation", - "test_parameters": "('../data',)", -} +allowlist[ + "torchvision.datasets.VOCSegmentation" +] = "torchvision.datasets.VOCSegmentation" allowlist["torchvision.datasets.VOCSegmentation.__len__"] = "syft.lib.python.Int" -allowlist["torchvision.datasets.VOCDetection"] = { - "return_type": "torchvision.datasets.VOCDetection", - "test_parameters": "('../data',)", -} +allowlist["torchvision.datasets.VOCDetection"] = "torchvision.datasets.VOCDetection" allowlist[ "torchvision.datasets.VOCDetection.__len__" ] = "torchvision.datasets.VOCDetection" # Transforms -allowlist["torchvision.transforms.CenterCrop"] = { - "return_type": "torchvision.transforms.CenterCrop", - "test_parameters": "(10)", -} -allowlist["torchvision.transforms.ColorJitter"] = { - "return_type": "torchvision.transforms.ColorJitter", - "test_parameters": "(brightness=0, contrast=0, saturation=0, hue=0)", -} +allowlist["torchvision.transforms.CenterCrop"] = "torchvision.transforms.CenterCrop" +allowlist["torchvision.transforms.ColorJitter"] = "torchvision.transforms.ColorJitter" -# This is an interesting case, for some versions p = 0.2 is needed, for others its not needed -allowlist["torchvision.transforms.FiveCrop"] = { - "return_type": "torchvision.transforms.FiveCrop", - # "test_parameters": "(size = 10, p = 0.2)", -} +allowlist["torchvision.transforms.FiveCrop"] = "torchvision.transforms.FiveCrop" -allowlist["torchvision.transforms.Grayscale"] = { - "return_type": "torchvision.transforms.Grayscale", - "test_parameters": "(num_output_channels=1)", -} -allowlist["torchvision.transforms.Pad"] = { - "return_type": "torchvision.transforms.Pad", - "test_parameters": "(2, fill=0, padding_mode='constant')", -} -allowlist["torchvision.transforms.RandomAffine"] = { - "return_type": "torchvision.transforms.RandomAffine", - "test_parameters": "(degrees = 2)", -} 
+allowlist["torchvision.transforms.Grayscale"] = "torchvision.transforms.Grayscale" +allowlist["torchvision.transforms.Pad"] = "torchvision.transforms.Pad" +allowlist["torchvision.transforms.RandomAffine"] = "torchvision.transforms.RandomAffine" -# transforms error -allowlist["torchvision.transforms.RandomApply"] = { - "return_type": "torchvision.transforms.RandomApply", - # "test_parameters": "(torchvision.transforms.CenterCrop(10))", -} +allowlist["torchvision.transforms.RandomApply"] = "torchvision.transforms.RandomApply" -allowlist["torchvision.transforms.RandomCrop"] = { - "return_type": "torchvision.transforms.RandomCrop", - "test_parameters": "(size = 10)", -} -allowlist["torchvision.transforms.RandomGrayscale"] = { - "return_type": "torchvision.transforms.RandomGrayscale", - "test_parameters": "(p=0.1)", -} +allowlist["torchvision.transforms.RandomCrop"] = "torchvision.transforms.RandomCrop" +allowlist[ + "torchvision.transforms.RandomGrayscale" +] = "torchvision.transforms.RandomGrayscale" -allowlist["torchvision.transforms.RandomHorizontalFlip"] = { - "return_type": "torchvision.transforms.RandomHorizontalFlip", - "test_parameters": "(p=0.1)", -} -allowlist["torchvision.transforms.RandomPerspective"] = { - "return_type": "torchvision.transforms.RandomPerspective", - "test_parameters": "(distortion_scale=0.5, p=0.5)", -} +allowlist[ + "torchvision.transforms.RandomHorizontalFlip" +] = "torchvision.transforms.RandomHorizontalFlip" +allowlist[ + "torchvision.transforms.RandomPerspective" +] = "torchvision.transforms.RandomPerspective" -allowlist["torchvision.transforms.RandomResizedCrop"] = { - "return_type": "torchvision.transforms.RandomResizedCrop", - "test_parameters": "(10, scale=(0.08, 1.0), ratio=(0.75, 1.25))", -} -allowlist["torchvision.transforms.RandomRotation"] = { - "return_type": "torchvision.transforms.RandomRotation", - "test_parameters": "(degrees = 2)", -} -allowlist["torchvision.transforms.RandomSizedCrop"] = { - "return_type": "torchvision.transforms.RandomSizedCrop", - "test_parameters": "(10)", -} -allowlist["torchvision.transforms.RandomVerticalFlip"] = { - "return_type": "torchvision.transforms.RandomVerticalFlip", - "test_parameters": "(p=0.5)", -} -allowlist["torchvision.transforms.Resize"] = { - "return_type": "torchvision.transforms.Resize", - "test_parameters": "(size = 15)", -} -allowlist["torchvision.transforms.Scale"] = { - "return_type": "torchvision.transforms.Scale", - "test_parameters": "(10)", -} -allowlist["torchvision.transforms.TenCrop"] = { - "return_type": "torchvision.transforms.TenCrop", - "test_parameters": "(10)", -} +allowlist[ + "torchvision.transforms.RandomResizedCrop" +] = "torchvision.transforms.RandomResizedCrop" +allowlist[ + "torchvision.transforms.RandomRotation" +] = "torchvision.transforms.RandomRotation" +allowlist[ + "torchvision.transforms.RandomSizedCrop" +] = "torchvision.transforms.RandomSizedCrop" +allowlist[ + "torchvision.transforms.RandomVerticalFlip" +] = "torchvision.transforms.RandomVerticalFlip" +allowlist["torchvision.transforms.Resize"] = "torchvision.transforms.Resize" +allowlist["torchvision.transforms.Scale"] = "torchvision.transforms.Scale" +allowlist["torchvision.transforms.TenCrop"] = "torchvision.transforms.TenCrop" allowlist["torchvision.transforms.GaussianBlur"] = { "return_type": "torchvision.transforms.GaussianBlur", "min_version": "0.8.0", - "test_parameters": "(kernel_size = 3)", } -allowlist["torchvision.transforms.RandomChoice"] = { - "return_type": "torchvision.transforms.RandomChoice", -} 
-allowlist["torchvision.transforms.RandomOrder"] = { - "return_type": "torchvision.transforms.RandomOrder", -} +allowlist["torchvision.transforms.RandomChoice"] = "torchvision.transforms.RandomChoice" +allowlist["torchvision.transforms.RandomOrder"] = "torchvision.transforms.RandomOrder" allowlist[ "torchvision.transforms.LinearTransformation" @@ -320,162 +188,71 @@ allowlist["torchvision.transforms.Lambda"] = "torchvision.transforms.Lambda" # Functional Transformers -allowlist["torchvision.transforms.functional.adjust_brightness"] = { - "return_type": "torch.Tensor", - "min_version": "0.8.0", - "test_parameters": "(tens, 0.5)", -} -allowlist["torchvision.transforms.functional.adjust_contrast"] = { - "return_type": "torch.Tensor", - "min_version": "0.8.0", - "test_parameters": "(tens, 0.5)", -} -allowlist["torchvision.transforms.functional.adjust_gamma"] = { - "return_type": "torch.Tensor", - "min_version": "0.8.0", - "test_parameters": "(tens, 1, 0.5)", - # Torch 1.6 expects input to be PIL image, so minimum version as 0.7 (Torch 1.7.0) -} -allowlist["torchvision.transforms.functional.adjust_hue"] = { - "return_type": "torch.Tensor", - "min_version": "0.8.0", - "test_parameters": "(tens, 0)" - # Torch 1.6 expects input to be PIL image, so minimum version as 0.7 (Torch 1.7.0) -} -allowlist["torchvision.transforms.functional.adjust_saturation"] = { - "return_type": "torch.Tensor", - "min_version": "0.8.0", - "test_parameters": "(tens, 0.5)", -} +allowlist["torchvision.transforms.functional.adjust_brightness"] = "torch.Tensor" +allowlist["torchvision.transforms.functional.adjust_contrast"] = "torch.Tensor" +allowlist["torchvision.transforms.functional.adjust_gamma"] = "torch.Tensor" +allowlist["torchvision.transforms.functional.adjust_hue"] = "torch.Tensor" +allowlist["torchvision.transforms.functional.adjust_saturation"] = "torch.Tensor" allowlist["torchvision.transforms.functional.adjust_sharpness"] = { "return_type": "torch.Tensor", "min_version": "0.9.0", - "test_parameters": "(tens, 0.5)", -} -allowlist["torchvision.transforms.functional.affine"] = { - "return_type": "torch.Tensor", - "min_version": "0.8.0", - "test_parameters": "(tens,0.2,[1,2],0.2,[1,2])", - # Torch 1.6 expects input to be PIL image, so minimum version as 0.7 (Torch 1.7.0) } +allowlist["torchvision.transforms.functional.affine"] = "torch.Tensor" allowlist["torchvision.transforms.functional.autocontrast"] = { "return_type": "torch.Tensor", "min_version": "0.9.0", - "test_parameters": "(tens)", -} -allowlist["torchvision.transforms.functional.center_crop"] = { - "return_type": "torch.Tensor", - "min_version": "0.8.0", - "test_parameters": "(tens, 10)", - # Torch 1.6 expects input to be PIL image, so minimum version as 0.7 (Torch 1.7.0) } +allowlist["torchvision.transforms.functional.center_crop"] = "torch.Tensor" allowlist["torchvision.transforms.functional.convert_image_dtype"] = { "return_type": "torch.Tensor", + "min_version": "0.7.0", } -allowlist["torchvision.transforms.functional.crop"] = { - "return_type": "torch.Tensor", - "min_version": "0.8.0", - "test_parameters": "(tens, 10 , 20, 30, 40)", - # Torch 1.6 expects input to be PIL image, so minimum version as 0.7 (Torch 1.7.0) -} +allowlist["torchvision.transforms.functional.crop"] = "torch.Tensor" allowlist["torchvision.transforms.functional.equalize"] = { "return_type": "torch.Tensor", "min_version": "0.9.0", - "test_parameters": "(tens)", -} -allowlist["torchvision.transforms.functional.erase"] = { - "return_type": "torch.Tensor", - "test_parameters": "(tens, 
10, 20, 30, 40, 250)", - "min_version": "0.8.0" - # Torch 1.6 expects input to be PIL image, so minimum version as 0.7 (Torch 1.7.0) -} -allowlist["torchvision.transforms.functional.five_crop"] = { - "return_type": "torch.Tensor", - "test_parameters": "(tens, 10)", - "min_version": "0.8.0" - # Torch 1.6 expects input to be PIL image, so minimum version as 0.7 (Torch 1.7.0) } +allowlist["torchvision.transforms.functional.erase"] = "torch.Tensor" +allowlist["torchvision.transforms.functional.five_crop"] = "torch.Tensor" allowlist["torchvision.transforms.functional.gaussian_blur"] = { "return_type": "torch.Tensor", "min_version": "0.8.0", - "test_parameters": "(tens, 3)", } allowlist["torchvision.transforms.functional.hflip"] = "torch.Tensor" allowlist["torchvision.transforms.functional.invert"] = { "return_type": "torch.Tensor", "min_version": "0.9.0", - "test_parameters": "(tens)", -} -allowlist["torchvision.transforms.functional.normalize"] = { - "return_type": "torch.Tensor", - # "test_parameters": "(tens, [0.5, 0.5, 0.5], [1, 1, 1]).unsqueeze(0)", - # currently commenting because of test issues with hier versions - # (//) works for 1.6.0 and / works for higher version :( } +allowlist["torchvision.transforms.functional.normalize"] = "torch.Tensor" +# "test_parameters (tests/lib/torchvision/allowlist_test_parameters.json): (tens, [0.5, +# 0.5, 0.5], [1, 1, 1]).unsqueeze(0)", +# currently not added because of test issues with hier versions +# (//) works for 1.6.0 and / works for higher version :( + allowlist["torchvision.transforms.functional.pad"] = { "return_type": "torch.Tensor", - "test_parameters": "(tens, 10)", - "min_version": "0.8.0" - # # Torch 1.6 expects input to be PIL image, so commenting this currently -} -allowlist["torchvision.transforms.functional.perspective"] = { - "return_type": "torch.Tensor", - "test_parameters": "(tens, [[10,20],[20,30],[30, 40],[40,50]], [[20,30],[30,40],[40, 50],[50,60]])", - "min_version": "0.8.0" - # # Torch 1.6 expects input to be PIL image, so commenting this currently + "min_version": "0.6.0", } +allowlist["torchvision.transforms.functional.perspective"] = "torch.Tensor" -# Converts PIL to tensor, currently not supported -# allowlist["torchvision.transforms.functional.pil_to_tensor"] = "torch.Tensor" +allowlist["torchvision.transforms.functional.pil_to_tensor"] = "torch.Tensor" allowlist["torchvision.transforms.functional.posterize"] = { "return_type": "torch.Tensor", "min_version": "0.9.0", - "test_parameters": "(tens, 2)", -} -allowlist["torchvision.transforms.functional.resize"] = { - "return_type": "torch.Tensor", - "test_parameters": "(tens, 10)", - "min_version": "0.8.0" - # Torch 1.6 expects input to be PIL image, so minimum version as 0.7 (Torch 1.7.0) -} -allowlist["torchvision.transforms.functional.resized_crop"] = { - "return_type": "torch.Tensor", - "test_parameters": "(tens, 10, 15, 20, 25, 30)", - "min_version": "0.8.0" - # Torch 1.6 expects input to be PIL image, so minimum version as 0.7 (Torch 1.7.0) } +allowlist["torchvision.transforms.functional.resize"] = "torch.Tensor" +allowlist["torchvision.transforms.functional.resized_crop"] = "torch.Tensor" allowlist["torchvision.transforms.functional.rgb_to_grayscale"] = { "return_type": "torch.Tensor", "min_version": "0.8.0", - "test_parameters": "(tens)", -} -allowlist["torchvision.transforms.functional.rotate"] = { - "return_type": "torch.Tensor", - "test_parameters": "(tens, angle = 10)", - "min_version": "0.8.0" - # Torch 1.6 expects input to be PIL image, so minimum version as 
0.7 (Torch 1.7.0) } +allowlist["torchvision.transforms.functional.rotate"] = "torch.Tensor" allowlist["torchvision.transforms.functional.solarize"] = { "return_type": "torch.Tensor", "min_version": "0.9.0", - "test_parameters": "(tens, threshold = 0.5)", -} -allowlist["torchvision.transforms.functional.ten_crop"] = { - "return_type": "torch.Tensor", - "test_parameters": "(tens, size = 10)", - "min_version": "0.8.0" - # Torch 1.6 expects input to be PIL image, so minimum version as 0.7 (Torch 1.7.0) -} -allowlist["torchvision.transforms.functional.to_grayscale"] = { - "return_type": "PIL.Image.Image" -} -allowlist["torchvision.transforms.functional.to_pil_image"] = { - "return_type": "PIL.Image.Image", -} -allowlist["torchvision.transforms.functional.to_tensor"] = { - "return_type": "torch.Tensor", -} -allowlist["torchvision.transforms.functional.vflip"] = { - "return_type": "torch.Tensor", - "test_parameters": "(tens)", } +allowlist["torchvision.transforms.functional.ten_crop"] = "torch.Tensor" +allowlist["torchvision.transforms.functional.to_grayscale"] = "PIL.Image.Image" +allowlist["torchvision.transforms.functional.to_pil_image"] = "PIL.Image.Image" +allowlist["torchvision.transforms.functional.to_tensor"] = "torch.Tensor" +allowlist["torchvision.transforms.functional.vflip"] = "torch.Tensor"
diff --git a/tests/syft/lib/torchvision/allowlist_test.py b/tests/syft/lib/torchvision/allowlist_test.py --- a/tests/syft/lib/torchvision/allowlist_test.py +++ b/tests/syft/lib/torchvision/allowlist_test.py @@ -1,10 +1,12 @@ # stdlib +import json +import os from os import path -import os.path from typing import Dict from typing import Union # third party +import PIL from packaging import version import pytest import torch @@ -14,19 +16,23 @@ import syft as sy from syft.lib.torchvision.allowlist import allowlist -fileName = "imageTensor.pt" - TORCHVISION_VERSION = version.parse(tv.__version__) @pytest.fixture(scope="function") -def tens() -> torch.Tensor: - if path.isfile("imageTensor.pt"): - return torch.load("imageTensor.pt") +def pil_img() -> PIL.Image.Image: + img_file = "../../../../docs/img/logo.png" + if path.isfile(img_file): + return PIL.Image.open(img_file).convert("RGB") else: cwd = os.getcwd() - path_file = cwd + "/tests/syft/lib/torchvision/" + fileName - return torch.load(path_file) + img_file = cwd + "/docs/img/logo.png" + return PIL.Image.open(img_file).convert("RGB") + + [email protected](scope="function") +def tens(pil_img: PIL.Image.Image) -> torch.Tensor: + return tv.transforms.functional.to_tensor(pil_img).type(torch.uint8) def version_supported(support_dict: Union[str, Dict[str, str]]) -> bool: @@ -38,7 +44,11 @@ def version_supported(support_dict: Union[str, Dict[str, str]]) -> bool: return TORCHVISION_VERSION >= version.parse(support_dict["min_version"]) -def test_allowlist(root_client: sy.VirtualMachineClient, tens: torch.Tensor) -> None: +def test_allowlist( + root_client: sy.VirtualMachineClient, tens: torch.Tensor, pil_img: PIL.Image.Image +) -> None: + # Required for testing on torchvision==1.6.0 + sy.load("PIL") torchvision = root_client.torchvision torch = root_client.torch try: @@ -46,6 +56,14 @@ def test_allowlist(root_client: sy.VirtualMachineClient, tens: torch.Tensor) -> tx = tx * 2 except Exception as e: print(e) + + try: + with open(__file__.replace(".py", "_params.json"), "r") as f: + TEST_PARAMS = json.loads(f.read()) + except Exception as e: + print(f"Exception {e} triggered") + raise e + transforms = torchvision.transforms transforms.RandomAffine(2) for item in allowlist: @@ -54,13 +72,11 @@ def test_allowlist(root_client: sy.VirtualMachineClient, tens: torch.Tensor) -> if ( arr[1] == "datasets" and len(arr) <= 3 - and isinstance(allowlist[item], dict) - and "test_parameters" in allowlist[item].keys() + and item in TEST_PARAMS.keys() and version_supported(support_dict=allowlist[item]) ): - print(item) try: - exec(item + allowlist[item]["test_parameters"]) + exec(item + TEST_PARAMS[item]) except RuntimeError as e: assert ( "not found" in str(e) @@ -75,9 +91,8 @@ def test_allowlist(root_client: sy.VirtualMachineClient, tens: torch.Tensor) -> assert "No module named" in str(e) except KeyError: pass - elif ( - isinstance(allowlist[item], dict) - and version_supported(support_dict=allowlist[item]) - and "test_parameters" in allowlist[item].keys() + elif item in TEST_PARAMS.keys() and version_supported( + support_dict=allowlist[item] ): - exec(item + allowlist[item]["test_parameters"]) + print(item + TEST_PARAMS[item]) + exec(item + TEST_PARAMS[item]) diff --git a/tests/syft/lib/torchvision/allowlist_test_params.json b/tests/syft/lib/torchvision/allowlist_test_params.json new file mode 100644 --- /dev/null +++ b/tests/syft/lib/torchvision/allowlist_test_params.json @@ -0,0 +1,73 @@ +{ + "torchvision.datasets.CIFAR10": "('../data',)", + 
"torchvision.datasets.CIFAR100": "('../data',)", + "torchvision.datasets.CelebA": "('../data')", + "torchvision.datasets.Cityscapes": "('../data',)", + "torchvision.datasets.CocoCaptions": "('../data','../data/captions.txt')", + "torchvision.datasets.CocoDetection": "('../data', '../data/captions.txt')", + "torchvision.datasets.EMNIST": "('../data',split = \"mnist\")", + "torchvision.datasets.FakeData": "('../data', )", + "torchvision.datasets.FashionMNIST": "('../data',)", + "torchvision.datasets.Flickr30k": "('../data', '../data/annfile.txt')", + "torchvision.datasets.Flickr8k": "('../data', '../data/annfile.txt')", + "torchvision.datasets.HMDB51": "('../data', '../data/annfile.txt', 20, )", + "torchvision.datasets.ImageNet": "('../data',)", + "torchvision.datasets.KMNIST": "('../data', )", + "torchvision.datasets.Kinetics400": "('../data', 20)", + "torchvision.datasets.LSUN": "('../data', )", + "torchvision.datasets.MNIST": "('../data', download=False,)", + "torchvision.datasets.Omniglot": "('../data', )", + "torchvision.datasets.PhotoTour": "('../data', name = 'data')", + "torchvision.datasets.Places365": "('../data',)", + "torchvision.datasets.QMNIST": "('../data',)", + "torchvision.datasets.SBDataset": "('../data',)", + "torchvision.datasets.SBU": "('../data', download = False)", + "torchvision.datasets.STL10": "('../data',)", + "torchvision.datasets.SVHN": "('../data',)", + "torchvision.datasets.UCF101": "('../data', frames_per_clip = 20, annotation_path = '../data/annfile.txt')", + "torchvision.datasets.USPS": "('../data',)", + "torchvision.datasets.VOCDetection": "('../data',)", + "torchvision.datasets.VOCSegmentation": "('../data',)", + "torchvision.transforms.CenterCrop": "(10)", + "torchvision.transforms.ColorJitter": "(brightness=0, contrast=0, saturation=0, hue=0)", + "torchvision.transforms.GaussianBlur": "(kernel_size = 3)", + "torchvision.transforms.Grayscale": "(num_output_channels=1)", + "torchvision.transforms.Pad": "(2, fill=0, padding_mode='constant')", + "torchvision.transforms.RandomAffine": "(degrees = 2)", + "torchvision.transforms.RandomCrop": "(size = 10)", + "torchvision.transforms.RandomGrayscale": "(p=0.1)", + "torchvision.transforms.RandomHorizontalFlip": "(p=0.1)", + "torchvision.transforms.RandomPerspective": "(distortion_scale=0.5, p=0.5)", + "torchvision.transforms.RandomResizedCrop": "(10, scale=(0.08, 1.0), ratio=(0.75, 1.25))", + "torchvision.transforms.RandomRotation": "(degrees = 2)", + "torchvision.transforms.RandomSizedCrop": "(10)", + "torchvision.transforms.RandomVerticalFlip": "(p=0.5)", + "torchvision.transforms.Resize": "(size = 15)", + "torchvision.transforms.Scale": "(10)", + "torchvision.transforms.TenCrop": "(10)", + "torchvision.transforms.functional.adjust_brightness": "(pil_img, 0.5)", + "torchvision.transforms.functional.adjust_contrast": "(pil_img, 0.5)", + "torchvision.transforms.functional.adjust_gamma": "(pil_img, 1, 0.5)", + "torchvision.transforms.functional.adjust_hue": "(pil_img, 0)", + "torchvision.transforms.functional.adjust_saturation": "(pil_img, 0.5)", + "torchvision.transforms.functional.adjust_sharpness": "(pil_img, 0.5)", + "torchvision.transforms.functional.affine": "(pil_img,0.2,[1,2],0.2,[1,2])", + "torchvision.transforms.functional.autocontrast": "(pil_img)", + "torchvision.transforms.functional.center_crop": "(pil_img, 10)", + "torchvision.transforms.functional.crop": "(pil_img, 10 , 20, 30, 40)", + "torchvision.transforms.functional.equalize": "(tens)", + "torchvision.transforms.functional.erase": "(tens, 10, 
20, 30, 40, 250)", + "torchvision.transforms.functional.five_crop": "(pil_img, 10)", + "torchvision.transforms.functional.gaussian_blur": "(tens, 3)", + "torchvision.transforms.functional.invert": "(tens)", + "torchvision.transforms.functional.pad": "(pil_img, 10)", + "torchvision.transforms.functional.perspective": "(pil_img, [[10,20],[20,30],[30, 40],[40,50]], [[20,30],[30,40],[40, 50],[50,60]])", + "torchvision.transforms.functional.posterize": "(tens, 2)", + "torchvision.transforms.functional.resize": "(pil_img, 10)", + "torchvision.transforms.functional.resized_crop": "(pil_img, 10, 15, 20, 25, 30)", + "torchvision.transforms.functional.rgb_to_grayscale": "(pil_img)", + "torchvision.transforms.functional.rotate": "(pil_img, angle = 10)", + "torchvision.transforms.functional.solarize": "(tens, threshold = 0.5)", + "torchvision.transforms.functional.ten_crop": "(pil_img, size = 10)", + "torchvision.transforms.functional.vflip": "(tens)" +} \ No newline at end of file diff --git a/tests/syft/lib/torchvision/imageTensor.pt b/tests/syft/lib/torchvision/imageTensor.pt deleted file mode 100644 Binary files a/tests/syft/lib/torchvision/imageTensor.pt and /dev/null differ diff --git a/tests/syft/lib/torchvision/logo.png b/tests/syft/lib/torchvision/logo.png new file mode 100644 Binary files /dev/null and b/tests/syft/lib/torchvision/logo.png differ
Refactor TorchVision tests to use PIL Image

## Description
We can now support PIL images, so the torchvision tests can be updated to take advantage of this. As per this comment: https://github.com/OpenMined/PySyft/pull/5332#issuecomment-805979800

## Definition of Done
This method is enabled and tested, and the unused `imageTensor.pt` is removed.
```python
allowlist["torchvision.transforms.functional.pil_to_tensor"] = "torch.Tensor"
```
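The new fixtures in the test patch build the test tensor from a PIL image rather than loading `imageTensor.pt`. A minimal local sketch of that conversion, assuming a torchvision version that provides `pil_to_tensor`; the image path is a placeholder:

```python
import PIL.Image
import torch
import torchvision.transforms.functional as F

# Placeholder path; the test patch points this at the repo's docs/img/logo.png.
pil_img = PIL.Image.open("logo.png").convert("RGB")

# Equivalent of the new `tens` fixture: a uint8 tensor derived from the PIL image.
tens = F.to_tensor(pil_img).type(torch.uint8)

# The method named in the Definition of Done: converts without rescaling to [0, 1].
raw = F.pil_to_tensor(pil_img)  # uint8 tensor of shape (C, H, W)
```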
2021-03-28T08:26:21
OpenMined/PySyft
5,384
OpenMined__PySyft-5384
[ "5365" ]
4a62fd51efbde38ce807bad59e2eb81d00703026
diff --git a/src/syft/ast/klass.py b/src/syft/ast/klass.py --- a/src/syft/ast/klass.py +++ b/src/syft/ast/klass.py @@ -232,6 +232,12 @@ def __iter__(self: Any) -> Iterable: attr_name = "__iter__" iter_target = attrs[attr_name] + + # skip if __iter__ has already been wrapped + qual_name = getattr(iter_target, "__qualname__", None) + if qual_name and "wrap_iter" in qual_name: + return + if not callable(iter_target): traceback_and_raise(AttributeError("Can't wrap a non callable iter attribute")) else:
Improving Dict Iterator creation

## Description
Currently, if you trace it, we make 4 remote `__len__` requests when creating a Dict/OrderedDict remote iterator. Ideally, we should make only one. The same goes for any Iterator created from a Dict (`.values()`, `.items()`, etc.).

Snippet to reproduce:
```
import syft as sy

client = sy.VirtualMachine().get_root_client()
iter(client.syft.lib.python.collections.OrderedDict({1: 1, 2: 2}))
```
with a `print(attr_path_and_name)` added in `syft/ast/klass.py` at the top of the `run_class_method` function.
This is caused by `__iter__` being wrapped multiple times by: https://github.com/OpenMined/PySyft/blob/261869e50852a24b2d76f3b44a5819050acd9eb8/src/syft/ast/klass.py#L338-L342 For `OrderedDict`, `items`, `values` and `keys` all return an `Iterator`, so `__iter__` gets wrapped three times. I believe the wrapping was done this way to account for types that can't be iterated directly (no `__iter__`) but have built-in methods that return an `Iterator`. To avoid wrapping multiple times, either a flag can be added within this `for` loop, or the `wrap_iterator` function can check whether `__iter__` has already been wrapped and skip re-wrapping it (the approach taken in the current PR). @tudorcebere if this seems fine, I'll create a PR.
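The patch above implements the second option by inspecting `__qualname__` before re-wrapping. A standalone sketch of that idempotent-wrapping pattern; the function and attribute names here are illustrative, not PySyft's actual AST internals:

```python
from typing import Any, Callable, Dict


def wrap_iterator(attrs: Dict[str, Any]) -> None:
    """Wrap attrs['__iter__'] once; calling this again is a no-op."""

    def wrap_iter(iter_func: Callable) -> Callable:
        def __iter__(self: Any) -> Any:
            # A real implementation would issue a single remote __len__ here.
            return iter_func(self)

        return __iter__

    iter_target = attrs["__iter__"]

    # Skip if __iter__ has already been wrapped: the wrapper's __qualname__
    # contains "wrap_iter", which is exactly what the patch checks for.
    qual_name = getattr(iter_target, "__qualname__", None)
    if qual_name and "wrap_iter" in qual_name:
        return

    if not callable(iter_target):
        raise AttributeError("Can't wrap a non callable iter attribute")
    attrs["__iter__"] = wrap_iter(iter_target)


attrs = {"__iter__": dict.__iter__}
wrap_iterator(attrs)
first = attrs["__iter__"]
wrap_iterator(attrs)  # second call is a no-op thanks to the guard
assert attrs["__iter__"] is first
```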
2021-03-28T09:02:55
OpenMined/PySyft
5,385
OpenMined__PySyft-5385
[ "5344" ]
98cedf8ac02a4ecbfce3b3124f478297b09818c2
diff --git a/src/syft/lib/__init__.py b/src/syft/lib/__init__.py --- a/src/syft/lib/__init__.py +++ b/src/syft/lib/__init__.py @@ -5,7 +5,11 @@ from typing import Any from typing import Any as TypeAny from typing import Dict as TypeDict +from typing import Iterable +from typing import List as TypeList from typing import Optional +from typing import Set as TypeSet +from typing import Tuple as TypeTuple from typing import Union as TypeUnion import warnings @@ -142,20 +146,42 @@ def _load_lib(*, lib: str, options: TypeDict[str, TypeAny] = {}) -> None: _regenerate_unions(lib_ast=lib_ast, client=client) -def load(lib: str, options: TypeDict[str, TypeAny] = {}) -> None: +def load( + *libs: TypeUnion[TypeList[str], TypeTuple[str], TypeSet[str], str], + options: TypeDict[str, TypeAny] = {}, + **kwargs: str, +) -> None: """ Load and Update Node with given library module Args: - lib: name of library to load and update Node with + *libs: names of libraries to load and update Node with (can be variadic, tuple, list, set) options: external requirements for loading library successfully + **kwargs: for backward compatibility with calls like `syft.load(lib = "opacus")` """ - try: - _load_lib(lib=lib, options=options) - except VendorLibraryImportException as e: - critical(e) - except Exception as e: - critical(f"Unable to load package support for: {lib}. {e}") + # For backward compatibility with calls like `syft.load(lib = "opacus")` + if "lib" in kwargs.keys(): + libs += tuple(kwargs["lib"]) + + if isinstance(libs[0], Iterable): + if not isinstance(libs[0], str): + libs = tuple(libs[0]) + for lib in libs: + if isinstance(lib, str): + try: + _load_lib(lib=str(lib), options=options) + except VendorLibraryImportException as e: + critical(e) + except Exception as e: + critical(f"Unable to load package support for: {lib}. {e}") + else: + critical( + f"Unable to load package support for: {lib}. Pass lib name as string object." + ) + else: + critical( + "Unable to load package support for any library. Iterable object not found." + ) def load_lib(lib: str, options: TypeDict[str, TypeAny] = {}) -> None:
diff --git a/tests/syft/api/load_lib_test.py b/tests/syft/api/load_lib_test.py new file mode 100644 --- /dev/null +++ b/tests/syft/api/load_lib_test.py @@ -0,0 +1,36 @@ +# stdlib +import logging + +# third party +import _pytest + +# syft absolute +import syft as sy + + +def test_load_lib() -> None: + assert sy.lib.load(lib="tenseal") is None + assert sy.lib.load("tenseal", "opacus") is None + assert sy.lib.load(["tenseal", "opacus"]) is None + assert sy.lib.load(("tenseal", "opacus")) is None + assert sy.lib.load({"tenseal"}) is None + + +def test_load_errors(caplog: _pytest.logging.LogCaptureFixture) -> None: + # Error if a non-supported library is loaded + with caplog.at_level(logging.CRITICAL, logger="syft.logger"): + sy.lib.load("non_compatible") + assert "Unable to load package support for: non_compatible." in caplog.text + caplog.clear() + + # Error if non-string object type is attempted to be loaded + with caplog.at_level(logging.CRITICAL, logger="syft.logger"): + sy.lib.load([True]) + assert "Unable to load package support for: True." in caplog.text + caplog.clear() + + # Error if a non-iterable object is passed + with caplog.at_level(logging.CRITICAL, logger="syft.logger"): + sy.lib.load(True) + assert "Unable to load package support for any library." in caplog.text + caplog.clear()
Add ability to load multiple libs in one call

## Description
`sy.load()` should accept variadic string args or any valid sequence of strings (a list, tuple or set), like:
```
sy.load("pydp", "opacus")
sy.load(["pydp", "opacus"])
sy.load(("pydp", "opacus"))
sy.load({"pydp", "opacus"})
```

## Definition of Done
Loading multiple libs in one go is possible, with tests to show it works.
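A minimal sketch of how such a call signature can normalize its inputs before delegating to a single-library loader. It mirrors the shape of the patch above but uses an illustrative `_load_one` helper instead of PySyft's internal `_load_lib`:

```python
from collections.abc import Iterable
from typing import Any, Dict, Union


def _load_one(lib: str, options: Dict[str, Any]) -> None:
    # Placeholder for the real per-library loader (PySyft's _load_lib).
    print(f"loading {lib} with options {options}")


def load(*libs: Union[str, Iterable], options: Dict[str, Any] = {}, **kwargs: str) -> None:
    # Backward compatibility with calls like load(lib="opacus").
    if "lib" in kwargs:
        libs += (kwargs["lib"],)

    # A single list/tuple/set argument is flattened into the variadic form.
    if libs and isinstance(libs[0], Iterable) and not isinstance(libs[0], str):
        libs = tuple(libs[0])

    for lib in libs:
        if isinstance(lib, str):
            _load_one(lib, options)
        else:
            print(f"Unable to load package support for: {lib}. Pass the lib name as a string.")


load("pydp", "opacus")
load(["pydp", "opacus"])
```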
2021-03-28T17:28:56
OpenMined/PySyft
5,397
OpenMined__PySyft-5397
[ "5395" ]
fdca1ddfbf09b2e1a307a30f4b1687050d8fc697
diff --git a/src/syft/logger.py b/src/syft/logger.py --- a/src/syft/logger.py +++ b/src/syft/logger.py @@ -1,4 +1,5 @@ # stdlib +import logging import os from typing import Any from typing import Callable @@ -20,7 +21,7 @@ def remove() -> None: def add( - sink: Union[None, str, os.PathLike, TextIO] = None, + sink: Union[None, str, os.PathLike, TextIO, logging.Handler] = None, level: str = "ERROR", ) -> None: sink = DEFAULT_SINK if sink is None else sink @@ -40,7 +41,6 @@ def add( logger.add( sink=sink, format=LOG_FORMAT, - enqueue=True, colorize=False, diagnose=True, backtrace=True,
diff --git a/tests/conftest.py b/tests/conftest.py --- a/tests/conftest.py +++ b/tests/conftest.py @@ -7,8 +7,10 @@ """ # stdlib +import logging from typing import Any as TypeAny from typing import Dict as TypeDict +from typing import Generator from typing import List as TypeList # third party @@ -24,6 +26,17 @@ logger.remove() [email protected] +def caplog(caplog: _pytest.logging.LogCaptureFixture) -> Generator: + class PropogateHandler(logging.Handler): + def emit(self, record: logging.LogRecord) -> None: + logging.getLogger(record.name).handle(record) + + logger.add(PropogateHandler()) + yield caplog + logger.remove() + + def pytest_addoption(parser: _pytest.config.argparsing.Parser) -> None: parser.addoption( "--runslow", action="store_true", default=False, help="run slow tests"
Adding loguru compatibility with pytest caplog

## Description
The `caplog` fixture in pytest captures logging output so tests can check whether the appropriate warnings have been raised. By default pytest hooks into the standard `logging` module, but since we are using `loguru`, appropriate patching needs to be added.

## Additional Context
https://loguru.readthedocs.io/en/stable/resources/migration.html#making-things-work-with-pytest-and-caplog
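The conftest change above bridges loguru into the standard `logging` machinery so that `caplog` sees the records. A minimal sketch of that fixture, following the loguru migration guide linked above:

```python
# conftest.py (sketch)
import logging
from typing import Generator

import pytest
from loguru import logger


@pytest.fixture
def caplog(caplog: pytest.LogCaptureFixture) -> Generator:  # public alias in pytest >= 6.2
    class PropagateHandler(logging.Handler):
        def emit(self, record: logging.LogRecord) -> None:
            # Re-emit loguru records through the stdlib logger that
            # pytest's caplog is listening to.
            logging.getLogger(record.name).handle(record)

    handler_id = logger.add(PropagateHandler(), format="{message}")
    yield caplog
    logger.remove(handler_id)
```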
2021-03-30T04:02:03
OpenMined/PySyft
5,443
OpenMined__PySyft-5443
[ "5442" ]
ce2510e65f5bad382e88806bcde30fa38c3c76c4
diff --git a/src/syft/lib/python/__init__.py b/src/syft/lib/python/__init__.py --- a/src/syft/lib/python/__init__.py +++ b/src/syft/lib/python/__init__.py @@ -76,7 +76,6 @@ def create_python_ast(client: Optional[AbstractNodeClient] = None) -> Globals: ("syft.lib.python.Slice.__le__", "syft.lib.python.Bool"), ("syft.lib.python.Slice.__lt__", "syft.lib.python.Bool"), ("syft.lib.python.Slice.__ne__", "syft.lib.python.Bool"), - ("syft.lib.python.Slice.__repr__", "syft.lib.python.String"), ("syft.lib.python.Slice.__str__", "syft.lib.python.String"), ("syft.lib.python.Slice.indices", "syft.lib.python.Tuple"), ( diff --git a/src/syft/lib/python/slice.py b/src/syft/lib/python/slice.py --- a/src/syft/lib/python/slice.py +++ b/src/syft/lib/python/slice.py @@ -69,9 +69,6 @@ def __ne__(self, other: Any) -> SyPrimitiveRet: res = self.value.__ne__(other) return PrimitiveFactory.generate_primitive(value=res) - def __repr__(self) -> str: - return self.value.__repr__() - def __str__(self) -> str: return self.value.__str__()
slice __repr__

## Description
Proxying `Slice.__repr__` causes unnecessary interference and is not required: the proxied method returns a `StringPointer` rather than a plain `str`, so calling `repr()` on the store fails.

## How to Reproduce
```
...connect duet...
slice_ptr = sy.lib.python.Slice(1, 2).send(duet)
print(duet.store)
```
`TypeError: __repr__ returned non-string (type StringPointer)`

## Expected Behavior
`[<syft.proxy.syft.lib.python.SlicePointer object at (0xAddress)>]`
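The underlying constraint is a plain CPython rule, which is why the patch simply drops the proxied `__repr__` from `Slice` and from the allowlist: `__repr__` must return a builtin `str`, and returning anything else raises exactly the error above. A self-contained illustration with a stand-in pointer class:

```python
class FakePointer:
    """Stands in for a remote StringPointer returned by a proxied __repr__."""


class BadRepr:
    def __repr__(self) -> str:
        return FakePointer()  # not a str, so repr() will fail


try:
    repr(BadRepr())
except TypeError as e:
    # TypeError: __repr__ returned non-string (type FakePointer)
    print(e)
```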
2021-04-04T09:05:16
OpenMined/PySyft
5,457
OpenMined__PySyft-5457
[ "4794" ]
95398461c710b67e461ab821a775e1df3cbe5170
diff --git a/src/syft/lib/gym/__init__.py b/src/syft/lib/gym/__init__.py new file mode 100644 --- /dev/null +++ b/src/syft/lib/gym/__init__.py @@ -0,0 +1,63 @@ +# stdlib +import functools +from typing import Any as TypeAny +from typing import List as TypeList +from typing import Tuple as TypeTuple + +# third party +import gym + +# syft relative +from ...ast import add_classes +from ...ast import add_methods +from ...ast import add_modules +from ...ast.globals import Globals +from ..util import generic_update_ast + +LIB_NAME = "gym" +PACKAGE_SUPPORT = { + "lib": LIB_NAME, +} + + +def create_ast(client: TypeAny = None) -> Globals: + ast = Globals(client) + + modules: TypeList[TypeTuple[str, TypeAny]] = [ + ("gym", gym), + ("gym.wrappers", gym.wrappers), + ("gym.wrappers.time_limit", gym.wrappers.time_limit), + ] + classes: TypeList[TypeTuple[str, str, TypeAny]] = [ + ( + "gym.wrappers.time_limit.TimeLimit", + "gym.wrappers.time_limit.TimeLimit", + gym.wrappers.time_limit.TimeLimit, + ), + ( + "gym.Wrapper", + "gym.Wrapper", + gym.Wrapper, + ), + ] + + methods = [ + ("gym.make", "gym.wrappers.time_limit.TimeLimit"), + ("gym.wrappers.time_limit.TimeLimit.seed", "syft.lib.python.List"), + ("gym.wrappers.time_limit.TimeLimit.reset", "numpy.ndarray"), + ("gym.wrappers.time_limit.TimeLimit.step", "syft.lib.python.Tuple"), + ] + + add_modules(ast, modules) + add_classes(ast, classes) + add_methods(ast, methods) + + for klass in ast.classes: + klass.create_pointer_class() + klass.create_send_method() + klass.create_storable_object_attr_convenience_methods() + + return ast + + +update_ast = functools.partial(generic_update_ast, LIB_NAME, create_ast) diff --git a/src/syft/lib/torch/allowlist.py b/src/syft/lib/torch/allowlist.py --- a/src/syft/lib/torch/allowlist.py +++ b/src/syft/lib/torch/allowlist.py @@ -1156,6 +1156,7 @@ } allowlist["torch.bitwise_xor"] = "torch.Tensor" allowlist["torch.bmm"] = "torch.Tensor" +allowlist["torch.cat"] = "torch.Tensor" allowlist["torch.ceil_"] = "torch.Tensor" allowlist["torch.ceil"] = "torch.Tensor" allowlist["torch.cholesky_inverse"] = "torch.Tensor" @@ -1221,6 +1222,7 @@ allowlist["torch.fmod"] = "torch.Tensor" allowlist["torch.frac_"] = "torch.Tensor" allowlist["torch.frac"] = "torch.Tensor" +allowlist["torch.from_numpy"] = "torch.Tensor" allowlist["torch.gather"] = "torch.Tensor" allowlist["torch.ge"] = "torch.Tensor" allowlist["torch.geqrf"] = "torch.return_types.geqrf" @@ -1345,6 +1347,7 @@ "min_version": "1.5.0", } allowlist["torch.squeeze"] = "torch.Tensor" +allowlist["torch.stack"] = "torch.Tensor" allowlist["torch.std"] = "torch.Tensor" allowlist["torch.stft"] = "torch.Tensor" allowlist["torch.sub"] = "torch.Tensor" @@ -3329,6 +3332,9 @@ allowlist["torch.nn.ZeroPad2d.load_state_dict"] = "syft.lib.python._SyNone" allowlist["torch.nn.ZeroPad2d.extra_repr"] = "syft.lib.python.String" +allowlist["torch.distributions.Categorical"] = "torch.distributions.Categorical" +allowlist["torch.distributions.Categorical.sample"] = "torch.Tensor" +allowlist["torch.distributions.Categorical.log_prob"] = "torch.Tensor" allowlist["torch.Tensor.xpu"] = { "return_type": "torch.Tensor",
diff --git a/scripts/nb_duet_test.py b/scripts/nb_duet_test.py --- a/scripts/nb_duet_test.py +++ b/scripts/nb_duet_test.py @@ -62,6 +62,7 @@ + list(Path("examples/differential-privacy/opacus").rglob("*.ipynb")) + list(Path("examples/duet/mnist").rglob("*.ipynb")) + list(Path("examples/duet/mnist_lightning").rglob("*.ipynb")) + + list(Path("examples/duet/reinforcement_learning").rglob("*.ipynb")) ): if ".ipynb_checkpoints" in str(path): continue diff --git a/tests/syft/lib/gym/__init__.py b/tests/syft/lib/gym/__init__.py new file mode 100644 diff --git a/tests/syft/lib/gym/gym_test.py b/tests/syft/lib/gym/gym_test.py new file mode 100644 --- /dev/null +++ b/tests/syft/lib/gym/gym_test.py @@ -0,0 +1,36 @@ +# third party +import pytest + +# syft absolute +import syft as sy + + [email protected](lib="gym") +def test_remote_gym(root_client: sy.VirtualMachineClient) -> None: + sy.load("gym") + sy.load("numpy") + + # third party + import gym + import numpy as np + + remote_gym = root_client.gym + + env = gym.make("CartPole-v0") + remote_env = remote_gym.make("CartPole-v0") + + env.seed(42) + remote_env.seed(42) + assert remote_env.__name__ == "TimeLimitPointer" + + initial_state = env.reset() + remote_initial_state = remote_env.reset().get() + assert np.array_equal(initial_state, remote_initial_state) + + state, reward, done, info = env.step(0) + remote_state, remote_reward, remote_done, remote_info = remote_env.step(0).get() + + assert np.array_equal(state, remote_state) + assert reward == remote_reward + assert done == remote_done + assert info == remote_info
Add Reinforcement learning Duet Notebooks

## Description
Add two notebooks which reflect the Reinforcement learning example split into DO (Data Owner) and DS (Data Scientist): https://github.com/pytorch/examples/blob/master/reinforcement_learning/

## Definition of Done
The partially runnable Reinforcement learning notebook should be in the examples/duet/reinforcement_learning folder, and a README.md should be added in the parent examples/duet directory with a link to the original example and our notebook.
This issue has been marked stale because it has been open 30 days with no activity. Leave a comment or remove the `stale` label to unmark it. Otherwise, this will be closed in 7 days.
2021-04-10T08:43:54
OpenMined/PySyft
5,546
OpenMined__PySyft-5546
[ "5538" ]
2374afadba111c2b63f7360038c4e1f49795b63b
diff --git a/src/syft/proto/grid/messages/setup_messages_pb2.py b/src/syft/proto/grid/messages/setup_messages_pb2.py --- a/src/syft/proto/grid/messages/setup_messages_pb2.py +++ b/src/syft/proto/grid/messages/setup_messages_pb2.py @@ -25,7 +25,7 @@ syntax="proto3", serialized_options=None, create_key=_descriptor._internal_create_key, - serialized_pb=b'\n(proto/grid/messages/setup_messages.proto\x12\x12syft.grid.messages\x1a%proto/core/common/common_object.proto\x1a\x1bproto/core/io/address.proto"\xa4\x01\n\x19\x43reateInitialSetUpMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\'\n\x08reply_to\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x91\x01\n\x1a\x43reateInitialSetUpResponse\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x13\n\x0bstatus_code\x18\x02 \x01(\x05\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12&\n\x07\x61\x64\x64ress\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x9a\x01\n\x0fGetSetUpMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\'\n\x08reply_to\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x87\x01\n\x10GetSetUpResponse\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x13\n\x0bstatus_code\x18\x02 \x01(\x05\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12&\n\x07\x61\x64\x64ress\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Addressb\x06proto3', + serialized_pb=b'\n(proto/grid/messages/setup_messages.proto\x12\x12syft.grid.messages\x1a%proto/core/common/common_object.proto\x1a\x1bproto/core/io/address.proto"\xa4\x01\n\x19\x43reateInitialSetUpMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\'\n\x08reply_to\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x91\x01\n\x1a\x43reateInitialSetUpResponse\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x13\n\x0bstatus_code\x18\x02 \x01(\x05\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12&\n\x07\x61\x64\x64ress\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x9a\x01\n\x0fGetSetUpMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\'\n\x08reply_to\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x87\x01\n\x10GetSetUpResponse\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x13\n\x0bstatus_code\x18\x02 \x01(\x05\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12&\n\x07\x61\x64\x64ress\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x9d\x01\n\x12UpdateSetupMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\'\n\x08reply_to\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Address"\x8a\x01\n\x13UpdateSetupResponse\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x13\n\x0bstatus_code\x18\x02 \x01(\x05\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12&\n\x07\x61\x64\x64ress\x18\x04 \x01(\x0b\x32\x15.syft.core.io.Addressb\x06proto3', dependencies=[ proto_dot_core_dot_common_dot_common__object__pb2.DESCRIPTOR, proto_dot_core_dot_io_dot_address__pb2.DESCRIPTOR, @@ -424,6 +424,202 @@ serialized_end=740, 
) + +_UPDATESETUPMESSAGE = _descriptor.Descriptor( + name="UpdateSetupMessage", + full_name="syft.grid.messages.UpdateSetupMessage", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="msg_id", + full_name="syft.grid.messages.UpdateSetupMessage.msg_id", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="address", + full_name="syft.grid.messages.UpdateSetupMessage.address", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="content", + full_name="syft.grid.messages.UpdateSetupMessage.content", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="reply_to", + full_name="syft.grid.messages.UpdateSetupMessage.reply_to", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=743, + serialized_end=900, +) + + +_UPDATESETUPRESPONSE = _descriptor.Descriptor( + name="UpdateSetupResponse", + full_name="syft.grid.messages.UpdateSetupResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="msg_id", + full_name="syft.grid.messages.UpdateSetupResponse.msg_id", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="status_code", + full_name="syft.grid.messages.UpdateSetupResponse.status_code", + index=1, + number=2, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="content", + full_name="syft.grid.messages.UpdateSetupResponse.content", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + 
enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="address", + full_name="syft.grid.messages.UpdateSetupResponse.address", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=903, + serialized_end=1041, +) + _CREATEINITIALSETUPMESSAGE.fields_by_name[ "msg_id" ].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID @@ -454,6 +650,21 @@ _GETSETUPRESPONSE.fields_by_name[ "address" ].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS +_UPDATESETUPMESSAGE.fields_by_name[ + "msg_id" +].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID +_UPDATESETUPMESSAGE.fields_by_name[ + "address" +].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS +_UPDATESETUPMESSAGE.fields_by_name[ + "reply_to" +].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS +_UPDATESETUPRESPONSE.fields_by_name[ + "msg_id" +].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID +_UPDATESETUPRESPONSE.fields_by_name[ + "address" +].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS DESCRIPTOR.message_types_by_name[ "CreateInitialSetUpMessage" ] = _CREATEINITIALSETUPMESSAGE @@ -462,6 +673,8 @@ ] = _CREATEINITIALSETUPRESPONSE DESCRIPTOR.message_types_by_name["GetSetUpMessage"] = _GETSETUPMESSAGE DESCRIPTOR.message_types_by_name["GetSetUpResponse"] = _GETSETUPRESPONSE +DESCRIPTOR.message_types_by_name["UpdateSetupMessage"] = _UPDATESETUPMESSAGE +DESCRIPTOR.message_types_by_name["UpdateSetupResponse"] = _UPDATESETUPRESPONSE _sym_db.RegisterFileDescriptor(DESCRIPTOR) CreateInitialSetUpMessage = _reflection.GeneratedProtocolMessageType( @@ -508,5 +721,27 @@ ) _sym_db.RegisterMessage(GetSetUpResponse) +UpdateSetupMessage = _reflection.GeneratedProtocolMessageType( + "UpdateSetupMessage", + (_message.Message,), + { + "DESCRIPTOR": _UPDATESETUPMESSAGE, + "__module__": "proto.grid.messages.setup_messages_pb2" + # @@protoc_insertion_point(class_scope:syft.grid.messages.UpdateSetupMessage) + }, +) +_sym_db.RegisterMessage(UpdateSetupMessage) + +UpdateSetupResponse = _reflection.GeneratedProtocolMessageType( + "UpdateSetupResponse", + (_message.Message,), + { + "DESCRIPTOR": _UPDATESETUPRESPONSE, + "__module__": "proto.grid.messages.setup_messages_pb2" + # @@protoc_insertion_point(class_scope:syft.grid.messages.UpdateSetupResponse) + }, +) +_sym_db.RegisterMessage(UpdateSetupResponse) + # @@protoc_insertion_point(module_scope)
[PyGrid] Add Protobuf Messages to update Domain Settings

## Description
This issue is an extension of PySyft functionality for OpenMined/PyGrid#847.

## Are you interested in working on this improvement yourself?
- Yes, I am.
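The recorded patch only contains the regenerated `setup_messages_pb2.py` descriptors, which are hard to read on their own. As a rough sketch of what the new message looks like in use, assuming the module path from the diff header and a made-up JSON payload for `content` (this is illustrative, not part of the recorded patch):

```
# Illustrative only: build and serialize the new UpdateSetupMessage.
# The fields (msg_id, address, content, reply_to) come from the descriptor in
# the patch above; the import path is assumed from the diff header, and the
# JSON payload is a hypothetical example.
from syft.proto.grid.messages.setup_messages_pb2 import UpdateSetupMessage

msg = UpdateSetupMessage()
msg.content = '{"domain_name": "My Domain"}'  # hypothetical settings payload

payload = msg.SerializeToString()              # bytes ready to send over the wire
roundtrip = UpdateSetupMessage.FromString(payload)
assert roundtrip.content == msg.content
```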
2021-05-06T10:30:15
OpenMined/PySyft
5,626
OpenMined__PySyft-5626
[ "5338" ]
e8624fa79d82c60d2e05c76249cfd752cd99409a
diff --git a/packages/syft/src/syft/lib/torch/__init__.py b/packages/syft/src/syft/lib/torch/__init__.py --- a/packages/syft/src/syft/lib/torch/__init__.py +++ b/packages/syft/src/syft/lib/torch/__init__.py @@ -13,9 +13,11 @@ from . import return_types # noqa: 401 from . import size # noqa: 401 from . import uppercase_tensor # noqa: 401 +from ...ast import add_dynamic_objects from ...ast.globals import Globals from ...logger import info from .allowlist import allowlist +from .allowlist import dynamic_allowlist TORCH_VERSION = version.parse(torch.__version__.split("+")[0]) @@ -68,8 +70,11 @@ def create_torch_ast(client: Any = None) -> Globals: else: info(f"Skipping {method} not supported in {TORCH_VERSION}") + add_dynamic_objects(ast, list(dynamic_allowlist.items())) + for klass in ast.classes: klass.create_pointer_class() klass.create_send_method() klass.create_storable_object_attr_convenience_methods() + return ast diff --git a/packages/syft/src/syft/lib/torch/allowlist.py b/packages/syft/src/syft/lib/torch/allowlist.py --- a/packages/syft/src/syft/lib/torch/allowlist.py +++ b/packages/syft/src/syft/lib/torch/allowlist.py @@ -6,7 +6,7 @@ from ..misc.union import UnionGenerator allowlist: Dict[str, Union[str, Dict[str, str]]] = {} # (path: str, return_type:type) - +dynamic_allowlist: Dict[str, str] = {} # -------------------------------------------------------------------------------------- # SECTION - Tensor methods which are intentionally disabled # -------------------------------------------------------------------------------------- @@ -1811,6 +1811,7 @@ allowlist["torch.nn.Linear.load_state_dict"] = "syft.lib.python._SyNone" allowlist["torch.nn.Linear.extra_repr"] = "syft.lib.python.String" + # DataLoader allowlist["torch.utils.data.DataLoader"] = "torch.utils.data.DataLoader" allowlist["torch.utils.data.DataLoader.__iter__"] = "syft.lib.python.Iterator" @@ -3586,3 +3587,6 @@ "return_type": "torch.Tensor", "min_version": "1.8.0", } + +dynamic_allowlist["torch.nn.Linear.weight"] = "torch.nn.Parameter" +dynamic_allowlist["torch.nn.Linear.bias"] = "torch.nn.Parameter"
diff --git a/packages/syft/tests/syft/lib/torch/weight_bias_test.py b/packages/syft/tests/syft/lib/torch/weight_bias_test.py new file mode 100644 --- /dev/null +++ b/packages/syft/tests/syft/lib/torch/weight_bias_test.py @@ -0,0 +1,33 @@ +# third party +import pytest +import torch + +# syft absolute +import syft as sy + +input_list = [torch.nn.Linear(100, 10)] + + [email protected]("target_layer", input_list) +def test_weights_and_bias( + root_client: sy.VirtualMachineClient, target_layer: torch.nn.Module +) -> None: + original_bias = target_layer.bias + original_weight = target_layer.weight + + remote_target_mod = target_layer.send(root_client) + + remote_original_bias = remote_target_mod.bias + remote_original_weight = remote_target_mod.weight + + assert torch.equal(original_bias, remote_original_bias.get()) + assert torch.equal(original_weight, remote_original_weight.get()) + + new_bias = torch.nn.Parameter(torch.zeros_like(original_bias)) + new_weight = torch.nn.Parameter(torch.zeros_like(original_weight)) + + remote_target_mod.weight = new_weight + remote_target_mod.bias = new_bias + + assert torch.equal(remote_target_mod.weight.get(), new_weight) + assert torch.equal(remote_target_mod.bias.get(), new_bias)
Allow Object Attributes in AST

## Description
Allow object attributes to be listed in the allowlist AST. The problem is that they do not exist at class analysis time, and we want the bound function to have the context of the target data item. One approach could be to flag these entries as dynamic and, instead of binding the method, attach a generated function to the Pointer so that on the retrieval side we can attempt to locate the attribute at execution time.

## Definition of Done
Items like the following can be enabled and work as expected:
```
allowlist["torch.nn.Linear.weight"] = "torch.Tensor"
allowlist["torch.nn.Linear.bias"] = "torch.Tensor"
```
An additional use case is `grad` and `data` for Tensors, which are used in the `UnionGenerator` for `Plans`.
```
torch.Tensor.grad
torch.Tensor.data
```
Also see `torchvision`: https://github.com/OpenMined/PySyft/issues/5032 and https://github.com/OpenMined/PySyft/issues/5033 and https://github.com/OpenMined/PySyft/issues/5031

Hi, I did create a PR for the same issue a few months ago but it was closed for some reason. I would like to work on this PR and try out the method that you have suggested. Also, you can refer to the conversation we had previously in PR #5117.
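A minimal sketch of the dynamic-attribute idea described above: the attribute is not bound when the class AST is built, but resolved with `getattr` on the stored object when the pointer is used. The `DYNAMIC_ALLOWLIST` dict and `resolve_dynamic_attribute` helper are hypothetical illustrations standing in for syft's real machinery (the actual change is the `dynamic_allowlist` added in the patch):

```
# Hypothetical illustration of resolving an allowlisted attribute at run time
# instead of binding it at class-analysis time.
from typing import Any, Dict

import torch

# path -> expected return type, mirroring the dynamic_allowlist in the patch
DYNAMIC_ALLOWLIST: Dict[str, str] = {
    "torch.nn.Linear.weight": "torch.nn.Parameter",
    "torch.nn.Linear.bias": "torch.nn.Parameter",
}


def resolve_dynamic_attribute(obj: Any, path: str) -> Any:
    """Resolve the attribute named by the last segment of `path` on the stored object."""
    if path not in DYNAMIC_ALLOWLIST:
        raise PermissionError(f"{path} is not allowlisted")
    attr_name = path.rsplit(".", maxsplit=1)[-1]  # e.g. "weight"
    return getattr(obj, attr_name)                # looked up at execution time


layer = torch.nn.Linear(100, 10)
weight = resolve_dynamic_attribute(layer, "torch.nn.Linear.weight")
assert isinstance(weight, torch.nn.Parameter)
```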
2021-06-01T10:54:34
OpenMined/PySyft
5,732
OpenMined__PySyft-5732
[ "5710" ]
ff7128e3aa8b14e47dd13ead4b28f2055d003603
diff --git a/packages/grid/apps/domain/src/main/core/manager/role_manager.py b/packages/grid/apps/domain/src/main/core/manager/role_manager.py --- a/packages/grid/apps/domain/src/main/core/manager/role_manager.py +++ b/packages/grid/apps/domain/src/main/core/manager/role_manager.py @@ -2,6 +2,8 @@ from typing import List from typing import Union +from flask_sqlalchemy import BaseQuery + # grid relative from ..database.tables.roles import Role from ..exceptions import RoleNotFoundError @@ -32,8 +34,7 @@ def compliance_officer_role(self): def admin_role(self): return self.first(name="Administrator") - @property - def common_roles(self): + def _common_roles(self) -> BaseQuery: return self.db.session.query(self._schema).filter_by( can_triage_requests=False, can_edit_settings=False, @@ -44,9 +45,13 @@ def common_roles(self): can_manage_infrastructure=False, ) + @property + def common_roles(self): + return self._common_roles().all() + @property def org_roles(self): - return self.db.session.query(self._schema).except_(self.common_roles) + return self.db.session.query(self._schema).except_(self._common_roles).all() def first(self, **kwargs) -> Union[None, List]: result = super().first(**kwargs)
diff --git a/packages/grid/apps/domain/tests/test_core/test_manager/test_role_manager.py b/packages/grid/apps/domain/tests/test_core/test_manager/test_role_manager.py new file mode 100644 --- /dev/null +++ b/packages/grid/apps/domain/tests/test_core/test_manager/test_role_manager.py @@ -0,0 +1,385 @@ +# third party +from bcrypt import checkpw +import pytest +from src.main.core.database import * +from src.main.core.exceptions import RoleNotFoundError, InvalidCredentialsError +from src.main.core.manager.role_manager import RoleManager + +user_role = ("User", False, False, False, False, False, False, False) +admin_role = ("Administrator", True, True, True, True, False, False, True) +owner_role = ("Owner", True, True, True, True, True, True, True) +officer_role = ("Compliance Officer", True, False, False, False, False, False, False) + + [email protected] +def cleanup(database): + yield + try: + database.session.query(User).delete() + database.session.query(Role).delete() + database.session.query(Group).delete() + database.session.query(UserGroup).delete() + database.session.commit() + except: + database.session.rollback() + + +def test_create_role_manager(database, cleanup): + users = RoleManager(database) + + +def test_user_role(database, cleanup): + role_manager = RoleManager(database) + new_role = create_role(*admin_role) + database.session.add(new_role) + new_role = create_role(*admin_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + database.session.commit() + + retrieved_role = role_manager.user_role + + assert retrieved_role.id == 3 + assert retrieved_role.name == "User" + assert retrieved_role.can_edit_settings == False + assert retrieved_role.can_create_groups == False + assert retrieved_role.can_manage_infrastructure == False + assert retrieved_role.can_upload_data == False + assert retrieved_role.can_create_users == False + assert retrieved_role.can_triage_requests == False + assert retrieved_role.can_edit_roles == False + + +def test_user_role_mutiple_roles(database, cleanup): + role_manager = RoleManager(database) + new_role_1 = create_role(*user_role) + new_role_2 = create_role(*user_role) + new_role_3 = create_role(*user_role) + database.session.add(new_role_1) + database.session.add(new_role_2) + database.session.add(new_role_3) + database.session.commit() + + database.session.delete(new_role_1) + database.session.commit() + + retrieved_role = role_manager.user_role + + assert retrieved_role.id == 2 + assert retrieved_role.name == "User" + assert retrieved_role.can_edit_settings == False + assert retrieved_role.can_create_groups == False + assert retrieved_role.can_manage_infrastructure == False + assert retrieved_role.can_upload_data == False + assert retrieved_role.can_create_users == False + assert retrieved_role.can_triage_requests == False + assert retrieved_role.can_edit_roles == False + + +def test_admin_role(database, cleanup): + role_manager = RoleManager(database) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*admin_role) + database.session.add(new_role) + new_role = create_role(*admin_role) + database.session.add(new_role) + database.session.commit() + + retrieved_role = role_manager.admin_role + + assert retrieved_role.id == 3 + assert retrieved_role.name == "Administrator" + assert retrieved_role.can_edit_settings == True + assert retrieved_role.can_create_groups == True + assert 
retrieved_role.can_manage_infrastructure == False + assert retrieved_role.can_upload_data == True + assert retrieved_role.can_create_users == True + assert retrieved_role.can_triage_requests == True + assert retrieved_role.can_edit_roles == False + + +def test_owner_role(database, cleanup): + role_manager = RoleManager(database) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*owner_role) + database.session.add(new_role) + new_role = create_role(*admin_role) + database.session.add(new_role) + database.session.commit() + new_role = create_role(*owner_role) + database.session.add(new_role) + + retrieved_role = role_manager.owner_role + + assert retrieved_role.id == 3 + assert retrieved_role.name == "Owner" + assert retrieved_role.can_edit_settings == True + assert retrieved_role.can_create_groups == True + assert retrieved_role.can_manage_infrastructure == True + assert retrieved_role.can_upload_data == True + assert retrieved_role.can_create_users == True + assert retrieved_role.can_triage_requests == True + assert retrieved_role.can_edit_roles == True + + +def test_compliance_officer_role(database, cleanup): + role_manager = RoleManager(database) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*owner_role) + database.session.add(new_role) + new_role = create_role(*officer_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*admin_role) + database.session.add(new_role) + database.session.commit() + new_role = create_role(*officer_role) + database.session.add(new_role) + + retrieved_role = role_manager.compliance_officer_role + + assert retrieved_role.id == 3 + assert retrieved_role.name == "Compliance Officer" + assert retrieved_role.can_edit_settings == False + assert retrieved_role.can_create_groups == False + assert retrieved_role.can_manage_infrastructure == False + assert retrieved_role.can_upload_data == False + assert retrieved_role.can_create_users == False + assert retrieved_role.can_triage_requests == True + assert retrieved_role.can_edit_roles == False + + +def test_common_roles(database, cleanup): + role_manager = RoleManager(database) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*owner_role) + database.session.add(new_role) + new_role = create_role(*officer_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*admin_role) + database.session.add(new_role) + new_role = create_role(*officer_role) + database.session.add(new_role) + database.session.commit() + + common_roles = role_manager.common_roles + + assert len(common_roles) == 2 + + assert common_roles[0].id == 1 + assert common_roles[0].name == "User" + assert common_roles[0].can_edit_settings == False + assert common_roles[0].can_create_groups == False + assert common_roles[0].can_manage_infrastructure == False + assert common_roles[0].can_upload_data == False + assert common_roles[0].can_create_users == False + assert common_roles[0].can_triage_requests == False + assert common_roles[0].can_edit_roles == False + + assert common_roles[1].id == 4 + assert common_roles[1].name == "User" + assert common_roles[1].can_edit_settings == False + assert common_roles[1].can_create_groups == False + assert common_roles[1].can_manage_infrastructure == False + assert 
common_roles[1].can_upload_data == False + assert common_roles[1].can_create_users == False + assert common_roles[1].can_triage_requests == False + assert common_roles[1].can_edit_roles == False + + +def test_org_roles(database, cleanup): + role_manager = RoleManager(database) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*owner_role) + database.session.add(new_role) + new_role = create_role(*officer_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*admin_role) + database.session.add(new_role) + database.session.commit() + + org_roles = role_manager.org_roles + + assert len(org_roles) == 3 + + assert org_roles[0].id == 2 + assert org_roles[0].name == "Owner" + assert org_roles[0].can_edit_settings == True + assert org_roles[0].can_create_groups == True + assert org_roles[0].can_manage_infrastructure == True + assert org_roles[0].can_upload_data == True + assert org_roles[0].can_create_users == True + assert org_roles[0].can_triage_requests == True + assert org_roles[0].can_edit_roles == True + + assert org_roles[1].id == 3 + assert org_roles[1].name == "Compliance Officer" + assert org_roles[1].can_edit_settings == False + assert org_roles[1].can_create_groups == False + assert org_roles[1].can_manage_infrastructure == False + assert org_roles[1].can_upload_data == False + assert org_roles[1].can_create_users == False + assert org_roles[1].can_triage_requests == True + assert org_roles[1].can_edit_roles == False + + assert org_roles[2].id == 5 + assert org_roles[2].name == "Administrator" + assert org_roles[2].can_edit_settings == True + assert org_roles[2].can_create_groups == True + assert org_roles[2].can_manage_infrastructure == False + assert org_roles[2].can_upload_data == True + assert org_roles[2].can_create_users == True + assert org_roles[2].can_triage_requests == True + assert org_roles[2].can_edit_roles == False + + +def test_first(database, cleanup): + role_manager = RoleManager(database) + new_role = create_role(*owner_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*officer_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*admin_role) + database.session.add(new_role) + database.session.commit() + + retrieved_role = role_manager.first(**{"name": "User"}) + + assert retrieved_role.id == 2 + assert retrieved_role.name == "User" + assert retrieved_role.can_edit_settings == False + assert retrieved_role.can_create_groups == False + assert retrieved_role.can_manage_infrastructure == False + assert retrieved_role.can_upload_data == False + assert retrieved_role.can_create_users == False + assert retrieved_role.can_triage_requests == False + assert retrieved_role.can_edit_roles == False + + +def test_first_fail(database, cleanup): + role_manager = RoleManager(database) + new_role = create_role(*owner_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*officer_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*admin_role) + database.session.add(new_role) + database.session.commit() + + with pytest.raises(RoleNotFoundError): + retrieved_role = role_manager.first(**{"name": "Invalid"}) + + +def test_query(database, 
cleanup): + role_manager = RoleManager(database) + new_role = create_role(*owner_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*officer_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*admin_role) + database.session.add(new_role) + database.session.commit() + + retrieved_roles = role_manager.query(name="Compliance Officer") + + assert len(retrieved_roles) == 1 + assert retrieved_roles[0].id == 3 + assert retrieved_roles[0].name == "Compliance Officer" + assert retrieved_roles[0].can_edit_settings == False + assert retrieved_roles[0].can_create_groups == False + assert retrieved_roles[0].can_manage_infrastructure == False + assert retrieved_roles[0].can_upload_data == False + assert retrieved_roles[0].can_create_users == False + assert retrieved_roles[0].can_triage_requests == True + assert retrieved_roles[0].can_edit_roles == False + + +def test_query_fail(database, cleanup): + role_manager = RoleManager(database) + new_role = create_role(*owner_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*officer_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*admin_role) + database.session.add(new_role) + database.session.commit() + + with pytest.raises(RoleNotFoundError): + retrieved_roles = role_manager.query(name="404 Officer") + + +def test_set(database, cleanup): + role_manager = RoleManager(database) + new_role = create_role(*owner_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*officer_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*admin_role) + database.session.add(new_role) + database.session.commit() + + role_manager.set(3, {"name": "404 Officer", "can_upload_data": True}) + + edited_role = database.session.query(Role).get(3) + + assert edited_role.name == "404 Officer" + assert edited_role.can_edit_settings == False + assert edited_role.can_create_groups == False + assert edited_role.can_manage_infrastructure == False + assert edited_role.can_upload_data == True + assert edited_role.can_create_users == False + assert edited_role.can_triage_requests == True + assert edited_role.can_edit_roles == False + + +def test_set_fail(database, cleanup): + role_manager = RoleManager(database) + new_role = create_role(*owner_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*officer_role) + database.session.add(new_role) + new_role = create_role(*user_role) + database.session.add(new_role) + new_role = create_role(*admin_role) + database.session.add(new_role) + database.session.commit() + + with pytest.raises(RoleNotFoundError): + role_manager.set(10, {"name": "404 Officer", "can_upload_data": True})
Add tests for domain's RoleManager

## Description
Add unit tests for the grid domain's RoleManager in the module `grid/apps/domain/src/main/core/manager`.

## Type of Test
- [x] **Unit test** (e.g. checking a loop, method, or function is working as intended)
- [ ] **Integration test** (e.g. checking if a certain group or set of functionality is working as intended)
- [ ] **Regression test** (e.g. checking if adding or removing a module of code allows other systems to continue to function as intended)
- [ ] **Stress test** (e.g. checking to see how well a system performs under various situations, including heavy usage)
- [ ] **Performance test** (e.g. checking to see how efficient a system is at performing the intended task)
- [ ] Other...

## Expected Behavior
Ideally, the tests should cover as many methods as possible, within reason.
2021-06-24T23:47:37
OpenMined/PySyft
5,993
OpenMined__PySyft-5993
[ "5892" ]
6827e2faa7f16da0f7d16e2d6767a5aa2e86c4cc
diff --git a/packages/syft/src/syft/core/tensor/__init__.py b/packages/syft/src/syft/core/tensor/__init__.py --- a/packages/syft/src/syft/core/tensor/__init__.py +++ b/packages/syft/src/syft/core/tensor/__init__.py @@ -178,6 +178,14 @@ def create_tensor_ast(client: Optional[AbstractNodeClient] = None) -> Globals: "syft.core.tensor.smpc.share_tensor.ShareTensor.__mul__", "syft.core.tensor.smpc.share_tensor.ShareTensor", ), + ( + "syft.core.tensor.smpc.share_tensor.ShareTensor.__matmul__", + "syft.core.tensor.smpc.share_tensor.ShareTensor", + ), + ( + "syft.core.tensor.smpc.share_tensor.ShareTensor.__rmatmul__", + "syft.core.tensor.smpc.share_tensor.ShareTensor", + ), ( "syft.core.tensor.smpc.share_tensor.ShareTensor.sum", "syft.core.tensor.smpc.share_tensor.ShareTensor", diff --git a/packages/syft/src/syft/core/tensor/smpc/mpc_tensor.py b/packages/syft/src/syft/core/tensor/smpc/mpc_tensor.py --- a/packages/syft/src/syft/core/tensor/smpc/mpc_tensor.py +++ b/packages/syft/src/syft/core/tensor/smpc/mpc_tensor.py @@ -364,8 +364,11 @@ def __apply_private_op(self, other: MPCTensor, op_str: str) -> List[ShareTensor] def __apply_public_op(self, y: Any, op_str: str) -> List[ShareTensor]: op = getattr(operator, op_str) - if op_str in {"mul", "matmul", "add", "sub"}: + if op_str in {"mul", "matmul"}: res_shares = [op(share, y) for share in self.child] + elif op_str in {"add", "sub"}: + res_shares = self.child + res_shares[0] = op(res_shares[0], y) else: raise ValueError(f"{op_str} not supported") @@ -481,6 +484,24 @@ def mul( return res + def matmul( + self, y: Union[int, float, np.ndarray, torch.tensor, "MPCTensor"] + ) -> MPCTensor: + """Apply the "matmul" operation between "self" and "y" + + Args: + y (Union[int, float, np.ndarray, torch.tensor, "MPCTensor"]): self @ y + + Returns: + MPCTensor: Result of the opeartion. + """ + if isinstance(y, ShareTensor): + raise ValueError("Private matmul not supported yet") + + res = self.__apply_op(y, "matmul") + + return res + def __str__(self) -> str: res = "MPCTensor" for share in self.child: @@ -529,6 +550,7 @@ def put( __rsub__ = rsub __mul__ = mul __rmul__ = mul + __matmul__ = matmul @implements(MPCTensor, np.add) diff --git a/packages/syft/src/syft/core/tensor/smpc/share_tensor.py b/packages/syft/src/syft/core/tensor/smpc/share_tensor.py --- a/packages/syft/src/syft/core/tensor/smpc/share_tensor.py +++ b/packages/syft/src/syft/core/tensor/smpc/share_tensor.py @@ -315,7 +315,10 @@ def matmul( Returns: ShareTensor: Result of the operation. """ - ShareTensor.sanity_checks(y) + if isinstance(y, ShareTensor): + raise ValueError("Private matmul not supported yet") + + ShareTensor.sanity_check(y) new_share = self.apply_function(y, "matmul") return new_share @@ -328,8 +331,12 @@ def rmatmul(self, y: torch.Tensor) -> "ShareTensor": Returns: ShareTensor. Result of the operation. """ - ShareTensor.sanity_checks(y) - return y.matmul(self) + if isinstance(y, ShareTensor): + raise ValueError("Private matmul not supported yet") + + ShareTensor.sanity_check(y) + new_share = y.apply_function(self, "matmul") + return new_share # TRASK: commenting out because ShareTEnsor doesn't appear to have .session_uuid or .config # def div(
diff --git a/packages/syft/tests/syft/core/tensor/smpc/smpc_tensor_test.py b/packages/syft/tests/syft/core/tensor/smpc/smpc_tensor_test.py --- a/packages/syft/tests/syft/core/tensor/smpc/smpc_tensor_test.py +++ b/packages/syft/tests/syft/core/tensor/smpc/smpc_tensor_test.py @@ -90,6 +90,25 @@ def test_mpc_private_public_op(op_str: str, public_value_type: str) -> None: assert (res == expected).all() [email protected]("op_str", ["matmul"]) +def test_mpc_matmul_op(op_str: str) -> None: + value_1 = np.array([[1, 7], [3, -7]], dtype=np.int32) + value_2 = np.array([[6, 2], [-6, 5]], dtype=np.int32) + + remote_value_1 = clients[0].syft.core.tensor.tensor.Tensor(value_1) + + mpc_tensor_1 = MPCTensor( + parties=clients, secret=remote_value_1, shape=(2, 2), seed_shares=52 + ) + + op = getattr(operator, op_str) + + res = op(mpc_tensor_1, value_2).reconstruct() + expected = op(value_1, value_2) + + assert (res == expected).all() + + @pytest.mark.parametrize("public_value_type", ["int", "torch_tensor", "numpy_array"]) @pytest.mark.parametrize("op_str", ["add", "sub", "mul"]) def test_mpc_public_private_op(op_str: str, public_value_type: str) -> None:
[SMPC] Implement matmul

## Feature Description
Implement matrix multiplication for SMPC. Follow the example of the `mul` operation from the `mpc_tensor.py` file.

## Is your feature request related to a problem?
Yes. We need the possibility to run matrix multiplication.

## What alternatives have you considered?

## Acceptance Criteria
- [ ] Implement the functionality
- [ ] Add tests
- [ ] Update the SMPC notebook
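The patch only supports the public-operand case (each local share is multiplied with the public value, and private @ private is rejected). A small NumPy sketch, independent of syft, of why matmul with a public matrix distributes over additive shares:

```
# Additive secret sharing: x = x0 + x1, so x @ y == (x0 @ y) + (x1 @ y)
# when y is public. This is the property the public matmul path relies on.
import numpy as np

rng = np.random.default_rng(42)

x = np.array([[1, 7], [3, -7]], dtype=np.int64)   # secret value
y = np.array([[6, 2], [-6, 5]], dtype=np.int64)   # public value

x0 = rng.integers(-100, 100, size=x.shape)        # random share for party 0
x1 = x - x0                                       # remaining share for party 1

res_shares = [x0 @ y, x1 @ y]                     # each party computes locally
reconstructed = sum(res_shares)                   # "reconstruct" by adding shares

assert np.array_equal(reconstructed, x @ y)
```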
2021-09-21T03:37:24
OpenMined/PySyft
6,421
OpenMined__PySyft-6421
[ "6420" ]
b858ab54b8785396cb40c3f05fd8258009e8356a
diff --git a/packages/syft/src/syft/core/tensor/smpc/share_tensor.py b/packages/syft/src/syft/core/tensor/smpc/share_tensor.py --- a/packages/syft/src/syft/core/tensor/smpc/share_tensor.py +++ b/packages/syft/src/syft/core/tensor/smpc/share_tensor.py @@ -307,7 +307,7 @@ def generate_przs( getattr(value, "dtype", None), None ) if ring_size_from_type is None: - logger.warning("Could not get ring size from {value}") + logger.warning(f"Could not get ring size from {value}") else: ring_size_final = ring_size_from_type numpy_type = value.dtype diff --git a/packages/syft/src/syft/core/tensor/smpc/utils.py b/packages/syft/src/syft/core/tensor/smpc/utils.py --- a/packages/syft/src/syft/core/tensor/smpc/utils.py +++ b/packages/syft/src/syft/core/tensor/smpc/utils.py @@ -90,7 +90,7 @@ def get_ring_size( """ if x_ring_size != y_ring_size: raise ValueError( - "Expected the same ring size for x and y ({x_ring_size} vs {y_ring_size})" + f"Expected the same ring size for x and y ({x_ring_size} vs {y_ring_size})" ) return x_ring_size
diff --git a/packages/syft/tests/syft/grid/messages/messages_test.py b/packages/syft/tests/syft/grid/messages/messages_test.py --- a/packages/syft/tests/syft/grid/messages/messages_test.py +++ b/packages/syft/tests/syft/grid/messages/messages_test.py @@ -297,7 +297,7 @@ def test_message(message_name: str, node: sy.VirtualMachine) -> None: if response_content is None: pytest.skip( - "{message_name} does not have a response added to the test configuration" + f"{message_name} does not have a response added to the test configuration" ) res_func = getattr(lib, message_name + "Response")
Missing `f` prefix on f-strings

Some strings look like they're meant to be f-strings but are missing the `f` prefix, meaning variable interpolation won't happen.

https://github.com/OpenMined/PySyft/blob/ded31aa5e99bcbfc628cf2027958679b55b48c5c/packages/syft/src/syft/core/tensor/smpc/share_tensor.py#L283

https://github.com/OpenMined/PySyft/blob/ded31aa5e99bcbfc628cf2027958679b55b48c5c/packages/syft/src/syft/core/tensor/smpc/utils.py#L85

https://github.com/OpenMined/PySyft/blob/ded31aa5e99bcbfc628cf2027958679b55b48c5c/packages/syft/tests/syft/grid/messages/messages_test.py#L300

I found this issue automatically. I'm a bot. Beep Boop 🦊. See other issues I found in your repo [here](https://codereview.doctor/OpenMined/PySyft)
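For reference, the bug class here is simply a missing `f` prefix, so the `{...}` placeholder is printed literally instead of being interpolated (the message text is taken from the patch above):

```
value = "torch.Tensor"

print("Could not get ring size from {value}")   # literal braces, no interpolation
print(f"Could not get ring size from {value}")  # f-string: value is substituted
```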
2022-04-23T22:21:00
OpenMined/PySyft
6,639
OpenMined__PySyft-6639
[ "6604" ]
a591dea8bbdc53a3df8b8022f7a9341e75c907c5
diff --git a/packages/hagrid/hagrid/cli.py b/packages/hagrid/hagrid/cli.py --- a/packages/hagrid/hagrid/cli.py +++ b/packages/hagrid/hagrid/cli.py @@ -2,7 +2,9 @@ from datetime import datetime import json import os +from pathlib import Path import re +import shutil import socket import stat import subprocess # nosec @@ -24,6 +26,7 @@ import requests import rich from rich.live import Live +from virtualenvapi.manage import VirtualEnvironment # relative from . import __version__ @@ -65,6 +68,8 @@ from .rand_sec import generate_sec_random_password from .style import RichGroup +LATEST_STABLE_SYFT = "0.6" + def get_azure_image(short_name: str) -> str: prebuild_070 = ( @@ -2405,14 +2410,195 @@ def check( # add Hagrid info to the cli [email protected](help="Show Hagrid info") [email protected](help="Show HAGrid info") def version() -> None: - print(f"Hagrid version: {__version__}") + print(f"HAGrid version: {__version__}") cli.add_command(version) [email protected](help="Launch a Syft + Jupyter Session with a Notebook URL / Path") [email protected]("url", type=str, required=False) [email protected]( + "--reset", + is_flag=True, + show_default=True, + default=False, + help="Force hagrid quickstart to setup a fresh virtualenv", +) [email protected]( + "--syft", + default="latest", + help="Choose a syft version or just use latest", +) [email protected]( + "--quiet", + is_flag=True, + show_default=True, + default=False, + help="Silence confirmation prompts", +) [email protected]( + "--pre", + is_flag=True, + show_default=True, + default=False, + help="Install pre-release versions of syft", +) [email protected]( + "--python", + default=None, + help="Specify the path to which python to use", +) +def quickstart( + url: Optional[str] = None, + syft: str = "latest", + reset: bool = False, + quiet: bool = False, + pre: bool = False, + python: Optional[str] = None, +) -> None: + try: + directory = os.path.expanduser("~/.hagrid/quickstart/") + confirm_reset = None + if reset: + if not quiet: + confirm_reset = click.confirm( + "This will create a new quickstart virtualenv and reinstall Syft and " + "Jupyter. Are you sure you want to continue?" + ) + else: + confirm_reset = True + if confirm_reset is False: + return + + if reset and confirm_reset or not os.path.isdir(directory): + quickstart_setup( + directory=directory, + syft_version=syft, + reset=reset, + pre=pre, + python=python, + ) + + if url: + file_path = quickstart_download_notebook( + url=url, directory=directory, reset=reset + ) + else: + file_path = add_intro_notebook(directory=directory, reset=reset) + + # add virtualenv path + environ = os.environ.copy() + environ["PATH"] = directory + ".venv/bin" + os.pathsep + environ["PATH"] + try: + print( + f"Running Jupyter Lab in: {directory}\nUse Control-C to stop this server." 
+ ) + proc = subprocess.Popen( # nosec + f"jupyter lab --notebook-dir={directory} {file_path}".split(" "), + cwd=directory, + env=environ, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + proc.communicate() + except KeyboardInterrupt: + proc.kill() # make sure jupyter gets killed + sys.exit(1) + except Exception as e: + print(f"Error running quickstart: {e}") + + +def quickstart_setup( + directory: str, + syft_version: str, + reset: bool = False, + pre: bool = False, + python: Optional[str] = None, +) -> None: + os.makedirs(directory, exist_ok=True) + virtual_env_dir = os.path.abspath(directory + ".venv/") + if reset and os.path.exists(virtual_env_dir): + shutil.rmtree(virtual_env_dir) + env = VirtualEnvironment(virtual_env_dir, python=python) + + print("Installing Jupyter Labs") + env.install("jupyterlab") + env.install("ipywidgets") + + if EDITABLE_MODE: + local_syft_dir = Path(os.path.abspath(Path(hagrid_root()) / "../syft")) + print("Installing Syft in Editable Mode") + env.install("-e " + str(local_syft_dir)) + else: + options = [] + options.append("--force") + if syft_version == "latest": + syft_version = LATEST_STABLE_SYFT + package = f"syft>={syft_version}" + if pre: + package = f"{package}.dev0" # force pre release + else: + package = f"syft=={syft_version}" + + if pre: + options.append("--pre") + print(f"Installing {package} --pre") + else: + print(f"Installing {package}") + env.install(package, options=options) + + +def quickstart_download_notebook(url: str, directory: str, reset: bool = False) -> str: + file_name = os.path.basename(url).replace("%20", "_") + file_path = os.path.abspath(directory + file_name) + + file_exists = os.path.isfile(file_path) + + if file_exists and not reset: + reset = click.confirm( + f"You already have the notebook {file_name}. " + "Are you sure you want to overwrite it?" + ) + + if not file_exists or file_exists and reset: + print(f"Downloading the notebook: {file_name}") + r = requests.get(url, allow_redirects=True) + with open(os.path.expanduser(file_path), "wb") as f: + f.write(r.content) + return file_path + + +def add_intro_notebook(directory: str, reset: bool = False) -> str: + files = os.listdir(directory) + files.remove(".venv") + + filename = "00-quickstart.ipynb" + file_path = os.path.abspath(f"{directory}/{filename}") + + if len(files) == 0 or reset: + if EDITABLE_MODE: + local_src_dir = Path(os.path.abspath(Path(hagrid_root()) / "../../")) + shutil.copyfile( + local_src_dir / f"notebooks/quickstart/{filename}", + file_path, + ) + else: + url = ( + "https://raw.githubusercontent.com/OpenMined/PySyft/dev/" + + f"notebooks/quickstart/{filename}" + ) + file_path = quickstart_download_notebook( + url=url, directory=directory, reset=reset + ) + return file_path + + +cli.add_command(quickstart) + + def ssh_into_remote_machine( host_ip: str, username: str, diff --git a/packages/hagrid/setup.py b/packages/hagrid/setup.py --- a/packages/hagrid/setup.py +++ b/packages/hagrid/setup.py @@ -21,6 +21,7 @@ "requests", "rich", "setuptools", + "virtualenv-api", ] if platform.system().lower() != "windows":
diff --git a/.github/workflows/hagrid-pr_tests.yml b/.github/workflows/hagrid-pr_tests.yml --- a/.github/workflows/hagrid-pr_tests.yml +++ b/.github/workflows/hagrid-pr_tests.yml @@ -170,3 +170,8 @@ jobs: if: steps.changes.outputs.hagrid == 'true' run: | hagrid debug + + - name: Run hagrid quickstart + if: steps.changes.outputs.hagrid == 'true' + run: | + hagrid quickstart --reset --quiet diff --git a/tests/course/tests/lesson_4_node_maintenance_test.py b/tests/course/tests/lesson_4_node_maintenance_test.py --- a/tests/course/tests/lesson_4_node_maintenance_test.py +++ b/tests/course/tests/lesson_4_node_maintenance_test.py @@ -17,10 +17,13 @@ def test_login(tb): # Check if login messages were printed assert tb.cell_output_text(4) is not None # Check if users are present in the domain node - assert tb.ref("list(domain_node.users.pandas()['email'].values)") == [ + expected_emails = [ "[email protected]", "[email protected]", ] + emails = tb.ref("list(domain_node.users.pandas()['email'].values)") + assert set(emails) == set(expected_emails) + # Check if data scientist client is initialized assert tb.ref("data_scientist_node") is not None assert tb.ref("data_scientist_node.version") is not None
Hagrid command

## Feature Description
`hagrid quickstart` or a similar command that does all the setup and works as a beautiful quickstart command for first-timers and beginners, as suggested by @madhavajay.

## Is your feature request related to a problem?
This would work perfectly for first-timers as well as the planned "Quickstart" section in the README.

## Additional Context
I would love to co-work on this with you and will be incorporating a GIF from this for our README.
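A condensed sketch of the setup flow the patch implements: create a virtualenv under `~/.hagrid/quickstart/`, install Jupyter Lab and syft into it, and download a notebook. Only calls that appear in the diff are used (`virtualenvapi.manage.VirtualEnvironment`, `env.install`, `requests.get`); the directory and notebook URL below are the defaults taken from the patch, not requirements:

```
# Condensed version of the quickstart flow from the patch above.
import os

import requests
from virtualenvapi.manage import VirtualEnvironment

directory = os.path.expanduser("~/.hagrid/quickstart/")
os.makedirs(directory, exist_ok=True)

# create the virtualenv and install the tooling into it
env = VirtualEnvironment(os.path.join(directory, ".venv"))
env.install("jupyterlab")
env.install("syft>=0.6")

# fetch the intro notebook next to the virtualenv
url = (
    "https://raw.githubusercontent.com/OpenMined/PySyft/dev/"
    "notebooks/quickstart/00-quickstart.ipynb"
)
notebook_path = os.path.join(directory, os.path.basename(url))
with open(notebook_path, "wb") as f:
    f.write(requests.get(url, allow_redirects=True).content)
```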
2022-07-15T19:55:37
OpenMined/PySyft
6,686
OpenMined__PySyft-6686
[ "6677" ]
d541f8909d02841dd9c0bee1b53ebcde1dc7dd1a
diff --git a/packages/hagrid/hagrid/cli.py b/packages/hagrid/hagrid/cli.py --- a/packages/hagrid/hagrid/cli.py +++ b/packages/hagrid/hagrid/cli.py @@ -13,12 +13,14 @@ from typing import Any from typing import Callable from typing import Dict as TypeDict +from typing import List from typing import List as TypeList from typing import Optional from typing import Tuple from typing import Tuple as TypeTuple from typing import Union from typing import cast +from urllib.parse import urlparse # third party import click @@ -2499,6 +2501,20 @@ def version() -> None: is_flag=True, help="CI Test Mode, don't hang on Jupyter", ) [email protected]( + "--repo", + default=arg_cache.repo, + help="Choose a repo to fetch the notebook from or just use OpenMined/PySyft", +) [email protected]( + "--branch", + default=DEFAULT_BRANCH, + help="Choose a branch to fetch from or just use dev", +) [email protected]( + "--commit", + help="Choose a specific commit to fetch the notebook from", +) def quickstart_cli( url: Optional[str] = None, syft: str = "latest", @@ -2506,6 +2522,9 @@ def quickstart_cli( quiet: bool = False, pre: bool = False, test: bool = False, + repo: str = arg_cache.repo, + branch: str = DEFAULT_BRANCH, + commit: Optional[str] = None, python: Optional[str] = None, ) -> None: try: @@ -2532,9 +2551,32 @@ def quickstart_cli( ) if url: - file_path, _ = quickstart_download_notebook( - url=url, directory=directory, reset=reset - ) + allowed_schemes_as_url = ["http", "https"] + url_scheme = urlparse(url).scheme + + if url_scheme not in allowed_schemes_as_url: + notebooks = get_urls_from_dir( + repo=repo, branch=branch, commit=commit, url=url + ) + overwrite_all_notebooks = False + if not reset: + overwrite_all_notebooks = click.confirm( + text=f"You have {len(notebooks)} conflicting notebooks. Would you like to overwrite them all?", + default=False, + ) + + for notebook_url in notebooks: + file_path, _ = quickstart_download_notebook( + url=notebook_url, + directory=directory + "/" + url + "/", + reset=reset, + overwrite_all_notebooks=overwrite_all_notebooks, + ) + + else: + file_path, _ = quickstart_download_notebook( + url=url, directory=directory, reset=reset + ) else: file_path = add_intro_notebook(directory=directory, reset=reset) @@ -2562,6 +2604,12 @@ def quickstart_cli( sys.exit(1) print(f"Jupyter exists at: {jupyter_path}. CI Test mode exiting.") sys.exit(0) + + disable_toolbar_extension = f"{jupyter_binary} labextension disable @jupyterlab/cell-toolbar-extension" + + subprocess.run( # nosec + disable_toolbar_extension.split(" "), cwd=directory, env=environ + ) proc = subprocess.Popen( # nosec cmd.split(" "), cwd=directory, @@ -2631,6 +2679,53 @@ def quickstart_setup( raise e +def get_urls_from_dir( + repo: str, + branch: str, + commit: Optional[str], + url: str, +) -> List[str]: + notebooks = [] + if commit is not None: + gh_api_call = ( + "https://api.github.com/repos/" + + repo + + "/git/trees/" + + commit + + "?recursive=1" + ) + else: + gh_api_call = ( + "https://api.github.com/repos/" + + repo + + "/git/trees/" + + branch + + "?recursive=1" + ) + r = requests.get(gh_api_call) + if r.status_code != 200: + print( + f"Failed to fetch notebook from: {gh_api_call}.\nPlease try again with the correct parameters!" 
+ ) + sys.exit(1) + + res = r.json() + + for file in res["tree"]: + if file["path"].startswith("notebooks/quickstart/" + url): + if file["path"].endswith(".ipynb"): + temp_url = ( + "https://raw.githubusercontent.com/" + + repo + + "/" + + branch + + "/" + + file["path"] + ) + notebooks.append(temp_url) + return notebooks + + def add_intro_notebook(directory: str, reset: bool = False) -> str: files = os.listdir(directory) try: diff --git a/packages/hagrid/hagrid/quickstart_ui.py b/packages/hagrid/hagrid/quickstart_ui.py --- a/packages/hagrid/hagrid/quickstart_ui.py +++ b/packages/hagrid/hagrid/quickstart_ui.py @@ -15,13 +15,15 @@ def quickstart_download_notebook( - url: str, directory: str, reset: bool = False + url: str, directory: str, reset: bool = False, overwrite_all_notebooks: bool = False ) -> Tuple[str, bool]: os.makedirs(directory, exist_ok=True) file_name = os.path.basename(url).replace("%20", "_") file_path = os.path.abspath(directory + file_name) file_exists = os.path.isfile(file_path) + if overwrite_all_notebooks: + reset = True if file_exists and not reset: reset = click.confirm(
GSOD - Finish first two DO Notebooks

## Description
Awesome work on `quickstart` and the first two notebooks. I think we should finish these up end to end, and then once they're mergeable we can plan out the next few.

### Notebooks
- [x] Finish DO Create Domain and Upload Flow
- [x] Rename files to be 00, 01; use hyphens and lowercase; remove redundant text like "Data Owner"
- [x] Create breadcrumb and link the top of the notebook to the Install Wizard
- [x] Remove utils import from Upload notebook
- [x] Update README.md with links to published docs
- [x] Update HAGrid to download files and folders based off dev
- [x] Add relative path downloads to quickstart
- [x] Deploy notebook should use port 8081 or 9082 (just conventions we use in other places)
- [x] Add --silent to hagrid launch cell
- [x] Change hagrid check cell to use localhost:8081 (or 9082)
- [x] Change login to use no url but to include port=8081 (or 9082)
- [x] Link notebooks in sequence at the bottom of each tutorial
- [x] Move `hagrid quickstart` command to the top right of RST Docs

### HAGrid
```
$ hagrid quickstart data-owner
# downloads all files in dev branch data-owner
# and launches the 0th notebook

$ hagrid quickstart data-owner/00-deploy-domain
# downloads just that notebook
```
- [x] Quickstart download overwrite prompt should include an "all" option (y/n/a)
- [x] Add repo and branch/tag override to quickstart
```
$ hagrid quickstart data-owner/00-deploy-domain --repo=madhavajay/PySyft --branch=mybranch
$ hagrid quickstart data-owner/00-deploy-domain --repo=madhavajay/PySyft --commit=c75d98c3543954bfc8d1386205fbba1855f72b95
# downloads from that repo and/or commit / branch, where the default is `openmined/PySyft` and `dev`
```
- [x] hagrid disable jupyter extension on install
```
$ jupyter labextension disable @jupyterlab/cell-toolbar-extension
```

### Linting and Testing
- [ ] Make sure the notebooks run
- [x] Keep an eye out for formatting of code in cells
- [x] Remove excess spacing on headings
- [x] Use normal font size for main text and custom headings
- [x] Proofread spelling and punctuation
- [x] Check for consistency
@abhiwalia15 and @ShubhamPalriwala can you comment on this issue so I can assign you both?

Hey @madhavajay, I would like to start with the Notebooks. Please assign this task to me; thanks!!

Yep, on it with @abhiwalia15

> Hey @madhavajay, I would like to start with the Notebooks. Please assign this task to me; thanks!!

Hello @madhavajay, can you please assign this to me so I can tickmark the tasks I am completing?
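The `--repo`/`--branch` override added by the patch works by listing the repository tree through the GitHub API and keeping only `notebooks/quickstart/...*.ipynb` paths. A standalone sketch of that lookup, with `repo`, `branch`, and `url` set to example inputs (the endpoint and filter mirror `get_urls_from_dir` in the diff):

```
# List candidate notebook URLs the same way get_urls_from_dir in the patch does.
import requests

repo, branch, url = "OpenMined/PySyft", "dev", "data-owner"

r = requests.get(
    f"https://api.github.com/repos/{repo}/git/trees/{branch}?recursive=1"
)
r.raise_for_status()

notebooks = [
    f"https://raw.githubusercontent.com/{repo}/{branch}/{item['path']}"
    for item in r.json()["tree"]
    if item["path"].startswith(f"notebooks/quickstart/{url}")
    and item["path"].endswith(".ipynb")
]
print(notebooks)
```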
2022-08-25T09:49:40
OpenMined/PySyft
6,751
OpenMined__PySyft-6751
[ "6724" ]
868dceed4f08a74c5d568d854b4df7348ab04954
diff --git a/packages/hagrid/hagrid/deps.py b/packages/hagrid/hagrid/deps.py --- a/packages/hagrid/hagrid/deps.py +++ b/packages/hagrid/hagrid/deps.py @@ -750,7 +750,7 @@ def hagrid_update_available( "A new release of HAGrid is available: " + f"{str(current_version)} -> {str(new_version)}." ), - command=f"pip install hagrid=={new_version}", + command=f"pip install -U hagrid=={new_version}", solution="You can upgrade HAGrid with pip.", )
Update hagrid install README instructions

We should change `pip install hagrid` to `pip install -U hagrid` to ensure users get an updated version at all times. Check the repo for areas where this is written and replace them.
2022-09-14T09:03:01
OpenMined/PySyft
6,752
OpenMined__PySyft-6752
[ "6736" ]
835535f3f846963acb49dcb200d475f32b0f10fa
diff --git a/packages/hagrid/hagrid/cli.py b/packages/hagrid/hagrid/cli.py --- a/packages/hagrid/hagrid/cli.py +++ b/packages/hagrid/hagrid/cli.py @@ -42,8 +42,6 @@ from .deps import allowed_hosts from .deps import check_docker_version from .deps import gather_debug -from .deps import gitpod_url -from .deps import is_gitpod from .deps import is_windows from .exceptions import MissingDependency from .grammar import BadGrammar @@ -61,7 +59,9 @@ from .lib import docker_desktop_memory from .lib import generate_process_status_table from .lib import generate_user_table +from .lib import gitpod_url from .lib import hagrid_root +from .lib import is_gitpod from .lib import name_tag from .lib import save_vm_details_as_json from .lib import update_repo @@ -2372,6 +2372,7 @@ def debug(args: TypeTuple[str], **kwargs: TypeDict[str, Any]) -> None: cli.add_command(debug) + DEFAULT_HEALTH_CHECKS = ["host", "UI (βeta)", "api", "ssh", "jupyter"] HEALTH_CHECK_FUNCTIONS = { "host": check_host, @@ -2422,6 +2423,16 @@ def get_health_checks(ip_address: str) -> TypeTuple[bool, TypeList[TypeList[str] health_status = check_host_health(ip_address=ip_address, keys=keys) complete_status = all(health_status.values()) + # find port from ip_address + try: + port = int(ip_address.split(":")[1]) + except Exception: + # default to 80 + port = 80 + + # url to display based on running environment + display_url = gitpod_url(port).split("//")[1] if is_gitpod() else ip_address + # figure out how to add this back? # console.print("[bold magenta]Checking host:[/bold magenta]", ip_address, ":mage:") table_contents = [] @@ -2430,7 +2441,7 @@ def get_health_checks(ip_address: str) -> TypeTuple[bool, TypeList[TypeList[str] [ HEALTH_CHECK_ICONS[key], key, - HEALTH_CHECK_URLS[key].replace("{ip_address}", ip_address), + HEALTH_CHECK_URLS[key].replace("{ip_address}", display_url), icon_status(value), ] ) @@ -2443,7 +2454,7 @@ def create_check_table( ) -> rich.table.Table: table = rich.table.Table() table.add_column("PyGrid", style="magenta") - table.add_column("Info", justify="left") + table.add_column("Info", justify="left", overflow="fold") time_left_str = "" if time_left == 0 else str(time_left) table.add_column(time_left_str, justify="left") for row in table_contents: diff --git a/packages/hagrid/hagrid/deps.py b/packages/hagrid/hagrid/deps.py --- a/packages/hagrid/hagrid/deps.py +++ b/packages/hagrid/hagrid/deps.py @@ -400,17 +400,6 @@ def is_apple_silicon() -> bool: ENVIRONMENT["apple_silicon"] = is_apple_silicon() -def is_gitpod() -> bool: - return bool(os.environ.get("GITPOD_WORKSPACE_URL", None)) - - -def gitpod_url(port: Optional[int] = None) -> str: - workspace_url = os.environ.get("GITPOD_WORKSPACE_URL", "") - if port: - workspace_url = workspace_url.replace("https://", f"https://{port}-") - return workspace_url - - def is_windows() -> bool: if "platform" in ENVIRONMENT and ENVIRONMENT["platform"].lower() == "windows": return True diff --git a/packages/hagrid/hagrid/lib.py b/packages/hagrid/hagrid/lib.py --- a/packages/hagrid/hagrid/lib.py +++ b/packages/hagrid/hagrid/lib.py @@ -80,6 +80,17 @@ def check_is_git(path: Path) -> bool: return is_repo +def is_gitpod() -> bool: + return bool(os.environ.get("GITPOD_WORKSPACE_URL", None)) + + +def gitpod_url(port: Optional[int] = None) -> str: + workspace_url = os.environ.get("GITPOD_WORKSPACE_URL", "") + if port: + workspace_url = workspace_url.replace("https://", f"https://{port}-") + return workspace_url + + def get_git_repo() -> git.Repo: is_git = check_is_git(path=repo_src_path()) 
if not EDITABLE_MODE and not is_git:
Change HAGrid check to support Gitpod: `hagrid check` needs to detect Gitpod and output a Gitpod-friendly URL. This already works in the quickstart Jupyter output, so just copy the same code which tests for Gitpod and calculates the correct URL.
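For reference, a minimal standalone sketch of the Gitpod detection and URL rewriting described above, mirroring the `is_gitpod` / `gitpod_url` helpers the patch moves into `lib.py`; the port and fallback host below are just example values:

```python
# sketch: detect Gitpod and build a port-specific workspace URL
import os
from typing import Optional


def is_gitpod() -> bool:
    # Gitpod workspaces expose this environment variable
    return bool(os.environ.get("GITPOD_WORKSPACE_URL", None))


def gitpod_url(port: Optional[int] = None) -> str:
    workspace_url = os.environ.get("GITPOD_WORKSPACE_URL", "")
    if port:
        # Gitpod exposes ports as https://<port>-<workspace-host>
        workspace_url = workspace_url.replace("https://", f"https://{port}-")
    return workspace_url


# show a Gitpod-friendly host instead of the raw ip:port when running in Gitpod
ip_address = "localhost:8081"  # example value
display_url = gitpod_url(8081).split("//")[1] if is_gitpod() else ip_address
print(display_url)
```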
2022-09-14T15:55:27
OpenMined/PySyft
6,757
OpenMined__PySyft-6757
[ "6731" ]
a79716b94710923a28d80572f252aafe90ab7028
diff --git a/packages/hagrid/hagrid/cli.py b/packages/hagrid/hagrid/cli.py --- a/packages/hagrid/hagrid/cli.py +++ b/packages/hagrid/hagrid/cli.py @@ -2311,9 +2311,15 @@ def create_land_docker_cmd(verb: GrammarVerb) -> str: is_flag=True, help="Optional: prevent lots of land output", ) [email protected]( + "--force", + is_flag=True, + help="Optional: bypass the prompt during hagrid land ", +) def land(args: TypeTuple[str], **kwargs: TypeDict[str, Any]) -> None: verb = get_land_verb() silent = bool(kwargs["silent"]) if "silent" in kwargs else False + force = bool(kwargs["force"]) if "force" in kwargs else False try: grammar = parse_grammar(args=args, verb=verb) verb.load_grammar(grammar=grammar) @@ -2331,28 +2337,41 @@ def land(args: TypeTuple[str], **kwargs: TypeDict[str, Any]) -> None: except Exception as e: print(f"{e}") return - if not silent: - print("Running: \n", hide_password(cmd=cmd)) - if "cmd" not in kwargs or str_to_bool(cast(str, kwargs["cmd"])) is False: - if not silent: - print("Running: \n", cmd) - try: - if silent: - process = subprocess.Popen( # nosec - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - cwd=GRID_SRC_PATH, - shell=True, - ) - process.communicate() - target = verb.get_named_term_grammar("node_name").input - print(f"HAGrid land {target} complete!") - else: - subprocess.call(cmd, shell=True, cwd=GRID_SRC_PATH) # nosec - except Exception as e: - print(f"Failed to run cmd: {cmd}. {e}") + target = verb.get_named_term_grammar("node_name").input + + if not force: + _land_domain = ask( + Question( + var_name="_land_domain", + question=f"Are you sure you want to land {target} (y/n)", + kind="yesno", + ), + kwargs={}, + ) + + if force or _land_domain == "y": + if "cmd" not in kwargs or str_to_bool(cast(str, kwargs["cmd"])) is False: + if not silent: + print("Running: \n", cmd) + try: + if silent: + process = subprocess.Popen( # nosec + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=GRID_SRC_PATH, + shell=True, + ) + process.communicate() + + print(f"HAGrid land {target} complete!") + else: + subprocess.call(cmd, shell=True, cwd=GRID_SRC_PATH) # nosec + except Exception as e: + print(f"Failed to run cmd: {cmd}. {e}") + else: + print("Hagrid land aborted.") cli.add_command(launch)
Change hagrid land to prompt before acting: make `hagrid land` ask the user if they are sure (y/n), and then ensure the install wizard's final step is clear by saying that if you are done now you can go ahead and shut down your domain, or if you would prefer to keep it running you can skip this step.
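As a rough illustration of the confirmation gate described above (using plain `click.confirm` rather than HAGrid's own `ask`/`Question` helpers, and with the teardown step stubbed out):

```python
# sketch: confirm before landing a node, with a --force escape hatch
import click


@click.command()
@click.argument("node_name", type=str)
@click.option("--force", is_flag=True, help="Optional: bypass the prompt")
def land(node_name: str, force: bool) -> None:
    if not force and not click.confirm(f"Are you sure you want to land {node_name}?"):
        click.echo("HAGrid land aborted.")
        return
    # ... the docker compose down / teardown command would run here ...
    click.echo(f"HAGrid land {node_name} complete!")


if __name__ == "__main__":
    land()
```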
2022-09-15T08:35:11
OpenMined/PySyft
6,773
OpenMined__PySyft-6773
[ "6735" ]
7e859d9a12430e16aef7a744b70094e0793966d4
diff --git a/packages/hagrid/hagrid/__init__.py b/packages/hagrid/hagrid/__init__.py --- a/packages/hagrid/hagrid/__init__.py +++ b/packages/hagrid/hagrid/__init__.py @@ -5,6 +5,7 @@ from typing import Any # relative +from .cli import check_status as check # noqa: F401 from .quickstart_ui import QuickstartUI from .wizard_ui import WizardUI diff --git a/packages/hagrid/hagrid/cli.py b/packages/hagrid/hagrid/cli.py --- a/packages/hagrid/hagrid/cli.py +++ b/packages/hagrid/hagrid/cli.py @@ -2499,6 +2499,12 @@ def create_check_table( ) def check( ip_addresses: TypeList[str], wait: bool = False, silent: bool = False +) -> None: + check_status(ip_addresses=ip_addresses, wait=wait, silent=silent) + + +def check_status( + ip_addresses: TypeList[str], wait: bool = False, silent: bool = False ) -> None: console = rich.get_console() if len(ip_addresses) == 0:
Create a Python + HTML output version of hagrid check: allow `hagrid check` to be runnable via Python rather than the CLI, and make the output nicely formatted with HTML the way the quickstart / install wizard works. Once this works, remove the `!hagrid check` terminal command from the install wizard.
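The Python-callable half of this boils down to a common click pattern: keep the real logic in a plain importable function and make the CLI command a thin wrapper around it. A hypothetical minimal version (the HTML formatting part is not shown):

```python
# sketch: expose a click command's logic as an importable function
from typing import List
from typing import Tuple

import click


def check_status(ip_addresses: List[str]) -> None:
    # real health-check logic lives here so notebooks can call it directly,
    # e.g. `from mypackage import check_status` (hypothetical package name)
    for ip in ip_addresses:
        print(f"checking {ip} ...")


@click.command()
@click.argument("ip_addresses", type=str, nargs=-1)
def check(ip_addresses: Tuple[str, ...]) -> None:
    # CLI wrapper simply forwards to the importable function
    check_status(list(ip_addresses))
```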
2022-09-22T07:01:49
OpenMined/PySyft
6,846
OpenMined__PySyft-6846
[ "6835" ]
2bcaa62e1085745ee9037e913d31f4ac129e7b40
diff --git a/packages/hagrid/hagrid/cli.py b/packages/hagrid/hagrid/cli.py --- a/packages/hagrid/hagrid/cli.py +++ b/packages/hagrid/hagrid/cli.py @@ -932,7 +932,10 @@ def create_launch_cmd( f"\tWindows Help: https://docs.docker.com/desktop/windows/\n\n" f"Then re-run your hagrid command.\n\n" f"If you see this warning on Linux then something isn't right. " - f"Please file a Github Issue on PySyft's Github" + f"Please file a Github Issue on PySyft's Github.\n\n" + f"Alternatively in case no more memory could be allocated, " + f"you can run hagrid on the cloud with GitPod by visiting " + f"https://gitpod.io/#https://github.com/OpenMined/PySyft." ) if is_windows() and not DEPENDENCIES["wsl"]:
Add instructions to use GitPod as an alternative when docker memory requirements fail ## Description Add instructions to use GitPod as an alternative in the Exception message when the check for `docker_desktop_memory` fails.
2022-10-12T05:20:47
OpenMined/PySyft
6,858
OpenMined__PySyft-6858
[ "6820" ]
296a0ecf7a5531be3362050ca963e8f66bdc7e53
diff --git a/packages/hagrid/hagrid/deps.py b/packages/hagrid/hagrid/deps.py --- a/packages/hagrid/hagrid/deps.py +++ b/packages/hagrid/hagrid/deps.py @@ -103,7 +103,8 @@ class DependencySyftOS(Dependency): def check(self) -> None: self.display = "✅ " + ENVIRONMENT["os"] if is_windows(): - self.issues.append(windows_jaxlib()) + if not get_pip_package("jaxlib"): + self.issues.append(windows_jaxlib()) elif is_apple_silicon(): pass
Incorrect Jax Prompt in Windows. ## Description In the Install Wizard, during `wizard.check_syft`, even if jax is already installed on Windows, the prompt to install jax is displayed again. The incorrect prompt should be rectified.
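A minimal sketch of the kind of guard the fix adds, only surfacing the Windows jaxlib instruction when the package is actually missing. This uses `importlib.util.find_spec` instead of HAGrid's own `get_pip_package` helper:

```python
# sketch: only prompt for jaxlib on Windows when it is not already installed
import importlib.util
import platform


def jaxlib_installed() -> bool:
    # find_spec returns None when the package cannot be found
    return importlib.util.find_spec("jaxlib") is not None


if platform.system() == "Windows" and not jaxlib_installed():
    print("Please install jaxlib for Windows (see the install wizard instructions).")
```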
2022-10-14T04:15:30
OpenMined/PySyft
6,864
OpenMined__PySyft-6864
[ "6834" ]
78b2cf7362912b2946e42de3f5e4246015bba856
diff --git a/packages/hagrid/hagrid/art.py b/packages/hagrid/hagrid/art.py --- a/packages/hagrid/hagrid/art.py +++ b/packages/hagrid/hagrid/art.py @@ -8,9 +8,6 @@ import rich from rich.emoji import Emoji -# relative -from .lib import asset_path - def motorcycle() -> None: print( @@ -91,6 +88,9 @@ def hold_on_tight() -> None: def hagrid1() -> None: + # relative + from .lib import asset_path + try: ascii_magic.to_terminal( ascii_magic.from_image_file( @@ -102,6 +102,9 @@ def hagrid1() -> None: def hagrid2() -> None: + # relative + from .lib import asset_path + try: ascii_magic.to_terminal( ascii_magic.from_image_file( diff --git a/packages/hagrid/hagrid/lib.py b/packages/hagrid/hagrid/lib.py --- a/packages/hagrid/hagrid/lib.py +++ b/packages/hagrid/hagrid/lib.py @@ -18,6 +18,9 @@ # third party import git import requests +import rich +from rich import console +from rich import progress from rich.table import Table # relative @@ -26,6 +29,83 @@ from .mode import hagrid_root +class GitRemoteProgress(git.RemoteProgress): + # CREDITS: https://splunktool.com/python-progress-bar-for-git-clone + OP_CODES = [ + "BEGIN", + "CHECKING_OUT", + "COMPRESSING", + "COUNTING", + "END", + "FINDING_SOURCES", + "RECEIVING", + "RESOLVING", + "WRITING", + ] + OP_CODE_MAP = { + getattr(git.RemoteProgress, _op_code): _op_code for _op_code in OP_CODES + } + + def __init__(self) -> None: + super().__init__() + self.progressbar = progress.Progress( + progress.SpinnerColumn(), + # *progress.Progress.get_default_columns(), + progress.TextColumn("[progress.description]{task.description}"), + progress.BarColumn(), + progress.TextColumn("[progress.percentage]{task.percentage:>3.0f}%"), + "eta", + progress.TimeRemainingColumn(), + progress.TextColumn("{task.fields[message]}"), + console=console.Console(), + transient=False, + ) + self.progressbar.start() + self.active_task = None + + def __del__(self) -> None: + # logger.info("Destroying bar...") + self.progressbar.stop() + + @classmethod + def get_curr_op(cls, op_code: int) -> str: + """Get OP name from OP code.""" + # Remove BEGIN- and END-flag and get op name + op_code_masked = op_code & cls.OP_MASK + return cls.OP_CODE_MAP.get(op_code_masked, "?").title() + + def update( + self, + op_code: int, + cur_count: Union[str, float], + max_count: Optional[Union[str, float]] = None, + message: Optional[str] = None, + ) -> None: + # Start new bar on each BEGIN-flag + if op_code & self.BEGIN: + self.curr_op = self.get_curr_op(op_code) + # logger.info("Next: %s", self.curr_op) + self.active_task = self.progressbar.add_task( + description=self.curr_op, + total=max_count, + message=message, + ) + + self.progressbar.update( + task_id=self.active_task, + completed=cur_count, + message=message, + ) + + # End progress monitoring on each END-flag + if op_code & self.END: + # logger.info("Done: %s", self.curr_op) + self.progressbar.update( + task_id=self.active_task, + message=f"[bright_black]{message}", + ) + + class ProcessStatus(Enum): RUNNING = "[blue]Running" DONE = "[green]Done" @@ -92,10 +172,17 @@ def gitpod_url(port: Optional[int] = None) -> str: def get_git_repo() -> git.Repo: + # relative + from .art import RichEmoji + + OK_EMOJI = RichEmoji("white_heavy_check_mark").to_str() + is_git = check_is_git(path=repo_src_path()) + console = rich.get_console() if not EDITABLE_MODE and not is_git: github_repo = "OpenMined/PySyft.git" git_url = f"https://github.com/{github_repo}" + print(f"Fetching Syft + Grid Source from {git_url} to {repo_src_path()}") try: repo_branch = DEFAULT_BRANCH @@ 
-105,23 +192,36 @@ def get_git_repo() -> git.Repo: shutil.rmtree(str(repo_path)) git.Repo.clone_from( - git_url, str(repo_path), single_branch=False, b=repo_branch + git_url, + str(repo_path), + single_branch=False, + b=repo_branch, + progress=GitRemoteProgress(), ) + console.print(f"{OK_EMOJI} Fetched PySyft repo.") except Exception as e: # nosec print(f"Failed to clone {git_url} to {repo_src_path()} with error: {e}") return git.Repo(repo_src_path()) def update_repo(repo: git.Repo, branch: str) -> None: + # relative + from .art import RichEmoji + + OK_EMOJI = RichEmoji("white_heavy_check_mark").to_str() + console = rich.get_console() if not EDITABLE_MODE: - print(f"Updating HAGrid from branch: {branch}") - try: - if repo.is_dirty(): - repo.git.reset("--hard") - repo.git.checkout(branch) - repo.remotes.origin.pull() - except Exception as e: - print(f"Error checking out branch {branch}.", e) + with console.status("Updating hagrid") as console_status: + + console_status.update(f"[bold blue]Updating HAGrid from branch: {branch}") + try: + if repo.is_dirty(): + repo.git.reset("--hard") + repo.git.checkout(branch) + repo.remotes.origin.pull() + console.print(f"{OK_EMOJI} Updated HAGrid from branch: {branch}") + except Exception as e: + print(f"Error checking out branch {branch}.", e) def commit_hash() -> str:
Missing progress status when we clone the repo on installing hagrid for the first time ## Description We are missing the progress/process status when we clone the repo on installing hagrid for the first time. Refer to: - `get_git_repo` -> clone the repo for the first time - `update_repo` -> update the repo when launching a domain from a different branch. We can either print progress info or use rich.Console to produce a process status animation (refer to `console.status` in `quickstart_setup` in hagrid/cli.py. [Rich Documentation](https://rich.readthedocs.io/en/stable/console.html#status))
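For the `update_repo` half, the `console.status` spinner mentioned above is enough on its own; a small self-contained example of that rich API, with a sleep standing in for the actual `git pull`:

```python
# sketch: show a live status spinner while a slow step (e.g. a git pull) runs
import time

from rich.console import Console

console = Console()
with console.status("[bold blue]Updating HAGrid from branch: dev") as status:
    time.sleep(2)  # placeholder for repo.remotes.origin.pull()
    status.update("[bold blue]Almost done...")
    time.sleep(1)
console.print("✅ Updated HAGrid")
```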
2022-10-17T06:32:38
OpenMined/PySyft
6,905
OpenMined__PySyft-6905
[ "6887" ]
38a057ad7d2be59de03166232b90af0eb1f0001b
diff --git a/packages/hagrid/hagrid/cli.py b/packages/hagrid/hagrid/cli.py --- a/packages/hagrid/hagrid/cli.py +++ b/packages/hagrid/hagrid/cli.py @@ -10,6 +10,7 @@ import subprocess # nosec import sys import tempfile +from threading import Event from threading import Thread import time from typing import Any @@ -28,6 +29,7 @@ import click import requests import rich +from rich.console import Console from rich.live import Live from virtualenvapi.manage import VirtualEnvironment @@ -2497,33 +2499,140 @@ def create_check_table( return table +def shell(command: str) -> str: + try: + output = subprocess.check_output( # nosec + command, shell=True, stderr=subprocess.STDOUT + ) + except Exception: + output = b"" + return output.decode("utf-8") + + +def get_host_name(container_name: str) -> str: + # Assumption we always get proxy containers first. + # if users have old docker compose versios. + # the container names are _ instead of - + # canada_proxy_1 instead of canada-proxy-1 + try: + host_name = container_name[0 : container_name.find("proxy") - 1] # noqa: E203 + except Exception: + host_name = "" + return host_name + + +def from_url(url: str) -> Tuple[str, str, int, str, Union[Any, str]]: + + try: + # urlparse doesnt handle no protocol properly + if "://" not in url: + url = "http://" + url + parts = urlparse(url) + host_or_ip_parts = parts.netloc.split(":") + # netloc is host:port + port = 80 + if len(host_or_ip_parts) > 1: + port = int(host_or_ip_parts[1]) + host_or_ip = host_or_ip_parts[0] + return ( + host_or_ip, + parts.path, + port, + parts.scheme, + getattr(parts, "query", ""), + ) + except Exception as e: + print(f"Failed to convert url: {url} to GridURL. {e}") + raise e + + +def get_docker_status(ip_address: str) -> Tuple[bool, Tuple[str, str]]: + proxy_containers = shell("docker ps --format '{{.Names}}' | grep 'proxy' ").split() + backend_containers = shell( + "docker ps --format '{{.Names}}' | grep 'backend' " + ).split() + + # to prevent importing syft, have duplicated the from_url code from GridURL + url = from_url(ip_address) + container_name = None + for container in proxy_containers: + ports = shell(f"docker port {container}") + if ports.count(str(url[2])): + container_name = container + break + + if not container_name: + return False, ("", "") + host_name = get_host_name(container_name) + + _backend_exists = False + for container in backend_containers: + if host_name in container and "stream" not in container: + _backend_exists = True + break + if not _backend_exists: + return False, ("", "") + + # Identifying Type of Node. 
+ headscale_containers = shell( + "docker ps --format '{{.Names}}' | grep 'headscale' " + ).split() + + node_type = "Domain" + for container in headscale_containers: + if host_name in container: + node_type = "Network" + break + + return True, (host_name, node_type) + + +def get_syft_install_status(host_name: str) -> bool: + backend_containers = shell( + "docker ps --format '{{.Names}}' | grep 'backend' " + ).split() + backend_container = None + for container in backend_containers: + if host_name in container and "stream" not in container: + backend_container = container + break + if not backend_container: + print(f"❌ Backend Docker Stack for: {host_name} not found") + exit(0) + else: + backend_log = shell(f"docker logs {backend_container}") + if "Application startup complete" not in backend_log: + return False + return True + + @click.command(help="Check health of an IP address/addresses or a resource group") @click.argument("ip_addresses", type=str, nargs=-1) @click.option( - "--wait", - is_flag=True, - help="Optional: wait until checks pass", + "--timeout", + default=300, + help="Timeout for hagrid check command,Default: 300 seconds", ) @click.option( "--silent", - is_flag=True, - help="Optional: don't refresh output during wait", + default=True, + help="Optional: don't refresh output,Defaults True", ) def check( - ip_addresses: TypeList[str], wait: bool = False, silent: bool = False + ip_addresses: TypeList[str], silent: bool = True, timeout: Union[int, str] = 300 ) -> None: - check_status(ip_addresses=ip_addresses, wait=wait, silent=silent) + check_status(ip_addresses=ip_addresses, silent=silent, timeout=timeout) -def check_status( - ip_addresses: Union[str, TypeList[str]], wait: bool = False, silent: bool = False -) -> None: +signal = Event() + +def _check_status(ip_addresses: Union[str, TypeList[str]], silent: bool = True) -> None: + OK_EMOJI = RichEmoji("white_heavy_check_mark").to_str() # Check if ip_addresses is str, then convert to list if ip_addresses and isinstance(ip_addresses, str): ip_addresses = [ip_addresses] - - console = rich.get_console() + console = Console() if len(ip_addresses) == 0: headers = {"User-Agent": "curl/7.79.1"} @@ -2537,14 +2646,42 @@ def check_status( status, table_contents = get_health_checks(ip_address=ip_address) table = create_check_table(table_contents=table_contents) max_timeout = 600 - if wait and not status: + if not status: table = create_check_table( table_contents=table_contents, time_left=max_timeout ) if silent: - print("Checking...") - while not status: - if not silent: + with console.status("Gathering Node information") as console_status: + console_status.update( + "[bold orange_red1]Waiting for Docker Container Creation" + ) + docker_status, domain_info = get_docker_status(ip_address) + while not docker_status: + docker_status, domain_info = get_docker_status(ip_address) + time.sleep(1) + if signal.is_set(): + return + console.print( + f"{OK_EMOJI} {domain_info[0]} {domain_info[1]} Docker Containers Created." 
+ ) + console_status.update("[bold orange_red1]Installing Syft") + syft_install_status = get_syft_install_status(domain_info[0]) + while not syft_install_status: + syft_install_status = get_syft_install_status(domain_info[0]) + time.sleep(1) + if signal.is_set(): + return + console.print(f"{OK_EMOJI} Syft") + console.print(f"{OK_EMOJI} Containers Startup Complete.") + + status, table_contents = get_health_checks(ip_address) + table = create_check_table( + table_contents=table_contents, time_left=max_timeout + ) + else: + while not status: + if signal.is_set(): + return with Live( table, refresh_per_second=2, screen=True, auto_refresh=False ) as live: @@ -2558,14 +2695,7 @@ def check_status( if status: break time.sleep(1) - else: - max_timeout -= 1 - if max_timeout % 5 == 0: - status, table_contents = get_health_checks(ip_address) - table = create_check_table( - table_contents=table_contents, time_left=max_timeout - ) - time.sleep(1) + console.print(table) else: for ip_address in ip_addresses: @@ -2574,6 +2704,32 @@ def check_status( console.print(table) +def check_status( + ip_addresses: Union[str, TypeList[str]], + silent: bool = True, + timeout: Union[int, str] = 300, +) -> None: + timeout = int(timeout) + # third party + from rich import print + + t = Thread( + target=_check_status, kwargs={"ip_addresses": ip_addresses, "silent": silent} + ) + t.start() + t.join(timeout=timeout) + + if t.is_alive(): + signal.set() + print(f"Hagrid Check command timed out after: {timeout} seconds 🕛 ") + print( + "You could try increasing the timeout or kindly check the docker containers for error logs." + ) + print("Viewing Docker Container Logs:") + print("Tool: [link=https://ctop.sh]Ctop[/link]") + print("Video Explanation: [link=https://youtu.be/BJhlCxerQP4]Video[/link]") + + cli.add_command(check)
hagrid.check(silent=True, wait=True) is too silent ## Description In the quickstart guide, when I run "hagrid.check(['localhost:8081'],silent=True,wait=True)" with a domain node not deployed, the user experience is an infinite hang. Nothing is technically broken, but we need to have at least some feedback. At a minimum... it should print a single "." every 5 seconds or so just so the user knows that the thread isn't frozen. Additionally, "wait=True" should be changed to "timeout=5" where "timeout=-1" will wait forever but by default timeout=60 or something. This is a better ux. ## How to Reproduce 1. Run step 6 from hagrid quickstart without a domain running. ## Screenshots <img width="713" alt="Screenshot 2022-10-24 at 17 58 59" src="https://user-images.githubusercontent.com/4328594/197583033-26c95794-326b-4ecf-9a11-622cbc8d0087.png">
Also - hagrid.check() should accept a string url or a list of strings. Requiring a list of strings (and only a list of strings) in a user-facing API is bad python API design given that most people are only ever going to pass in a list with one thing in it. It's one of those "Well that's kindof annoying" types of bugs noobs will run into.
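A rough sketch of the suggested behaviour: accept a bare string as well as a list, run the potentially never-finishing check in a worker thread, and give up after a timeout. The `_check_status` body below is a placeholder heartbeat, not the real health check:

```python
# sketch: wrap a potentially blocking check in a thread with a timeout
import time
from threading import Event
from threading import Thread
from typing import List
from typing import Union

stop_signal = Event()


def _check_status(ip_addresses: Union[str, List[str]]) -> None:
    if isinstance(ip_addresses, str):  # allow a single string, not just a list
        ip_addresses = [ip_addresses]
    while not stop_signal.is_set():
        print(".", end="", flush=True)  # heartbeat so the user knows we're alive
        time.sleep(5)


def check_status(ip_addresses: Union[str, List[str]], timeout: int = 60) -> None:
    worker = Thread(target=_check_status, args=(ip_addresses,), daemon=True)
    worker.start()
    worker.join(timeout=timeout)
    if worker.is_alive():
        stop_signal.set()
        print(f"\nCheck timed out after {timeout} seconds")


check_status("localhost:8081", timeout=10)
```

The eventual fix takes a similar shape, using a `threading.Event` to signal the worker and a `join(timeout=...)` to bound the wait.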
2022-10-27T05:37:29
OpenMined/PySyft
6,908
OpenMined__PySyft-6908
[ "6899" ]
3dc60769654b3a650ba97a56579824033ad40cc5
diff --git a/packages/syft/src/syft/core/node/common/node_manager/redis_store.py b/packages/syft/src/syft/core/node/common/node_manager/redis_store.py --- a/packages/syft/src/syft/core/node/common/node_manager/redis_store.py +++ b/packages/syft/src/syft/core/node/common/node_manager/redis_store.py @@ -192,6 +192,16 @@ def set(self, key: StoreKey, value: StorableObject) -> None: local_session = sessionmaker(bind=self.db)() if create_metadata: local_session.add(metadata_obj) + else: + local_session.query(ObjectMetadata).filter_by(obj=key_str).update( + { + "tags": value.tags, + "description": value.description, + "read_permissions": metadata_obj.read_permissions, + "write_permissions": metadata_obj.write_permissions, + "search_permissions": metadata_obj.search_permissions, + } + ) local_session.commit() local_session.close() diff --git a/packages/syft/src/syft/core/node/common/node_service/object_request/object_request_service.py b/packages/syft/src/syft/core/node/common/node_service/object_request/object_request_service.py --- a/packages/syft/src/syft/core/node/common/node_service/object_request/object_request_service.py +++ b/packages/syft/src/syft/core/node/common/node_service/object_request/object_request_service.py @@ -614,7 +614,7 @@ def accept_or_deny_request( ) tmp_obj.read_permissions[ VerifyKey(_req.verify_key.encode("utf-8"), encoder=HexEncoder) - ] = _req.id + ] = _req.user_id node.store[UID.from_string(_req.object_id)] = tmp_obj # TODO: In the future we'll probably need to keep a request history
DS can not retrieve the results of their computations even after being approved. ## Description Data Scientists can not retrieve the result of their computations even after having their data access request approved by the Data Owner. ## How to Reproduce ``` import syft as sy do_client = sy.login( email="[email protected]", password="changethis", port=8081 ) # Create a new Data Scientist User do_client.users.create(name='DataScientist', email='[email protected]', password = 'DataScientist', budget=0, role = "Data Scientist" ) # login as a Data Scientist User ds_client = sy.login( email="[email protected]", password="DataScientist", port=8081 ) # Send a tensor using the Data Owner Session x_s = x.send(do_client, tags=["#X", "#diabetes"], description="My diabetes y label") # Get a pointer to the data using the Data Scientist Session ds_x_pointer = ds_client.store[0] # Perform Arbitrary computation result = ds_x_pointer + ds_x_pointer # Submit Data Access request as a Data Scientist result.request(reason="I'd like to see my result ...") # Approve Data Access Request as a Data Owner do_client.requests[0].approve() # Try to retrieve your result as a Data Scientist result.get_copy() ``` ## Expected Behavior We should be able to retrieve the values of our result if the data access request was approved. ## Additional Comments - This workflow is important for the not fully automated use cases where we don't use Differential Privacy and the Data Owner still needs to inspect data access requests. - It's important to add some tests to keep tracking this workflow in future changes as well. - Special thanks to @mikaelapisani for discovering and reporting it.
2022-10-27T14:14:58
OpenMined/PySyft
6,980
OpenMined__PySyft-6980
[ "6942" ]
461c826dbb9c2ee57d193c4708b625a01a934d77
diff --git a/packages/hagrid/hagrid/cli.py b/packages/hagrid/hagrid/cli.py --- a/packages/hagrid/hagrid/cli.py +++ b/packages/hagrid/hagrid/cli.py @@ -32,7 +32,10 @@ import rich from rich.console import Console from rich.live import Live +from rich.progress import BarColumn from rich.progress import Progress +from rich.progress import SpinnerColumn +from rich.progress import TextColumn from virtualenvapi.manage import VirtualEnvironment # relative @@ -395,14 +398,14 @@ def check_pulling(line: str, cmd_name: str, progress_bar: Progress) -> None: if "Pulling" in line and "fs layer" not in line: progress_bar.update( 0, - description=f"⌛ [bold]{cmd_name} [{task.completed} / {task.total+1}]", + description=f" [bold]{cmd_name} [{task.completed} / {task.total+1}]", total=task.total + 1, refresh=True, ) if "Pulled" in line: progress_bar.update( 0, - description=f"⌛ [bold]{cmd_name} [{task.completed + 1} / {task.total}]", + description=f" [bold]{cmd_name} [{task.completed + 1} / {task.total}]", completed=task.completed + 1, refresh=True, ) @@ -427,14 +430,14 @@ def check_building(line: str, cmd_name: str, progress_bar: Progress) -> None: if load_pattern.match(line): progress_bar.update( 0, - description=f"⌛ [bold]{cmd_name} [{task.completed} / {task.total +1}]", + description=f" [bold]{cmd_name} [{task.completed} / {task.total +1}]", total=task.total + 1, refresh=True, ) if build_pattern.match(line): progress_bar.update( 0, - description=f"⌛ [bold]{cmd_name} [{task.completed+1} / {task.total}]", + description=f" [bold]{cmd_name} [{task.completed+1} / {task.total}]", completed=task.completed + 1, refresh=True, ) @@ -452,14 +455,14 @@ def check_launching(line: str, cmd_name: str, progress_bar: Progress) -> None: if "Starting" in line: progress_bar.update( 0, - description=f"⌛ [bold]{cmd_name} [{task.completed} / {task.total+1}]", + description=f" [bold]{cmd_name} [{task.completed} / {task.total+1}]", total=task.total + 1, refresh=True, ) if "Started" in line: progress_bar.update( 0, - description=f"⌛ [bold]{cmd_name} [{task.completed + 1} / {task.total}]", + description=f" [bold]{cmd_name} [{task.completed + 1} / {task.total}]", completed=task.completed + 1, refresh=True, ) @@ -610,7 +613,14 @@ def execute_commands( if isinstance(cmds, dict): console.print("[bold green]⠋[bold blue] Launching Docker Images [/bold blue]\t") for cmd_name, cmd in cmds.items(): - with Progress(console=console, auto_refresh=False) as progress: + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + BarColumn(), + TextColumn("[progress.percentage]{task.percentage:.2f}% "), + console=console, + auto_refresh=True, + ) as progress: if silent: progress.add_task( f"[bold green]{cmd_name} Images",
UX: Spinner Animation to Hagrid Launch ## Feature Description Currently, when the silent flag is provided while launching a node through hagrid, progress bars are displayed to show the - `Pull` - `Build` - `Launch` of docker containers. When we initially try to launch on a new system without any cached packages, these steps take a lot of time. Since we cannot provide a time estimate in the progress bars, we could replace the ⌛ emoji with a spinner animation from the rich library. Current loading part: ![Screenshot 2022-11-03 at 9 45 39 AM](https://user-images.githubusercontent.com/43314053/199647158-c9a8f188-9194-4787-881d-05548973a8af.png) Spinner animation example: https://www.npmjs.com/package/cli-spinners Reference to solve the issue: https://rich.readthedocs.io/en/stable/progress.html We could add a spinner column to the rich progress in `packages/hagrid/hagrid/cli.py` in the `execute_commands` function: https://github.com/OpenMined/PySyft/blob/5de360b9fc4cff81dda76bd0a35af7c0167554c4/packages/hagrid/hagrid/cli.py#L597 This is mainly to show new users that the thread is not frozen and is still working while it takes time.
Hi @rasswanth-s, I'm new to the PySyft codebase but I have some familiarity with using rich progress bars. I would like to work on this issue to get started here. To add the spinner, I should change the code below: https://github.com/OpenMined/PySyft/blob/5de360b9fc4cff81dda76bd0a35af7c0167554c4/packages/hagrid/hagrid/cli.py#L613 to this: ```python3 with Progress( SpinnerColumn(), TextColumn("[progress.description]{task.description}"), console=console, auto_refresh=False, ) as progress: ``` Is this correct or am I missing something? Sure, go for it 🚀 We mainly need the spinner animation instead of the clock gif when doing hagrid launch. You could follow the PySyft main README to install hagrid and launch quickstart, which in turn would help launch a domain and show whether the spinner animation works. Comment on the thread if there are any blockers.
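For reference, a tiny self-contained rich `Progress` example combining a spinner column with a bar and percentage, which is essentially what ends up wired into `execute_commands`; the task name, total, and sleep are made up:

```python
# sketch: rich progress bar with a spinner instead of a static emoji
import time

from rich.progress import BarColumn
from rich.progress import Progress
from rich.progress import SpinnerColumn
from rich.progress import TextColumn

with Progress(
    SpinnerColumn(),
    TextColumn("[progress.description]{task.description}"),
    BarColumn(),
    TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
) as progress:
    task = progress.add_task("[bold]Pulling docker images", total=20)
    for _ in range(20):
        time.sleep(0.1)  # stand-in for processing a line of docker output
        progress.advance(task)
```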
2022-11-06T13:45:30
OpenMined/PySyft
7,008
OpenMined__PySyft-7008
[ "6956" ]
f8ac36639da08ffd2e94bb18cb5789fef7d7d7fd
diff --git a/packages/syft/src/syft/core/adp/vectorized_publish.py b/packages/syft/src/syft/core/adp/vectorized_publish.py --- a/packages/syft/src/syft/core/adp/vectorized_publish.py +++ b/packages/syft/src/syft/core/adp/vectorized_publish.py @@ -129,6 +129,8 @@ def publish( # if we dont return below we will terminate if the tensor gets replaced with zeros prev_tensor = None + print("VALUE: ", value) + print("Tensor: ", tensor.child) while can_reduce_further(value=value, zeros_like=zeros_like): if prev_tensor is None: @@ -155,7 +157,20 @@ def publish( # Step 2: Calculate the epsilon spend for this query # rdp_constant = all terms in Theorem. 2.7 or 2.8 of https://arxiv.org/abs/2008.11193 EXCEPT alpha - rdp_constants = compute_rdp_constant(rdp_params, private=private) + if any(np.isnan(l2_norms)): + if any(np.isnan(l2_norm_bounds)) or any(np.isinf(l2_norm_bounds)): + raise Exception( + "NaN or Inf values in bounds not allowed in PySyft for safety reasons." + "Please contact the OpenMined support team for help." + "\nFor that you can either:" + "\n * describe your issue on our Slack #support channel. To join: https://openmined.slack.com/" + "\n * send us an email describing your problem at [email protected]" + "\n * leave us an issue here: https://github.com/OpenMined/PySyft/issues" + ) + rdp_constants = compute_rdp_constant(rdp_params, private=False) + else: + rdp_constants = compute_rdp_constant(rdp_params, private=private) + print("Rdp constants", rdp_constants) if any(rdp_constants < 0): raise Exception( "Negative budget spend not allowed in PySyft for safety reasons." @@ -165,14 +180,6 @@ def publish( " * send us an email describing your problem at [email protected]" " * leave us an issue here: https://github.com/OpenMined/PySyft/issues" ) - if any(np.isnan(rdp_constants)) or any(np.isinf(rdp_constants)): - raise Exception( - "Invalid privacy budget spend. Please contact the OpenMined support team for help." - "For that you can either:" - " * describe your issue on our Slack #support channel. To join: https://openmined.slack.com/" - " * send us an email describing your problem at [email protected]" - " * leave us an issue here: https://github.com/OpenMined/PySyft/issues" - ) all_epsilons = ledger._get_epsilon_spend( rdp_constants ) # This is the epsilon spend for ALL data subjects @@ -180,10 +187,10 @@ def publish( raise Exception( "Negative budget spend not allowed in PySyft for safety reasons." "Please contact the OpenMined support team for help." - "For that you can either:" - " * describe your issue on our Slack #support channel. To join: https://openmined.slack.com/" - " * send us an email describing your problem at [email protected]" - " * leave us an issue here: https://github.com/OpenMined/PySyft/issues" + "\nFor that you can either:" + "\n * describe your issue on our Slack #support channel. 
To join: https://openmined.slack.com/" + "\n * send us an email describing your problem at [email protected]" + "\n * leave us an issue here: https://github.com/OpenMined/PySyft/issues" ) epsilon_spend = max( @@ -205,6 +212,8 @@ def publish( # Step 3: Check if the user has enough privacy budget for this query privacy_budget = get_budget_for_user(verify_key=ledger.user_key) + print(privacy_budget) + print(epsilon_spend) has_budget = epsilon_spend <= privacy_budget # if we see the same budget and spend twice in a row we have failed to reduce it diff --git a/packages/syft/src/syft/core/tensor/lazy_repeat_array.py b/packages/syft/src/syft/core/tensor/lazy_repeat_array.py --- a/packages/syft/src/syft/core/tensor/lazy_repeat_array.py +++ b/packages/syft/src/syft/core/tensor/lazy_repeat_array.py @@ -430,6 +430,27 @@ def clip(self, *args: Any, **kwargs: Any) -> lazyrepeatarray: return lazyrepeatarray(data=res, shape=res.shape) +def has_nans_inf(min_val: lazyrepeatarray, max_val: lazyrepeatarray) -> bool: + """Helper function that detects if a LRA has NaNs or Inf, and raises exceptions. + This is so that we can raise Exceptions at the pointer level.""" + raise_exception = False + if min_val.data.size == 1: + if np.isnan(min_val.data) or np.isinf(min_val.data): + raise_exception = True + else: + if np.isnan(min_val.data).any() or np.isnan(min_val.data).any(): + raise_exception = True + + if max_val.data.size == 1: + if np.isnan(max_val.data) or np.isinf(max_val.data): + raise_exception = True + else: + if np.isnan(max_val.data).any() or np.isinf(max_val.data).any(): + raise_exception = True + + return raise_exception + + # As the min and max values calculation is the same regardless of the tensor type, # We centralize this method as baseline for calculation for min/max values def compute_min_max( @@ -802,4 +823,21 @@ def compute_min_max( else: raise ValueError(f"Invaid Operation for LazyRepeatArray: {op_str}") - return (min_vals, max_vals) + if has_nans_inf(min_vals, max_vals): + raise Exception( + "I'm sorry, but our DP Privacy Accountant can't yet handle NaNs or Infinite values." + "This was likely caused by dividing by zero. Please find a way to approximate your " + "computation without dividing by a private value which might be zero. " + "\n" + "This can usually be done by computing the function piecewise- by performing three " + "computations which are merged through masking & summing; one which addresses what happens" + "if the denominator is positive-definite, another if it is negative-definite, and finally" + "one which addresses what happens if the denominator is 0 exactly. Use comparison operators," + "masking, and summing to accomplish this (not if statements- which don't work on private" + "values)." + "\n" + "Again, we apologize for the inconvenience and are working to extend support to this in " + "future versions of PySyft." + ) + + return min_vals, max_vals
diff --git a/packages/syft/tests/syft/core/tensor/lazy_repeat_array_test.py b/packages/syft/tests/syft/core/tensor/lazy_repeat_array_test.py --- a/packages/syft/tests/syft/core/tensor/lazy_repeat_array_test.py +++ b/packages/syft/tests/syft/core/tensor/lazy_repeat_array_test.py @@ -4,6 +4,7 @@ # syft absolute import syft as sy +from syft.core.tensor.lazy_repeat_array import has_nans_inf from syft.core.tensor.lazy_repeat_array import lazyrepeatarray @@ -94,3 +95,27 @@ def test_sum() -> None: lazyarray = lazyrepeatarray(data=np.array([1]), shape=array.shape) assert lazyarray.sum(axis=None).data == array.sum(axis=None) + + +def test_nans() -> None: + shape = (5, 5) + good_minv = lazyrepeatarray(1, shape) + bad_minv = lazyrepeatarray(np.nan, shape) + good_maxv = lazyrepeatarray(1000, shape) + bad_maxv = lazyrepeatarray(np.nan, shape) + assert has_nans_inf(min_val=good_minv, max_val=good_maxv) is False + assert has_nans_inf(min_val=good_minv, max_val=bad_maxv) is True + assert has_nans_inf(min_val=bad_minv, max_val=good_maxv) is True + assert has_nans_inf(min_val=bad_minv, max_val=bad_maxv) is True + + +def test_infs() -> None: + shape = (5, 5) + good_minv = lazyrepeatarray(1, shape) + bad_minv = lazyrepeatarray(np.inf, shape) + good_maxv = lazyrepeatarray(1000, shape) + bad_maxv = lazyrepeatarray(np.inf, shape) + assert has_nans_inf(min_val=good_minv, max_val=good_maxv) is False + assert has_nans_inf(min_val=good_minv, max_val=bad_maxv) is True + assert has_nans_inf(min_val=bad_minv, max_val=good_maxv) is True + assert has_nans_inf(min_val=bad_minv, max_val=bad_maxv) is True
Test if we can handle `NaN` values in the publish system without revealing private data. Currently, our test suite doesn't test with `NaN` values, which is a problem because we can expect to spend a `NaN` privacy budget when publishing arrays with at least one unknown value. Co-authored by @IshanMi
Given the issues that `NaN` causes I'm inclined to mark this as a `bug` even though it's more pre-emptive. I tested this and indeed the RDP constants become `NaN` and you can leak a whole array with a single `NaN` value. I inspected the code for `compute_rdp_constant` in `data_subject_ledger.py` and I was thinking that maybe when we detect `NaN` values (or `Inf` values) we can call `compute_rdp_constant` with private=False to use the bounds when computing the budget spent. @IshanMi what do you think about this solution? If there's a NaN or Inf value in our array, my gut feeling says there's a good chance the min_val or max_val might also be NaN or Inf, so private=False might also fail. Perhaps we can fall back on `private=False`, and throw an exception if `min_vals.data` or `max_vals.data` is a NaN or Inf.
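The guard being discussed is straightforward to express with numpy; a hedged sketch of the check, independent of the `has_nans_inf` helper the patch adds to `lazy_repeat_array.py`:

```python
# sketch: detect NaN or Inf values in the min/max bounds before publishing
import numpy as np


def bounds_are_invalid(min_vals: np.ndarray, max_vals: np.ndarray) -> bool:
    # np.isnan / np.isinf are elementwise; .any() collapses them to a single bool
    return bool(
        np.isnan(min_vals).any()
        or np.isinf(min_vals).any()
        or np.isnan(max_vals).any()
        or np.isinf(max_vals).any()
    )


print(bounds_are_invalid(np.array([0.0, 1.0]), np.array([10.0, np.nan])))  # True
print(bounds_are_invalid(np.array([0.0, 1.0]), np.array([10.0, 20.0])))  # False
```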
2022-11-09T15:05:41
OpenMined/PySyft
7,010
OpenMined__PySyft-7010
[ "6930" ]
b0168f9b0e05dc707be2a795a91224ee20ec61cb
diff --git a/packages/syft/src/syft/core/adp/data_subject_list.py b/packages/syft/src/syft/core/adp/data_subject_list.py --- a/packages/syft/src/syft/core/adp/data_subject_list.py +++ b/packages/syft/src/syft/core/adp/data_subject_list.py @@ -623,7 +623,7 @@ def from_objs(input_subjects: Union[np.ndarray, list]) -> ArrayLike: # per data point, we should make sure that we implement in such a way we expand # the datasubjects automatically for row to data point mapping. if not isinstance(input_subjects, np.ndarray): - input_subjects = np.array(input_subjects) + input_subjects = np.array(input_subjects, dtype=DataSubjectArray) data_map = ( lambda x: DataSubjectArray([str(x)]) diff --git a/packages/syft/src/syft/core/tensor/ancestors.py b/packages/syft/src/syft/core/tensor/ancestors.py --- a/packages/syft/src/syft/core/tensor/ancestors.py +++ b/packages/syft/src/syft/core/tensor/ancestors.py @@ -435,10 +435,9 @@ def _private( min_val, max_val, target_shape=self.child.shape ) - unique_data_subjects = len(data_subjects.sum()) - if unique_data_subjects == 1: + if any(len(x.item()) > 1 for x in np.nditer(data_subjects, flags=["refs_ok"])): self.replace_abstraction_top( - tensor_type=_PhiTensor(), + tensor_type=_GammaTensor(), child=self.child, min_vals=min_vals, max_vals=max_vals, @@ -446,7 +445,7 @@ def _private( ) # type: ignore else: self.replace_abstraction_top( - tensor_type=_GammaTensor(), + tensor_type=_PhiTensor(), child=self.child, min_vals=min_vals, max_vals=max_vals,
diff --git a/packages/syft/tests/syft/core/tensor/private_test.py b/packages/syft/tests/syft/core/tensor/private_test.py --- a/packages/syft/tests/syft/core/tensor/private_test.py +++ b/packages/syft/tests/syft/core/tensor/private_test.py @@ -4,6 +4,7 @@ # syft absolute import syft as sy +from syft.core.adp.data_subject_list import DataSubjectArray from syft.core.tensor.autodp.gamma_tensor import GammaTensor as GT from syft.core.tensor.autodp.phi_tensor import PhiTensor as PT from syft.core.tensor.lazy_repeat_array import lazyrepeatarray as lra @@ -163,11 +164,38 @@ def test_2d_array(tensor: Tensor, low: int, high: int) -> None: assert len(private.child.data_subjects.sum()) == 1 +def test_phi(tensor: Tensor, low: int, high: int) -> None: + data_subjects = np.random.choice(["Optimus Prime", "Bumblebee"], (5, 5)) + # Make sure there's at least one of "Optimus Prime" and "Bumblebee" to prevent + # the 1/2^24 chance of failure + data_subjects[0, 0] = "Optimus Prime" + data_subjects[4, 4] = "Bumblebee" + + private = tensor.private( + min_val=low, + max_val=high, + data_subjects=data_subjects, + ) + assert isinstance(private, Tensor) + assert isinstance(private.child, PT) + assert isinstance(private.child.min_vals, lra) + assert isinstance(private.child.max_vals, lra) + assert private.child.min_vals.shape == private.child.shape + assert private.child.max_vals.shape == private.child.shape + assert isinstance(private.child.data_subjects, np.ndarray) + assert private.child.data_subjects.shape == private.child.shape + assert len(private.child.data_subjects.sum()) == 2 + + def test_gamma(tensor: Tensor, low: int, high: int) -> None: + data_subjects = np.random.choice(["Optimus Prime", "Bumblebee"], (5, 5)).tolist() + data_subjects = [[DataSubjectArray([x]) for x in row] for row in data_subjects] + data_subjects[0][0] = DataSubjectArray(["Optimus Prime", "Bumblebee"]) + private = tensor.private( min_val=low, max_val=high, - data_subjects=np.random.choice(["Optimus Prime", "Bumblebee"], (5, 5)), + data_subjects=data_subjects, ) assert isinstance(private, Tensor) assert isinstance(private.child, GT)
GammaTensor created incorrectly when calling .private(). The heuristic we use to check whether a tensor is a PhiTensor or a GammaTensor when calling `.private()` or `.annotate_with_dp_metadata` is whether `len(data_subjects.sum()) > 1`. This raises false positives: GammaTensors have more than one data subject per value in the Tensor, but this check would also create a GammaTensor if `data_subjects` had one data subject per value but they were all different.
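To make the false positive concrete, here is a plain-Python illustration of the two heuristics, with sets of strings standing in for `DataSubjectArray` entries: counting unique subjects across the whole tensor versus asking whether any single value is owned by more than one subject:

```python
# sketch: "total unique subjects > 1" vs "any single value has > 1 subject"
data_subjects = [{"alice"}, {"bob"}, {"alice"}]  # one subject per value, two people overall

unique_subjects = set().union(*data_subjects)
old_heuristic_is_gamma = len(unique_subjects) > 1  # True -> false positive (GammaTensor)
new_heuristic_is_gamma = any(len(ds) > 1 for ds in data_subjects)  # False -> PhiTensor

print(old_heuristic_is_gamma, new_heuristic_is_gamma)
```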
2022-11-09T15:30:29
OpenMined/PySyft
7,011
OpenMined__PySyft-7011
[ "6971" ]
2e84528723aa1c805c05a31c1da4059b828d6e7c
diff --git a/packages/hagrid/hagrid/cli.py b/packages/hagrid/hagrid/cli.py --- a/packages/hagrid/hagrid/cli.py +++ b/packages/hagrid/hagrid/cli.py @@ -1,4 +1,5 @@ # stdlib +from collections import namedtuple import json import os from pathlib import Path @@ -183,7 +184,7 @@ def clean(location: str) -> None: ) @click.option( "--tail", - default="true", + default="false", required=False, type=str, help="Optional: don't tail logs on launch", @@ -285,7 +286,7 @@ def clean(location: str) -> None: ) @click.option( "--tag", - default=None, + default="latest", required=False, type=str, help="Optional: container image tag to use", @@ -388,6 +389,13 @@ def launch(args: TypeTuple[str], **kwargs: Any) -> None: port = match_port.group().replace("HTTP_PORT=", "") check_status("localhost" + ":" + port) + node_name = verb.get_named_term_type(name="node_name").raw_input + rich.get_console().print( + rich.panel.Panel.fit( + f"🚨🚨🚨 To view container logs run [bold red] hagrid logs {node_name} [/bold red]\t" + ) + ) + except Exception as e: print(f"Error: {e}\n\n") return @@ -3511,3 +3519,77 @@ def ssh(ip_address: str, cmd: str) -> None: cli.add_command(ssh) + + +# Add hagrid logs command to the CLI [email protected](help="Get the logs of the HAGrid node") [email protected]("domain_name", type=str) +def logs(domain_name: str) -> None: # nosec + + container_ids = ( + subprocess.check_output( # nosec + f"docker ps -qf name=^{domain_name}-*", shell=True + ) + .decode("utf-8") + .split() + ) + Container = namedtuple("Container", "id name logs") + container_names = [] + for container in container_ids: + container_name = ( + subprocess.check_output( # nosec + "docker inspect --format '{{.Name}}' " + container, shell=True + ) + .decode("utf-8") + .strip() + .replace("/", "") + ) + log_command = "docker logs -f " + container_name + container_names.append( + Container(id=container, name=container_name, logs=log_command) + ) + # Generate a table of the containers and their logs with Rich + table = rich.table.Table(title="Container Logs") + table.add_column("Container ID", justify="center", style="cyan", no_wrap=True) + table.add_column("Container Name", justify="right", style="cyan", no_wrap=True) + table.add_column("Log Command", justify="right", style="cyan", no_wrap=True) + for container in container_names: # type: ignore + table.add_row(container.id, container.name, container.logs) # type: ignore + console = rich.console.Console() + console.print(table) + # Print instructions on how to view the logs + console.print( + rich.panel.Panel( + long_string, + title="How to view logs", + border_style="white", + expand=False, + padding=1, + highlight=True, + ) + ) + + +long_string = ( + "ℹ [bold green]To view the live logs of a container,copy the log command and paste it into your terminal.[/bold green]\n" # noqa: E501 + + "\n" + + "ℹ [bold green]The logs will be streamed to your terminal until you exit the command.[/bold green]\n" + + "\n" + + "ℹ [bold green]To exit the logs, press CTRL+C.[/bold green]\n" + + "\n" + + "🚨 The [bold white]backend,backend_stream & celery[/bold white] [bold green]containers are the most important to monitor for debugging.[/bold green]\n" # noqa: E501 + + "\n" + + " [bold white]--------------- Ctop 🦾 -------------------------[/bold white]\n" + + "\n" + + "🧠 To learn about using [bold white]ctop[/bold white] to monitor your containers,visit https://www.youtube.com/watch?v=BJhlCxerQP4n \n" # noqa: E501 + + "\n" + + " [bold white]----------------- How to view this. 
🙂 ---------------[/bold white]\n" + + "\n" + + """ℹ [bold green]To view this panel again, run the command [bold white]hagrid logs {{DOMAIN_NAME}}[/bold white] [/bold green]\n""" # noqa: E501 + + "\n" + + """🚨 DOMAIN_NAME above is the name of your Hagrid deployment,without the curly braces. E.g hagrid logs canada [bold green]\n""" # noqa: E501 + + "\n" + + " [bold green]HAPPY DEBUGGING! 🐛🐞🦗🦟🦠🦠🦠[/bold green]\n " +) + +cli.add_command(logs)
"hagrid launch --tail=False" should be default + a table that lists containers and the commands to see their live logs The new user experience when --tail=False is incredible and by far the best experience for most users (everyone except devs?), especially since there's a definitive "you're fully launched' moment, as opposed to just watching logs until it stops. --tail=false should be the default experience except when you're in dev mode. That said - finding logs is hard and scary to docker noobs and they're going to experience bugs. I think it would be great if we added a table to the final output of hagrid launch --tail-False which listed every container with a single command for how to see the logs. (and a highlight for where "if you get errors they'll be here or here" pointing somehow towards the streaming and backend containers. STRETCH: link to Rasswanth's video for finding logs in case people need it.
I agree we should default to `tail=false`. To provide more access to information we should build `hagrid status`, which will show all the known nodes and allow a user to explore them further, either by checking logs or running commands like land, update, backup, etc. A small step in that direction would be a `hagrid logs` command which could provide a quick way to access the logs of different containers. In the meantime, some helpful print statements are likely the best short-term solution.
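A rough sketch of the log-table idea: list running containers whose name matches the node and print a ready-to-paste `docker logs -f` command for each with rich. The node name and the `docker ps` filtering below are illustrative:

```python
# sketch: list a node's containers together with the command to tail their logs
import subprocess

import rich
from rich.table import Table

node_name = "canada"  # example node name
output = subprocess.check_output(
    f"docker ps --format '{{{{.Names}}}}' --filter name={node_name}", shell=True
)
container_names = output.decode("utf-8").split()

table = Table(title=f"{node_name} container logs")
table.add_column("Container")
table.add_column("Log command")
for name in container_names:
    table.add_row(name, f"docker logs -f {name}")

rich.get_console().print(table)
```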
2022-11-09T23:23:25
OpenMined/PySyft
7,024
OpenMined__PySyft-7024
[ "6933" ]
0f7ca2e483ded8624cca32e260d4fbbc6ccaa259
diff --git a/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py b/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py --- a/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py +++ b/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py @@ -425,7 +425,7 @@ def _apply_op( Tuple[MPCTensor,Union[MPCTensor,int,float,np.ndarray]] : Result of the operation """ if isinstance(other, TensorWrappedPhiTensorPointer): - if (self.data_subjects != other.data_subjects).all(): # type: ignore + if np.array(self.data_subjects != other.data_subjects).all(): # type: ignore return getattr(self.gamma, op_str)(other.gamma) elif isinstance(other, TensorWrappedGammaTensorPointer): return getattr(self.gamma, op_str)(other) @@ -2744,7 +2744,7 @@ def __sub__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]: def __mul__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]: if isinstance(other, PhiTensor): - if (self.data_subjects == other.data_subjects).all(): + if np.array(self.data_subjects == other.data_subjects).all(): min_min = self.min_vals.data * other.min_vals.data min_max = self.min_vals.data * other.max_vals.data max_min = self.max_vals.data * other.min_vals.data @@ -2794,7 +2794,7 @@ def __mul__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]: def __truediv__(self, other: Any) -> Union[PhiTensor, GammaTensor]: if isinstance(other, PhiTensor): - if (self.data_subjects != other.data_subjects).all(): + if np.array(self.data_subjects != other.data_subjects).all(): return self.gamma / other.gamma else: min_min = self.min_vals.data / other.min_vals.data @@ -3250,7 +3250,7 @@ def concatenate( def __lt__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]: if isinstance(other, PhiTensor): - if (self.data_subjects == other.data_subjects).all(): + if np.array(self.data_subjects == other.data_subjects).all(): return PhiTensor( child=(self.child < other.child) * 1, data_subjects=self.data_subjects, @@ -3284,7 +3284,7 @@ def __le__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]: # if the tensor being compared is also private if isinstance(other, PhiTensor): - if (self.data_subjects == other.data_subjects).all(): + if np.array(self.data_subjects == other.data_subjects).all(): return PhiTensor( child=(self.child <= other.child) * 1, data_subjects=self.data_subjects, @@ -3318,7 +3318,7 @@ def __gt__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]: # if the tensor being compared is also private if isinstance(other, PhiTensor): - if (self.data_subjects == other.data_subjects).all(): + if np.array(self.data_subjects == other.data_subjects).all(): return PhiTensor( child=(self.child > other.child) * 1, data_subjects=self.data_subjects, @@ -3351,7 +3351,7 @@ def __ge__(self, other: SupportedChainType) -> Union[PhiTensor, GammaTensor]: # if the tensor being compared is also private if isinstance(other, PhiTensor): - if (self.data_subjects == other.data_subjects).all(): + if np.array(self.data_subjects == other.data_subjects).all(): return PhiTensor( child=(self.child >= other.child) * 1, data_subjects=self.data_subjects, @@ -3809,7 +3809,7 @@ def __floordiv__(self, other: Any) -> Union[PhiTensor, GammaTensor]: return self // value. """ if isinstance(other, PhiTensor): - if (self.data_subjects != other.data_subjects).all(): + if np.array(self.data_subjects != other.data_subjects).all(): return self.gamma // other.gamma else: min_min = self.min_vals.data // other.min_vals.data
Data subject checks fail when using the result of an operation like sum. In several places in the codebase, we do the following check: `if (self.data_subjects == other.data_subjects).all():` If `self` and `other` are the results of an operation like sum, then `self.data_subjects == other.data_subjects` returns a single bool (since we're comparing two singletons) instead of an array of booleans (which has the attribute `all()`). This causes the check above to fail: ![Image](https://user-images.githubusercontent.com/32711264/199333586-613306b6-92d1-48f8-93c2-7087366438b3.png)
This will also cause failures for methods like `__mod__`, `__truediv__`, `__eq__`, etc. Agreed. Since we are trying to support Tensors and Scalars in the same data types, we should be more careful and perhaps introduce an `equality` check function we can call which checks the sizes and does the correct comparison in the event of a Scalar.
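A tiny numpy repro of the failure mode and the `np.array(...)` wrapping used by the fix; the sets below just stand in for whatever scalar-like object `data_subjects` collapses to after a reduction:

```python
# sketch: comparing two plain Python objects returns a bool, which has no .all()
import numpy as np

a = {"ishan"}  # stand-in for the data subjects left over after .sum()
b = {"ishan"}

result = a == b  # plain Python bool
try:
    result.all()
except AttributeError as err:
    print("bool has no .all():", err)

# wrapping the comparison in np.array makes .all() work in both cases
print(np.array(a == b).all())  # scalar case
print((np.array(["x", "y"]) == np.array(["x", "y"])).all())  # array case still works
```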
2022-11-11T11:35:37
OpenMined/PySyft
7,083
OpenMined__PySyft-7083
[ "6975" ]
876b2dbf9ac080f5ec5e921863acff29464c3cf1
diff --git a/packages/hagrid/hagrid/cli.py b/packages/hagrid/hagrid/cli.py --- a/packages/hagrid/hagrid/cli.py +++ b/packages/hagrid/hagrid/cli.py @@ -3006,9 +3006,7 @@ def check_status( ) print("You can view your container logs using the following tool:") print("Tool: [link=https://ctop.sh]Ctop[/link]") - print( - "Video Explanation: [link=https://youtu.be/BJhlCxerQP4]How to use Ctop[/link]\n" - ) + print("Video Explanation: https://youtu.be/BJhlCxerQP4 \n") cli.add_command(check)
I can't click "How to use Ctop" on OSX ![Image](https://user-images.githubusercontent.com/4328594/200077151-078762cd-2a84-48d0-9b90-c53226192d43.png)
Interestingly this works for me, but I do have to press and hold `command` button first which is normal for highlighting links in my terminal. It might be a good idea to just output the raw link though for maximum compatibility. <img width="391" alt="Screen Shot 2022-11-07 at 3 50 35 pm" src="https://user-images.githubusercontent.com/2882739/200235594-c5842073-6141-4f13-9041-5a2b788324c3.png"> @madhavajay I would like to work on this issue if no one else has already started. Replacing [this](https://github.com/OpenMined/PySyft/blob/d2bc2290b545c37bb0b6f0a0ad888530ae284405/packages/hagrid/hagrid/cli.py#L2981) print statement with the below one should solve the issue, right? ```python3 print("Video Explanation: https://youtu.be/BJhlCxerQP4\n") ``` @bipinKrishnan Thanks. I have assigned you, let me know if you have any questions. 🙂 Cool! I will start my work on this :rocket:
2022-11-18T14:53:27
OpenMined/PySyft
7,108
OpenMined__PySyft-7108
[ "7041" ]
a62b5eaed20194ac9762154060b73c21ce307aee
diff --git a/packages/syft/src/syft/core/tensor/autodp/gamma_tensor.py b/packages/syft/src/syft/core/tensor/autodp/gamma_tensor.py --- a/packages/syft/src/syft/core/tensor/autodp/gamma_tensor.py +++ b/packages/syft/src/syft/core/tensor/autodp/gamma_tensor.py @@ -377,6 +377,8 @@ def _apply_self_tensor_op(self, op_str: str, *args: Any, **kwargs: Any) -> Any: data_subjects = getattr(self.data_subjects, op_str)(*args, **kwargs) if op_str in INPLACE_OPS: data_subjects = self.data_subjects + elif op_str in ("ones_like", "zeros_like"): + data_subjects = self.data_subjects else: raise ValueError(f"Invalid Numpy Operation: {op_str} for DSA") @@ -1382,12 +1384,27 @@ def __getitem__( """ return self._apply_self_tensor_op("__getitem__", key) + def zeros_like( + self, + *args: Any, + **kwargs: Any, + ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]: + """Apply the "zeros_like" operation on "self" + + Args: + y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand. + + Returns: + Union[TensorWrappedGammaTensorPointer,MPCTensor] : Result of the operation. + """ + return self._apply_self_tensor_op("zeros_like", *args, **kwargs) + def ones_like( self, *args: Any, **kwargs: Any, ) -> Union[TensorWrappedGammaTensorPointer, MPCTensor]: - """Apply the "ones like" operation on self" + """Apply the "ones_like" operation on "self" Args: y (Union[TensorWrappedGammaTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand. @@ -1765,6 +1782,15 @@ def to_local_object_without_private_data_child(self) -> GammaTensor: ) +@implements(TensorWrappedGammaTensorPointer, np.zeros_like) +def zeros_like( + tensor: TensorWrappedGammaTensorPointer, + *args: Any, + **kwargs: Any, +) -> TensorWrappedGammaTensorPointer: + return tensor.zeros_like(*args, **kwargs) + + @implements(TensorWrappedGammaTensorPointer, np.ones_like) def ones_like( tensor: TensorWrappedGammaTensorPointer, diff --git a/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py b/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py --- a/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py +++ b/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py @@ -322,6 +322,8 @@ def _apply_self_tensor_op(self, op_str: str, *args: Any, **kwargs: Any) -> Any: data_subjects = getattr(self.data_subjects, op_str)(*args, **kwargs) if op_str in INPLACE_OPS: data_subjects = self.data_subjects + elif op_str in ("ones_like", "zeros_like"): + data_subjects = self.data_subjects else: raise ValueError(f"Invalid Numpy Operation: {op_str} for DSA") @@ -954,12 +956,27 @@ def __getitem__( """ return self._apply_self_tensor_op("__getitem__", key) + def zeros_like( + self, + *args: Any, + **kwargs: Any, + ) -> TensorWrappedPhiTensorPointer: + """Apply the "zeros_like" operation on "self" + + Args: + y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand. + + Returns: + Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation. + """ + return self._apply_self_tensor_op("zeros_like", *args, **kwargs) + def ones_like( self, *args: Any, **kwargs: Any, ) -> TensorWrappedPhiTensorPointer: - """Apply the "ones_like" operation between "self" and "other" + """Apply the "ones_like" operation on "self" Args: y (Union[TensorWrappedPhiTensorPointer,MPCTensor,int,float,np.ndarray]) : second operand. 
@@ -1692,6 +1709,15 @@ def reshape(self, *args: Any, **kwargs: Any) -> TensorWrappedPhiTensorPointer: return self._apply_self_tensor_op("reshape", *args, **kwargs) +@implements(TensorWrappedPhiTensorPointer, np.zeros_like) +def zeros_like( + tensor: TensorWrappedPhiTensorPointer, + *args: Any, + **kwargs: Any, +) -> TensorWrappedPhiTensorPointer: + return tensor.zeros_like(*args, **kwargs) + + @implements(TensorWrappedPhiTensorPointer, np.ones_like) def ones_like( tensor: TensorWrappedPhiTensorPointer, diff --git a/packages/syft/src/syft/core/tensor/lazy_repeat_array.py b/packages/syft/src/syft/core/tensor/lazy_repeat_array.py --- a/packages/syft/src/syft/core/tensor/lazy_repeat_array.py +++ b/packages/syft/src/syft/core/tensor/lazy_repeat_array.py @@ -536,6 +536,9 @@ def compute_min_max( dummy_res = np.empty(x_min_vals.shape).max(*args, **kwargs) min_vals = lazyrepeatarray(data=x_min_vals.data, shape=dummy_res.shape) max_vals = lazyrepeatarray(data=x_max_vals.data, shape=dummy_res.shape) + elif op_str == "zeros_like": + min_vals = x_min_vals.zeros_like(*args, **kwargs) + max_vals = x_max_vals.zeros_like(*args, **kwargs) elif op_str == "ones_like": min_vals = x_min_vals.ones_like(*args, **kwargs) max_vals = x_max_vals.ones_like(*args, **kwargs)
diff --git a/packages/syft/tests/syft/core/tensor/adp/gamma_tensor_test.py b/packages/syft/tests/syft/core/tensor/adp/gamma_tensor_test.py --- a/packages/syft/tests/syft/core/tensor/adp/gamma_tensor_test.py +++ b/packages/syft/tests/syft/core/tensor/adp/gamma_tensor_test.py @@ -157,6 +157,50 @@ def deduct_epsilon_for_user(*args: Any, **kwargs: Any) -> bool: print(ledger_store.kv_store) +def test_zeros_like( + reference_data: np.ndarray, + upper_bound: np.ndarray, + lower_bound: np.ndarray, + ishan: DataSubjectArray, +) -> None: + data_subjects = np.broadcast_to( + np.array(DataSubjectArray(["eagle", "potato"])), reference_data.shape + ) + reference_tensor = GammaTensor( + child=reference_data, + data_subjects=data_subjects, + max_vals=upper_bound, + min_vals=lower_bound, + ) + output = reference_tensor.zeros_like() + assert np.all(output.child == 0) + assert output.min_vals.shape == reference_tensor.min_vals.shape + assert output.max_vals.shape == reference_tensor.max_vals.shape + assert (output.data_subjects == reference_tensor.data_subjects).all() + + +def test_ones_like( + reference_data: np.ndarray, + upper_bound: np.ndarray, + lower_bound: np.ndarray, + ishan: DataSubjectArray, +) -> None: + data_subjects = np.broadcast_to( + np.array(DataSubjectArray(["eagle", "potato"])), reference_data.shape + ) + reference_tensor = GammaTensor( + child=reference_data, + data_subjects=data_subjects, + max_vals=upper_bound, + min_vals=lower_bound, + ) + output = reference_tensor.ones_like() + assert np.all(output.child == 1) + assert output.min_vals.shape == reference_tensor.min_vals.shape + assert output.max_vals.shape == reference_tensor.max_vals.shape + assert (output.data_subjects == reference_tensor.data_subjects).all() + + def test_sum( reference_data: np.ndarray, upper_bound: np.ndarray, diff --git a/packages/syft/tests/syft/core/tensor/adp/phi_tensor_test.py b/packages/syft/tests/syft/core/tensor/adp/phi_tensor_test.py --- a/packages/syft/tests/syft/core/tensor/adp/phi_tensor_test.py +++ b/packages/syft/tests/syft/core/tensor/adp/phi_tensor_test.py @@ -131,6 +131,46 @@ def test_eq( ).all(), "Equality between identical PTs fails" +def test_zeros_like( + reference_data: np.ndarray, + upper_bound: np.ndarray, + lower_bound: np.ndarray, + ishan: DataSubjectArray, +) -> None: + ishan = np.broadcast_to(ishan, reference_data.shape) + reference_tensor = PT( + child=reference_data, + data_subjects=ishan, + max_vals=upper_bound, + min_vals=lower_bound, + ) + output = reference_tensor.zeros_like() + assert np.all(output.child == 0) + assert output.min_vals.shape == reference_tensor.min_vals.shape + assert output.max_vals.shape == reference_tensor.max_vals.shape + assert (output.data_subjects == reference_tensor.data_subjects).all() + + +def test_ones_like( + reference_data: np.ndarray, + upper_bound: np.ndarray, + lower_bound: np.ndarray, + ishan: DataSubjectArray, +) -> None: + ishan = np.broadcast_to(ishan, reference_data.shape) + reference_tensor = PT( + child=reference_data, + data_subjects=ishan, + max_vals=upper_bound, + min_vals=lower_bound, + ) + output = reference_tensor.ones_like() + assert np.all(output.child == 1) + assert output.min_vals.shape == reference_tensor.min_vals.shape + assert output.max_vals.shape == reference_tensor.max_vals.shape + assert (output.data_subjects == reference_tensor.data_subjects).all() + + def test_add_wrong_types( reference_data: np.ndarray, upper_bound: np.ndarray,
`np.ones_like` doesn't work on data pointers ## Description `np.ones_like(d_ptr)` throws `ValueError: Invalid Numpy Operation: ones_like for DSA`. ## How to Reproduce ```py import syft as sy import numpy as np domain_client = sy.login(email="[email protected]", password="changethis", port=8081) arr = np.random.randint(0, 10000, (10000)) data = sy.Tensor(arr).annotate_with_dp_metadata(-1, 11000, data_subjects="Bob") data_ptr = data.send(domain_client, tags=["my_data"]) domain_client.create_user(name="DS", email="[email protected]", password="password", budget=99999) ds_client = sy.login(email="[email protected]", password="password", port=8081) d_ptr = ds_client.store[data_ptr.id_at_location] np.ones_like(d_ptr) ``` ```py --------------------------------------------------------------------------- ValueError Traceback (most recent call last) Cell In [7], line 1 ----> 1 np.ones_like(d_ptr) File <__array_function__ internals>:180, in ones_like(*args, **kwargs) File ~/pysyft/packages/syft/src/syft/core/tensor/passthrough.py:698, in PassthroughTensor.__array_function__(self, func, types, args, kwargs) 696 implementation = query_implementation(self.__class__, func) 697 if implementation: --> 698 return implementation(*args, **kwargs) 699 return self.__class__(func(*args, **kwargs)) File ~/pysyft/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py:1693, in ones_like(tensor, *args, **kwargs) 1687 @implements(TensorWrappedPhiTensorPointer, np.ones_like) 1688 def ones_like( 1689 tensor: TensorWrappedPhiTensorPointer, 1690 *args: Any, 1691 **kwargs: Any, 1692 ) -> TensorWrappedPhiTensorPointer: -> 1693 return tensor.ones_like(*args, **kwargs) File ~/pysyft/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py:962, in TensorWrappedPhiTensorPointer.ones_like(self, *args, **kwargs) 949 def ones_like( 950 self, 951 *args: Any, 952 **kwargs: Any, 953 ) -> TensorWrappedPhiTensorPointer: 954 """Apply the "ones_like" operation between "self" and "other" 955 956 Args: (...) 960 Union[TensorWrappedPhiTensorPointer,MPCTensor] : Result of the operation. 961 """ --> 962 return self._apply_self_tensor_op("ones_like", *args, **kwargs) File ~/pysyft/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py:318, in TensorWrappedPhiTensorPointer._apply_self_tensor_op(self, op_str, *args, **kwargs) 316 data_subjects = self.data_subjects 317 else: --> 318 raise ValueError(f"Invalid Numpy Operation: {op_str} for DSA") 320 result = TensorWrappedPhiTensorPointer( 321 data_subjects=data_subjects, 322 min_vals=min_vals, 323 max_vals=max_vals, 324 client=self.client, 325 ) 327 # QUESTION can the id_at_location be None? ValueError: Invalid Numpy Operation: ones_like for DSA ``` ## Expected Behavior A clear and concise description of what you expected to happen. ## Screenshots If applicable, add screenshots to help explain your problem. ## System Information - OS: [e.g. iOS] - OS Version: [e.g. 22] - Language Version: [e.g. Python 3.7, Node 10.18.1] - Package Manager Version: [e.g. Conda 4.6.1, NPM 6.14.1] - Browser (if applicable): [e.g. Google Chrome] - Browser Version (if applicable): [e.g. 81.0.4044.138] ## Additional Context Add any other context about the problem here.
Before fixing this we need to decide whether we're actually going to try to modify the numpy API or whether we should just make an imitation syft.numpy module where we define our own methods (like `zeros_like()`) with no connection to actual numpy. If unsure we should do the latter for now because it's easier to build and easier to change later (and less likely to be buggy).

@kiendang is this a duplicate of this issue: https://github.com/OpenMined/PySyft/issues/6922

I don't think so. `ones_like` was implemented but doesn't work. Maybe @rasswanth-s can confirm?

Sorry, I meant the `ufunc` support: https://github.com/OpenMined/PySyft/issues/7042

Could be related. What I understand is that `ufunc` support for `np.ones_like` was supposed to work but is broken? #7042 is discussing whether we support `ufunc` at all? So yes, these two issues are closely related.
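For readers coming to this cold: `np.ones_like(d_ptr)` only reaches Syft code because of NumPy's `__array_function__` protocol (NEP 18), where the wrapping class registers handlers for individual top-level numpy functions. The patch above adds such handlers for `zeros_like`/`ones_like` on the pointer classes. Below is a minimal, self-contained sketch of that registry pattern; the names `MyTensor`, `HANDLED_FUNCTIONS`, and `implements` are illustrative stand-ins, not Syft's actual internals.

```python
import numpy as np

HANDLED_FUNCTIONS = {}  # maps numpy functions to our own implementations


def implements(np_function):
    """Register a handler for a top-level numpy function (NEP 18 pattern)."""

    def decorator(func):
        HANDLED_FUNCTIONS[np_function] = func
        return func

    return decorator


class MyTensor:
    def __init__(self, child):
        self.child = np.asarray(child)

    def __array_function__(self, func, types, args, kwargs):
        if func not in HANDLED_FUNCTIONS:
            # Tell numpy we don't handle this instead of trying to run it ourselves
            return NotImplemented
        return HANDLED_FUNCTIONS[func](*args, **kwargs)


@implements(np.ones_like)
def _ones_like(tensor, *args, **kwargs):
    # operate on the plain ndarray child, so there is no re-dispatch back to MyTensor
    return MyTensor(np.ones_like(tensor.child, *args, **kwargs))


t = MyTensor([1, 2, 3])
print(np.ones_like(t).child)  # [1 1 1]
# np.zeros_like(t) would raise "TypeError: no implementation found ..." here,
# because no handler is registered and __array_function__ returns NotImplemented.
```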
2022-11-24T07:14:01
OpenMined/PySyft
7,110
OpenMined__PySyft-7110
[ "7109" ]
7bb5e9f8bdfad34e98f1e285b7fd8b04561f6e22
diff --git a/packages/syft/src/syft/core/tensor/tensor.py b/packages/syft/src/syft/core/tensor/tensor.py --- a/packages/syft/src/syft/core/tensor/tensor.py +++ b/packages/syft/src/syft/core/tensor/tensor.py @@ -517,8 +517,8 @@ def __init__( self.public_dtype = public_dtype print( - "Tensor created! You can activate Differential Privacy protection by calling .private() or \ - .annotate_with_dp_metadata()." + "Tensor created! You can activate Differential Privacy protection by calling .private() or " + ".annotate_with_dp_metadata()." ) def tag(self, name: str) -> Tensor:
Redundant white spaces in message on tensor creation ## Description <img width="1018" alt="Screenshot 2022-11-24 at 3 46 54 PM" src="https://user-images.githubusercontent.com/6521018/203723732-ecae0e8c-d724-4860-ad71-6d1d8a6e2caf.png"> ## How to Reproduce Create any `sy.Tensor` ## Expected Behavior A clear and concise description of what you expected to happen. ## Screenshots If applicable, add screenshots to help explain your problem. ## System Information - OS: [e.g. iOS] - OS Version: [e.g. 22] - Language Version: [e.g. Python 3.7, Node 10.18.1] - Package Manager Version: [e.g. Conda 4.6.1, NPM 6.14.1] - Browser (if applicable): [e.g. Google Chrome] - Browser Version (if applicable): [e.g. 81.0.4044.138] ## Additional Context Add any other context about the problem here.
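The stray gap in the message is plain Python string behaviour rather than anything Syft-specific: a backslash line continuation inside a string literal keeps the next line's indentation as part of the string, whereas adjacent string literals are concatenated by the parser with nothing in between (which is what the one-line patch above switches to). A minimal illustration with an abbreviated message:

```python
# Backslash continuation: the indentation before ".annotate..." ends up inside the string.
bad = "calling .private() or \
    .annotate_with_dp_metadata()."
print(repr(bad))   # 'calling .private() or     .annotate_with_dp_metadata().'  (note the run of spaces)

# Implicit concatenation of adjacent literals: no stray whitespace.
good = (
    "calling .private() or "
    ".annotate_with_dp_metadata()."
)
print(repr(good))  # 'calling .private() or .annotate_with_dp_metadata().'
```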
2022-11-24T07:52:06
OpenMined/PySyft
7,111
OpenMined__PySyft-7111
[ "7042" ]
2ecfa816d46c30667787d6b75fb62bd566d19c48
diff --git a/packages/syft/src/syft/core/tensor/passthrough.py b/packages/syft/src/syft/core/tensor/passthrough.py --- a/packages/syft/src/syft/core/tensor/passthrough.py +++ b/packages/syft/src/syft/core/tensor/passthrough.py @@ -696,13 +696,20 @@ def __array_function__( implementation = query_implementation(self.__class__, func) if implementation: return implementation(*args, **kwargs) - return self.__class__(func(*args, **kwargs)) - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - implementation = query_implementation(self.__class__, ufunc) + method_name = func.__name__ + implementation = getattr(self.__class__, method_name, None) if implementation: - return implementation(*inputs, **kwargs) - return self.__class__(ufunc(*inputs, **kwargs)) + return ( + implementation(*args, **kwargs) + if callable(implementation) + else self.__getattribute__(method_name) + ) + + return NotImplemented + + # Set __array_ufunc_ = None for now until we can implement this properly + __array_ufunc__ = None def __repr__(self): return f"{self.__class__.__name__}(child={self.child})"
diff --git a/packages/syft/tests/syft/core/tensor/passthrough_test.py b/packages/syft/tests/syft/core/tensor/passthrough_test.py --- a/packages/syft/tests/syft/core/tensor/passthrough_test.py +++ b/packages/syft/tests/syft/core/tensor/passthrough_test.py @@ -8,6 +8,7 @@ # syft absolute from syft.core.tensor.passthrough import PassthroughTensor +from syft.core.tensor.tensor import Tensor from syft.core.tensor.util import implements @@ -933,6 +934,7 @@ def test__array_function__() -> None: assert result_c == expected_c.child [email protected] def test__array_ufunc__() -> None: data_a = np.array([1, 2, 3], dtype=np.int32) tensor_a = PtTensorSubclass(child=data_a, unit="Hedgehogs") @@ -964,6 +966,7 @@ def test_repr() -> None: assert type(result) == str [email protected] def test_square() -> None: data = np.array([[0, 1], [-2, 3]], dtype=np.int32) expected = np.array([[0, 1], [4, 9]], dtype=np.int32) @@ -972,3 +975,20 @@ def test_square() -> None: result = np.square(tensor_a) assert result == tensor_b + + +def test_unimplemented_array_func() -> None: + # test if unimplemented ops correctly return TypeError: no implementation found + tensor = Tensor(np.array([0, 1, 2, 3], dtype=np.int32)) + + # change to some op other than full_like if this ends up being implemented + with pytest.raises(TypeError, match="no implementation found"): + np.full_like(tensor, 7) + + +def test_unsupported_ufunc() -> None: + # we don't currently support ufunc + tensor = Tensor(np.array([0, 1, 2, 3], dtype=np.int32)) + + with pytest.raises(TypeError, match="does not support ufuncs"): + np.sin(tensor)
Calling unimplemented numpy methods on tensors causes infinite loop instead of throwing `NotImplemented` ## Description Calling unimplemented methods such as `np.zeros_like` or `np.full_like` on data pointers causes non terminating recursion instead of throwing `NotImplemented`. ## How to Reproduce create a data pointer ```py import syft as sy import numpy as np domain_client = sy.login(email="[email protected]", password="changethis", port=8081) arr = np.random.randint(0, 10000, (10000)) data = sy.Tensor(arr).annotate_with_dp_metadata(-1, 11000, data_subjects="Bob") data_ptr = data.send(domain_client, tags=["my_data"]) domain_client.create_user(name="DS", email="[email protected]", password="password", budget=99999) ds_client = sy.login(email="[email protected]", password="password", port=8081) d_ptr = ds_client.store[data_ptr.id_at_location] ``` call `np.zeros_like` which is not implemented ```py np.zeros_like(d_ptr) ``` ```py --------------------------------------------------------------------------- RecursionError Traceback (most recent call last) Cell In [8], line 1 ----> 1 np.zeros_like(d_ptr) File <__array_function__ internals>:180, in zeros_like(*args, **kwargs) File ~/pysyft/packages/syft/src/syft/core/tensor/passthrough.py:699, in PassthroughTensor.__array_function__(self, func, types, args, kwargs) 697 if implementation: 698 return implementation(*args, **kwargs) --> 699 return self.__class__(func(*args, **kwargs)) File <__array_function__ internals>:180, in zeros_like(*args, **kwargs) File ~/pysyft/packages/syft/src/syft/core/tensor/passthrough.py:699, in PassthroughTensor.__array_function__(self, func, types, args, kwargs) 697 if implementation: 698 return implementation(*args, **kwargs) --> 699 return self.__class__(func(*args, **kwargs)) [... skipping similar frames: zeros_like at line 180 (988 times), PassthroughTensor.__array_function__ at line 699 (987 times)] File ~/pysyft/packages/syft/src/syft/core/tensor/passthrough.py:699, in PassthroughTensor.__array_function__(self, func, types, args, kwargs) 697 if implementation: 698 return implementation(*args, **kwargs) --> 699 return self.__class__(func(*args, **kwargs)) File <__array_function__ internals>:180, in zeros_like(*args, **kwargs) File ~/pysyft/packages/syft/src/syft/core/tensor/passthrough.py:693, in PassthroughTensor.__array_function__(self, func, types, args, kwargs) 684 def __array_function__( 685 self, 686 func: Callable, (...) 691 # Note: this allows subclasses that don't override 692 # __array_function__ to handle PassthroughTensor objects. --> 693 if not all(issubclass(t, self.__class__) for t in types): 694 return NotImplemented 696 implementation = query_implementation(self.__class__, func) RecursionError: maximum recursion depth exceeded while calling a Python object ``` ## Expected Behavior A clear and concise description of what you expected to happen. ## Screenshots If applicable, add screenshots to help explain your problem. ## System Information - OS: [e.g. iOS] - OS Version: [e.g. 22] - Language Version: [e.g. Python 3.7, Node 10.18.1] - Package Manager Version: [e.g. Conda 4.6.1, NPM 6.14.1] - Browser (if applicable): [e.g. Google Chrome] - Browser Version (if applicable): [e.g. 81.0.4044.138] ## Additional Context Add any other context about the problem here.
Before fixing this we need to decide whether we're actually going to try to modify the numpy API or whether we should just make an imitation syft.numpy module where we define our own methods (like `zeros_like()`) with no connection to actual numpy. If unsure we should do the latter for now because it's easier to build and easier to change later (and less likely to be buggy).

Yeah, doesn't supporting the `np` functional style require us to provide `__array_ufunc__` support? I believe we started this in `PassthroughTensor` but it's probably incomplete and it was super confusing. Perhaps the easiest thing here is to raise `NotImplementedError` for now?

```python
    def __array_function__(
        self,
        func: Callable,
        types: List[Type],  # what this means = List of Type(Type())
        args: List[Any],
        kwargs: Dict[str, Any],
    ) -> PassthroughTensor:
        # Note: this allows subclasses that don't override
        # __array_function__ to handle PassthroughTensor objects.
        if not all(issubclass(t, self.__class__) for t in types):
            return NotImplemented

        implementation = query_implementation(self.__class__, func)
        if implementation:
            return implementation(*args, **kwargs)
        return self.__class__(func(*args, **kwargs))

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        implementation = query_implementation(self.__class__, ufunc)
        if implementation:
            return implementation(*inputs, **kwargs)
        return self.__class__(ufunc(*inputs, **kwargs))
```

This feels like a combo of both these issues:
1) calling passthrough tensor methods on a pointer https://github.com/OpenMined/PySyft/issues/7043
2) triggering ufunc by passing pointers / tensor types to np.functional calls https://github.com/OpenMined/PySyft/issues/7042

Maybe something like the following:

```
def __getattribute__(self, attr_name: str) -> Any:
    if attr_name in VISIBLE_ATTRS:
        return object.__getattribute__(self, attr_name)
    raise ValueError(f"Attribute {attr_name} not found!")  # Or NotImplemented
```

`VISIBLE_ATTRS` would have to be a list of strings with all the `methods/attributes` we support.

@gmuraru this would require us to maintain a separate `VISIBLE_ATTRS`? The issue here is just a non-terminating recursion; would detecting non-implementation and throwing `NotImplemented` early, instead of continuing to descend into `self.__class__(func(*args, **kwargs))`, be a cleaner solution?

Yep, it would imply keeping that list of implemented methods. There might be some other workarounds for this problem. We could also check whether `func` is implemented, but I think you would still need to have a list of implemented methods.

Changed the title because currently this is the issue with both tensors and pointers, since they both subclass `PassthroughTensor`, but the issue with pointers is being tracked at #6922.
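The fix that was eventually merged (the patch further up in this record) takes the second path: `__array_function__` returns `NotImplemented` whenever there is no registered handler and no matching method, and `__array_ufunc__` is set to `None` to opt out of ufuncs entirely. A small stand-alone sketch of what those two choices give you; the class is illustrative and the exact error wording depends on the NumPy version (the project's own tests match on the fragments shown in the comments).

```python
import numpy as np


class Wrapper:
    # Opting out of ufuncs entirely: np.sin(w), np.add(w, 1), ... raise TypeError
    # instead of looping back through our own code.
    __array_ufunc__ = None

    def __init__(self, child):
        self.child = np.asarray(child)

    def __array_function__(self, func, types, args, kwargs):
        # Handle only the funcs we explicitly support; reject everything else cleanly.
        if func is np.sum:
            return Wrapper(np.sum(self.child))
        return NotImplemented


w = Wrapper([1, 2, 3])
print(np.sum(w).child)  # 6

try:
    np.zeros_like(w)
except TypeError as e:
    print(e)  # "no implementation found ..."

try:
    np.sin(w)
except TypeError as e:
    print(e)  # "... does not support ufuncs"
```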
2022-11-24T11:18:16
OpenMined/PySyft
7,123
OpenMined__PySyft-7123
[ "6922" ]
449cbe4862b006317d783e4d3597a901a478d0ad
diff --git a/packages/syft/src/syft/core/tensor/autodp/gamma_tensor.py b/packages/syft/src/syft/core/tensor/autodp/gamma_tensor.py --- a/packages/syft/src/syft/core/tensor/autodp/gamma_tensor.py +++ b/packages/syft/src/syft/core/tensor/autodp/gamma_tensor.py @@ -80,7 +80,7 @@ @serializable(recursive_serde=True) -class TensorWrappedGammaTensorPointer(Pointer, PassthroughTensor): +class TensorWrappedGammaTensorPointer(Pointer): __name__ = "TensorWrappedGammaTensorPointer" __module__ = "syft.core.tensor.autodp.gamma_tensor" __attr_allowlist__ = [ diff --git a/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py b/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py --- a/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py +++ b/packages/syft/src/syft/core/tensor/autodp/phi_tensor.py @@ -67,7 +67,7 @@ @serializable(recursive_serde=True) -class TensorWrappedPhiTensorPointer(Pointer, PassthroughTensor): +class TensorWrappedPhiTensorPointer(Pointer): __name__ = "TensorWrappedPhiTensorPointer" __module__ = "syft.core.tensor.autodp.phi_tensor" __attr_allowlist__ = [
Some operations are still implemented in PassthroughTensor and are in the Syft Tensor AST, even though they're not supported From the list of supported ops, you can see that some ops are not supported: argpartition, partition, cumprod, conj, etc Some of these are still implemented in PassthroughTensor and still are included in the Syft Tensor API located in `syft/core/tensor/__init__.py` and thus show up when calling `dir(TensorPointer)` ![Image](https://user-images.githubusercontent.com/32711264/199150919-9fe02d50-bcc3-4d4f-acf5-deecf921450a.png)
Anything that doesn't reliably work should be `commented` out for the 0.7 release. @IshanMi could this just be because the Pointer classes subclass from `PassthroughTensor`? ```python class TensorWrappedPhiTensorPointer(Pointer, PassthroughTensor): class TensorWrappedGammaTensorPointer(Pointer, PassthroughTensor): ``` There may need to be some cleanup here involving PassthroughTensor but it's as of yet unclear to me. The idea of PassthroughTensor was to be able to reduce the number of ops we had to implement by implementing some ops by using simpler ops. For example, sum() could be implemented as a series of __add__ operations at the pointer level. This should have alleviated the need for us to implement a sum() method for SMPC or DP as long as SMPC/DP had __add__ implemented. It'd probably be a bit slower, but would bring us to full API functionality faster. If, however,. there are ops which PassthroughTensor defines but doesn't have functionality for, we should comment them out. However, I think PassthroughTensor is still a good idea, particularly for giving us lots of functionality quickly. @iamtrask As I understand it PassthroughTensor is for the server side. Pointers are for the client side. Client side Pointers will never have a .child hence the issue. Ah - so I suppose PassthroughTensor is doing two things when it should really only do one. 1) if an op can be framed as merely a series of other tensor ops, implement it 2) if an op cannot be framed as a series of other ops, call self.child.op The main value of passthrough tensor is (1). That's the bit I'd like for us to keep. We can delete the parts of the api for (2). (I don't know if those deletions will break things... they might) > Ah - so I suppose PassthroughTensor is doing two things when it should really only do one. > > 1. if an op can be framed as merely a series of other tensor ops, implement it > 2. if an op cannot be framed as a series of other ops, call self.child.op > > The main value of passthrough tensor is (1). That's the bit I'd like for us to keep. We can delete the parts of the api for (2). (I don't know if those deletions will break things... they might) Does it make sense for a Pointer to implement its client side logic with calls to multiple actions as composition? All that does is send 3 sub actions and chain the results, rather than sending 1 action and having the server side do the composition. I think Pointers should have 0 knowledge of anything except "send this action", which is why they used to be derived directly. As I understand the goal of subclassing is to provide default behavior, but what is the default behavior for an unimplemented method? Looking at the code I think whats happening is because we have public data such as bounds and data subjects, its desirable that the generated local pointer from an operation mirrors the same behavior that would happen remotely. 
If this was implemented as a DP Pointer parent class then it could do something like:

```python
def op(self, *args, **kwargs) -> Any:
    self.send_action(self.op.__name__, args, kwargs)
    min_vals = apply_op(op, self.min_vals, args, kwargs)
    max_vals = apply_op(op, self.max_vals, args, kwargs)
    data_subjects = apply_op(op, self.data_subjects, args, kwargs)
    return DPPointer.create(data_subjects.is_gamma, min_vals, max_vals, data_subjects)
```

This works since the DP Tensor Pointer knows about its internal public bounds and data subject attributes which require mutation, and since the DSA and bounds should be a `numpy`-like type ready to be called directly with the op. If min_vals, max_vals, or data_subjects happen to inherit from PassthroughTensor then they can get all the free composition locally as well, but the Pointer shouldn't know about this stuff. Then the default behavior for any `op` on DPTensorPointer would be to send that op to the server and run it against its local public attributes, if we wanted to.

Looking at `MPCTensor` it looks like that uses a separate `TensorPointer` which does not inherit from `PassthroughTensor`. Without digging into it I guess there's probably some coupling and overlap, particularly around the need for multiple return type signatures from methods which could result in either a DP or SMPC TensorPointer.

I suggest we do two things here:
1) remove the bug by not subclassing from PassthroughTensor
2) separately look at our Pointer design and unify it around a set of constraints which allow for local public and remote private data of different types to be executed

It's not ideal for Pointer to implement ops as multiple ops because it means we're sending more messages than necessary. It's a fine way to get to a lot of functionality fast, but the ideal state is to have that all on the server side. Given that ideal state, it seems like a good decision for us to remove passthrough from pointer. Since SMPC functionality is so limited there's a part of me that wonders if leaving some of the methods that work by calling a series of other methods on self would be worth it. But if we do a push on SMPC we can cross that bridge then.
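The symptom in the bug report, `dir(TensorPointer)` advertising ops like `cumprod` that can never run on the client, is an ordinary consequence of multiple inheritance, which is why the merged patch simply drops `PassthroughTensor` from the pointer bases. A toy illustration (the names are made up, not Syft's real classes):

```python
class ServerSideOps:
    """Stands in for PassthroughTensor: ops written in terms of self.child."""

    def cumprod(self):
        return self.child.cumprod()  # a client-side pointer has no .child


class Pointer:
    def send_action(self, op_name: str) -> None:
        print(f"send {op_name} to the server")


class BadPointer(Pointer, ServerSideOps):  # inherits ops it cannot honour
    pass


class GoodPointer(Pointer):  # advertises only what it can actually do
    pass


print("cumprod" in dir(BadPointer))   # True  -> shows up in autocomplete, then fails
print("cumprod" in dir(GoodPointer))  # False
```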
2022-11-28T03:02:33
OpenMined/PySyft
7,125
OpenMined__PySyft-7125
[ "7121" ]
e052633dea48a8706a114324d087a7a3496574de
diff --git a/packages/syft/src/syft/core/adp/data_subject_list.py b/packages/syft/src/syft/core/adp/data_subject_list.py --- a/packages/syft/src/syft/core/adp/data_subject_list.py +++ b/packages/syft/src/syft/core/adp/data_subject_list.py @@ -630,16 +630,6 @@ def sqrt(self, *args: Any, **kwargs: Any) -> DataSubjectArray: def rint(self, *args: Any, **kwargs: Any) -> DataSubjectArray: return DataSubjectArray(self.data_subjects) - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs) -> ArrayLike: # type: ignore - method_name = ufunc.__name__ - method = getattr(self, method_name, None) - if method is not None: - return method(*inputs, **kwargs) - else: - raise NotImplementedError( - f"Method: {method_name} not implemented in DataSubjectArray" - ) - @staticmethod def from_objs(input_subjects: Union[np.ndarray, list]) -> ArrayLike: # TODO: When the user passes the data subjects they might pass it as list
Implementation of `__array_ufunc__` for `DataSubjectArray` only supports `__call__` According to https://numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__, implementation of `__array_ufunc__` should support different call `method`s: `__call__`, `reduce`, `reduceat`, `accumulate`, `outer`, `inner`. In our current implementation for `DataSubjectArray` we only support `__call__`. In other words, if users call `np.multiply.reduce` on `DataSubjectArray(["bob", "alice"])` it would be the same as calling `np.multiply`. https://github.com/OpenMined/PySyft/blob/291e506bec5de63e8fd96153efca9f1e31cc0e50/packages/syft/src/syft/core/adp/data_subject_list.py#L633-L641 See how we don't use the `method` argument passed to `__array_ufunc__` here? Even though I suspect supporting `reduce`, `reduceat`, `accumulate`, `outer`, `inner` doesn't make sense for `DataSubjectArray`, we should at least throw an error when users try to do that. This happens with our implementation of `__array_ufunc__` for `PassthroughTensor` too. In `PassthroughTensor`, supporting the other `method`s does make sense. That's why in #7111 I opted to just disable `__array_ufunc__` entirely for now. @rasswanth-s
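For reference, `__array_ufunc__` is called as `__array_ufunc__(ufunc, method, *inputs, **kwargs)`, where `method` tells you whether the ufunc was invoked directly (`"__call__"`) or via one of its methods such as `reduce` or `accumulate`. A minimal hedged sketch of the "at least raise for unsupported methods" behaviour suggested here; this is not the actual `DataSubjectArray` code, just an illustration of branching on `method`:

```python
import numpy as np


class Tagged:
    def __init__(self, data, tag):
        self.data = np.asarray(data)
        self.tag = tag

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        if method != "__call__":
            # np.multiply.reduce(t), np.add.accumulate(t), ... are rejected explicitly
            # instead of being silently treated like a plain np.multiply / np.add call.
            raise NotImplementedError(
                f"{ufunc.__name__}.{method} is not supported for Tagged arrays"
            )
        raw = [x.data if isinstance(x, Tagged) else x for x in inputs]
        return Tagged(ufunc(*raw, **kwargs), self.tag)


t = Tagged([1, 2, 3], tag="bob")
print(np.multiply(t, 2).data)  # [2 4 6]
np.multiply.reduce(t)          # raises NotImplementedError
```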
2022-11-28T09:44:34
OpenMined/PySyft
7,127
OpenMined__PySyft-7127
[ "7117" ]
1aed011bda1cb3e15a6505d5b76b0896ca69c10d
diff --git a/packages/syft/src/syft/core/node/common/node_service/user_manager/new_user_messages.py b/packages/syft/src/syft/core/node/common/node_service/user_manager/new_user_messages.py --- a/packages/syft/src/syft/core/node/common/node_service/user_manager/new_user_messages.py +++ b/packages/syft/src/syft/core/node/common/node_service/user_manager/new_user_messages.py @@ -304,6 +304,7 @@ def run( # type: ignore or self.payload.name or self.payload.institution or self.payload.website + or self.payload.budget ) # Change own information @@ -311,7 +312,7 @@ def run( # type: ignore if not _valid_parameters: raise MissingRequestKeyError( - "Missing json fields ( email,password,role,groups, name )" + "Missing json fields (email, password, role, groups, name or budget)" ) if not _valid_user:
Budget update throwing 500 on UI ## Description Not able to update the privacy budget of a user from the UI. This was working in the previous beta versions. ## How to Reproduce 1. Log into the UI with admin credentials 2. Select a user 3. Click on adjust budget 4. update the budget value and click save ## Expected Behavior User budget is updated successfully. ## Screenshots ![Screenshot from 2022-11-25 16-41-03](https://user-images.githubusercontent.com/11032835/203979609-a59eabd4-e58c-49a1-9843-23170c7874e2.png) ## System Information - OS: [e.g. iOS] - OS Version: [e.g. 22] - Language Version: [e.g. Python 3.7, Node 10.18.1] - Package Manager Version: [e.g. Conda 4.6.1, NPM 6.14.1] - Browser (if applicable): [e.g. Google Chrome] - Browser Version (if applicable): [e.g. 81.0.4044.138] ## Additional Context Add any other context about the problem here.
@tcp do you know if this will be difficult, or is it related to a common problem with PyGrid UI in 0.7?

@madhavajay hopefully with https://github.com/OpenMined/PySyft/pull/7112 we'll get it over the hump.
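The patch above fixes the 500 by adding `budget` to the `or` chain that decides whether the update request carries at least one field. One thing worth noting about that pattern, with a hedged sketch of a stricter variant under the assumption that a budget of `0` should still count as a legitimate update: a plain truthiness chain treats `0`, `""`, and `None` the same way.

```python
# Sketch only, not the PyGrid handler. `payload` is any object with these attributes.
UPDATABLE_FIELDS = (
    "email", "password", "role", "groups", "name", "institution", "website", "budget",
)


def has_any_update(payload) -> bool:
    # `not in (None, "")` keeps budget=0 from being mistaken for
    # "no budget supplied", which a bare `or` chain would do.
    return any(getattr(payload, f, None) not in (None, "") for f in UPDATABLE_FIELDS)
```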
2022-11-29T10:49:51
OpenMined/PySyft
7,150
OpenMined__PySyft-7150
[ "7151" ]
faf5edbf154dd0f7f17c7a698ba551212d136def
diff --git a/packages/hagrid/hagrid/cli.py b/packages/hagrid/hagrid/cli.py --- a/packages/hagrid/hagrid/cli.py +++ b/packages/hagrid/hagrid/cli.py @@ -389,10 +389,10 @@ def launch(args: TypeTuple[str], **kwargs: Any) -> None: port = match_port.group().replace("HTTP_PORT=", "") check_status("localhost" + ":" + port) - node_name = verb.get_named_term_type(name="node_name").raw_input + node_name = verb.get_named_term_type(name="node_name").snake_input rich.get_console().print( rich.panel.Panel.fit( - f"🚨🚨🚨 To view container logs run [bold red] hagrid logs {node_name} [/bold red]\t" + f"✨ To view container logs run [bold green]hagrid logs {node_name}[/bold green]\t" ) ) diff --git a/packages/hagrid/hagrid/deps.py b/packages/hagrid/hagrid/deps.py --- a/packages/hagrid/hagrid/deps.py +++ b/packages/hagrid/hagrid/deps.py @@ -36,6 +36,7 @@ # relative from .exceptions import MissingDependency +from .lib import is_gitpod from .mode import EDITABLE_MODE from .nb_output import NBOutput from .version import __version__ @@ -546,7 +547,7 @@ def allowed_to_run_docker() -> Tuple[bool, str]: bool_result = True # Check if current user is member of docker group. - elif user not in "".join(line): + elif not is_gitpod() and user not in "".join(line): msg = f"""⚠️ User is not a member of docker group. {WHITE}You're currently not allowed to run docker, perform the following steps:\n 1 - Run \'{GREEN}sudo usermod -a -G docker $USER\'{WHITE} to add docker permissions.
hagrid launch shows hagrid logs None if a node name is not given ## Description If a node name is not specified in `hagrid launch`, i.e. `hagrid launch domain to docker:8081`, the message will read `To view container logs run hagrid logs None`. <img width="403" alt="Screenshot 2022-12-07 at 12 11 29 AM" src="https://user-images.githubusercontent.com/6521018/205967978-e68e44fe-7fa9-4306-9137-42c9ac08a025.png"> ## How to Reproduce 1. Go to '...' 2. Click on '...' 3. Scroll down to '...' 4. See error ## Expected Behavior A clear and concise description of what you expected to happen. ## Screenshots If applicable, add screenshots to help explain your problem. ## System Information - OS: [e.g. iOS] - OS Version: [e.g. 22] - Language Version: [e.g. Python 3.7, Node 10.18.1] - Package Manager Version: [e.g. Conda 4.6.1, NPM 6.14.1] - Browser (if applicable): [e.g. Google Chrome] - Browser Version (if applicable): [e.g. 81.0.4044.138] ## Additional Context Add any other context about the problem here.
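The mechanics are generic: interpolating an optional value straight into an f-string prints the literal `None` when the user never supplied a name, so the hint has to fall back to the name hagrid derived for the node (which is what the merged patch does by reading the snake-cased name instead of the raw input). A generic sketch, not hagrid's actual code:

```python
from typing import Optional


def logs_hint(node_name: Optional[str]) -> str:
    shown = node_name if node_name else "<node_name>"  # avoid "hagrid logs None"
    return f"To view container logs run: hagrid logs {shown}"


print(logs_hint(None))           # To view container logs run: hagrid logs <node_name>
print(logs_hint("test_domain"))  # To view container logs run: hagrid logs test_domain
```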
2022-12-06T16:04:51
OpenMined/PySyft
7,168
OpenMined__PySyft-7168
[ "7141" ]
604dea9bac799bef4582297f69195dfcbfe7b8e9
diff --git a/packages/hagrid/hagrid/cli.py b/packages/hagrid/hagrid/cli.py --- a/packages/hagrid/hagrid/cli.py +++ b/packages/hagrid/hagrid/cli.py @@ -124,7 +124,8 @@ def cli() -> None: @click.command( - help="Restore some part of the hagrid installation or deployment to its initial/starting state." + help="Restore some part of the hagrid installation or deployment to its initial/starting state.", + context_settings={"show_default": True}, ) @click.argument("location", type=str, nargs=1) def clean(location: str) -> None: @@ -145,89 +146,82 @@ def clean(location: str) -> None: subprocess.call("docker rmi $(docker images -q)", shell=True) # nosec [email protected](help="Start a new PyGrid domain/network node!") [email protected]( + help="Start a new PyGrid domain/network node!", + context_settings={"show_default": True}, +) @click.argument("args", type=str, nargs=-1) @click.option( "--username", default=None, required=False, type=str, - help="Optional: the username for provisioning the remote host", + help="Username for provisioning the remote host", ) @click.option( "--key_path", default=None, required=False, type=str, - help="Optional: the path to the key file for provisioning the remote host", + help="Path to the key file for provisioning the remote host", ) @click.option( "--password", default=None, required=False, type=str, - help="Optional: the password for provisioning the remote host", + help="Password for provisioning the remote host", ) @click.option( "--repo", default=None, required=False, type=str, - help="Optional: repo to fetch source from", + help="Repo to fetch source from", ) @click.option( "--branch", default=None, required=False, type=str, - help="Optional: branch to monitor for updates", + help="Branch to monitor for updates", ) @click.option( "--tail", - default="false", - required=False, - type=str, - help="Optional: don't tail logs on launch", + is_flag=True, + help="Tail logs on launch", ) @click.option( "--headless", - default="false", - required=False, - type=str, - help="Optional: don't start the frontend container", + is_flag=True, + help="Start the frontend container", ) @click.option( "--cmd", - default="false", - required=False, - type=str, - help="Optional: print the cmd without running it", + is_flag=True, + help="Print the cmd without running it", ) @click.option( "--jupyter", is_flag=True, - help="Optional: enable Jupyter Notebooks", + help="Enable Jupyter Notebooks", ) @click.option( "--build", - default=None, - required=False, - type=str, - help="Optional: enable or disable forcing re-build", + is_flag=True, + help="Disable forcing re-build", ) @click.option( - "--provision", - default="true", - required=False, - type=str, - help="Optional: enable or disable provisioning VMs", + "--no_provision", + is_flag=True, + help="Disable provisioning VMs", ) @click.option( "--node_count", default=1, required=False, type=click.IntRange(1, 250), - help="Optional: number of independent nodes/VMs to launch", + help="Number of independent nodes/VMs to launch", ) @click.option( "--auth_type", @@ -247,89 +241,81 @@ def clean(location: str) -> None: default="production", required=False, type=click.Choice(["production", "development"], case_sensitive=False), - help="Optional: choose between production and development release", + help="Choose between production and development release", ) @click.option( "--cert_store_path", default="/home/om/certs", required=False, type=str, - help="Optional: remote path to store and load TLS cert and key", + help="Remote path to store and 
load TLS cert and key", ) @click.option( "--upload_tls_cert", default="", required=False, type=str, - help="Optional: local path to TLS cert to upload and store at --cert_store_path", + help="Local path to TLS cert to upload and store at --cert_store_path", ) @click.option( "--upload_tls_key", default="", required=False, type=str, - help="Optional: local path to TLS private key to upload and store at --cert_store_path", + help="Local path to TLS private key to upload and store at --cert_store_path", ) @click.option( - "--use_blob_storage", - default=None, - required=False, - type=str, - help="Optional: flag to use blob storage", + "--no_blob_storage", + is_flag=True, + help="Disable blob storage", ) @click.option( "--image_name", default=None, required=False, type=str, - help="Optional: image to use for the VM", + help="Image to use for the VM", ) @click.option( "--tag", default=None, required=False, type=str, - help="Optional: container image tag to use", + help="Container image tag to use", ) @click.option( "--build_src", default=DEFAULT_BRANCH, required=False, type=str, - help="Optional: git branch to use for launch / build operations", + help="Git branch to use for launch / build operations", ) @click.option( "--platform", default=None, required=False, type=str, - help="Optional: run docker with a different platform like linux/arm64", + help="Run docker with a different platform like linux/arm64", ) @click.option( - "--vpn", - default="true", - required=False, - type=str, - help="Optional: turn tailscale vpn container on or off", + "--no_vpn", + is_flag=True, + help="Disable tailscale vpn container", ) @click.option( "--silent", is_flag=True, - help="Optional: prevent lots of launch output", + help="Suppress extra launch outputs", ) @click.option( "--from_template", - default="false", - required=False, - type=str, - help="Optional: launch node using the manifest template", + is_flag=True, + help="Launch node using the manifest template", ) @click.option( - "--health_checks", - default="true", - required=False, - type=str, - help="Optional: turn on or off auto health checks post node launch", + "--no_health_checks", + is_flag=True, + help="Turn off auto health checks post node launch", ) def launch(args: TypeTuple[str], **kwargs: Any) -> None: verb = get_launch_verb() @@ -351,19 +337,15 @@ def launch(args: TypeTuple[str], **kwargs: Any) -> None: print(f"Error: {e}\n\n") return - dry_run = True - if "cmd" not in kwargs or str_to_bool(cast(str, kwargs["cmd"])) is False: - dry_run = False + dry_run = bool(kwargs["cmd"]) - health_checks = str_to_bool(cast(str, kwargs["health_checks"])) + health_checks = not bool(kwargs["no_health_checks"]) try: - tail = False if "tail" in kwargs and not str_to_bool(kwargs["tail"]) else True + tail = bool(kwargs["tail"]) silent = not tail - from_rendered_dir = ( - str_to_bool(cast(str, kwargs["from_template"])) and EDITABLE_MODE - ) + from_rendered_dir = bool(kwargs["from_template"]) and EDITABLE_MODE execute_commands( cmds, dry_run=dry_run, silent=silent, from_rendered_dir=from_rendered_dir @@ -1054,20 +1036,13 @@ def create_launch_cmd( host = host_term.host auth: Optional[AuthCredentials] = None - tail = True - if "tail" in kwargs and not str_to_bool(kwargs["tail"]): - tail = False + tail = bool(kwargs["tail"]) parsed_kwargs = {} - if "build" in kwargs and kwargs["build"] is not None: - parsed_kwargs["build"] = str_to_bool(cast(str, kwargs["build"])) - else: - parsed_kwargs["build"] = None + parsed_kwargs["build"] = bool(kwargs["build"]) - 
parsed_kwargs["use_blob_storage"] = ( - kwargs["use_blob_storage"] if "use_blob_storage" in kwargs else None - ) + parsed_kwargs["use_blob_storage"] = not bool(kwargs["no_blob_storage"]) parsed_kwargs["node_count"] = ( int(kwargs["node_count"]) if "node_count" in kwargs else 1 @@ -1079,19 +1054,15 @@ def create_launch_cmd( # Default to detached mode if running more than one nodes tail = False if parsed_kwargs["node_count"] > 1 else tail - headless = False - if "headless" in kwargs and str_to_bool(cast(str, kwargs["headless"])): - headless = True + headless = bool(kwargs["headless"]) parsed_kwargs["headless"] = headless - parsed_kwargs["tls"] = bool(kwargs["tls"]) if "tls" in kwargs else False - parsed_kwargs["test"] = bool(kwargs["test"]) if "test" in kwargs else False - parsed_kwargs["dev"] = bool(kwargs["dev"]) if "dev" in kwargs else False + parsed_kwargs["tls"] = bool(kwargs["tls"]) + parsed_kwargs["test"] = bool(kwargs["test"]) + parsed_kwargs["dev"] = bool(kwargs["dev"]) - parsed_kwargs["silent"] = bool(kwargs["silent"]) if "silent" in kwargs else False - parsed_kwargs["from_template"] = ( - str_to_bool(kwargs["from_template"]) if "from_template" in kwargs else False - ) + parsed_kwargs["silent"] = bool(kwargs["silent"]) + parsed_kwargs["from_template"] = bool(kwargs["from_template"]) parsed_kwargs["release"] = "production" if "release" in kwargs and kwargs["release"] != "production": @@ -1107,8 +1078,8 @@ def create_launch_cmd( parsed_kwargs["upload_tls_cert"] = kwargs["upload_tls_cert"] if "upload_tls_key" in kwargs: parsed_kwargs["upload_tls_key"] = kwargs["upload_tls_key"] - if "provision" in kwargs: - parsed_kwargs["provision"] = str_to_bool(cast(str, kwargs["provision"])) + + parsed_kwargs["provision"] = not bool(kwargs["no_provision"]) if "image_name" in kwargs and kwargs["image_name"] is not None: parsed_kwargs["image_name"] = kwargs["image_name"] @@ -1125,14 +1096,13 @@ def create_launch_cmd( else: parsed_kwargs["jupyter"] = False - if "vpn" in kwargs and kwargs["vpn"] is not None: - parsed_kwargs["vpn"] = str_to_bool(cast(str, kwargs["vpn"])) - else: - parsed_kwargs["vpn"] = True + parsed_kwargs["vpn"] = not bool(kwargs["no_vpn"]) # allows changing docker platform to other cpu architectures like arm64 parsed_kwargs["platform"] = kwargs["platform"] if "platform" in kwargs else None + parsed_kwargs["tail"] = tail + if parsed_kwargs["from_template"] and host is not None: # Setup the files from the manifest_template.yml kwargs = setup_from_manifest_template(host_type=host) @@ -1693,14 +1663,11 @@ def create_launch_docker_cmd( version_string = GRID_SRC_VERSION[0] version_string += "-dev" version_hash = GRID_SRC_VERSION[1] - if build is None: - build = True + build = True else: # whereas if in production mode and tag == "local" use the local VERSION file # or if its not set somehow, which should never happen, use stable # otherwise use the kwargs["tag"] from above - if build is None: - build = False # during production the default would be stable if version_string == "local": @@ -1725,11 +1692,9 @@ def create_launch_docker_cmd( print("\n") - use_blob_storage = "True" - if str(node_type.input) == "network": - use_blob_storage = "False" - elif "use_blob_storage" in kwargs and kwargs["use_blob_storage"] is not None: - use_blob_storage = str(str_to_bool(kwargs["use_blob_storage"])) + use_blob_storage = ( + False if str(node_type.input) == "network" else bool(kwargs["use_blob_storage"]) + ) envs = { "RELEASE": "production", @@ -1743,7 +1708,7 @@ def create_launch_docker_cmd( 
"TRAEFIK_PUBLIC_NETWORK_IS_EXTERNAL": "False", "VERSION": version_string, "VERSION_HASH": version_hash, - "USE_BLOB_STORAGE": use_blob_storage, + "USE_BLOB_STORAGE": str(use_blob_storage), "FRONTEND_TARGET": "grid-ui-production", "STACK_API_KEY": str( generate_sec_random_password(length=48, special_chars=False) @@ -1794,17 +1759,17 @@ def create_launch_docker_cmd( cmd += " docker compose -p " + snake_name - if "vpn" in kwargs and kwargs["vpn"]: + if bool(kwargs["vpn"]): cmd += " --profile vpn" if str(node_type.input) == "network": cmd += " --profile network" - if str_to_bool(use_blob_storage): + if use_blob_storage: cmd += " --profile blob-storage" # no frontend container so expect bad gateway on the / route - if kwargs["headless"] is False: + if not bool(kwargs["headless"]): cmd += " --profile frontend" # new docker compose regression work around @@ -2132,7 +2097,7 @@ def create_launch_gcp_cmd( if not host_up: raise Exception(f"Something went wrong launching the VM at IP: {host_ip}.") - if "provision" in kwargs and not kwargs["provision"]: + if not bool(kwargs["provision"]): print("Skipping automatic provisioning.") print("VM created with:") print(f"IP: {host_ip}") @@ -2282,7 +2247,7 @@ def create_launch_azure_cmd( host_term.parse_input(host_ip) verb.set_named_term_type(name="host", new_term=host_term) - if "provision" in kwargs and not kwargs["provision"]: + if not bool(kwargs["provision"]): print("Skipping automatic provisioning.") print("VM created with:") print(f"Name: {snake_name}") @@ -2579,14 +2544,15 @@ def create_land_docker_cmd(verb: GrammarVerb) -> str: return cmd [email protected](help="Stop a running PyGrid domain/network node.") [email protected]( + help="Stop a running PyGrid domain/network node.", + context_settings={"show_default": True}, +) @click.argument("args", type=str, nargs=-1) @click.option( "--cmd", - default="false", - required=False, - type=str, - help="Optional: print the cmd without running it", + is_flag=True, + help="Print the cmd without running it", ) @click.option( "--ansible_extras", @@ -2598,22 +2564,22 @@ def create_land_docker_cmd(verb: GrammarVerb) -> str: default=DEFAULT_BRANCH, required=False, type=str, - help="Optional: git branch to use for launch / build operations", + help="Git branch to use for launch / build operations", ) @click.option( "--silent", is_flag=True, - help="Optional: prevent lots of land output", + help="Suppress extra outputs", ) @click.option( "--force", is_flag=True, - help="Optional: bypass the prompt during hagrid land ", + help="Bypass the prompt during hagrid land", ) def land(args: TypeTuple[str], **kwargs: Any) -> None: verb = get_land_verb() - silent = bool(kwargs["silent"]) if "silent" in kwargs else False - force = bool(kwargs["force"]) if "force" in kwargs else False + silent = bool(kwargs["silent"]) + force = bool(kwargs["force"]) try: grammar = parse_grammar(args=args, verb=verb) verb.load_grammar(grammar=grammar) @@ -2645,7 +2611,7 @@ def land(args: TypeTuple[str], **kwargs: Any) -> None: ) if force or _land_domain == "y": - if "cmd" not in kwargs or str_to_bool(cast(str, kwargs["cmd"])) is False: + if not bool(kwargs["cmd"]): if not silent: print("Running: \n", cmd) try: @@ -2673,7 +2639,9 @@ def land(args: TypeTuple[str], **kwargs: Any) -> None: cli.add_command(clean) [email protected](help="Show HAGrid debug information") [email protected]( + help="Show HAGrid debug information", context_settings={"show_default": True} +) @click.argument("args", type=str, nargs=-1) def debug(args: TypeTuple[str], **kwargs: 
TypeDict[str, Any]) -> None: debug_info = gather_debug() @@ -2882,22 +2850,25 @@ def get_syft_install_status(host_name: str) -> bool: return True [email protected](help="Check health of an IP address/addresses or a resource group") [email protected]( + help="Check health of an IP address/addresses or a resource group", + context_settings={"show_default": True}, +) @click.argument("ip_addresses", type=str, nargs=-1) @click.option( "--timeout", default=300, - help="Timeout for hagrid check command,Default: 300 seconds", + help="Timeout for hagrid check command", ) @click.option( - "--silent", - default=True, - help="Optional: don't refresh output,Defaults True", + "--verbose", + is_flag=True, + help="Refresh output", ) def check( - ip_addresses: TypeList[str], silent: bool = True, timeout: Union[int, str] = 300 + ip_addresses: TypeList[str], verbose: bool = False, timeout: Union[int, str] = 300 ) -> None: - check_status(ip_addresses=ip_addresses, silent=silent, timeout=timeout) + check_status(ip_addresses=ip_addresses, silent=not verbose, timeout=timeout) def _check_status( @@ -3021,7 +2992,7 @@ def check_status( # add Hagrid info to the cli [email protected](help="Show HAGrid info") [email protected](help="Show HAGrid info", context_settings={"show_default": True}) def version() -> None: print(f"HAGRID_VERSION: {get_version_string()}") if EDITABLE_MODE: @@ -3189,12 +3160,14 @@ def enqueue_output(out: Any, queue: Queue) -> None: raise e [email protected](help="Launch a Syft + Jupyter Session with a Notebook URL / Path") [email protected]( + help="Launch a Syft + Jupyter Session with a Notebook URL / Path", + context_settings={"show_default": True}, +) @click.argument("url", type=str, required=False) @click.option( "--reset", is_flag=True, - show_default=True, default=False, help="Force hagrid quickstart to setup a fresh virtualenv", ) @@ -3206,15 +3179,11 @@ def enqueue_output(out: Any, queue: Queue) -> None: @click.option( "--quiet", is_flag=True, - show_default=True, - default=False, help="Silence confirmation prompts", ) @click.option( "--pre", is_flag=True, - show_default=True, - default=False, help="Install pre-release versions of syft", ) @click.option( @@ -3224,7 +3193,6 @@ def enqueue_output(out: Any, queue: Queue) -> None: ) @click.option( "--test", - default=False, is_flag=True, help="CI Test Mode, don't hang on Jupyter", ) @@ -3416,7 +3384,7 @@ def add_intro_notebook(directory: str, reset: bool = False) -> str: return os.path.abspath(f"{directory}/{filename}") [email protected](help="Walk the Path") [email protected](help="Walk the Path", context_settings={"show_default": True}) @click.option( "--repo", help="Obi-Wan will guide you to Dagobah", @@ -3468,7 +3436,10 @@ def ssh_into_remote_machine( raise e [email protected](help="SSH into the IP address or a resource group") [email protected]( + help="SSH into the IP address or a resource group", + context_settings={"show_default": True}, +) @click.argument("ip_address", type=str) @click.option( "--cmd", @@ -3530,7 +3501,9 @@ def ssh(ip_address: str, cmd: str) -> None: # Add hagrid logs command to the CLI [email protected](help="Get the logs of the HAGrid node") [email protected]( + help="Get the logs of the HAGrid node", context_settings={"show_default": True} +) @click.argument("domain_name", type=str) def logs(domain_name: str) -> None: # nosec
diff --git a/packages/hagrid/tests/hagrid/cli_test.py b/packages/hagrid/tests/hagrid/cli_test.py --- a/packages/hagrid/tests/hagrid/cli_test.py +++ b/packages/hagrid/tests/hagrid/cli_test.py @@ -1,4 +1,5 @@ # stdlib +from collections import defaultdict from typing import List from typing import Tuple @@ -18,7 +19,9 @@ def test_hagrid_launch() -> None: verb = cli.get_launch_verb() grammar = cli.parse_grammar(args=tuple(args), verb=verb) verb.load_grammar(grammar=grammar) - cmd = cli.create_launch_cmd(verb=verb, kwargs={}, ignore_docker_version_check=True) + cmd = cli.create_launch_cmd( + verb=verb, kwargs=defaultdict(lambda: None), ignore_docker_version_check=True + ) cmd = cmd["Launching"][0] # type: ignore @@ -56,7 +59,9 @@ def test_hagrid_launch_without_name_with_preposition() -> None: verb = cli.get_launch_verb() grammar = cli.parse_grammar(args=tuple(args), verb=verb) verb.load_grammar(grammar=grammar) - cmd = cli.create_launch_cmd(verb=verb, kwargs={}, ignore_docker_version_check=True) + cmd = cli.create_launch_cmd( + verb=verb, kwargs=defaultdict(lambda: None), ignore_docker_version_check=True + ) cmd = cmd["Launching"][0] # type: ignore # check that it's a domain by default @@ -93,7 +98,9 @@ def test_launch_with_multiword_domain_name() -> None: verb = cli.get_launch_verb() grammar = cli.parse_grammar(args=tuple(args), verb=verb) verb.load_grammar(grammar=grammar) - cmd = cli.create_launch_cmd(verb=verb, kwargs={}, ignore_docker_version_check=True) + cmd = cli.create_launch_cmd( + verb=verb, kwargs=defaultdict(lambda: None), ignore_docker_version_check=True + ) cmd = cmd["Launching"][0] # type: ignore @@ -117,7 +124,9 @@ def test_launch_with_longer_multiword_domain_name() -> None: verb = cli.get_launch_verb() grammar = cli.parse_grammar(args=tuple(args), verb=verb) verb.load_grammar(grammar=grammar) - cmd = cli.create_launch_cmd(verb=verb, kwargs={}, ignore_docker_version_check=True) + cmd = cli.create_launch_cmd( + verb=verb, kwargs=defaultdict(lambda: None), ignore_docker_version_check=True + ) cmd = cmd["Launching"][0] # type: ignore @@ -144,7 +153,9 @@ def test_launch_with_longer_multiword_domain_name_with_preposition() -> None: verb = cli.get_launch_verb() grammar = cli.parse_grammar(args=tuple(args), verb=verb) verb.load_grammar(grammar=grammar) - cmd = cli.create_launch_cmd(verb=verb, kwargs={}, ignore_docker_version_check=True) + cmd = cli.create_launch_cmd( + verb=verb, kwargs=defaultdict(lambda: None), ignore_docker_version_check=True + ) cmd = cmd["Launching"][0] # type: ignore
In hagrid boolean arguments (e.g. `--cmd=True`) should be flags (just `--cmd`) instead ## Description This has always been a gripe for me when working with hagrid. This and #7140 seem like small things but the main purpose of is to reduce friction between users and the entire Syft stack and would benefit from a smoother dev ex. Boolean options like `--cmd=True`, `--tail=True`, `--headless=True` should have just been flags, *i.e.* `--cmd`, `--tail` and `--headless`. Reasons: 1. This is how most cli tools work. 2. `--cmd` achieves the same functionality and is also much shorter. Plus with `--cmd=True` users not familiar with the tools or those only use it once in a while will have a hard time remembering whether it's `--cmd`, `--cmd=True`, or `--cmd=true`, how about `--cmd=1`? 3. In hagrid there already exist other flags like `--silent`, `--tls`, `--test`, `--dev`, so this is both against cli best practice and inconsistent within hagrid itself. ## Are you interested in working on this improvement yourself? Yes @iamtrask gave his 👌 on fixing this and #7140. We're still deciding on the timeline. I know this is not originally planned as required for 0.7, and hagrid version is separated from syft, but with a new syft release a lot of folks are gonna use hagrid, some for the first time, so would be quite appropriate to make the changes available on 0.7 release. My proposal is that I will work on these for the next week. If it takes more than that we can reassess. ## Additional Context Add any other context or screenshots.
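In Click terms the proposal is the difference between a string-typed pseudo-boolean that later gets parsed by hand and a real flag declared with `is_flag=True`. A small hedged sketch (the option names mirror hagrid's; the command body is just an illustration):

```python
import click


@click.command()
# String-typed pseudo-boolean: users must remember --tail=True / --tail=true / ...
@click.option("--tail", default="false", type=str, help="don't tail logs on launch")
# Real flag: presence means True, absence means False.
@click.option("--cmd", is_flag=True, help="print the cmd without running it")
def launch(tail: str, cmd: bool) -> None:
    tail_bool = tail.lower() in ("true", "1", "y", "yes")  # manual parsing, easy to get wrong
    click.echo(f"tail={tail_bool} cmd={cmd}")


# `launch --cmd` sets cmd=True; the string version needs `launch --tail=True`.
if __name__ == "__main__":
    launch()
```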
Totally agree. I think it's worth doing some planning around rebuilding hagrid from scratch so that it's much cleaner, more consistent, and more modular. I think a new Python CLI code base could be started and the functionality we want ported over pretty quickly once we include the desired capabilities. Let's discuss this post-0.7.
2022-12-10T17:29:21
OpenMined/PySyft
7,170
OpenMined__PySyft-7170
[ "7140" ]
4b921c93940e67481bda8a093d31fad2ec854f3c
diff --git a/packages/hagrid/hagrid/cli.py b/packages/hagrid/hagrid/cli.py --- a/packages/hagrid/hagrid/cli.py +++ b/packages/hagrid/hagrid/cli.py @@ -159,7 +159,7 @@ def clean(location: str) -> None: help="Username for provisioning the remote host", ) @click.option( - "--key_path", + "--key-path", default=None, required=False, type=str, @@ -212,24 +212,24 @@ def clean(location: str) -> None: help="Disable forcing re-build", ) @click.option( - "--no_provision", + "--no-provision", is_flag=True, help="Disable provisioning VMs", ) @click.option( - "--node_count", + "--node-count", default=1, required=False, type=click.IntRange(1, 250), help="Number of independent nodes/VMs to launch", ) @click.option( - "--auth_type", + "--auth-type", default=None, type=click.Choice(["key", "password"], case_sensitive=False), ) @click.option( - "--ansible_extras", + "--ansible-extras", default="", type=str, ) @@ -244,33 +244,33 @@ def clean(location: str) -> None: help="Choose between production and development release", ) @click.option( - "--cert_store_path", + "--cert-store-path", default="/home/om/certs", required=False, type=str, help="Remote path to store and load TLS cert and key", ) @click.option( - "--upload_tls_cert", + "--upload-tls-cert", default="", required=False, type=str, - help="Local path to TLS cert to upload and store at --cert_store_path", + help="Local path to TLS cert to upload and store at --cert-store-path", ) @click.option( - "--upload_tls_key", + "--upload-tls-key", default="", required=False, type=str, - help="Local path to TLS private key to upload and store at --cert_store_path", + help="Local path to TLS private key to upload and store at --cert-store-path", ) @click.option( - "--no_blob_storage", + "--no-blob-storage", is_flag=True, help="Disable blob storage", ) @click.option( - "--image_name", + "--image-name", default=None, required=False, type=str, @@ -284,7 +284,7 @@ def clean(location: str) -> None: help="Container image tag to use", ) @click.option( - "--build_src", + "--build-src", default=DEFAULT_BRANCH, required=False, type=str, @@ -298,7 +298,7 @@ def clean(location: str) -> None: help="Run docker with a different platform like linux/arm64", ) @click.option( - "--no_vpn", + "--no-vpn", is_flag=True, help="Disable tailscale vpn container", ) @@ -308,12 +308,12 @@ def clean(location: str) -> None: help="Suppress extra launch outputs", ) @click.option( - "--from_template", + "--from-template", is_flag=True, help="Launch node using the manifest template", ) @click.option( - "--no_health_checks", + "--no-health-checks", is_flag=True, help="Turn off auto health checks post node launch", ) @@ -2537,12 +2537,12 @@ def create_land_docker_cmd(verb: GrammarVerb) -> str: help="Print the cmd without running it", ) @click.option( - "--ansible_extras", + "--ansible-extras", default="", type=str, ) @click.option( - "--build_src", + "--build-src", default=DEFAULT_BRANCH, required=False, type=str, diff --git a/packages/hagrid/hagrid/lib.py b/packages/hagrid/hagrid/lib.py --- a/packages/hagrid/hagrid/lib.py +++ b/packages/hagrid/hagrid/lib.py @@ -254,7 +254,7 @@ def should_provision_remote( if username and password or username and key_path: return is_remote if is_remote: - raise Exception("--username requires either --password or --key_path") + raise Exception("--username requires either --password or --key-path") return is_remote
Hagrid command line argument names should be `--hyphen-separated` instead of `--snake_case` ## Description Things like `--auth_type`, `--ansible_extras` should be `--auth-type` and `--ansible-extras`. Three reasons for this: 1. This is how options in most CLI tools are defined. 2. It saves a keystroke: typing `-` is much faster than `_`. 3. Most importantly, there could be some options that Hagrid passes through to the underlying CLI tools. For example, while working on implementing k3d and k8s support for Hagrid, I found some options that are passed to the `k3d`, `kubectl`, or `devspace` CLIs, for example `--registry-port` and `--registry-volume`. Setting them as `--registry_port`, `--registry_volume` would be a source of confusion to users who are familiar with those tools. ## Are you interested in working on this improvement yourself? Yes. Please see #7141 for planning. ## Additional Context Add any other context or screenshots.
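The rename is mostly mechanical because click already maps hyphenated option names onto snake_case Python parameters. A minimal sketch of that behavior (the option names below are just examples taken from the diff, not the full Hagrid CLI):

```python
# Minimal sketch: click exposes "--key-path" on the command line but passes
# the value to the Python callback as the keyword argument "key_path".
import click


@click.command()
@click.option("--key-path", default=None, required=False, type=str, help="Path to an SSH key")
@click.option("--node-count", default=1, required=False, type=click.IntRange(1, 250))
def launch(key_path, node_count):
    click.echo(f"key_path={key_path} node_count={node_count}")


if __name__ == "__main__":
    launch()
```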
2022-12-11T07:06:05
OpenMined/PySyft
7,187
OpenMined__PySyft-7187
[ "7054" ]
c28f6a93e97a05edb036f0fc4e91aa32c8c1a29f
diff --git a/packages/hagrid/hagrid/cache.py b/packages/hagrid/hagrid/cache.py --- a/packages/hagrid/hagrid/cache.py +++ b/packages/hagrid/hagrid/cache.py @@ -29,6 +29,14 @@ "gcp_repo": DEFAULT_REPO, "gcp_branch": STABLE_BRANCH, "install_wizard_complete": False, + "aws_region": "us-east-1", + "aws_security_group_name": "openmined_sg", + "aws_security_group_cidr": "0.0.0.0/0", + "aws_image_id": "ami-09cd747c78a9add63", # Ubuntu Server 20.04 LTS (HVM), SSD Volume Type + "aws_ec2_instance_type": "t2.xlarge", + "aws_ec2_instance_username": "ubuntu", # For Ubuntu AMI, the default user name is ubuntu + "aws_repo": DEFAULT_REPO, + "aws_branch": STABLE_BRANCH, } diff --git a/packages/hagrid/hagrid/cli.py b/packages/hagrid/hagrid/cli.py --- a/packages/hagrid/hagrid/cli.py +++ b/packages/hagrid/hagrid/cli.py @@ -914,6 +914,22 @@ def check_gcloud_cli_installed() -> bool: return True +def check_aws_cli_installed() -> bool: + try: + result = subprocess.run( # nosec + ["aws", "--version"], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT + ) + if result.returncode != 0: + raise FileNotFoundError("AWS CLI not installed") + except Exception: # nosec + msg = "\nYou don't appear to have the AWS CLI installed! \n\n\ +Please install it and then retry your command.\ +\n\nInstallation Instructions: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html\n" + raise FileNotFoundError(msg) + + return True + + def check_gcloud_authed() -> bool: try: result = subprocess.run( # nosec @@ -964,6 +980,28 @@ def generate_gcloud_key_at_path(key_path: str) -> str: return key_path +def generate_aws_key_at_path(key_path: str, key_name: str) -> str: + key_path = os.path.expanduser(key_path) + if os.path.exists(key_path): + raise Exception(f"Can't generate key since path already exists. {key_path}") + else: + # TODO we need to do differently for powershell. + # Ex: aws ec2 create-key-pair --key-name MyKeyPair --query 'KeyMaterial' + # --output text | out-file -encoding ascii -filepath MyKeyPair.pem + + print(f"Creating AWS key pair with name {key_name} at path {key_path}..") + cmd = f"aws ec2 create-key-pair --key-name {key_name} --query 'KeyMaterial' --output text > {key_path}" + try: + subprocess.check_call(cmd, shell=True) # nosec + subprocess.check_call(f"chmod 400 {key_path}", shell=True) # nosec + except Exception as e: # nosec + print(f"Failed to create key: {e}") + if not os.path.exists(key_path): + raise Exception(f"AWS failed to generate key pair at: {key_path}") + + return key_path + + def generate_key_at_path(key_path: str) -> str: key_path = os.path.expanduser(key_path) if os.path.exists(key_path): @@ -1499,8 +1537,155 @@ def create_launch_cmd( raise MissingDependency(msg) elif host in ["aws"]: - print("Coming soon.") - return "" + check_aws_cli_installed() + + if DEPENDENCIES["ansible-playbook"]: + aws_region = ask( + question=Question( + var_name="aws_region", + question="In what region do you want to deploy the EC2 instance?", + default=arg_cache["aws_region"], + kind="string", + cache=True, + ), + kwargs=kwargs, + ) + aws_security_group_name = ask( + question=Question( + var_name="aws_security_group_name", + question="Name of the security group to be created?", + default=arg_cache["aws_security_group_name"], + kind="string", + cache=True, + ), + kwargs=kwargs, + ) + aws_security_group_cidr = ask( + question=Question( + var_name="aws_security_group_cidr", + question="What IP addresses to allow for incoming network traffic? 
Please use CIDR notation", + default=arg_cache["aws_security_group_cidr"], + kind="string", + cache=True, + ), + kwargs=kwargs, + ) + ec2_instance_type = ask( + question=Question( + var_name="aws_ec2_instance_type", + question="What EC2 instance type do you want to deploy?", + default=arg_cache["aws_ec2_instance_type"], + kind="string", + cache=True, + ), + kwargs=kwargs, + ) + + aws_key_name = ask( + question=Question( + var_name="aws_key_name", + question="Enter the name of the key pair to use to connect to the EC2 instance", + kind="string", + cache=True, + ), + kwargs=kwargs, + ) + + key_path_qn_str = ( + "Please provide the path of the private key to connect to the instance" + ) + key_path_qn_str += " (if it does not exist, this path corresponds to " + key_path_qn_str += "where you want to store the key upon creation)" + key_path_question = Question( + var_name="aws_key_path", + question=key_path_qn_str, + kind="path", + cache=True, + ) + try: + key_path = ask( + key_path_question, + kwargs=kwargs, + ) + except QuestionInputPathError as e: + print(e) + key_path = str(e).split("is not a valid path")[0].strip() + + create_key_question = Question( + var_name="aws_key_path", + question=f"Key {key_path} does not exist. Do you want AWS to make it? (y/n)", + default="y", + kind="yesno", + ) + create_key = ask( + create_key_question, + kwargs=kwargs, + ) + if create_key == "y": + key_path = generate_aws_key_at_path( + key_path=key_path, key_name=aws_key_name + ) + else: + raise QuestionInputError( + "Unable to create EC2 instance without key" + ) + + repo = ask( + Question( + var_name="aws_repo", + question="Repo to fetch source from?", + default=arg_cache["aws_repo"], + kind="string", + cache=True, + ), + kwargs=kwargs, + ) + branch = ask( + Question( + var_name="aws_branch", + question="Branch to monitor for updates?", + default=arg_cache["aws_branch"], + kind="string", + cache=True, + ), + kwargs=kwargs, + ) + + use_branch(branch=branch) + + username = arg_cache["aws_ec2_instance_username"] + auth = AuthCredentials(username=username, key_path=key_path) + + return create_launch_aws_cmd( + verb=verb, + region=aws_region, + ec2_instance_type=ec2_instance_type, + security_group_name=aws_security_group_name, + aws_security_group_cidr=aws_security_group_cidr, + key_path=key_path, + key_name=aws_key_name, + repo=repo, + branch=branch, + ansible_extras=kwargs["ansible_extras"], + kwargs=parsed_kwargs, + ami_id=arg_cache["aws_image_id"], + username=username, + auth=auth, + ) + + else: + errors = [] + if not DEPENDENCIES["ansible-playbook"]: + errors.append("ansible-playbook") + msg = "\nERROR!!! MISSING DEPENDENCY!!!" + msg += f"\n\nLaunching a Cloud VM requires: {' '.join(errors)}" + msg += "\n\nPlease follow installation instructions: " + msg += "https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#" + msg += "\n\nNote: we've found the 'conda' based installation instructions to work best" + msg += " (e.g. something lke 'conda install -c conda-forge ansible'). 
" + msg += "The pip based instructions seem to be a bit buggy if you're using a conda environment" + msg += "\n" + raise MissingDependency(msg) else: if DEPENDENCIES["ansible-playbook"]: if host != "localhost": @@ -2006,6 +2191,190 @@ def check_ip_for_ssh( return False +def create_aws_security_group( + security_group_name: str, region: str, snake_name: str +) -> str: + sg_description = f"{snake_name} security group" + create_cmd = f"aws ec2 create-security-group --group-name {security_group_name} " + create_cmd += f'--region {region} --description "{sg_description}" ' + sg_output = subprocess.check_output( # nosec + create_cmd, + shell=True, + ) + sg_output_dict = json.loads(sg_output) + if "GroupId" in sg_output_dict: + return sg_output_dict["GroupId"] + + return "" + + +def open_port_aws( + security_group_name: str, port_no: int, cidr: str, region: str +) -> None: + cmd = f"aws ec2 authorize-security-group-ingress --group-name {security_group_name} --protocol tcp " + cmd += f"--port {port_no} --cidr {cidr} --region {region}" + subprocess.check_call( # nosec + cmd, + shell=True, + ) + + +def extract_instance_ids_aws(stdout: bytes) -> TypeList: + output = stdout.decode("utf-8") + output_dict = json.loads(output) + instance_ids: TypeList = [] + if "Instances" in output_dict: + for ec2_instance_metadata in output_dict["Instances"]: + if "InstanceId" in ec2_instance_metadata: + instance_ids.append(ec2_instance_metadata["InstanceId"]) + + return instance_ids + + +def get_host_ips_given_instance_ids( + instance_ids: TypeList, timeout: int = 600, wait_time: int = 10 +) -> TypeList: + checks = int(timeout / wait_time) # 10 minutes in 10 second chunks + instance_ids_str = " ".join(instance_ids) + cmd = f"aws ec2 describe-instances --instance-ids {instance_ids_str}" + cmd += " --query 'Reservations[*].Instances[*].{StateName:State.Name,PublicIpAddress:PublicIpAddress}'" + cmd += " --output json" + while checks > 0: + checks -= 1 + time.sleep(wait_time) + desc_ec2_output = subprocess.check_output(cmd, shell=True) # nosec + instances_output_json = json.loads(desc_ec2_output.decode("utf-8")) + host_ips: TypeList = [] + all_instances_running = True + for reservation in instances_output_json: + for instance_metadata in reservation: + if instance_metadata["StateName"] != "running": + all_instances_running = False + break + else: + host_ips.append(instance_metadata["PublicIpAddress"]) + if all_instances_running: + return host_ips + # else, wait another wait_time seconds and try again + + return [] + + +def make_aws_ec2_instance( + ami_id: str, ec2_instance_type: str, key_name: str, security_group_name: str +) -> TypeList: + # From the docs: "For security groups in a nondefault VPC, you must specify the security group ID". + # Right now, since we're using default VPC, we can use security group name instead of ID. 
+ + ebs_size = 200 # gb + cmd = f"aws ec2 run-instances --image-id {ami_id} --count 1 --instance-type {ec2_instance_type} " + cmd += f"--key-name {key_name} --security-groups {security_group_name} " + tmp_cmd = rf"[{{\"DeviceName\":\"/dev/sdf\",\"Ebs\":{{\"VolumeSize\":{ebs_size},\"DeleteOnTermination\":false}}}}]" + cmd += f'--block-device-mappings "{tmp_cmd}"' + + host_ips: TypeList = [] + try: + print(f"Creating EC2 instance.\nRunning: {cmd}") + create_ec2_output = subprocess.check_output(cmd, shell=True) # nosec + instance_ids = extract_instance_ids_aws(create_ec2_output) + host_ips = get_host_ips_given_instance_ids(instance_ids=instance_ids) + except Exception as e: + print("failed", e) + + if not (host_ips): + raise Exception("Failed to create EC2 instance(s) or get public ip(s)") + + return host_ips + + +def create_launch_aws_cmd( + verb: GrammarVerb, + region: str, + ec2_instance_type: str, + security_group_name: str, + aws_security_group_cidr: str, + key_name: str, + key_path: str, + ansible_extras: str, + kwargs: TypeDict[str, Any], + repo: str, + branch: str, + ami_id: str, + username: str, + auth: AuthCredentials, +) -> TypeList[str]: + node_name = verb.get_named_term_type(name="node_name") + snake_name = str(node_name.snake_input) + create_aws_security_group(security_group_name, region, snake_name) + open_port_aws( + security_group_name=security_group_name, + port_no=80, + cidr=aws_security_group_cidr, + region=region, + ) # HTTP + open_port_aws( + security_group_name=security_group_name, + port_no=443, + cidr=aws_security_group_cidr, + region=region, + ) # HTTPS + open_port_aws( + security_group_name=security_group_name, + port_no=22, + cidr=aws_security_group_cidr, + region=region, + ) # SSH + if kwargs["jupyter"]: + open_port_aws( + security_group_name=security_group_name, + port_no=8888, + cidr=aws_security_group_cidr, + region=region, + ) # Jupyter + + host_ips = make_aws_ec2_instance( + ami_id=ami_id, + ec2_instance_type=ec2_instance_type, + key_name=key_name, + security_group_name=security_group_name, + ) + + launch_cmds: TypeList[str] = [] + + for host_ip in host_ips: + # get old host + host_term = verb.get_named_term_hostgrammar(name="host") + + # replace + host_term.parse_input(host_ip) + verb.set_named_term_type(name="host", new_term=host_term) + + if not bool(kwargs["provision"]): + print("Skipping automatic provisioning.") + print("VM created with:") + print(f"IP: {host_ip}") + print(f"Key: {key_path}") + print("\nConnect with:") + print(f"ssh -i {key_path} {username}@{host_ip}") + + else: + extra_kwargs = { + "repo": repo, + "branch": branch, + "ansible_extras": ansible_extras, + } + kwargs.update(extra_kwargs) + + # provision + host_up = check_ip_for_ssh(host_ip=host_ip) + if not host_up: + print(f"Warning: {host_ip} ssh not available yet") + launch_cmd = create_launch_custom_cmd(verb=verb, auth=auth, kwargs=kwargs) + launch_cmds.append(launch_cmd) + + return launch_cmds + + def make_vm_azure( node_name: str, resource_group: str,
Add support for AWS in Hagrid ## Feature Description Add support for deploying PySyft and Grid stack on AWS cloud VMs using HaGrid. Add support to deploy the stack either using AWS cli or boto3 client. - Ask for information for Region, Name, Type of machine, ssh password or key, username, etc. - Launch a VM on AWS - Create a security group - Open Ports 80 and 22, 443 - Integrate ansible to deploy the stack ## Is your feature request related to a problem? A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] ## What alternatives have you considered? A clear and concise description of any alternative solutions or features you've considered. ## Additional Context Add any other context or screenshots about the feature request here.
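For reference, the port-opening step requested above boils down to shelling out to the AWS CLI, which is what the patch does. A rough, self-contained sketch (the security group name, CIDR, and region are placeholder values, and an installed, configured AWS CLI is assumed):

```python
# Rough sketch of opening the listed ports on a security group via the AWS CLI.
import subprocess


def open_port(group_name, port, cidr, region):
    subprocess.check_call([
        "aws", "ec2", "authorize-security-group-ingress",
        "--group-name", group_name,
        "--protocol", "tcp",
        "--port", str(port),
        "--cidr", cidr,
        "--region", region,
    ])


if __name__ == "__main__":
    for port in (22, 80, 443):  # SSH, HTTP, HTTPS
        open_port("openmined_sg", port, "0.0.0.0/0", "us-east-1")
```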
2022-12-20T12:05:10
OpenMined/PySyft
7,200
OpenMined__PySyft-7200
[ "7197" ]
c2347c4df6e3a50608472163c7b0b500977f671f
diff --git a/packages/syft/src/syft/core/adp/data_subject_ledger.py b/packages/syft/src/syft/core/adp/data_subject_ledger.py --- a/packages/syft/src/syft/core/adp/data_subject_ledger.py +++ b/packages/syft/src/syft/core/adp/data_subject_ledger.py @@ -335,7 +335,7 @@ def _get_optimal_alpha_for_constant( f = self._get_fake_rdp_func(constant=constant) f2 = self._get_alpha_search_function(rdp_compose_func=f) results = minimize_scalar( - f2, method="Brent", bracket=(1, 2), bounds=[1, np.inf] + f2, method="Brent", bracket=(1, 2)#, bounds=[1, np.inf] ) return results.x, results.fun
Fix ledger test brent method ## Description FAILED tests/syft/core/adp/data_subject_ledger_test.py::test_cache_indexing_correctness - ValueError: Use of `bounds` is incompatible with 'method=Brent'. FAILED tests/syft/core/adp/data_subject_ledger_test.py::test_cache_bypass - ValueError: Use of `bounds` is incompatible with 'method=Brent'. ## How to Reproduce See CI: https://github.com/OpenMined/PySyft/actions/runs/3842992045/jobs/6544847611 `pytest -s -m 'fast or slow' --cov syft --cov-fail-under 65 -n auto --durations=50`
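The failure is a SciPy API constraint: newer SciPy versions reject `bounds` when `method='Brent'` is used, so the patch keeps only the `bracket`. A small self-contained illustration (the quadratic below is a stand-in for the real alpha-search function):

```python
# Stand-in objective; the real code minimizes an RDP alpha-search function.
from scipy.optimize import minimize_scalar


def f2(alpha):
    return (alpha - 1.5) ** 2


# Passing bounds together with method="Brent" raises
# "Use of `bounds` is incompatible with 'method=Brent'" on newer SciPy,
# so only the bracket is supplied:
results = minimize_scalar(f2, method="Brent", bracket=(1, 2))
print(results.x, results.fun)
```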
2023-01-05T10:14:05
MTES-MCT/aides-territoires
174
MTES-MCT__aides-territoires-174
[ "150" ]
693841e3ed5fbd679d08238931b565d37b0e3cce
diff --git a/src/accounts/forms.py b/src/accounts/forms.py --- a/src/accounts/forms.py +++ b/src/accounts/forms.py @@ -39,6 +39,13 @@ def clean_email(self): class LoginForm(AuthenticationForm): + error_messages = { + 'invalid_login': _( + 'Please enter a correct email address and password.' + ), + 'inactive': _('This account is inactive.'), + } + username = forms.EmailField( label=_('Your email address'), required=True)
Wrong article and wrong letter case in the error message for the email address On the `/comptes/connexion/` page, we see: ``` Saisissez un Adresse e-mail… ``` whereas we should have: ``` Saisissez une adresse e-mail… ``` ![image](https://user-images.githubusercontent.com/6030745/90972240-a399de00-e517-11ea-89e7-85a6d0445ee2.png) The base error message comes from [Django's translation files](https://github.com/django/django/blob/6376278a904e2f8b34893a7166508dfd205fdceb/django/contrib/auth/locale/fr/LC_MESSAGES/django.po): ```py msgid "" "Please enter a correct %(username)s and password. Note that both fields may " "be case-sensitive." msgstr "" "Saisissez un %(username)s et un mot de passe valides. Remarquez que chacun " "de ces champs est sensible à la casse (différenciation des majuscules/" "minuscules)." ``` And in place of the `%(username)s` placeholder, this project uses `Adresse e-mail`. In the current project's translation file (`django.po`), we see: ```py msgid "Email address" msgstr "Adresse e-mail" ```
I suggest simply overriding the framework's translation with the exact text we need: ``` msgid "" "Please enter a correct %(username)s and password. Note that both fields may " "be case-sensitive." msgstr "" "Saisissez une adresse e-mail et un mot de passe valides. Remarquez que chacun " "de ces champs est sensible à la casse (différenciation des majuscules/" "minuscules)." ``` Here is what I get in my local environment: ![image](https://user-images.githubusercontent.com/6030745/90973103-22464980-e51f-11ea-9eb1-8702f94d1202.png)
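The merged diff takes a slightly different route from the suggestion above: instead of overriding the framework translation, it overrides the form-level `error_messages`. A sketch of that approach as it appears in the patch (running it requires a configured Django project; the `gettext_lazy` import is an assumption about how `_` is defined in this codebase):

```python
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import gettext_lazy as _


class LoginForm(AuthenticationForm):
    # Form-level messages take precedence over the framework translation,
    # so the badly worded default string is never shown.
    error_messages = {
        "invalid_login": _("Please enter a correct email address and password."),
        "inactive": _("This account is inactive."),
    }

    username = forms.EmailField(label=_("Your email address"), required=True)
```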
2020-09-17T13:43:35
apple/coremltools
207
apple__coremltools-207
[ "198" ]
4a0d816a0013aba530c067b3a2b2023061d2f620
diff --git a/coremltools/models/model.py b/coremltools/models/model.py --- a/coremltools/models/model.py +++ b/coremltools/models/model.py @@ -76,8 +76,14 @@ def _get_proxy_from_spec(filename): if _has_custom_layer(spec): # custom layers can't be supported directly by compiling and loading the model here return None - - return _MLModelProxy(filename) + try: + return _MLModelProxy(filename) + except RuntimeError as e: + warnings.warn( + "You will not be able to run predict() on this Core ML model." + + "Underlying exception message was: " + str(e), + RuntimeWarning) + return None else: return None
diff --git a/coremltools/test/test_model.py b/coremltools/test/test_model.py --- a/coremltools/test/test_model.py +++ b/coremltools/test/test_model.py @@ -134,5 +134,16 @@ def test_future_version(self): model.predict(1) self.spec.specificationVersion = 1 - - + @unittest.skipIf(macos_version() >= (10, 13), 'Only supported on macOS 10.13-') + def test_MLModel_warning(self): + self.spec.specificationVersion = 3 + import warnings + with warnings.catch_warnings(record=True) as w: + # Cause all warnings to always be triggered. + warnings.simplefilter("always") + model = MLModel(self.spec) + assert len(w) == 1 + assert issubclass(w[-1].category, RuntimeWarning) + assert "not able to run predict()" in str(w[-1].message) + self.spec.specificationVersion = 1 + model = MLModel(self.spec)
Error reading protobuf spec. validator error: The .mlmodel supplied is of version 3, intended for a newer version of Xcode. This version of Xcode supports model version 2 or earlier. Ported from https://github.com/apple/turicreate/issues/701 On Linux, loading a spec into an `MLModel` with any specification version works, because it doesn't attempt to validate/compile the model, since it assumes Core ML doesn't exist on the system anyway. On latest macOS with latest Xcode, loading a spec into an `MLModel` with any supported specification version works, because model validation/compilation succeeds. However, loading a spec into an `MLModel` with specification version 3 on macOS 10.13 with Xcode 9.4 fails with the error: ``` Error reading protobuf spec. validator error: The .mlmodel supplied is of version 3, intended for a newer version of Xcode. This version of Xcode supports model version 2 or earlier. ``` Expected: This should work on older macOS/Xcode the same way it works on Linux, perhaps with a runtime warning that predict won't be available. Actual: Since this throws a Python exception, it interrupts execution of the caller and in the case of Turi Create, results in an unusable exported model/spec.
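The patch above resolves this by downgrading the hard failure to a warning: if compiling the spec raises a `RuntimeError` (for example because the local Xcode only supports an older specification version), the proxy is skipped and `predict()` simply becomes unavailable. A condensed sketch of that pattern (`proxy_cls` is a placeholder for the internal `_MLModelProxy`):

```python
import warnings


def get_proxy_from_spec(filename, proxy_cls):
    # proxy_cls stands in for coremltools' internal _MLModelProxy.
    try:
        return proxy_cls(filename)
    except RuntimeError as e:
        warnings.warn(
            "You will not be able to run predict() on this Core ML model. "
            "Underlying exception message was: " + str(e),
            RuntimeWarning,
        )
        return None
```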
2018-06-30T00:11:12
apple/coremltools
298
apple__coremltools-298
[ "141" ]
e5ce33c6396077c0b267180a92573355a8ab53eb
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ install_requires=[ 'numpy >= 1.10.0', 'protobuf >= 3.1.0', - 'six==1.10.0' + 'six>=1.10.0' ], entry_points = { 'console_scripts': ['coremlconverter = coremltools:_main']
Why is six pinned to 1.10.0? Is there any reason for [six to be pinned to version 1.10.0](https://github.com/apple/coremltools/blob/master/setup.py#L44)? This sometimes causes transitive dependency issues. /cc @mats-claassen
coremltools 0.8 has requirement six==1.10.0, but you'll have six 1.11.0 which is incompatible.
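The fix is a one-character change in the requirement specifier. A minimal sketch of the relaxed pin in a `setup.py` (the package name is a placeholder; the other requirements are copied from the diff for context):

```python
from setuptools import setup

setup(
    name="example-package",  # placeholder; the real package is coremltools
    install_requires=[
        "numpy >= 1.10.0",
        "protobuf >= 3.1.0",
        "six>=1.10.0",  # '>=' instead of '==' avoids transitive version conflicts
    ],
)
```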
2018-11-22T15:28:07
apple/coremltools
508
apple__coremltools-508
[ "505" ]
a62b55e168b609d0448dccea73e640d51e919c38
diff --git a/coremltools/models/model.py b/coremltools/models/model.py --- a/coremltools/models/model.py +++ b/coremltools/models/model.py @@ -353,7 +353,7 @@ def predict(self, data, useCPUOnly=False, **kwargs): else: raise Exception('Unable to load CoreML.framework. Cannot make predictions.') - def visualize_spec(self, port=None, input_shape_dict=None): + def visualize_spec(self, port=None, input_shape_dict=None, title='CoreML Graph Visualization'): """ Visualize the model. @@ -367,6 +367,9 @@ def visualize_spec(self, port=None, input_shape_dict=None): are 1 i.e. (1, 1, C, H, W). If either is not 1, then provide full input shape + title: str + Title for the visualized model + Returns ------- @@ -479,6 +482,10 @@ def visualize_spec(self, port=None, input_shape_dict=None): web_dir = _os.path.join(_os.path.dirname(coremltools.__file__), 'graph_visualization') with open('{}/model.json'.format(web_dir), 'w') as file: - _json.dump(cy_data, file) + model_data = { + 'title': title, + 'cy_data': cy_data, + } + _json.dump(model_data, file) _start_server(port, web_dir)
Custom Title in Spec Visualization ## 🌱 Describe your Feature Request I'm a big fan of the spec visualization built into coremltools (i.e. calling `my_mlmodel.visualize_spec()`). Using this feature has saved me so much time. However, when examining multiple specs in different web browser tabs, things can get confusing. The title is **always**: `CoreML Graph Visualization`. This is the title of the tab/webpage, as well as the title displayed in the body, at the top of the webpage. If you're looking at more than one spec, it's easy to get confused about which one is which. I think it would be great if `visualize_spec` took an optional parameter called `title`. The default value for this parameter could be `"CoreML Graph Visualization"`.
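With the `title` parameter added by the patch, a hypothetical session comparing two specs could look like this (the model paths and titles are placeholders, and `visualize_spec` reflects the coremltools API at the time of this request):

```python
import coremltools

model_a = coremltools.models.MLModel("model_a.mlmodel")  # placeholder paths
model_b = coremltools.models.MLModel("model_b.mlmodel")

# Each browser tab now carries its own title instead of the shared default
# "CoreML Graph Visualization".
model_a.visualize_spec(title="Model A (baseline)")
model_b.visualize_spec(title="Model B (pruned)")
```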
2019-10-27T15:54:28
apple/coremltools
617
apple__coremltools-617
[ "591" ]
963d9d3c166403de55ae91002d500c9f806d7300
diff --git a/coremltools/converters/nnssa/coreml/graph_pass/mlmodel_passes.py b/coremltools/converters/nnssa/coreml/graph_pass/mlmodel_passes.py --- a/coremltools/converters/nnssa/coreml/graph_pass/mlmodel_passes.py +++ b/coremltools/converters/nnssa/coreml/graph_pass/mlmodel_passes.py @@ -236,4 +236,71 @@ def _remove_disconnected_layers_rec(nn_spec): out_degree = _get_blob_out_degree(spec) nn_spec = _get_nn_spec(spec) # Initiate removal from high level Neural Network spec - _remove_disconnected_layers_rec(nn_spec) \ No newline at end of file + _remove_disconnected_layers_rec(nn_spec) + +def remove_redundant_transposes(spec): + """ + Removes layers from model specification that are back to back transposes + that compose to the identity. + """ + + def _delete_layers(nn_spec, layers_to_delete): + """ + Given a neural network spec and pairs of transposes to remove, rewire + the network to bypass those transposes and remove them from the spec. + """ + nn_layers = nn_spec.layers + # First pass: rewire layers to bypass those that will be deleted. + for _layer_pair in layers_to_delete: + for _nn_layer in nn_layers: + if _nn_layer in _layer_pair: + # Skip the layers we're going to delete. + continue + if _layer_pair[1].name in _nn_layer.input: + # This layer has one of the deleted as an input. Replace it + # with the deleted layer's input. + idx = [i for i,n in enumerate(_nn_layer.input) if n == _layer_pair[1].name][0] + _nn_layer.input[idx] = _layer_pair[0].input[0] + # Second pass: delete the layers. + for _layer_pair in layers_to_delete: + nn_layers.remove(_layer_pair[0]) + nn_layers.remove(_layer_pair[1]) + + def _find_redundant_transposes(nn_spec): + """ + Search the neural network spec for pairs of transposes that together + are the identity, and return a list of those pairs. + """ + nn_layers = nn_spec.layers + layers_to_delete = [] + # This holds the axes definition if the previous layer was a transpose, + # otherwise it is None. + previous_transpose = None + for _layer in nn_layers: + layer_type = _layer.WhichOneof('layer') + if layer_type != 'transpose' or len(_layer.output) != 1: + previous_transpose = None + continue + + if not previous_transpose: + previous_transpose = {'layer': _layer, 'axes':_layer.transpose.axes} + else: + # This layer and the previous are transposes. Check if they're each + # other's inverses. + this_transpose = _layer.transpose.axes + composed = [previous_transpose['axes'][i] for i in this_transpose] + if all([ax == i for i, ax in enumerate(composed)]): + # These transpose ops are redundant, remove them. + layers_to_delete.append((previous_transpose['layer'], _layer)) + else: + # Compare this transpose against the next layer. + # TODO: Should we try to combine a sequence if transposes + # into one op? 
+ previous_transpose = {'layer': _layer, 'axes':_layer.transpose.axes} + return layers_to_delete + + nn_spec = _get_nn_spec(spec) + layers_to_delete = _find_redundant_transposes(nn_spec) + _delete_layers(nn_spec, layers_to_delete) + if len(layers_to_delete) > 0: + print('{} transpose pairs deleted'.format(len(layers_to_delete))) diff --git a/coremltools/converters/nnssa/coreml/ssa_converter.py b/coremltools/converters/nnssa/coreml/ssa_converter.py --- a/coremltools/converters/nnssa/coreml/ssa_converter.py +++ b/coremltools/converters/nnssa/coreml/ssa_converter.py @@ -219,7 +219,9 @@ def ssa_convert(ssa, mlmodel_spec.description.output.extend(modified_output_features_list) # MLModel passes - mlmodel_passes = [remove_disconnected_layers] + mlmodel_passes = [remove_disconnected_layers, + remove_redundant_transposes, + ] for p in mlmodel_passes: p(mlmodel_spec) diff --git a/coremltools/models/neural_network/builder.py b/coremltools/models/neural_network/builder.py --- a/coremltools/models/neural_network/builder.py +++ b/coremltools/models/neural_network/builder.py @@ -3316,8 +3316,9 @@ def set_pre_processing_parameters(self, image_input_names=None, is_bgr=False, _, channels, height, width = [array_shape[e] for e in input_indices] if image_format == 'NHWC': - # If input format is 'NHWC', then add transpose - # after the input and replace all use of input + # If input format is 'NHWC' for TF model, it will be + # 'NCHW' for CoreML model. Therefore, add transpose to + # NHWC after the input and replace all use of input # with output of transpose axes = [1, 2, 0] if len(array_shape) == 4:
diff --git a/coremltools/converters/tensorflow/test/test_tf_2x.py b/coremltools/converters/tensorflow/test/test_tf_2x.py --- a/coremltools/converters/tensorflow/test/test_tf_2x.py +++ b/coremltools/converters/tensorflow/test/test_tf_2x.py @@ -670,6 +670,34 @@ def test_softplus(self): inputs={input_name: (1, 28, 28)}, outputs=[output_name], decimal=3) + def test_redundant_transpose(self): + H = 224 + W = 224 + C = 3 + inputs = tf.keras.layers.Input(shape=(H, W, C), batch_size=1) + out = tf.keras.layers.Conv2D( + filters=4, + kernel_size=3, + )(inputs) + model = tf.keras.Model(inputs, out) + input_name = model.inputs[0].name.split(":")[0] + input_shape = (1, H, W, C) + output_name = model.outputs[0].name.split(':')[0].split('/')[-1] + + model.save(self.model_path, include_optimizer=False, save_format="h5") + + mlmodel = coremltools.converters.tensorflow.convert( + self.model_path, + inputs={input_name: input_shape}, + image_input_names=input_name, + outputs=[output_name], + ) + + spec = mlmodel.get_spec() + output_names = [layer.name for layer in spec.neuralNetwork.layers] + expected_names = [u'model/conv2d/Conv2D', u'Identity'] + np.testing.assert_array_equal(output_names, expected_names) + if __name__ == '__main__': np.random.seed(1984) diff --git a/coremltools/test/neural_network/test_graph_passes.py b/coremltools/test/neural_network/test_graph_passes.py --- a/coremltools/test/neural_network/test_graph_passes.py +++ b/coremltools/test/neural_network/test_graph_passes.py @@ -5,7 +5,7 @@ from coremltools.models import MLModel from coremltools.models.neural_network.printer import print_network_spec from coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes import \ - remove_disconnected_layers, transform_conv_crop + remove_disconnected_layers, transform_conv_crop, remove_redundant_transposes import copy import pytest @@ -203,6 +203,81 @@ def test_conv_crop_bn_relu_to_conv_bn_relu_crop(self): np.testing.assert_equal('activation', spec.layers[2].WhichOneof('layer')) np.testing.assert_equal('crop', spec.layers[3].WhichOneof('layer')) + def test_redundant_transposes(self): + + def _build_and_test_network(input_size, transpose_layers, expected_layers): + """ + Helper function for testing transpose removal. + + Args: + input_size: Size of the input network tensor. + transpose_layers: Array of transpose axes definitions. + expected_layers: Array of indices into transpose_layers indicating + which of the transpose layers should be present after the + graph pass. + """ + input_features = [('data', datatypes.Array(*input_size))] + output_features = [('out', None)] + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + last_layer = 'data' + for idx, axes in enumerate(transpose_layers): + name = 't{}'.format(idx) + if idx == len(transpose_layers) - 1: + output_name = 'out' + else: + output_name = name + '_out' + builder.add_transpose(name=name, + axes=axes, + input_name=last_layer, + output_name=output_name) + last_layer = output_name + + spec = builder.spec.neuralNetwork + # Check the network before the graph pass. + for idx in range(len(transpose_layers)): + np.testing.assert_equal('transpose', spec.layers[idx].WhichOneof('layer')) + # Run the removal pass. + remove_redundant_transposes(builder.spec) + # Verify only the expected layers remain. 
+ np.testing.assert_equal(len(spec.layers), len(expected_layers)) + for output_layer_idx, input_layer_idx in enumerate(expected_layers): + np.testing.assert_equal( + 'transpose', + spec.layers[output_layer_idx].WhichOneof('layer') + ) + np.testing.assert_array_equal( + transpose_layers[input_layer_idx], + spec.layers[output_layer_idx].transpose.axes + ) + + _build_and_test_network( + input_size=[1, 10, 10], + # These transposes together are the identity. + transpose_layers=[[2, 0, 1], [1, 2, 0]], + expected_layers=[], + ) + + _build_and_test_network( + input_size=[1, 10, 10], + # These transposes are not inverses. + transpose_layers=[[2, 0, 1], [2, 0, 1]], + expected_layers=[0, 1], + ) + + _build_and_test_network( + input_size=[1, 1, 10, 10, 3], + # First two are the identity, then an extra. + transpose_layers=[[2, 4, 1, 0, 3], [3, 2, 0, 4, 1], [1, 0, 2, 3, 4]], + expected_layers=[2], + ) + + _build_and_test_network( + input_size=[1, 1, 10, 10, 3], + # First is okay, next two are the identity. + transpose_layers=[[1, 0, 2, 3, 4], [2, 4, 1, 0, 3], [3, 2, 0, 4, 1]], + expected_layers=[0], + ) + if __name__ == '__main__': RUN_ALL_TESTS = True
Double transpose ops when using image inputs on a TF 2 model ## 🐞Describe the bug - There are duplicate transpose ops that should be removed. ## System environment (please complete the following information): - coremltools version (e.g., 3.0b5): 3.20 - OS (e.g., MacOS, Linux): MacOS - any other relevant information: - e.g. using TF 2 ## Additional context ``` import tensorflow as tf import coremltools import tempfile inpt = tf.keras.layers.Input(shape=(357, 257, 3), batch_size=1) out = tf.keras.layers.Conv2D( filters=4, kernel_size=3, )(inpt) model = tf.keras.Model(inpt, out) input_name = model.inputs[0].name.split(":")[0] input_shape = (1, 357, 257, 3) print(input_name) temp = tempfile.NamedTemporaryFile(suffix=".h5") model.save(temp.name, include_optimizer=False, save_format="h5") mlmodel = coremltools.converters.tensorflow.convert( temp.name, inputs={input_name: input_shape}, image_scale=1.0 / 255.0, red_bias=-0.5, green_bias=-0.5, blue_bias=-0.5, image_input_names=input_name ) mlmodel.save('test.mlmodel') ``` ![image](https://user-images.githubusercontent.com/1422280/72943701-c1665100-3d44-11ea-949c-90c1c850a489.png) I believe this happens because the transpose op is added in the coremltools builder and there isn't an optimization pass to remove it
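The graph pass added for this issue detects exactly that pattern: two consecutive transposes whose composed axes form the identity permutation. A standalone illustration of the check (the helper name is made up; the composition rule matches the pass in the diff above):

```python
def composes_to_identity(first_axes, second_axes):
    # Composing transpose(first_axes) followed by transpose(second_axes)
    # yields the identity iff first_axes[second_axes[i]] == i for all i.
    composed = [first_axes[i] for i in second_axes]
    return all(axis == i for i, axis in enumerate(composed))


assert composes_to_identity([2, 0, 1], [1, 2, 0])      # inverse pair -> removable
assert not composes_to_identity([2, 0, 1], [2, 0, 1])  # not inverses -> kept
```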
2020-02-03T19:16:57
apple/coremltools
662
apple__coremltools-662
[ "623" ]
c195d1e906b71c9399e251373a5456975a26178b
diff --git a/coremltools/converters/nnssa/coreml/graph_pass/op_removals.py b/coremltools/converters/nnssa/coreml/graph_pass/op_removals.py --- a/coremltools/converters/nnssa/coreml/graph_pass/op_removals.py +++ b/coremltools/converters/nnssa/coreml/graph_pass/op_removals.py @@ -70,23 +70,28 @@ def _remove_internal_identity_nodes(nnssa): delete_count = 0 for fn_key in list(nnssa.functions.keys()): f = nnssa.functions[fn_key] - keys = list(f.graph.keys()) - for k in keys: - if k not in f.graph: + for name in list(f.graph.keys()): + if name not in f.graph: continue - node = f.graph[k] - if len(node.inputs) != 1 or len(node.outputs) != 1: + node = f.graph[name] + + # Check if the node is in graph outputs + if len(node.inputs) != 1: + continue + if len(node.outputs) == 0 and len(node.control_outputs) == 0: continue + + # Remove identity node inp_node = f.graph[node.inputs[0]] if node.op == 'Identity' and inp_node.op != 'get_tuple': delete_count += 1 - parent_name = f.graph[k].inputs[0] - disconnect_edge(f.graph, parent_name, k) - for control_input in f.graph[k].control_inputs: - replace_control_dest(f.graph, control_input, k, parent_name) + parent_name = f.graph[name].inputs[0] + disconnect_edge(f.graph, parent_name, name) + for control_input in f.graph[name].control_inputs: + replace_control_dest(f.graph, control_input, name, parent_name) - replace_node(f.graph, k, parent_name) # join parent to children - delete_node(f.graph, k) + replace_node(f.graph, name, parent_name) # join parent to children + delete_node(f.graph, name) return delete_count
diff --git a/coremltools/converters/tensorflow/test/test_tf_2x.py b/coremltools/converters/tensorflow/test/test_tf_2x.py --- a/coremltools/converters/tensorflow/test/test_tf_2x.py +++ b/coremltools/converters/tensorflow/test/test_tf_2x.py @@ -325,6 +325,92 @@ class TestTensorflow2Model(unittest.TestCase): def setUp(self): self.saved_model_dir = tempfile.mkdtemp() + def test_two_layers_control_dependency(self): + + class model(tf.Module): + + def __init__(self, name=None): + super(model, self).__init__(name=name) + self.w = tf.constant(tf.random.normal(shape=[1, 10]), name='bias', dtype=tf.float32) + + @tf.function(input_signature=[tf.TensorSpec(shape=[1, 10], dtype=tf.float32), + tf.TensorSpec(shape=[1, 10], dtype=tf.float32), + tf.TensorSpec(shape=[1, 10], dtype=tf.float32)]) + def __call__(self, x, y, z): + with tf.control_dependencies([x]): + with tf.control_dependencies([y]): + return self.w + z + model = model() + tf.saved_model.save(model, self.saved_model_dir) + mlmodel = coremltools.converters.tensorflow.convert( + self.saved_model_dir, + input={'x':[1,10], 'y':[1,10], 'z':[1,10]}, + outputs=['Identity'] + ) + + x, y, z = np.random.rand(1,10), np.random.rand(1, 10), np.random.rand(1, 10) + tf_output = model(x, y, z).numpy() + ml_output = mlmodel.predict({'x':x, 'y':y, 'z':z})['Identity'] + + np.testing.assert_almost_equal(tf_output, ml_output, decimal=3) + + + def test_two_control_inputs(self): + + class model(tf.Module): + + def __init__(self, name=None): + super(model, self).__init__(name=name) + self.w = tf.constant(tf.random.normal(shape=[1, 10]), name='bias', dtype=tf.float32) + + @tf.function(input_signature=[tf.TensorSpec(shape=[1, 10], dtype=tf.float32), + tf.TensorSpec(shape=[1, 10], dtype=tf.float32), + tf.TensorSpec(shape=[1, 10], dtype=tf.float32)]) + def __call__(self, x, y, z): + with tf.control_dependencies([x, y]): + return self.w + z + model = model() + tf.saved_model.save(model, self.saved_model_dir) + mlmodel = coremltools.converters.tensorflow.convert( + self.saved_model_dir, + input={'x':[1,10], 'y':[1,10], 'z':[1,10]}, + outputs=['Identity'] + ) + + x, y, z = np.random.rand(1,10), np.random.rand(1, 10), np.random.rand(1, 10) + tf_output = model(x, y, z).numpy() + ml_output = mlmodel.predict({'x':x, 'y':y, 'z':z})['Identity'] + + np.testing.assert_almost_equal(tf_output, ml_output, decimal=3) + + + def test_control_inputs_with_node_with_no_outputs(self): + + class model(tf.Module): + + def __init__(self, name=None): + super(model, self).__init__(name=name) + self.w = tf.constant(tf.random.normal(shape=[1, 10]), name='bias', dtype=tf.float32) + + @tf.function(input_signature=[tf.TensorSpec(shape=[1, 10], dtype=tf.float32), + tf.TensorSpec(shape=[1, 10], dtype=tf.float32)]) + def __call__(self, x, y): + with tf.control_dependencies([x]): + return self.w + y + model = model() + tf.saved_model.save(model, self.saved_model_dir) + mlmodel = coremltools.converters.tensorflow.convert( + self.saved_model_dir, + input={'x':[1,10], 'y':[1,10]}, + outputs=['Identity'] + ) + + x, y = np.random.rand(1,10), np.random.rand(1, 10) + tf_output = model(x, y).numpy() + ml_output = mlmodel.predict({'x':x, 'y':y})['Identity'] + + np.testing.assert_almost_equal(tf_output, ml_output, decimal=3) + def test_save_and_load_low_level_model(self): class model(tf.Module): def __init__(self, in_features, output_features, name=None):
ValueError: list.remove(x): x not in list ## 🐞Describe the bug I'm training a multi-input CNN (`tf.keras`) with `TF2.0.0`. I keep getting `ValueError: list.remove(x): x not in list`, but I don't understand what's the list, what's x, do I need to rename nodes? I can see that the converter starts, but the error comes after some conversion. I tried `tfcoreml` and converting from saved weights with no success. Is there something I'm missing? This is the process I follow for conversion: ```python tf.keras.backend.clear_session() tf.keras.backend.set_learning_phase(0) # model is a .h5 file produced from a model.fit callback (modelcheckpoint) # I have a custom dense layer that has __init__, build, call and config implemented model = tf.keras.models.load_model(model, custom_objects = {'DenseLayer': DenseLayer}) tf.saved_model.save(model, export_dir = pb_model) # save the model as pb for conversion # my target ios is 12 coremltools_model = coremltools.converters.tensorflow.convert( _PB_MODEL, input_name_shape_dict={ 'B': [1, 20, 40, 1], 'G': [1, 20, 40, 1], 'R': [1, 20, 40, 1], 'X': [1, 2], 'Y': [1, 4]}, output_feature_names=['dense_layer_18/Identity'], minimum_ios_deployment_target='12' ) coremltools_model.save(ml_model) ``` ## Trace ```python Converting pb model to .mlmodel...... 0 assert nodes deleted 872 nodes deleted 36 nodes deleted 0 nodes deleted [Op Fusion] fuse_bias_add() deleted 24 nodes. 77 identity nodes deleted Traceback (most recent call last): File "tf2coreml.py", line 198, in <module> convertModel(model=_TRAIN_MODEL, ml_model=_ML_MODEL, pb_model=_PB_MODEL) File "tf2coreml.py", line 174, in convertModel minimum_ios_deployment_target='12' File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/coremltools/converters/tensorflow/_tf_converter.py", line 193, in convert optional_inputs=optional_inputs) File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/coremltools/converters/nnssa/coreml/ssa_converter.py", line 130, in ssa_convert p(ssa) File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/coremltools/converters/nnssa/coreml/graph_pass/op_removals.py", line 20, in remove_no_ops_and_shift_control_dependencies f.graph[each_control_input].control_outputs.remove(node.name) ValueError: list.remove(x): x not in list ``` ## System environment (please complete the following information): - coremltools version: 3.2 - OS: Ubuntu 16.04 LTS (AWS EC2) - Python version: 3.6.5 - any other relevant information: - Tensorflow: 2.0.0
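The crash itself is ordinary Python list behavior: `list.remove(x)` raises `ValueError` when `x` is absent. A generic illustration of the failure and a membership guard (the real fix in the diff above goes further and skips identity nodes that have no outputs at all):

```python
control_outputs = ["node_a", "node_b"]
node_name = "node_c"

try:
    control_outputs.remove(node_name)    # ValueError: list.remove(x): x not in list
except ValueError:
    pass

if node_name in control_outputs:         # membership guard avoids the exception
    control_outputs.remove(node_name)
```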
2020-03-06T19:59:06
apple/coremltools
837
apple__coremltools-837
[ "823", "823" ]
37e619d99bf603d2cb9ea0839fa3ebe649996b0a
diff --git a/coremltools/converters/mil/frontend/torch/ops.py b/coremltools/converters/mil/frontend/torch/ops.py --- a/coremltools/converters/mil/frontend/torch/ops.py +++ b/coremltools/converters/mil/frontend/torch/ops.py @@ -551,6 +551,8 @@ def max_pool2d(context, node): x = inputs[0] kernel_sizes = inputs[1] strides = inputs[2] + if strides.op.op_type == "const" and (not list(strides.val)): + strides = mb.const(val=kernel_sizes.val, name=strides.name) pad_type = "custom" # Need to explicity state L-R, T-B pad @@ -1527,6 +1529,8 @@ def _avg_pool(context, node, inputs): x = inputs[0] kernel_sizes = inputs[1] strides = inputs[2] + if strides.op.op_type == "const" and (not list(strides.val)): + strides = mb.const(val=kernel_sizes.val, name=strides.name) pad_type = "custom" # Need to explicity state L-R, T-B pad pad = inputs[3]
Todays' beta2 checkin breaks old issue #759 F.max_pool2d where stride is NOT explicitly specified Am comparing the old test case testPool from issue #759 with today's git version commit 37e619d99bf603d2cb9ea0839fa3ebe649996b0a (HEAD -> master, tag: 4.0b2, origin/master, origin/HEAD) versus from a few days ago July 22 commit 705244e2be26c3fb7881fd7a731d25a55f5e4765 test case from issue #759 now fails again. (I cannot attach it atm. Seems AWS issue) reproducible:--------------- yes log:--------------- Torch version : 1.5.1 CoreML tools version : 4.0b2 Converting Frontend ==> MIL Ops: 0%| | 0/6 [00:00<?, ? ops/s]Converting op 13 : constant : shape = n/a Converting op 14 : constant : shape = n/a Converting op 15 : constant : shape = n/a Converting op 16 : constant : shape = n/a Converting op 11 : constant : shape = n/a Converting op 12 : max_pool2d : shape = (1, 1, 28, 28) Converting Frontend ==> MIL Ops: 83%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▌ | 5/6 [00:00<00:00, 4629.47 ops/s] Traceback (most recent call last): File "testPool.py", line 94, in <module> inputs=[ ct.TensorType(name="input1", shape=dummy_input.shape) ], File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/_converters_entry.py", line 299, in convert **kwargs File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/converter.py", line 120, in _convert prog = frontend_converter(model, **kwargs) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/converter.py", line 62, in __call__ return load(*args, **kwargs) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/frontend/torch/load.py", line 86, in load raise e File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/frontend/torch/load.py", line 76, in load prog = converter.convert() File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/frontend/torch/converter.py", line 221, in convert convert_nodes(self.context, self.graph) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/frontend/torch/ops.py", line 64, in convert_nodes _add_op(context, node) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/frontend/torch/ops.py", line 584, in max_pool2d name=node.name, File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/mil/ops/registry.py", line 62, in add_op return cls._add_op(op_cls, **kwargs) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/mil/builder.py", line 188, in _add_op new_op = op_cls(**kwargs) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/mil/ops/defs/pool.py", line 180, in __init__ super(max_pool, self).__init__(**kwargs) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/mil/ops/defs/pool.py", line 25, in __init__ super(Pooling, self).__init__(**kwargs) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/mil/operation.py", line 148, in __init__ self._validate_and_set_inputs(**kwargs) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/mil/operation.py", line 360, in _validate_and_set_inputs parsed_inputs = self.input_spec.parse_inputs(kwargs) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/mil/input_type.py", line 
67, in parse_inputs raise TypeError(msg) TypeError: Input strides has type <class 'coremltools.converters.mil.mil.types.type_tensor.tensor.<locals>.tensor'> not compatible with expected type IntTensorInputType
As AWS does not allow me to attach a test case, here is the code to verify with 4.0b2 vs previous import torch import torch.nn as nn import torch.nn.functional as F import coremltools as ct class small_model(nn.Module): def __init__(self): super(small_model, self).__init__() self.pool1 = nn.MaxPool2d(2,stride=1) # Pool 2x2, explicit stride self.pool1b = nn.MaxPool2d(2) # Pool 2x2, implicit stride def forward(self, x): # Functional pooling y = F.max_pool2d(x, 2) # BOOM, BUG, implicit stride=1 #y = F.max_pool2d(x, 2, stride=1) # OK, explicit stride=1 # Layer-based pooling #y = self.pool1(x) # OK #y = self.pool1b(x) # OK return y if __name__ == '__main__': print ("Torch version : " + str(torch.__version__)) print ("CoreML tools version : " + str(ct.__version__)) model = small_model() model.eval() dummy_input = torch.randn(1,1,28,28) traced_model = torch.jit.trace(model, dummy_input) model = ct.convert( traced_model, inputs=[ ct.TensorType(name="input1", shape=dummy_input.shape) ], ) Maybe a tentative fix and put on a branch. Will push shortly for review. In the meantime, new test case [testPool.txt](https://github.com/apple/coremltools/files/5017284/testPool.txt) As I have difficulty with git pushing the branch (error 403), here is the tentative patch for review [patch.txt](https://github.com/apple/coremltools/files/5017298/patch.txt) PS: To be clear, I cannot generate the PR as I cannot push my branch at the moment. As the patch has only 4 lines of code, maybe someone else can carry it forward. Thx.
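The merged fix above handles the implicit-stride case by falling back to the kernel size whenever the traced graph supplies an empty stride constant. A condensed, framework-free sketch of that rule:

```python
def resolve_strides(kernel_sizes, strides):
    # F.max_pool2d(x, 2) traces with an empty stride list; PyTorch's own
    # semantics then use the kernel size as the stride.
    return list(kernel_sizes) if not list(strides) else list(strides)


assert resolve_strides([2, 2], []) == [2, 2]
assert resolve_strides([2, 2], [1, 1]) == [1, 1]
```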
2020-08-03T21:22:28
apple/coremltools
843
apple__coremltools-843
[ "810" ]
828812bf897afdd7c5fa87d5f2fa9dea4f740a2e
diff --git a/coremltools/converters/mil/backend/nn/op_mapping.py b/coremltools/converters/mil/backend/nn/op_mapping.py --- a/coremltools/converters/mil/backend/nn/op_mapping.py +++ b/coremltools/converters/mil/backend/nn/op_mapping.py @@ -1375,12 +1375,18 @@ def lstm(const_context, builder, op): _squeeze(builder, op.outputs[2].name, output_names[2], axes=[0, 3, 4]) elif direction == "bidirectional": - # Expand initial_h and initial_c - _expand_dim(builder, initial_h + "_expanded", initial_h, [2, 3, 4]) - initial_h += "_expanded" + # Issue #810 + num_layer = len(builder.layers) + initial_h_expand = initial_h + "_expanded" + "_" + str(num_layer) + if not (initial_h_expand in set(builder.layers)): + _expand_dim(builder, initial_h_expand, initial_h, [2, 3, 4]) + initial_h = initial_h_expand + # initial_h may have the same name as initial_c (e.g., same Var) - _expand_dim(builder, initial_c + "_expanded2", initial_c, [2, 3, 4]) - initial_c += "_expanded2" + initial_c_expand = initial_c + "_expanded2" + "_" + str(num_layer) + if not (initial_c_expand in set(builder.layers)): + _expand_dim(builder, initial_c_expand, initial_c, [2, 3, 4]) + initial_c = initial_c_expand initial_h_f = initial_h + "_forward" initial_h_r = initial_h + "_reverse"
diff --git a/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py b/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py --- a/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py +++ b/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py @@ -491,6 +491,72 @@ def test_lstm_xexception( backend= backend, ) +# Workaround for GitHub Issue #824 +# i.e. the return h_n/c_n for a converted BLSTM are mangled. +# Therefore, just look at output 'y' (for now) which is correct. +class StripCellAndHidden(nn.Module): + def __init__(self,flagReturnTuple_): + super(StripCellAndHidden, self).__init__() + self.flagReturnTuple = flagReturnTuple_ + + def forward(self,x): + # Pass tuple, not tensor, to avoid issue in coremltools/converters/mil/frontend/torch/test/testing_utils.py on "if not expected_results:" + # Pass tensor when we need input for LSTM #2 as part of nn.Sequential() + return tuple(x[0]) if self.flagReturnTuple else x[0] + +# Check GitHub Issue #810, assume num_layers == 2 and bidirectional == True +class TestStackedBLSTM: + @pytest.mark.parametrize( + "input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend", + itertools.product([7], [5], [2], [True, False], [True, False], [0.3], [True], backends), + ) + def test_lstm( + self, + input_size, + hidden_size, + num_layers, + bias, + batch_first, + dropout, + bidirectional, + backend, + ): + model = nn.Sequential( + nn.LSTM( + input_size=input_size, + hidden_size=hidden_size, + num_layers=1, + bias=bias, + batch_first=batch_first, + dropout=dropout, + bidirectional=True), + StripCellAndHidden(False), + nn.LSTM( + input_size=2*hidden_size, + hidden_size=hidden_size, + num_layers=1, + bias=bias, + batch_first=batch_first, + dropout=dropout, + bidirectional=True), + StripCellAndHidden(True) + ) + + SEQUENCE_LENGTH = 3 + BATCH_SIZE = 2 + + num_directions = int(bidirectional) + 1 + + # (seq_len, batch, input_size) + if batch_first: + _input = torch.rand(BATCH_SIZE, SEQUENCE_LENGTH, input_size) + else: + _input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size) + + # Do not use h_0/c_0 input and do not check h_n/c_n output, GitHub Issue #824 + expected_results = model(_input) + + run_compare_torch(_input, model, expected_results, input_as_shape=False, backend=backend) class TestConcat: # This tests an edge case where the list of tensors to concatenate only
PyTorch to CoreML LSTM model conversion v4 fails with ValueError: Layer with name "_lstm_h0_reshaped_expanded" has already been added. Please use a unique name. Relevance:----------------------------------------------------------- While there other open issues in the coremltools v4 TOT repo regarding PyTorch trained LSTMs, such as #755 (multi layer LSTM) and #776 (handling of LSTM initials h0/c0), I was looking for workarounds on how to build multi-layer bidirectional LSTMs. Now I ran into a blocking bug with error: > Layer with name "_lstm_h0_reshaped_expanded" has already been added. Please use a unique name. In other words, another blocker on the conversion of PyTorch multilayer LSTMs (potential GRU/RNN as well). On the good side, a little digging in the conversion code leads to potential fix as well (see code and potential solution below) but I need your please to help to assess whether this approach is best, whether the unidirectional LSTM and GRU also need fixes, etc. Reproducible:----------------------------------------------------------- Yes Testcase:----------------------------------------------------------- Attached. Run e.g. as python3 -O ../testLstmTwoLayer.py [testLstmTwoLayer.txt](https://github.com/apple/coremltools/files/4967763/testLstmTwoLayer.txt) Setup:----------------------------------------------------------- Torch version : 1.5.1 CoreML tools version : TOT 705244e2be26c3fb7881fd7a731d25a55f5e4765 July 23 Log:----------------------------------------------------------- Torch version : 1.5.1 CoreML tools version : 4.0b1 small_model( (lstm1): LSTM(28, 16, batch_first=True, bidirectional=True) (lstm2): LSTM(32, 8, batch_first=True, bidirectional=True) (fc1): Linear(in_features=16, out_features=11, bias=True) ) Converting Frontend ==> MIL Ops: 98%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▍ | 46/47 [00:00<00:00, 218.23 ops/s] Running MIL optimization passes: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 13/13 [00:00<00:00, 17.78 passes/s] Translating MIL ==> MLModel Ops: 69%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ | 22/32 [00:00<00:00, 14936.01 ops/s] Traceback (most recent call last): File "../testLstmTwoLayer.py", line 74, in <module> inputs=[ ct.TensorType(name="input1", shape=dummy_input.shape) ], File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/_converters_entry.py", line 299, in convert **kwargs File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/converter.py", line 122, in _convert out = backend_converter(prog, **kwargs) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/converter.py", line 72, in __call__ return load(*args, **kwargs) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/backend/nn/load.py", line 239, in load prog.functions["main"].outputs, File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/backend/nn/op_mapping.py", line 50, in convert_ops 
mapper(const_context, builder, op) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/backend/nn/op_mapping.py", line 1399, in lstm _expand_dim(builder, initial_h + postfix, initial_h, [2, 3, 4]) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/converters/mil/backend/nn/op_mapping.py", line 208, in _expand_dim name=node_name, input_name=input_name, output_name=node_name, axes=axes File "~/Library/Python/3.7/lib/python/site-packages/coremltools/models/neural_network/builder.py", line 7056, in add_expand_dims spec_layer = self._add_generic_layer(name, [input_name], [output_name]) File "~/Library/Python/3.7/lib/python/site-packages/coremltools/models/neural_network/builder.py", line 1029, in _add_generic_layer % name ValueError: Layer with name "_lstm_h0_reshaped_expanded" has already been added. Please use a unique name. Interpretation and potential fix: ----------------------------------------------------------- This is observed with git coremltools, TOT 705244e2be26c3fb7881fd7a731d25a55f5e4765 July 23 The error > Layer with name "_lstm_h0_reshaped_expanded" has already been added. Please use a unique name. seems to indicate that the naming scheme in the converter is using a global name scheme. While the testcase runs fine with ONE layer (set flag twoLayerModelFlag to False) when you run with TWO layers (twoLayerModelFlag=True) ) (or more presumably) the error occurs. Originally, you have on TOT the use of fixed postfixes like "_expanded" (for h0) and "_expanded2" (for c0) which leads to the issue it seems. When you have 2 layers or more, it seems the same variable name is generated multiple times. The line 1399 in coremltools/converters/mil/backend/nn/op_mapping.py points to the code below (The line numbers might be a little off due to my logging lines). Original code: def lstm(const_context, builder, op): ... # Roughly Line 1375 et al # elif direction == "bidirectional": # Expand initial_h and initial_c _expand_dim(builder, initial_h + "_expanded", initial_h, [2, 3, 4]) # <------------------ initial_h += "_expanded" # initial_h may have the same name as initial_c (e.g., same Var) _expand_dim(builder, initial_c + "_expanded2", initial_c, [2, 3, 4]) # <------------------ initial_c += "_expanded2" Therefore, I tried a quick randomization fix along the lines below to generate UNIQUE names every time you handle a new BLSTM layer. Potential fix: def lstm(const_context, builder, op): ... # Roughly Line 1375 et al # elif direction == "bidirectional": # Expand initial_h and initial_c if 1: # New import random mymax = 10000000 # Better, use sys.maxint n = random.randrange(1,mymax) postfix = "_expanded_" + str(n) n2 = random.randrange(1,mymax) postfix2 = "_expanded2_" + str(n2) mynodename = initial_h + "_expanded" print ("nodename " + str(mynodename)) print ("postfix" + str(postfix)) print ("postfix2" + str(postfix2)) else: #ORG postfix = "_expanded" postfix2 = "_expanded2" _expand_dim(builder, initial_h + postfix, initial_h, [2, 3, 4]) # <------------------ initial_h += postfix # initial_h may have the same name as initial_c (e.g., same Var) _expand_dim(builder, initial_c + postfix2, initial_c, [2, 3, 4]) # <------------------ initial_c += postfix2 While the conversion now completes, I have not checked whether "everything" is fine. which is why I ask for your help for a proper fix please. Note we might need a fix for other uniLSTM, GRU or RNN in the same file as well. 
As the other issues #755 (multi layer LSTM) and #776 (handling of initials h0/c0) are still blocking us, it would be an immense help if we could fix this new issue quickly such that I can properly stack multiple BLSTMs. Thanks. Here is the output WITH the fix above: python3 -O ../testLstmTwoLayer.py WARNING:root:Keras version 2.4.3 detected. Last version known to be fully compatible of Keras is 2.2.4 . Torch version : 1.5.1 CoreML tools version : 4.0b1 small_model( (lstm1): LSTM(28, 16, batch_first=True, bidirectional=True) (lstm2): LSTM(32, 8, batch_first=True, bidirectional=True) (fc1): Linear(in_features=16, out_features=11, bias=True) ) Converting Frontend ==> MIL Ops: 98%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▍ | 46/47 [00:00<00:00, 213.31 ops/s] Running MIL optimization passes: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 13/13 [00:00<00:00, 17.83 passes/s] Translating MIL ==> MLModel Ops: 0%| | 0/32 [00:00<?, ? ops/s] nodename _lstm_h0_reshaped_expanded postfix_expanded_6035530 # <------------------ randomized postfix2_expanded2_4582868 # <------------------ randomized nodename _lstm_h0_reshaped_expanded postfix_expanded_3955283 postfix2_expanded2_1781011 Translating MIL ==> MLModel Ops: 100%
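For illustration, a minimal sketch (not coremltools code) of a counter-based way to generate unique layer names, as a deterministic alternative to the randomized postfixes in the workaround above; the helper name `unique_postfix` is hypothetical.

```python
# Minimal sketch, assuming names only need to be unique within one conversion run.
# `unique_postfix` is a hypothetical helper, not part of coremltools.
import itertools

_suffix_counter = itertools.count()

def unique_postfix(base_name: str) -> str:
    # Append a monotonically increasing index so repeated LSTM layers never collide.
    return "{}_expanded_{}".format(base_name, next(_suffix_counter))

assert unique_postfix("_lstm_h0_reshaped") != unique_postfix("_lstm_h0_reshaped")
```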
2020-08-05T16:21:24
apple/coremltools
888
apple__coremltools-888
[ "886" ]
9a10c0b7a6c63edbd2261b58ec530e675a13d75b
diff --git a/coremltools/converters/mil/input_types.py b/coremltools/converters/mil/input_types.py --- a/coremltools/converters/mil/input_types.py +++ b/coremltools/converters/mil/input_types.py @@ -293,7 +293,7 @@ def __init__(self, shape, default=None): shape = list(shape) for idx, s in enumerate(shape): if s is None or s == -1: - msg = 'Dimension cannot be None of -1. Use ' +\ + msg = 'Dimension cannot be None or -1. Use ' +\ 'ct.RangeDim for runtime determined dimension. ' +\ 'Dim {}: {} ' +\ 'See https://coremltools.readme.io/docs/flexible-inputs'
Typo in error message text. of->or ## 🐞Describe the bug typo in error message "Dimension cannot be None of -1" it should be "Dimension cannot be None or -1" `of` should be replaced with `or` ## Trace code line https://github.com/apple/coremltools/blob/master/coremltools/converters/mil/input_types.py#L296 ## To Reproduce Try to convert TF mobilenet with shape `(-1, 224, 224,3)` - If a python script can reproduce the error, please paste the code snippet ``` # Paste code snippet here ``` - If applicable, please attach the source model - If the model cannot be shared publicly, please attach it via filing a bug report at https://developer.apple.com/bug-reporting/ and provide the reference number here - If it is a model conversion issue and the conversion succeeds, however, if there is a numerical mismatch between the original and the coreml model, please paste script used for comparison. ## System environment (please complete the following information): - coremltools version (e.g., 3.0b5): 4.0b3 - OS (e.g., MacOS, Linux): Linux - macOS version (if applicable): - XCode version (if applicable): - How you install python (anaconda, virtualenv, system): - python version (e.g. 3.7): 3.7 - any other relevant information: - e.g. keras version if using keras conversion etc. ## Additional context Add any other context about the problem here.
Related PR #858 (where the bug was introduced). I asked @bhushan23 to look at the issue but got no reply. @apivovarov The API in coremltools 4.0b3 has changed: you now need to use ct.RangeDim() instead of -1. Please reopen the issue if this doesn't solve your problem.
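For readers hitting the same error, a minimal sketch of the flexible-input usage the reply points to, assuming coremltools 4.x; the bounds and the input name below are illustrative assumptions, not taken from the original report (see https://coremltools.readme.io/docs/flexible-inputs).

```python
# Sketch only: declare the batch dimension as runtime-determined instead of -1.
# The bounds (1, 64) and the name "input_1" are assumptions for illustration.
import coremltools as ct

flexible_input = ct.TensorType(
    name="input_1",
    shape=(ct.RangeDim(1, 64), 224, 224, 3),
)
# mlmodel = ct.convert(tf_model, inputs=[flexible_input])  # tf_model: your TF model
```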
2020-08-29T04:40:21
apple/coremltools
911
apple__coremltools-911
[ "804" ]
218e18c52e26acba74094a07352db2aa979e5b1a
diff --git a/coremltools/converters/mil/frontend/torch/internal_graph.py b/coremltools/converters/mil/frontend/torch/internal_graph.py --- a/coremltools/converters/mil/frontend/torch/internal_graph.py +++ b/coremltools/converters/mil/frontend/torch/internal_graph.py @@ -246,7 +246,7 @@ def __init__( # Add params for name, param in params_dict.items(): - value = param.detach().numpy() + value = param.detach().cpu().numpy() self.params[name] = value # Add inputs
cuda tensor parameter fail to convert to numpy in InternalTorchIRGraph ## 🐞Describe the bug - If the input parameter type to a traced model is tensor.cuda(), ct.convert fails with the below error - Torch ## Trace ``` File "/home/josh/anaconda3/lib/python3.7/site-packages/coremltools/converters/mil/frontend/torch/internal_graph.py", line 180, in __init__ value = param.detach().numpy() TypeError: can't convert cuda:0 device type tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first. ``` Note, possible fix: replace ` value = param.detach().numpy() ` with ` value = param.cpu().detach().numpy() ` ## System environment (please complete the following information): - coremltools version: 4.0b1 - OS: Linux - How you install python: anaconda - python version: 3.7.6 ## Additional context Add any other context about the problem here.
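A small standalone illustration of the fix proposed above: CUDA tensors must be copied to host memory before `.numpy()`, and `.cpu()` is a no-op when the tensor already lives on the CPU.

```python
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
param = torch.nn.Parameter(torch.randn(3, 3, device=device))
value = param.detach().cpu().numpy()  # works for both CPU and CUDA parameters
print(value.shape)  # (3, 3)
```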
2020-09-06T12:22:53
apple/coremltools
987
apple__coremltools-987
[ "986" ]
157c1b06997d1f61ea90075e7ad7f5efcb075875
diff --git a/coremltools/converters/mil/frontend/torch/ops.py b/coremltools/converters/mil/frontend/torch/ops.py --- a/coremltools/converters/mil/frontend/torch/ops.py +++ b/coremltools/converters/mil/frontend/torch/ops.py @@ -2353,6 +2353,7 @@ def neg(context, node): inputs = _get_inputs(context, node, expected=1) context.add(mb.mul(x=inputs[0], y=-1, name=node.name)) +@register_torch_op def topk(context, node): inputs = _get_inputs(context, node) kwargs = {"name": node.name, "x": inputs[0], "k": inputs[1]}
diff --git a/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py b/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py --- a/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py +++ b/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py @@ -1432,3 +1432,41 @@ def test_std_4_inputs(self, backend, unbiased, dim, keepdim): kwargs={"unbiased": unbiased, "dim" : dim, "keepdim": keepdim}) input_shape = (2, 5, 10) run_compare_torch(input_shape, model, backend=backend) + +class TestTopk: + @pytest.mark.parametrize( + "backend, largest, shape_dim_k", + itertools.product( + backends, + [True, False], + [ + ((4, 6, 7, 3), -1, 2), + ((10, 3, 4), 2, 2), + ((10, 5), -2, 3), + ((5,), 0, 2) + ], + ), + ) + def test_topk(self, backend, largest, shape_dim_k): + input_shape = shape_dim_k[0] + dim = shape_dim_k[1] + k = shape_dim_k[2] + + class TopkModel(nn.Module): + def __init__(self): + super(TopkModel, self).__init__() + + def forward(self, x): + return torch.topk(x, k, dim=dim, largest=largest) + + input_data = torch.rand(input_shape) + model = TopkModel() + expected_results = model(input_data) + expected_results = [expected_results.values, expected_results.indices] + run_compare_torch( + input_data, + model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + ) \ No newline at end of file
PyTorch topk not supported after mention in release notes ## 🐞Describe the bug I am converting a PyTorch model that uses their `torch.topk` operator and I get a runtime error that is is unsupported. Contrary to this, the release [notes](https://github.com/apple/coremltools/releases/tag/4.0b4) for 4.0b4 explicitly say support was added. I have installed coremltools v4.0 which came after that pre-release, so I assume topk should also be supported in my version ## Trace ``` Traceback (most recent call last): File "/home/addison/miniconda3/envs/coreml/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-9-21395711530d>", line 1, in <module> mlmodel = ct.convert(traced_model, inputs=[ct.ImageType(name='images', shape=dummy.shape)]) File "/home/addison/miniconda3/envs/coreml/lib/python3.6/site-packages/coremltools/converters/_converters_entry.py", line 183, in convert **kwargs File "/home/addison/miniconda3/envs/coreml/lib/python3.6/site-packages/coremltools/converters/mil/converter.py", line 129, in mil_convert ConverterRegistry, **kwargs) File "/home/addison/miniconda3/envs/coreml/lib/python3.6/site-packages/coremltools/converters/mil/converter.py", line 171, in mil_convert_to_proto prog = frontend_converter(model, **kwargs) File "/home/addison/miniconda3/envs/coreml/lib/python3.6/site-packages/coremltools/converters/mil/converter.py", line 85, in __call__ return load(*args, **kwargs) File "/home/addison/miniconda3/envs/coreml/lib/python3.6/site-packages/coremltools/converters/mil/frontend/torch/load.py", line 83, in load raise e File "/home/addison/miniconda3/envs/coreml/lib/python3.6/site-packages/coremltools/converters/mil/frontend/torch/load.py", line 75, in load prog = converter.convert() File "/home/addison/miniconda3/envs/coreml/lib/python3.6/site-packages/coremltools/converters/mil/frontend/torch/converter.py", line 224, in convert convert_nodes(self.context, self.graph) File "/home/addison/miniconda3/envs/coreml/lib/python3.6/site-packages/coremltools/converters/mil/frontend/torch/ops.py", line 53, in convert_nodes "PyTorch convert function for op '{}' not implemented.".format(node.kind) RuntimeError: PyTorch convert function for op 'topk' not implemented. ``` ## To Reproduce If `torch.topk` is still not supported, that is okay and I do not need to upload a model and conversion script. I am mainly opening the issue to point out the discrepancy between release notes and actual behavior ## System environment (please complete the following information): - coremltools version: 4.0 - OS: Ubuntu 18.04 - macOS version (if applicable): NA - XCode version (if applicable): NA - How you install python: anaconda - python version: 3.6.9 - PyTorch: 1.4.0
yeah that seems to be a gap in testing and a bug. The `topk` op was added to the torch `ops.py` but it lacks the decorator `@register_torch_op`, hence it fails to register. A temporary workaround is to install coremltools from source and add `@register_torch_op` [here](https://github.com/apple/coremltools/blob/157c1b06997d1f61ea90075e7ad7f5efcb075875/coremltools/converters/mil/frontend/torch/ops.py#L2356) @aseemw Thank you for the quick workaround! I was able to run the conversion by simply editing the Python code without needing to reinstall. Shall we leave this open until the official release is fixed?
2020-11-06T19:50:24
apple/coremltools
1002
apple__coremltools-1002
[ "1001" ]
30c66ba46378e859431f33d49f4c91c57ccfe03f
diff --git a/coremltools/converters/mil/frontend/tensorflow/load.py b/coremltools/converters/mil/frontend/tensorflow/load.py --- a/coremltools/converters/mil/frontend/tensorflow/load.py +++ b/coremltools/converters/mil/frontend/tensorflow/load.py @@ -26,6 +26,7 @@ from coremltools.converters._profile_utils import _profile from tqdm import tqdm as _tqdm from distutils.version import StrictVersion as _StrictVersion +from coremltools._deps import __get_version as _get_version class TFLoader: @@ -105,7 +106,7 @@ def extract_sub_graph(graph_def, outputs=None): logging.debug(msg.format(outputs)) outputs = outputs if isinstance(outputs, list) else [outputs] outputs = [i.split(":")[0] for i in outputs] - if tf.__version__ < _StrictVersion("1.13.1"): + if _get_version(tf.__version__) < _StrictVersion("1.13.1"): return tf.graph_util.extract_sub_graph(graph_def, outputs) else: return tf.compat.v1.graph_util.extract_sub_graph(graph_def, outputs) @@ -146,7 +147,7 @@ def _graph_def_from_model(self, outputs=None): if not os.path.exists(str(self.model)): raise ValueError('Input model "{}" does not exist'.format(self.model)) elif os.path.isfile(str(self.model)) and self.model.endswith(".pb"): - if tf.__version__ < _StrictVersion("1.13.1"): + if _get_version(tf.__version__) < _StrictVersion("1.13.1"): with open(self.model, "rb") as f: gd = tf.GraphDef() gd.ParseFromString(f.read()) @@ -242,7 +243,7 @@ def _from_saved_model(saved_model_dir): # get model outputs output_node_names = [] - if tf.__version__ < _StrictVersion("1.13.1"): + if _get_version(tf.__version__) < _StrictVersion("1.13.1"): sess = tf.Session() else: sess = tf.compat.v1.Session() @@ -256,7 +257,7 @@ def _from_saved_model(saved_model_dir): # get frozen graph output_graph = mktemp() - tf.compat.v1.reset_default_graph() if tf.__version__ >= _StrictVersion("1.13.1") else tf.reset_default_graph() + tf.compat.v1.reset_default_graph() if _get_version(tf.__version__) >= _StrictVersion("1.13.1") else tf.reset_default_graph() freeze_graph.freeze_graph( input_graph=None, input_saver=None, @@ -275,7 +276,7 @@ def _from_saved_model(saved_model_dir): saved_model_tags=",".join(saved_model_tags), ) - if tf.__version__ < _StrictVersion("1.13.1"): + if _get_version(tf.__version__) < _StrictVersion("1.13.1"): graph_def = tf.GraphDef() with open(output_graph, "rb") as f: graph_def.ParseFromString(f.read()) diff --git a/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/constant_propagation.py b/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/constant_propagation.py --- a/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/constant_propagation.py +++ b/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/constant_propagation.py @@ -17,6 +17,7 @@ from coremltools.converters.mil.mil.types.type_mapping import numpy_val_to_builtin_val from coremltools.converters._profile_utils import _profile from distutils.version import StrictVersion as _StrictVersion +from coremltools._deps import __get_version as _get_version def _get_const_nodes(fn): @@ -76,7 +77,7 @@ def _constant_propagation(fn, new_graph, constant_nodes, constant_node_num_outpu # We're only making one call to `sess.run()` in order to compute constant values. # In this context, the default optimization settings make everything dramatically # slower and more memory-intensive. 
- if tf.__version__ < _StrictVersion("1.13.1"): + if _get_version(tf.__version__) < _StrictVersion("1.13.1"): session_config = tf.ConfigProto() session_config.graph_options.optimizer_options.opt_level = ( tf.OptimizerOptions.L0
Latest coremltools not compatible with Tensorflow 2.4.0-rc0. Super excited for the mac-optimized tensorflow package that just rolled out; unfortunately, it's not compatible with the latest coremltools package on macOS. I followed the coremltools quickstart guide for conversion and it failed. Can I count on compatibility improving in the future? [https://github.com/apple/tensorflow_macos](url) macOS 11.0.1 BigSur, pyenv-installed Python 3.8.6, tensorflow 2.4.0-rc0 <img width="960" alt="Untitled Note" src="https://user-images.githubusercontent.com/47344882/99893356-fbe29800-2c4c-11eb-856c-c94027679b1c.png">
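A likely mechanism behind the failure, sketched for illustration: `StrictVersion` cannot parse pre-release tags such as `2.4.0-rc0`, so the bare string comparison in the old code raises. The patch routes comparisons through a `_get_version` helper; the snippet below only shows how a pre-release version compares when parsed with `packaging`, as an assumption about the mechanism rather than the fix itself.

```python
# Sketch only: packaging.version understands pre-release tags that StrictVersion rejects.
from packaging import version

assert version.parse("2.4.0-rc0") > version.parse("1.13.1")
assert version.parse("2.4.0-rc0") < version.parse("2.4.0")  # rc sorts before the final release
```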
2020-11-23T04:50:13
huggingface/trl
29
huggingface__trl-29
[ "18" ]
6895c6afff4b6e3f76ca80f63fa46050694d1d85
diff --git a/trl/ppo.py b/trl/ppo.py --- a/trl/ppo.py +++ b/trl/ppo.py @@ -102,9 +102,12 @@ def __init__(self, model, ref_model, **ppo_params): self.model = model self.optimizer = Adam(model.parameters(), lr=self.ppo_params['lr']) - self.kl_ctl = AdaptiveKLController(self.ppo_params['init_kl_coef'], - self.ppo_params['target'], - self.ppo_params['horizon']) + if self.ppo_params['adap_kl_ctrl']: + self.kl_ctl = AdaptiveKLController(self.ppo_params['init_kl_coef'], + self.ppo_params['target'], + self.ppo_params['horizon']) + else: + self.kl_ctl = FixedKLController(self.ppo_params['init_kl_coef']) def step(self, query, response, scores):
adap_kl_ctrl boolean is not used Hello, thanks for this implementation. Is it possible that your KL adaptation is always on? https://github.com/lvwerra/trl/blob/750f5fd5329bb81c79b00243c4c8923ac14981d5/trl/ppo.py#L92 Best, Thibaud
Yes, that is true - well spotted! I'll add it as a TODO.
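For context on what the flag switches between, a schematic sketch of the two controller behaviours; the class names and the exact update rule here are illustrative rather than a copy of the trl implementation.

```python
import numpy as np

class FixedKL:
    def __init__(self, kl_coef):
        self.value = kl_coef

    def update(self, current_kl, n_steps):
        pass  # the coefficient never moves

class AdaptiveKL:
    def __init__(self, init_kl_coef, target, horizon):
        self.value, self.target, self.horizon = init_kl_coef, target, horizon

    def update(self, current_kl, n_steps):
        # Nudge the coefficient up when the measured KL overshoots the target,
        # down when it undershoots, scaled by progress along the horizon.
        proportional_error = np.clip(current_kl / self.target - 1, -0.2, 0.2)
        self.value *= 1 + proportional_error * n_steps / self.horizon

ctl = AdaptiveKL(init_kl_coef=0.2, target=6.0, horizon=10000)
ctl.update(current_kl=9.0, n_steps=256)
print(round(ctl.value, 4))  # coefficient increased because KL overshot the target
```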
2022-01-01T16:46:38
huggingface/trl
154
huggingface__trl-154
[ "148" ]
032676aea856cae7dcf98287b5a4f090ebb05665
diff --git a/trl/trainer/ppo_trainer.py b/trl/trainer/ppo_trainer.py --- a/trl/trainer/ppo_trainer.py +++ b/trl/trainer/ppo_trainer.py @@ -309,6 +309,7 @@ def prepare_dataloader(self, dataset: Union[torch.utils.data.Dataset, Dataset], batch_size=self.config.batch_size, collate_fn=data_collator, shuffle=True, + drop_last=True, ) return dataloader
diff --git a/tests/trainer/test_ppo_trainer.py b/tests/trainer/test_ppo_trainer.py --- a/tests/trainer/test_ppo_trainer.py +++ b/tests/trainer/test_ppo_trainer.py @@ -147,6 +147,22 @@ def _init_dummy_dataset(self): return dummy_dataset + def test_drop_last_dataloader(self): + self.ppo_config = PPOConfig(batch_size=3, forward_batch_size=1, log_with=None) + + dummy_dataset = self._init_dummy_dataset() + + ppo_trainer = PPOTrainer( + config=self.ppo_config, + model=self.gpt2_model, + ref_model=self.gpt2_model_ref, + tokenizer=self.gpt2_tokenizer, + dataset=dummy_dataset, + ) + dummy_dataloader = ppo_trainer.dataloader + + self.assertEqual(len(dummy_dataloader), 0) + def test_ppo_step(self): # initialize dataset dummy_dataset = self._init_dummy_dataset()
Error with official examples Hi, I am running the official example script on t5 using sentiment as reward function, and after 97 iterations the following error is raised: ``` ╭───────────────────── Traceback (most recent call last) ──────────────────────╮ │ /home/alejandro.vaca/trl/examples/sentiment/scripts/t5-sentiment.py:126 in │ │ <module> │ │ │ │ 123 │ rewards = [torch.tensor(output[1]["score"]).to(device) for output │ │ 124 │ │ │ 125 │ #### Run PPO step │ │ ❱ 126 │ stats = ppo_trainer.step(query_tensors, response_tensors, rewards) │ │ 127 │ ppo_trainer.log_stats(stats, batch, rewards) │ │ 128 │ │ │ │ /home/alejandro.vaca/trl/trl/trainer/ppo_trainer.py:432 in step │ │ │ │ 429 │ │ """ │ │ 430 │ │ bs = self.config.batch_size │ │ 431 │ │ │ │ ❱ 432 │ │ queries, responses, scores = self._step_safety_checker(bs, que │ │ 433 │ │ │ │ 434 │ │ timing = dict() │ │ 435 │ │ t0 = time.time() │ │ │ │ /home/alejandro.vaca/trl/trl/trainer/ppo_trainer.py:392 in │ │ _step_safety_checker │ │ │ │ 389 │ │ │ if not isinstance(tensor_list[0], torch.Tensor): │ │ 390 │ │ │ │ raise ValueError(f"Elements in {name} must tensors - g │ │ 391 │ │ │ if batch_size is not None and len(tensor_list) != batch_si │ │ ❱ 392 │ │ │ │ raise ValueError( │ │ 393 │ │ │ │ │ f"Batch size ({batch_size}) does not match number │ │ 394 │ │ │ │ ) │ │ 395 │ ╰──────────────────────────────────────────────────────────────────────────────╯ ValueError: Batch size (256) does not match number of examples - but got 63 for: queries ```
hmm maybe we run out of data at this point. Maybe you could verify by just iterating over the dataset and not doing generation/training inside the loop. @younesbelkada maybe we can deal with this better inside TRL. We could `drop_last=True` when we create the dataloader? Agreed with @edbeeching ! We can also have a stronger safety checker, happy to have a look!
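A quick standalone check of why `drop_last=True` removes the mismatch: the final, smaller batch is discarded instead of being handed to `step()` with fewer examples than `batch_size`.

```python
from torch.utils.data import DataLoader

data = list(range(1000))  # e.g. 1000 examples with batch_size 256
loader = DataLoader(data, batch_size=256, drop_last=True)
assert all(len(batch) == 256 for batch in loader)  # the 232-example tail is dropped
```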
2023-02-16T14:30:18
huggingface/trl
238
huggingface__trl-238
[ "232" ]
9b60207f0b1285799274a9ef7800209667dd3b94
diff --git a/examples/sentiment/scripts/gpt2-sentiment.py b/examples/sentiment/scripts/gpt2-sentiment.py --- a/examples/sentiment/scripts/gpt2-sentiment.py +++ b/examples/sentiment/scripts/gpt2-sentiment.py @@ -65,6 +65,8 @@ class ScriptArguments: gradient_accumulation_steps: Optional[int] = field( default=1, metadata={"help": "the number of gradient accumulation steps"} ) + early_stopping: Optional[bool] = field(default=False, metadata={"help": "whether to early stop"}) + target_kl: Optional[float] = field(default=0.1, metadata={"help": "kl target for early stopping"}) parser = HfArgumentParser(ScriptArguments) @@ -77,6 +79,8 @@ class ScriptArguments: mini_batch_size=script_args.mini_batch_size, batch_size=script_args.batch_size, gradient_accumulation_steps=script_args.gradient_accumulation_steps, + early_stopping=script_args.early_stopping, + target_kl=script_args.target_kl, ) diff --git a/trl/trainer/ppo_config.py b/trl/trainer/ppo_config.py --- a/trl/trainer/ppo_config.py +++ b/trl/trainer/ppo_config.py @@ -77,6 +77,10 @@ class PPOConfig(object): Seed value for random generations optimize_cuda_cache (`bool`, *optional*, defaults to `False`): Optimize CUDA cache for slightly more memory-effcient training + early_stopping (`bool`, *optional*, defaults to `False`): + Whether to stop the PPO opimization loop early is the KL too high + target_kl (`float`, *optional*, defaults to `0.1`): + Stop early if we exceed this value by over 50% """ def __init__( @@ -106,6 +110,8 @@ def __init__( max_grad_norm: Optional[float] = None, seed: Optional[int] = 0, optimize_cuda_cache: Optional[bool] = False, + early_stopping: Optional[bool] = False, + target_kl: Optional[float] = 0.1, ): self.model_name = model_name self.steps = steps @@ -148,6 +154,8 @@ def __init__( self.tracker_project_name = tracker_project_name self.optimize_cuda_cache = optimize_cuda_cache self.max_grad_norm = max_grad_norm + self.early_stopping = early_stopping + self.target_kl = target_kl self.total_ppo_epochs = int(np.ceil(steps / batch_size)) diff --git a/trl/trainer/ppo_trainer.py b/trl/trainer/ppo_trainer.py --- a/trl/trainer/ppo_trainer.py +++ b/trl/trainer/ppo_trainer.py @@ -605,7 +605,10 @@ def collator(data): t = time.time() all_stats = [] + early_stop = False for _ in range(self.config.ppo_epochs): + if early_stop: + break for batch in mini_batch_dataloader: with self.accelerator.accumulate(self.model): model_inputs = {k: batch[k] for k in model_inputs_names} @@ -622,6 +625,11 @@ def collator(data): vpreds, batch["masks"], ) + if self.config.early_stopping and train_stats["policy/policykl"] > 1.5 * self.config.target_kl: + early_stop = True + self.optimizer.zero_grad() + break + all_stats.append(train_stats) timing["time/ppo/optimize_step"] = time.time() - t
Feature request: PPO early stopping, important for training stability Hi TRL contributors, Thanks a lot for creating this nice library. While using TRL, I realised that an important feature is still missing, that's the early stopping during a PPO step. Without this feature, the optimisation is prone to KL/loss spikes such as those mentioned in #101 . In my experiments, I found this issue is alleviated through reducing the default num PPO epochs. This feature is already implemented in the [OpenAI official PPO](https://github.com/openai/spinningup/blob/master/spinup/algos/pytorch/ppo/ppo.py#L269) and [AllenAI RL4LMs](https://github.com/allenai/RL4LMs/blob/main/rl4lms/algorithms/ppo/ppo.py#L279). Looking forward to your feedback. Kind regards, Shaojie
@lvwerra @lewtun this might improve the issues we were discussing yesterday, I can add this feature if you like? Sounds good! Yes, I agree this would be an easy and useful feature to add. Thanks @edbeeching @lvwerra for your quick responses. Looking forward to the improvement.
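Schematically, the rule being added looks like the toy loop below; the KL values are illustrative, and in the real trainer the pending gradients are zeroed before breaking out of the PPO epochs.

```python
target_kl = 0.1
measured_policy_kl = [0.04, 0.09, 0.17, 0.25]  # illustrative per-minibatch KL estimates

for kl in measured_policy_kl:
    if kl > 1.5 * target_kl:
        print(f"early stop: policy KL {kl:.2f} > {1.5 * target_kl:.2f}")
        break
    print(f"optimize: policy KL {kl:.2f}")
```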
2023-03-21T15:06:57
huggingface/trl
249
huggingface__trl-249
[ "246" ]
404621f0f918a3f3a0e078b2a7fa70bbfc0bd1e4
diff --git a/examples/sentiment/scripts/gpt2-sentiment.py b/examples/sentiment/scripts/gpt2-sentiment.py --- a/examples/sentiment/scripts/gpt2-sentiment.py +++ b/examples/sentiment/scripts/gpt2-sentiment.py @@ -173,13 +173,10 @@ def collator(data): query_tensors = batch["input_ids"] # Get response from gpt2 - response_tensors = [] - for query in query_tensors: - gen_len = output_length_sampler() - generation_kwargs["max_new_tokens"] = gen_len - response = ppo_trainer.generate(query, **generation_kwargs) - response_tensors.append(response.squeeze()[-gen_len:]) - batch["response"] = [tokenizer.decode(r.squeeze()) for r in response_tensors] + response_tensors = ppo_trainer.generate( + query_tensors, return_prompt=False, length_sampler=output_length_sampler, **generation_kwargs + ) + batch["response"] = tokenizer.batch_decode(response_tensors) # Compute sentiment score texts = [q + r for q, r in zip(batch["query"], batch["response"])] diff --git a/examples/sentiment/scripts/gpt2-sentiment_peft.py b/examples/sentiment/scripts/gpt2-sentiment_peft.py --- a/examples/sentiment/scripts/gpt2-sentiment_peft.py +++ b/examples/sentiment/scripts/gpt2-sentiment_peft.py @@ -214,13 +214,10 @@ def print_trainable_parameters(model): model.gradient_checkpointing_disable() model.pretrained_model.config.use_cache = True # Get response from Causal LM - response_tensors = [] - for query in query_tensors: - gen_len = output_length_sampler() - generation_kwargs["max_new_tokens"] = gen_len - response = ppo_trainer.generate(query, **generation_kwargs) - response_tensors.append(response.squeeze()[-gen_len:]) - batch["response"] = [tokenizer.decode(r.squeeze()) for r in response_tensors] + response_tensors = ppo_trainer.generate( + query_tensors, return_prompt=False, length_sampler=output_length_sampler, **generation_kwargs + ) + batch["response"] = tokenizer.batch_decode(response_tensors) # Compute sentiment score texts = [q + r for q, r in zip(batch["query"], batch["response"])] diff --git a/examples/sentiment/scripts/t5-sentiment.py b/examples/sentiment/scripts/t5-sentiment.py --- a/examples/sentiment/scripts/t5-sentiment.py +++ b/examples/sentiment/scripts/t5-sentiment.py @@ -144,14 +144,12 @@ def collater(data): for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)): query_tensors = batch["input_ids"] - # Get response from gpt2 - response_tensors = [] - for query in query_tensors: - gen_len = output_length_sampler() - generation_kwargs["max_new_tokens"] = gen_len - response = ppo_trainer.generate(query, **generation_kwargs) - response_tensors.append(response.squeeze()) - batch["response"] = [tokenizer.decode(r[1:].squeeze()) for r in response_tensors] + # Get response from t5 + response_tensors = ppo_trainer.generate( + query_tensors, return_prompt=False, length_sampler=output_length_sampler, **generation_kwargs + ) + response_tensors = [r[1:] for r in response_tensors] + batch["response"] = tokenizer.batch_decode(response_tensors) # Compute sentiment score texts = [q + r for q, r in zip(batch["query"], batch["response"])]
why not use batch for model generate In the [gpt2 example code](https://github.com/lvwerra/trl/blob/main/examples/sentiment/scripts/gpt2-sentiment.py#L177), the model generates text one sample at a time, which makes training slow. Why not use batched generation?
This is addressed in #228! We need to update the examples though!
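Outside of the trainer, batched generation with a causal LM looks roughly like the sketch below; left padding is what lets variable-length prompts share a single `generate()` call. The checkpoint name and prompts are just examples.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2", padding_side="left")
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained("gpt2")

prompts = ["The movie was", "I really did not enjoy the"]
inputs = tokenizer(prompts, return_tensors="pt", padding=True)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=16, pad_token_id=tokenizer.eos_token_id)
# strip the (left-padded) prompts so only the generated continuations remain
print(tokenizer.batch_decode(out[:, inputs["input_ids"].shape[1]:]))
```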
2023-03-24T21:13:32
huggingface/trl
260
huggingface__trl-260
[ "259" ]
237eb9c6a5b1f90f6ccb674269bd7a33533e4bf7
diff --git a/trl/trainer/ppo_trainer.py b/trl/trainer/ppo_trainer.py --- a/trl/trainer/ppo_trainer.py +++ b/trl/trainer/ppo_trainer.py @@ -592,11 +592,12 @@ def step( rewards, non_score_reward = self.compute_rewards(scores, all_logprobs, ref_logprobs, masks) timing["time/ppo/compute_rewards"] = time.time() - t + # upcast to float32 to avoid dataset issues mini_batch_dict = { "queries": queries, "responses": responses, - "logprobs": all_logprobs, - "values": values, + "logprobs": all_logprobs.to(torch.float32), + "values": values.to(torch.float32), "rewards": rewards, "masks": masks, }
[Bug] when using accelerator + deepspeed, AcceleratorState class instantiated twice with different config When running `accelerate launch` (with deepspeed), the following errors appear: <img width="1344" alt="image" src="https://user-images.githubusercontent.com/3998421/228439528-a81196e7-68f0-404c-8ddd-cf7791d0882e.png"> I don't know why this error occurred; it was working fine without deepspeed. Two places of initialization: - The first initialization occurs during `AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model)`: https://github.com/lvwerra/trl/blob/237eb9c6a5b1f90f6ccb674269bd7a33533e4bf7/trl/models/modeling_base.py#L256 - The second initialization occurs during `PPOTrainer()`: https://github.com/lvwerra/trl/blob/237eb9c6a5b1f90f6ccb674269bd7a33533e4bf7/trl/trainer/ppo_trainer.py#L181 Current workaround: reset AcceleratorState after from_pretrained ```python model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) AcceleratorState._reset_state() ```
After making `accelerate launch` work: if it is fp16 training, you also need to modify the mini_batch_dict in `step()`, because datasets do not support fp16 and will report the following error: <img width="766" alt="截屏2023-03-29 14 16 07" src="https://user-images.githubusercontent.com/3998421/228444410-58bf705e-fee6-4582-8d9c-2ad4d53620b1.png"> Current workaround: https://github.com/lvwerra/trl/blob/237eb9c6a5b1f90f6ccb674269bd7a33533e4bf7/trl/trainer/ppo_trainer.py#L595 ```python mini_batch_dict = { "queries": queries, "responses": responses, "logprobs": all_logprobs.to(torch.float32), "values": values.to(torch.float32), "rewards": rewards, "masks": masks, } ```
2023-03-29T06:50:51
huggingface/trl
262
huggingface__trl-262
[ "256" ]
734624274d02d1a7f0f3665929e0aa69de59d351
diff --git a/examples/sentiment/scripts/t5-sentiment.py b/examples/sentiment/scripts/t5-sentiment.py --- a/examples/sentiment/scripts/t5-sentiment.py +++ b/examples/sentiment/scripts/t5-sentiment.py @@ -148,8 +148,7 @@ def collater(data): response_tensors = ppo_trainer.generate( query_tensors, return_prompt=False, length_sampler=output_length_sampler, **generation_kwargs ) - response_tensors = [r[1:] for r in response_tensors] - batch["response"] = tokenizer.batch_decode(response_tensors) + batch["response"] = tokenizer.batch_decode([r[1:] for r in response_tensors]) # Compute sentiment score texts = [q + r for q, r in zip(batch["query"], batch["response"])] diff --git a/trl/trainer/ppo_trainer.py b/trl/trainer/ppo_trainer.py --- a/trl/trainer/ppo_trainer.py +++ b/trl/trainer/ppo_trainer.py @@ -417,7 +417,8 @@ def _generate_batched( outputs = [] padding_side_default = self.tokenizer.padding_side - self.tokenizer.padding_side = "left" + if not self.is_encoder_decoder: + self.tokenizer.padding_side = "left" # in case we have fewer examples than bs batch_size = min(len(query_tensors), batch_size) @@ -444,7 +445,11 @@ def _generate_batched( generations = self.accelerator.unwrap_model(self.model).generate(**padded_inputs, **generation_kwargs) for generation, mask in zip(generations, padded_inputs["attention_mask"]): - output = generation[(1 - mask).sum() :] # remove padding + if not self.is_encoder_decoder: + output = generation[(1 - mask).sum() :] # remove padding + else: + output = generation + if not return_prompt and not self.is_encoder_decoder: output = output[(mask).sum() :] # remove prompt outputs.append(output)
t5-sentiment example collapses on master I just reran the t5-sentiment example and, on current master, it shows negative KL divergence and does not learn in general. This does not seem to have happened in the v0.4.1 release. The t5-sentiment.py script itself does not seem to be the culprit: I tested master with the script reverted to its v0.4.1 version and the behavior is identical. ![image](https://user-images.githubusercontent.com/17483828/228250281-c6ab4de0-e689-4243-9658-39b36f199351.png)
@younesbelkada can you reproduce? Negative KL is very suspicious! I was able to reproduce, will investigate! The culprit seems to be b5cce0d13e95c8e21eb9d57177930253d9092a02 This PR introduced batched generation in the t5 example; as can be observed in the [wandb log](https://wandb.ai/distill-bloom/trl/runs/kbj6ts85?workspace=user-), the KL is negative. I can confirm the KL was always positive before that commit.
2023-03-29T15:16:51
huggingface/trl
377
huggingface__trl-377
[ "374" ]
6916e0d2dfb23194c70bd02a50fceb58af7697c1
diff --git a/trl/trainer/ppo_trainer.py b/trl/trainer/ppo_trainer.py --- a/trl/trainer/ppo_trainer.py +++ b/trl/trainer/ppo_trainer.py @@ -233,9 +233,9 @@ def __init__( self.dataloader = self.prepare_dataloader(self.dataset, data_collator) elif self.dataset is None and self.accelerator.num_processes > 1: warnings.warn( - "No dataset is provided. In a multi-GPU setting, this will lead to an error. You should", - " prepare your dataloader yourself with `dataloader = ppo_trainer.accelerator.prepare(dataloader)`", - " and using `torch.utils.data.DataLoader`, or pass a dataset to the `PPOTrainer`. Please ", + "No dataset is provided. In a multi-GPU setting, this will lead to an error. You should" + " prepare your dataloader yourself with `dataloader = ppo_trainer.accelerator.prepare(dataloader)`" + " and using `torch.utils.data.DataLoader`, or pass a dataset to the `PPOTrainer`. Please " " refer to the documentation for more details.", UserWarning, )
Unable to omit the Dataset because of a warning issue If I do not pass a Dataset to PPOTrainer, it causes a warning: ![image](https://github.com/lvwerra/trl/assets/57448108/b135d184-7dd0-4ec4-98cf-6bfec4d20660) which leads to an error: ![image](https://github.com/lvwerra/trl/assets/57448108/ba309d26-5f45-488b-9746-8cd96fee4935)
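To make the failure mode concrete, a small standalone sketch of why the comma-separated strings broke: `warnings.warn` interprets its second positional argument as the warning category, whereas adjacent string literals without commas are concatenated at compile time.

```python
import warnings

try:
    # mimics the old call: the second string is treated as the warning category
    warnings.warn("No dataset is provided.", " You should prepare your dataloader.", UserWarning)
except TypeError as err:
    print(err)  # category must be a Warning subclass, not 'str'

# implicit string concatenation keeps everything in a single message
warnings.warn("No dataset is provided." " You should prepare your dataloader.", UserWarning)
```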
2023-05-18T06:31:14
huggingface/trl
398
huggingface__trl-398
[ "368" ]
a5b0414f6379035f0c515090e82e7cc2a683bcdf
diff --git a/examples/stack_llama/scripts/merge_peft_adapter.py b/examples/stack_llama/scripts/merge_peft_adapter.py --- a/examples/stack_llama/scripts/merge_peft_adapter.py +++ b/examples/stack_llama/scripts/merge_peft_adapter.py @@ -1,17 +1,9 @@ from dataclasses import dataclass, field from typing import Optional -import peft import torch from peft import PeftConfig, PeftModel -from peft.utils import _get_submodules -from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser - - -DEFAULT_PAD_TOKEN = "[PAD]" -DEFAULT_EOS_TOKEN = "</s>" -DEFAULT_BOS_TOKEN = "</s>" -DEFAULT_UNK_TOKEN = "</s>" +from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser @dataclass @@ -32,34 +24,23 @@ class ScriptArguments: assert script_args.base_model_name is not None, "please provide the output name of the merged model" peft_config = PeftConfig.from_pretrained(script_args.adapter_model_name) -model = AutoModelForCausalLM.from_pretrained(script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16) -tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name) -config = AutoConfig.from_pretrained(script_args.base_model_name) -architecture = config.architectures[0] -if "Llama" in architecture: - print("Setting EOS, BOS, and UNK tokens for LLama tokenizer") - tokenizer.add_special_tokens( - { - "eos_token": DEFAULT_EOS_TOKEN, - "bos_token": DEFAULT_BOS_TOKEN, - "unk_token": DEFAULT_UNK_TOKEN, - "pad_token": DEFAULT_PAD_TOKEN, - } +if peft_config.task_type == "SEQ_CLS": + # peft is for reward model so load sequence classification + model = AutoModelForSequenceClassification.from_pretrained( + script_args.base_model_name, num_labels=1, torch_dtype=torch.bfloat16 + ) +else: + model = AutoModelForCausalLM.from_pretrained( + script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16 ) +tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name) + # Load the Lora model model = PeftModel.from_pretrained(model, script_args.adapter_model_name) model.eval() -key_list = [key for key, _ in model.base_model.model.named_modules() if "lora" not in key] -for key in key_list: - parent, target, target_name = _get_submodules(model.base_model.model, key) - if isinstance(target, peft.tuners.lora.Linear): - bias = target.bias is not None - new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias) - model.base_model._replace_module(parent, target_name, new_module, target) - -model = model.base_model.model +model = model.merge_and_unload() model.save_pretrained(f"{script_args.output_name}") tokenizer.save_pretrained(f"{script_args.output_name}")
Llama Reward Model is incorrectly merged As mentioned in #287, `merge_peft_adapter` saves the Llama RM as a `LlamaForCausalLM` see [here](https://github.com/lvwerra/trl/blob/main/examples/stack_llama/scripts/merge_peft_adapter.py#L35) But the reward model is trained and should be a `LlamaForSequenceClassification` and running `rl_training.py` gives the obvious warnings ``` Some weights of the model checkpoint at ./llama-7b-se-rm were not used when initializing LlamaForSequenceClassification: ['lm_head.weight'] - This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at /home/toolkit/huggingface/llama-7b-rm and are newly initialized: ['score.weight'] You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` We should instead check whether we are merging the rm and then save as a the correct model Also the `score.weight` is not being loaded as mentioned in #297 , see more info below --- update -- It seems that `merge_peft_adapter` should be using `merge_and_unload()` which correctly overrides the score. But I haven't yet managed to get good results using the adapter weights on the hub
A very not-clean way of doing this is ``` if "rm" in script_args.adapter_model_name: model = AutoModelForSequenceClassification.from_pretrained( script_args.base_model_name, num_labels=1, torch_dtype=torch.bfloat16 ) else: model = AutoModelForCausalLM.from_pretrained( script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16 ) ``` It also looks like the reward model's `score.weight` isn't being loaded from the adapter weights. I've checked and this can either be an issue with the adapter config from `trl-lib/llama-7b-se-rm-peft` as it doesn't mention the score as one of the `modules_to_save` even though it is. It could also be an issue with `peft` or how the adapter was saved. In any case, anyone who currently wants to use the RM should overwrite the score.weight before calling `model.save_pretrained` ``` if "rm" in script_args.adapter_model_name: peft_state_dict = torch.load( "/path/to/huggingface/hub/models--trl-lib--llama-7b-se-rm-peft/snapshots/{some_number}/adapter_model.bin" ) score_weight = peft_state_dict["base_model.model.base_model.model.score.weight"] model.score = torch.nn.Linear(4096, 1, bias=False) with torch.no_grad(): model.score.weight.copy_(score_weight) ``` Even after these fixes, I've been unable to get the reward model loaded. I've tried merging the `trl-lib/llama-7b-se-rm-peft` adapter weights directly on llama 7b, as well as merging the `trl-lib/llama-7b-se-peft` weights first and then merging the rm weights. I'm not getting above 56% accuracy on the rm validation set. I agree with you, [@mnoukhov](https://github.com/mnoukhov). The problem is that `merge_peft_adapter` only saves the `base_model` (the llm part, not the score part wich is the head), even after the "fix". I'm encountering the same issue as you, by the way. I'm planning to modify the pipeline definition. Instead of giving a string to the pipeline, I will reconstruct the entire reward model within the script and pass the model as an argument to the pipeline. I'll keep you updated on my progress. The issue with that approach is that `PeftModelForSequenceClassification` isn't supported by `pipeline` so you'd need to modify that as well. The correct thing to do is actually to use `peft`'s `merge_and_unload` but the weights on the hub don't seem to be working for me. For that reason I'm re-running the whole pipeline and will upload my adapter weights once its done I agree with your assessment. The Pipeline doesn't seem to work well with PeftModelForSequenceClassification. However, I believe I've come up with a fix. I didn't use merge_peft_adapter for the reward, instead I modified the [rl_training.py](https://github.com/lvwerra/trl/blob/main/examples/stack_llama/scripts/rl_training.py) script. Here's how I reconstructed the reward model and changed the reward computation: reward_tokenizer_name is the name of the reward tokenizer, in my case it's "EleutherAI/gpt-neo-125m". reward_model_name is the location of the saved adapter of the reward model. 
``` # Now we reconstruct the rewardmodel reward_peft_config = PeftConfig.from_pretrained(script_args.reward_model_name) reward_model = AutoModelForSequenceClassification.from_pretrained(script_args.reward_tokenizer_name, num_labels=1, return_dict=True) #, torch_dtype=torch.bfloat16) reward_model = PeftModel.from_pretrained(reward_model, script_args.reward_model_name) reward_model = reward_model.to(current_device) reward_tokenizer = AutoTokenizer.from_pretrained(script_args.reward_tokenizer_name) reward_tokenizer.pad_token = reward_tokenizer.eos_token # Define the pad token in the model configuration reward_model.config.pad_token_id = reward_tokenizer.pad_token_id for epoch, batch in enumerate(ppo_trainer.dataloader): question_tensors = batch["input_ids"] response_tensors = ppo_trainer.generate( question_tensors, return_prompt=False, length_sampler=output_length_sampler, **generation_kwargs, ) # tokenizer.batch_decode works on CPU. perhaps it is faster if # yhou use two models with same tokenizer and just works with tokens on the GPU batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True) # Prepare texts texts = [q + r for q, r in zip(batch["query"], batch["response"])] # Tokenize the texts in batch texts_tokenized = reward_tokenizer(texts, truncation=True, padding=True, return_tensors="pt") # Get the tensors input_ids = texts_tokenized["input_ids"].to(current_device) attention_mask = texts_tokenized["attention_mask"].to(current_device) ) # Forward pass through the reward_model pipe_outputs = reward_model(input_ids=input_ids, attention_mask=attention_mask) rewards = list((torch.sigmoid(pipe_outputs.logits) - script_args.reward_baseline).unbind(dim=0)) # Run PPO step stats = ppo_trainer.step(question_tensors, response_tensors, rewards) ppo_trainer.log_stats(stats, batch, rewards) if script_args.save_freq and epoch and epoch % script_args.save_freq == 0: ppo_trainer.save_pretrained(script_args.output_dir + f"step_{epoch}") ``` Any feedback or suggestions would be helpful. This makes sense but it doesn't solve the problem. If you have adapter weights that you've trained yourself you can correctly load them in with `model = model.merge_and_unload()` , replacing lines 54-62 of `merge_peft_adapter.py` (and also changing CausalLM for SequenceClassification) The remaining issue is the weights on the hub don't seem to get the correct reward modelling accuracy > That sounds like a good idea!May I ask if you run it successfully? How is the effect? Is there any loss in the indicator? Thanks. Thx. I have been able to get the training process to run, although I haven't completed training the entire model yet. I'm unsure regarding the calculation of rewards. I don't know if I should directly use the logits or if I need to apply a sigmoid function to them. I have to check what works best. Also if you have the same tokenizer for the reward function and the model, I think that the code I made can be improved... I have to check that... Here is another version for the training loop, wich is improved when the reward model and the model share the same tokenizer. 
This is faster, the other one is more general ``` reward_mean = [] reward_std = [] for epoch, batch in enumerate(ppo_trainer.dataloader): question_tensors = batch["input_ids"] response_tensors = ppo_trainer.generate( question_tensors, return_prompt=False, length_sampler=output_length_sampler, **generation_kwargs, ) # Prepare tensors directly, no decoding input_ids = [torch.cat([q, r], dim=-1) for q, r in zip(question_tensors, response_tensors)] # Pad the input sequences input_ids_padded = torch.nn.utils.rnn.pad_sequence(input_ids, batch_first=True) # Create attention_mask attention_mask = torch.ones_like(input_ids_padded) # Forward pass through the reward_model pipe_outputs = reward_model(input_ids=input_ids_padded, attention_mask=attention_mask) rewards = list((torch.sigmoid(pipe_outputs.logits) - script_args.reward_baseline).unbind(dim=0)) # Run PPO step stats = ppo_trainer.step(question_tensors, response_tensors, rewards) ppo_trainer.log_stats(stats, batch, rewards) reward_mean.append(stats['ppo/mean_scores']) reward_std.append(stats['ppo/std_scores']) if script_args.save_freq and epoch and epoch % script_args.save_freq == 0: ppo_trainer.save_pretrained(script_args.output_dir + f"step_{epoch}") ``` > Here is another version for the training loop, wich is improved when the reward model and the model share the same tokenizer. This is faster, the other one is more general > > ``` > reward_mean = [] > reward_std = [] > for epoch, batch in enumerate(ppo_trainer.dataloader): > question_tensors = batch["input_ids"] > > response_tensors = ppo_trainer.generate( > question_tensors, > return_prompt=False, > length_sampler=output_length_sampler, > **generation_kwargs, > ) > # Prepare tensors directly, no decoding > input_ids = [torch.cat([q, r], dim=-1) for q, r in zip(question_tensors, response_tensors)] > > # Pad the input sequences > input_ids_padded = torch.nn.utils.rnn.pad_sequence(input_ids, batch_first=True) > > # Create attention_mask > attention_mask = torch.ones_like(input_ids_padded) > > # Forward pass through the reward_model > pipe_outputs = reward_model(input_ids=input_ids_padded, attention_mask=attention_mask) > > rewards = list((torch.sigmoid(pipe_outputs.logits) - script_args.reward_baseline).unbind(dim=0)) > > # Run PPO step > stats = ppo_trainer.step(question_tensors, response_tensors, rewards) > ppo_trainer.log_stats(stats, batch, rewards) > reward_mean.append(stats['ppo/mean_scores']) > reward_std.append(stats['ppo/std_scores']) > if script_args.save_freq and epoch and epoch % script_args.save_freq == 0: > ppo_trainer.save_pretrained(script_args.output_dir + f"step_{epoch}") > ``` ![截屏2023-05-17 18 34 15](https://github.com/lvwerra/trl/assets/26736518/315b2003-f0b2-44f5-9c78-184108bdd8bd) ![截屏2023-05-17 18 36 01](https://github.com/lvwerra/trl/assets/26736518/2bb2be6a-33d4-4c25-86ba-b5772ba16644) Hi,did you encounter this problem when training this script? No... I have never seen that error before. It seems to me that is an enviroment error related to cuda version... but no, no idea.
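Pulling the resolution of this thread together, a hedged sketch of merging a PEFT reward-model adapter into the right base class; the paths are placeholders and the snippet mirrors the patch above rather than a separately verified recipe.

```python
import torch
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification

adapter_name = "path/to/adapter"   # placeholder
base_name = "path/to/base-model"   # placeholder

peft_config = PeftConfig.from_pretrained(adapter_name)
if peft_config.task_type == "SEQ_CLS":
    # reward-model adapters were trained for sequence classification
    base = AutoModelForSequenceClassification.from_pretrained(
        base_name, num_labels=1, torch_dtype=torch.bfloat16
    )
else:
    base = AutoModelForCausalLM.from_pretrained(base_name, torch_dtype=torch.bfloat16)

merged = PeftModel.from_pretrained(base, adapter_name).merge_and_unload()
merged.save_pretrained("path/to/merged")   # placeholder
```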
2023-06-01T19:05:07
huggingface/trl
474
huggingface__trl-474
[ "473" ]
843c14574f740ed03611ce433a439948ecf4b501
diff --git a/trl/trainer/sft_trainer.py b/trl/trainer/sft_trainer.py --- a/trl/trainer/sft_trainer.py +++ b/trl/trainer/sft_trainer.py @@ -144,7 +144,7 @@ def __init__( if callbacks is None: callbacks = [PeftSavingCallback] - elif not isinstance(model, PreTrainedModel): + elif not isinstance(model, (PreTrainedModel, PeftModel)): model = AutoModelForCausalLM.from_pretrained(model) if tokenizer is None:
SFTTrainer: passing a PeftModel as a model argument without passing the config breaks [https://github.com/lvwerra/trl/blob/main/trl/trainer/sft_trainer.py#L127C36-L127C47](https://github.com/lvwerra/trl/blob/main/trl/trainer/sft_trainer.py#L127C36-L127C47) It requires that you pass a PEFT config even though you've already instantiated the PeftModel yourself; otherwise it ignores the if and tries to build a regular transformers model at https://github.com/lvwerra/trl/blob/main/trl/trainer/sft_trainer.py#L148 taking your PeftModel as argument, which makes no sense. I suggest adding a check at https://github.com/lvwerra/trl/blob/main/trl/trainer/sft_trainer.py#L147 to make sure that not isinstance(model, PeftModel): ```python elif not isinstance(model, PreTrainedModel) and not isinstance(model, PeftModel): ```
This would let the user of the library instantiate their own PeftModel, which does not look possible right now.
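As a minimal sketch of the guard being suggested (the real change sits inside `SFTTrainer.__init__`, as the patch above shows), with `resolve_model` as a hypothetical helper name:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, PreTrainedModel

def resolve_model(model):
    # Only resolve a string model id; leave already-instantiated models untouched.
    if isinstance(model, (PreTrainedModel, PeftModel)):
        return model  # may carry user-built adapters; do not rebuild it
    if isinstance(model, str):
        return AutoModelForCausalLM.from_pretrained(model)
    raise TypeError(f"Unsupported model type: {type(model)}")
```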
2023-06-27T20:58:12
huggingface/trl
513
huggingface__trl-513
[ "451" ]
f3230902b18d265814b1def6213de30a0bece94b
diff --git a/trl/trainer/reward_trainer.py b/trl/trainer/reward_trainer.py --- a/trl/trainer/reward_trainer.py +++ b/trl/trainer/reward_trainer.py @@ -206,5 +206,6 @@ def prediction_step( logits = torch.stack(logits).mean(dim=2).softmax(dim=0).T labels = torch.zeros(logits.shape[0]) + labels = self._prepare_inputs(labels) return loss, logits, labels
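For illustration, what the one-line patch above effectively does: tensors gathered across processes during evaluation must sit on the current device, and routing `labels` through `_prepare_inputs` moves them off the CPU. The snippet below is a sketch of that device move, not the trainer code.

```python
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
logits = torch.randn(8, 2, device=device)
labels = torch.zeros(logits.shape[0])   # created on CPU, as in the original code
labels = labels.to(logits.device)       # the effect of self._prepare_inputs(labels) here
assert labels.device == logits.device
```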
Multi-GPU RuntimeError: Tensors must be CUDA and dense I'm encountering a runtime error in my code from using 8 GPUs and seeking assistance to resolve it. The error message states: "RuntimeError: Tensors must be CUDA and dense." This error happens in the evaluation step. According to this https://github.com/Lightning-AI/lightning/discussions/2529, it seems like I have to move the model/metric to the device, but I couldn't find a way to resolve this through RewardTrainer. Here is the relevant section of the traceback: ``` Traceback (most recent call last): File "/nobackup/jirayu/llama-hh-rlhf/reward_model.py", line 147, in <module> File "/nobackup/jirayu/llama-hh-rlhf/reward_model.py", line 130, in run_training File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/transformers/trainer.py", line 1645, in train return inner_training_loop( File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/transformers/trainer.py", line 2020, in _inner_training_loop self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/transformers/trainer.py", line 2321, in _maybe_log_save_evaluate metrics = self.evaluate(ignore_keys=ignore_keys_for_eval) File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/transformers/trainer.py", line 3053, in evaluate output = eval_loop( File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/transformers/trainer.py", line 3272, in evaluation_loop labels = self._nested_gather(labels) File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/transformers/trainer.py", line 3388, in _nested_gather tensors = distributed_concat(tensors) File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/transformers/trainer_pt_utils.py", line 197, in distributed_concat dist.all_gather(output_tensors, tensor) File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/torch/distributed/distributed_c10d.py", line 1451, in wrapper return func(*args, **kwargs) File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/torch/distributed/distributed_c10d.py", line 2448, in all_gather work = default_pg.allgather([tensor_list], [tensor]) RuntimeError: Tensors must be CUDA and dense ``` Code: ```python import argparse import os import torch from accelerate import Accelerator from datasets import load_dataset from peft import LoraConfig from tqdm import tqdm from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, logging, set_seed from trl import RewardTrainer os.environ["WANDB_PROJECT"] = "llama-hh-rlhf" os.environ["WANDB_RUN_NAME"] = "llama-7b-rm" def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--model_path", type=str, default="./llama-7b-sft") parser.add_argument("--batch_size", type=int, default=4) parser.add_argument("--gradient_accumulation_steps", type=int, default=1) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument("--learning_rate", type=float, default=3e-5) parser.add_argument("--lr_scheduler_type", type=str, default="linear") parser.add_argument("--num_warmup_steps", type=int, default=0) parser.add_argument("--weight_decay", type=float, default=0.05) parser.add_argument("--lora_r", type=int, default=16) parser.add_argument("--lora_alpha", type=int, default=32) parser.add_argument("--lora_dropout", type=float, default=0.1) parser.add_argument("--fp16", action="store_true", default=False) 
parser.add_argument("--bf16", action="store_true", default=True) parser.add_argument("--gradient_checkpointing", action="store_true", default=True) parser.add_argument("--seed", type=int, default=0) parser.add_argument("--output_dir", type=str, default="./llama-7b-rm-adapter") parser.add_argument("--log_freq", default=1, type=int) parser.add_argument("--eval_freq", default=50, type=int) return parser.parse_args() def preprocess_function(examples, tokenizer): tokenized_chosen = tokenizer(examples["chosen"], truncation=True) tokenized_rejected = tokenizer(examples["rejected"], truncation=True) return { "input_ids_chosen": tokenized_chosen["input_ids"], "attention_mask_chosen": tokenized_chosen["attention_mask"], "input_ids_rejected": tokenized_rejected["input_ids"], "attention_mask_rejected": tokenized_rejected["attention_mask"], } def run_training(args): print("Loading model...") model = AutoModelForSequenceClassification.from_pretrained( args.model_path, num_labels=1, torch_dtype=torch.bfloat16, device_map={"": Accelerator().process_index} ) tokenizer = AutoTokenizer.from_pretrained( args.model_path, use_auth_token=True, torch_dtype=torch.bfloat16, device_map={"": Accelerator().process_index} ) tokenizer.pad_token = tokenizer.eos_token model.config.pad_token_id = tokenizer.eos_token_id model.config.use_cache = not args.gradient_checkpointing print("Loading dataset...") dataset = load_dataset("Anthropic/hh-rlhf") train_dataset = dataset["train"] eval_dataset = dataset["test"] num_proc = 24 original_columns = train_dataset.column_names train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=num_proc, remove_columns=original_columns, fn_kwargs={"tokenizer": tokenizer} ) eval_dataset = eval_dataset.map( preprocess_function, batched=True, num_proc=num_proc, remove_columns=original_columns, fn_kwargs={"tokenizer": tokenizer} ) print("Setting up training...") peft_config = LoraConfig( r=args.lora_r, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout, inference_mode=False, task_type="SEQ_CLS", ) training_args = TrainingArguments( output_dir=args.output_dir, dataloader_drop_last=True, num_train_epochs=args.num_train_epochs, evaluation_strategy="steps", eval_steps=args.eval_freq, logging_steps=args.log_freq, per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, warmup_steps=args.num_warmup_steps, gradient_accumulation_steps=args.gradient_accumulation_steps, gradient_checkpointing=args.gradient_checkpointing, fp16=args.fp16, bf16=args.bf16, weight_decay=args.weight_decay, report_to="wandb", ) trainer = RewardTrainer( model=model, args=training_args, tokenizer=tokenizer, train_dataset=train_dataset, eval_dataset=eval_dataset, peft_config=peft_config, ) print("Training...") trainer.train() print("Saving model...") model = trainer.model print(f"Saving to {args.output_dir}") model.save_pretrained(args.output_dir) if __name__ == "__main__": args = get_args() set_seed(args.seed) os.makedirs(args.output_dir, exist_ok=True) logging.set_verbosity_error() run_training(args) ``` Running command: `accelerate launch reward_model.py` accelerate config: ``` compute_environment: LOCAL_MACHINE distributed_type: MULTI_GPU downcast_bf16: 'no' gpu_ids: all machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 8 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ```
Hi @Top34051 Thanks for the issue, indeed we did not tested the RM in a distributed setup. I think the issues comes from the fact that the default `compute_accuracy` returns a dict of np arrays and you are trying to all_gather thzm. Can you try to replace the compute accuracy metrics by: ```python def compute_accuracy(eval_pred) -> Dict[str, float]: predictions, labels = eval_pred # Here, predictions is rewards_chosen and rewards_rejected. # We want to see how much of the time rewards_chosen > rewards_rejected. predictions = np.argmax(predictions, axis=1) accuracy = np.array(predictions == labels, dtype=float).mean().item() return {"accuracy": torch.Tensor(accuracy)} ``` and pass it to the `RewardTrainer`'s init `compute_metrics=compute_accuracy` . Let me know how it goes Hi @younesbelkada Thanks for the suggestion. However, replacing the compute accuracy metric still gives the CUDA must be dense error. Error: ``` File "/nobackup/jirayu/llama-hh-rlhf/reward.py", line 186, in <module> run_training(args) File "/nobackup/jirayu/llama-hh-rlhf/reward.py", line 174, in run_training trainer.train() File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/transformers/trainer.py", line 1645, in train return inner_training_loop( File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/transformers/trainer.py", line 2020, in _inner_training_loop self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/transformers/trainer.py", line 2321, in _maybe_log_save_evaluate metrics = self.evaluate(ignore_keys=ignore_keys_for_eval) File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/transformers/trainer.py", line 3053, in evaluate output = eval_loop( File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/transformers/trainer.py", line 3272, in evaluation_loop labels = self._nested_gather(labels) File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/transformers/trainer.py", line 3388, in _nested_gather tensors = distributed_concat(tensors) File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/transformers/trainer_pt_utils.py", line 197, in distributed_concat dist.all_gather(output_tensors, tensor) File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/torch/distributed/distributed_c10d.py", line 1451, in wrapper return func(*args, **kwargs) File "/nobackup/jirayu/miniconda3/envs/llm/lib/python3.9/site-packages/torch/distributed/distributed_c10d.py", line 2448, in all_gather work = default_pg.allgather([tensor_list], [tensor]) RuntimeError: Tensors must be CUDA and dense ``` Updated code: ```python import argparse import numpy as np import os import torch from accelerate import Accelerator from datasets import load_dataset from peft import LoraConfig, TaskType from tqdm import tqdm from transformers import ( AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, logging, set_seed ) from trl import RewardTrainer os.environ["WANDB_PROJECT"] = "llama-hh-rlhf" os.environ["WANDB_RUN_NAME"] = "llama-7b-rm" def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--model_path", type=str, default="./llama-7b-sft") parser.add_argument("--output_dir", type=str, default="./llama-7b-rm-adapter") parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument("--per_device_train_batch_size", type=int, default=4) parser.add_argument("--per_device_eval_batch_size", 
type=int, default=4) parser.add_argument("--gradient_accumulation_steps", type=int, default=1) parser.add_argument("--gradient_checkpointing", action="store_true", default=True) parser.add_argument("--fp16", action="store_true", default=False) parser.add_argument("--bf16", action="store_true", default=True) parser.add_argument("--max_seq_length", type=int, default=512) parser.add_argument("--learning_rate", type=float, default=3e-5) parser.add_argument("--lr_scheduler_type", type=str, default="linear") parser.add_argument("--weight_decay", type=float, default=0.05) parser.add_argument("--warmup_ratio", type=float, default=0.0) parser.add_argument("--lora_r", type=int, default=16) parser.add_argument("--lora_alpha", type=int, default=32) parser.add_argument("--lora_dropout", type=float, default=0.1) parser.add_argument("--eval_steps", type=int, default=10) parser.add_argument("--logging_steps", type=int, default=1) parser.add_argument("--save_steps", type=int, default=2000) parser.add_argument("--seed", type=int, default=0) return parser.parse_args() def get_dataset(args, tokenizer): dataset = load_dataset("Anthropic/hh-rlhf", split="train") dataset = dataset.train_test_split(test_size=0.05, seed=args.seed) train_dataset = dataset["train"] eval_dataset = dataset["test"] def preprocess_function(examples): tokenized_chosen = tokenizer(examples["chosen"], truncation=True) tokenized_rejected = tokenizer(examples["rejected"], truncation=True) return { "input_ids_chosen": tokenized_chosen["input_ids"], "attention_mask_chosen": tokenized_chosen["attention_mask"], "input_ids_rejected": tokenized_rejected["input_ids"], "attention_mask_rejected": tokenized_rejected["attention_mask"], } num_proc = 24 original_columns = train_dataset.column_names train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=num_proc, remove_columns=original_columns ) train_dataset = train_dataset.filter( lambda x: len(x["input_ids_chosen"]) <= args.max_seq_length and len(x["input_ids_rejected"]) <= args.max_seq_length ) eval_dataset = eval_dataset.map( preprocess_function, batched=True, num_proc=num_proc, remove_columns=original_columns ) eval_dataset = eval_dataset.filter( lambda x: len(x["input_ids_chosen"]) <= args.max_seq_length and len(x["input_ids_rejected"]) <= args.max_seq_length ) return train_dataset, eval_dataset def print_trainable_parameters(model): trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print(f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}") def compute_accuracy(eval_pred): predictions, labels = eval_pred # Here, predictions is rewards_chosen and rewards_rejected. # We want to see how much of the time rewards_chosen > rewards_rejected. 
predictions = np.argmax(predictions, axis=1) accuracy = np.array(predictions == labels, dtype=float).mean().item() return {"accuracy": torch.Tensor(accuracy)} def run_training(args): model = AutoModelForSequenceClassification.from_pretrained( args.model_path, num_labels=1, load_in_8bit=False, device_map={"": Accelerator().process_index}, torch_dtype=torch.bfloat16, ) tokenizer = AutoTokenizer.from_pretrained(args.model_path, use_auth_token=True) tokenizer.pad_token = tokenizer.eos_token train_dataset, eval_dataset = get_dataset(args, tokenizer) model.config.pad_token_id = tokenizer.eos_token_id model.config.use_cache = not args.gradient_checkpointing peft_config = LoraConfig( r=args.lora_r, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout, inference_mode=False, task_type=TaskType.SEQ_CLS, ) trainer = RewardTrainer( model=model, args=TrainingArguments( output_dir=args.output_dir, num_train_epochs=args.num_train_epochs, per_device_train_batch_size=args.per_device_train_batch_size, per_device_eval_batch_size=args.per_device_eval_batch_size, gradient_accumulation_steps=args.gradient_accumulation_steps, gradient_checkpointing=args.gradient_checkpointing, fp16=args.fp16, bf16=args.bf16, learning_rate=args.learning_rate, weight_decay=args.weight_decay, lr_scheduler_type=args.lr_scheduler_type, warmup_ratio=args.warmup_ratio, label_names=[], evaluation_strategy="steps", eval_steps=args.eval_steps, logging_steps=args.logging_steps, save_steps=args.save_steps, report_to="wandb", ), tokenizer=tokenizer, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_accuracy, peft_config=peft_config, ) print_trainable_parameters(trainer.model) print("Start training") trainer.train() print("Save last checkpoint of the model") trainer.model.save_pretrained(args.output_dir) if __name__ == "__main__": args = get_args() set_seed(args.seed) os.makedirs(args.output_dir, exist_ok=True) run_training(args) ``` I tracked this down a bit. The tensors that are failing are the `labels` that are returned in the `def prediction_step` of `reward_trainer.py`. Setting `labels = None` gets around the issue, but isn't really a solution. ```python def prediction_step( self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: inputs = self._prepare_inputs(inputs) if ignore_keys is None: if hasattr(self.model, "config"): ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", []) else: ignore_keys = [] with torch.no_grad(): loss, logits_dict = self.compute_loss(model, inputs, return_outputs=True) if prediction_loss_only: return (loss, None, None) loss = loss.detach() logits = tuple(v for k, v in logits_dict.items() if k not in ignore_keys) logits = nested_detach(logits) # Stack accepted against rejected, mean over logits # and softmax to get preferences between accepted and rejected to sum to 1 logits = torch.stack(logits).mean(dim=2).softmax(dim=0).T # PROBLEM IS HERE # labels = torch.zeros(logits.shape[0]) labels = None return loss, logits, labels ```
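A minimal sketch of the workaround hinted at above: if the `labels` tensor returned by `prediction_step` is allocated on the logits' device instead of the CPU, the later `dist.all_gather` call only ever sees CUDA tensors. This is an illustrative patch of the quoted method's last lines, not the fix that eventually shipped.
```python
# Sketch only: the final lines of the prediction_step shown above, with the labels
# created on the same device as the gathered logits (assumption, not the merged fix).
logits = torch.stack(logits).mean(dim=2).softmax(dim=0).T
labels = torch.zeros(logits.shape[0], device=logits.device)
return loss, logits, labels
```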
2023-07-12T18:47:47
huggingface/trl
528
huggingface__trl-528
[ "525" ]
84393f3b94b452adb71d28f42d52c10f0023a834
diff --git a/trl/trainer/dpo_trainer.py b/trl/trainer/dpo_trainer.py --- a/trl/trainer/dpo_trainer.py +++ b/trl/trainer/dpo_trainer.py @@ -204,7 +204,7 @@ def concatenated_inputs(self, batch: Dict[str, Union[List, torch.LongTensor]]) - pad_to_length(batch[k], max_length, pad_value=pad_value), ), dim=0, - ) + ).to(self.accelerator.device) return concatenated_batch def dpo_loss(
DPO evaluation error---tensors on two devices Hi! Thanks for the awesome codebase. I ran the DPO example `trl/examples/dpo.py` but encountered an error at the evaluation step: `Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!` Here is a [colab notebook](https://colab.research.google.com/drive/11AVym7U3gkTn_qTfrnSDtA_AO5_zJkkd?usp=sharing) that shows this problem. To expose the problem faster, I set `training_args.eval_steps = 1`. To solve it, a hotfix can be adding `.to(self.accelerator.device)` at a few places in [concatenated_forward](https://github.com/lvwerra/trl/blob/main/trl/trainer/dpo_trainer.py#L288-L296): ```python all_logits = model( concatenated_batch["concatenated_input_ids"].to(self.accelerator.device), attention_mask=concatenated_batch["concatenated_attention_mask"].to(self.accelerator.device), ).logits.to(torch.float32) all_logps = self._get_batch_logps( all_logits, concatenated_batch["concatenated_labels"].to(self.accelerator.device), average_log_prob=False, ) ``` However, I am not sure why the trainer does not handle the device change automatically. If this hotfix is fine, I can submit a pull request. Otherwise, I'm also happy to learn how to address this problem more generically. Tianlin
Hi @liutianlin0121 Thanks very much for the clean reproducer, this definitely sounds like the right fix. Would you mind contributing and opening a PR so that the community can benefit from the fix? 🙏 Thanks very much! Also cc @kashif
2023-07-18T10:25:11
huggingface/trl
540
huggingface__trl-540
[ "537" ]
0e8d9f8504909fcc0571545b9f3777b285084e59
diff --git a/examples/dpo.py b/examples/dpo.py --- a/examples/dpo.py +++ b/examples/dpo.py @@ -1,10 +1,8 @@ # 0. imports -from collections import defaultdict from dataclasses import dataclass, field -from typing import Optional +from typing import Dict, Optional import torch -import tqdm from datasets import Dataset, load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, TrainingArguments @@ -66,8 +64,8 @@ def get_hh(split: str, sanity_check: bool = False, silent: bool = False, cache_d The dataset is converted to a dictionary with the following structure: { 'prompt': List[str], - 'responses': List[List[str]], - 'pairs': List[Tuple[int, int]] + 'chosen': List[str], + 'rejected': List[str], } Prompts should be structured as follows: @@ -78,30 +76,15 @@ def get_hh(split: str, sanity_check: bool = False, silent: bool = False, cache_d if sanity_check: dataset = dataset.select(range(min(len(dataset), 1000))) - def split_prompt_and_responses(ex): - prompt = extract_anthropic_prompt(ex["chosen"]) - chosen_response = ex["chosen"][len(prompt) :] - rejected_response = ex["rejected"][len(prompt) :] - return prompt, chosen_response, rejected_response - - data = defaultdict(lambda: defaultdict(list)) - for row in tqdm.tqdm(dataset, desc="Processing HH", disable=silent): - prompt, chosen, rejected = split_prompt_and_responses(row) - responses = [chosen, rejected] - n_responses = len(data[prompt]["responses"]) - data[prompt]["pairs"].append((n_responses, n_responses + 1)) - data[prompt]["responses"].extend(responses) - data[prompt]["sft_target"] = chosen - - def gen(): - for prompt, values in data.items(): - yield { - "prompt": prompt, - "responses": values["responses"], - "pairs": values["pairs"], - } - - return Dataset.from_generator(gen) + def split_prompt_and_responses(sample) -> Dict[str, str]: + prompt = extract_anthropic_prompt(sample["chosen"]) + return { + "prompt": prompt, + "chosen": sample["chosen"][len(prompt) :], + "rejected": sample["rejected"][len(prompt) :], + } + + return dataset.map(split_prompt_and_responses) if __name__ == "__main__": diff --git a/trl/trainer/utils.py b/trl/trainer/utils.py --- a/trl/trainer/utils.py +++ b/trl/trainer/utils.py @@ -326,23 +326,18 @@ def collate(self, batch): return padded_batch def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: - batch = [] + tokenized_batch = [] + for feature in features: prompt = feature["prompt"] - responses = feature["responses"] - pairs = feature["pairs"] - - done = False - for p in pairs: - if done: - break - - batch_element = self.tokenize_batch_element(prompt, responses[p[0]], responses[p[1]]) - batch.append(batch_element) + chosen = feature["chosen"] + rejected = feature["rejected"] - done = len(batch) == self.batch_size + batch_element = self.tokenize_batch_element(prompt, chosen, rejected) + tokenized_batch.append(batch_element) - return self.collate(batch) + # return collated batch + return self.collate(tokenized_batch) class ConstantLengthDataset(IterableDataset):
diff --git a/tests/test_dpo_trainer.py b/tests/test_dpo_trainer.py --- a/tests/test_dpo_trainer.py +++ b/tests/test_dpo_trainer.py @@ -48,19 +48,28 @@ def test_dpo_trainer(self): "hello", "how are you", "What is your name?", + "What is your name?", + "Which is the best programming language?", + "Which is the best programming language?", "Which is the best programming language?", ], - "responses": [ - ["hi nice to meet you", "leave me alone"], - ["I am not fine", "I am fine"], - ["My name is Mary", "Whats it to you?", "I dont have a name"], - ["Python", "Javascript", "C++", "Java"], + "chosen": [ + "hi nice to meet you", + "I am fine", + "My name is Mary", + "My name is Mary", + "Python", + "Python", + "Python", ], - "pairs": [ - [(0, 1)], - [(1, 0)], - [(0, 2), (0, 1)], - [(0, 1), (0, 2), (0, 3)], + "rejected": [ + "leave me alone", + "I am not fine", + "Whats it to you?", + "I dont have a name", + "Javascript", + "C++", + "Java", ], } # fmt: on
DPOTrainer ignores training & evaluation data if there are more than 2 responses Hello! First of all, thank you @younesbelkada and @lvwerra for maintaining this work, and thank you @kashif for spearheading the valuable DPO implementation. ## Bug overview * The `DPODataCollatorWithPadding` processes `batch_size` samples at once, yielding `batch_size` chosen-rejected pairs. However, `batch_size` samples may have much more than `batch_size` pairs. ## Details The issue originates in the `DPODataCollatorWithPadding.__call__` method: https://github.com/lvwerra/trl/blob/7f297b38c665e6d2357dfd5ba552ef685afe1302/trl/trainer/utils.py#L328-L345 The `break` will pre-emptively stop processing of all pairs in `features` to ensure that the output has a length of `batch_size`. Because of this, any training or evaluation data with more than 2 responses will result in data not being used. ## How to reproduce <details><summary>See reproduction steps</summary> Replace `DPODataCollatorWithPadding.__call__` with this variant, which tracks how many samples are used and how many are ignored: ```python def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: batch = [] done = False for feature in features: prompt = feature["prompt"] responses = feature["responses"] pairs = feature["pairs"] for p in pairs: if not done: batch_element = self.tokenize_batch_element(prompt, responses[p[0]], responses[p[1]]) batch.append(batch_element) done = len(batch) == self.batch_size self.included += 1 else: self.ignored += 1 print(f"{self.ignored / (self.ignored + self.included):%} of training pairs are ignored.") return self.collate(batch) ``` and add ```python included: int = 0 ignored: int = 0 ``` to the list of dataclass fields of the `DPODataCollatorWithPadding`. Then, run this script, which uses a dummy dataset with many responses: ```python from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments from trl import DPOTrainer from datasets import Dataset model = AutoModelForCausalLM.from_pretrained("gpt2") model_ref = AutoModelForCausalLM.from_pretrained("gpt2") tokenizer = AutoTokenizer.from_pretrained("gpt2") tokenizer.pad_token = tokenizer.eos_token train_dataset_dict = { "prompt": [ "trl", "is", "pretty", "great", "but", "it", "has", "a", "small", "bug", ], "responses": [ [ "good", "bad", "worse", "horrible", "not good", "poor", "awful", "substandard", "unsatisfactory", ], ] * 10, "pairs": [ [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8)], ] * 10, } train_dataset = Dataset.from_dict(train_dataset_dict) training_args = TrainingArguments( per_device_train_batch_size=4, remove_unused_columns=False, output_dir="./tmp", report_to="none", num_train_epochs=10, ) dpo_trainer = DPOTrainer( model, model_ref, args=training_args, beta=0.1, train_dataset=train_dataset, tokenizer=tokenizer, ) dpo_trainer.train() ``` Towards the end of training, you'll see that `85.00%` of all pairs have been ignored in this example. </details> ## Proposed fixes Before I start working on it, I'd like to discuss my proposed changes: ### The fix Spread out the training and evaluation datasets into a Dataset of `prompt`, `chosen`, `rejected` columns right in the `DPOTrainer.__init__`. This requires some small changes throughout this work, but will work conveniently otherwise: each dataset element will be one training sample. It also has a drawback: prompts and responses will need to be tokenized multiple times, unless we immediately store the `prompt`-`chosen`-`rejected` dataset in a tokenized way. 
### Alternative input structures #### `prompt`, `chosen`, `rejected` Beyond that, I can also implement a more drastic change: require the user to provide a Dataset with `prompt`, `chosen`, `rejected` columns. #### `prompt`, `responses`, `chosen` | `pairs` A less drastic change is to allow users to provide a `chosen` column like: ``` "chosen": [0, 1, 0, 0] ``` where the `chosen` is simply the index of the best response, instead of `pairs` like: ```python "pairs": [ [(0, 1)], [(1, 0)], [(0, 2), (0, 1)], [(0, 1), (0, 2), (0, 3)], ] ``` Internally, we can use this `chosen` to create the same pairs. This is simpler for most users, but is less powerful, as the `pairs` approach allows for a ranking (e.g. also say that although response 1 is best, response 2 is better than response 3). I propose that the input dataset should *either* have a `pairs` or `chosen` column. ### What now? I would love some feedback on your thoughts on: * Is the proposed fix (internal `prompt`, `chosen`, `rejected` columns) a good idea? * Should we change the input structures to `prompt`, `chosen`, `rejected`, `prompt`, `responses`, `chosen` | `pairs`, or keep it as-is? --- - Tom Aarsen
thanks @tomaarsen for the report. I know of the batch issue but didn't realize it would be so bad for this use case... let me review your kind proposal and brainstorm. in the original code, the batch collation occurs on the dataset preparation side... Hi @tomaarsen! I'm new to the repository and I'm wondering if you could help me understand the issue you identified. I thought the current datasets only have 2 responses per prompt? (the chosen response and the rejected response). That is, I thought len(pairs) = len(responses) = 2 for all batches. In that case, the code should work fine, or did I misunderstand anything? Thanks! @liutianlin0121 no the dataset can potentially have more than one preference per prompt and in the collator, i am looping over each prompt and when the result has the required batch size i return from that prompt/preference example and move onto the next, meaning that for an example where there are many pairs (say more than the training batch size), currently the collator is just not using them... > Hi @tomaarsen! I'm new to the repository and I'm wondering if you could help me understand the issue you identified. I thought the current datasets only have 2 responses per prompt? (the chosen response and the rejected response). That is, I thought len(pairs) = len(responses) = 2 for all batches. In that case, the code should work fine, or did I misunderstand anything? > > Thanks! @kashif's response is correct. Here's a bit more information that should clear it right up: https://huggingface.co/docs/trl/main/en/dpo_trainer#expected-dataset-format - Tom Aarsen @tomaarsen @liutianlin0121 at the moment I am tending towards setting the default data format for the DPO to be "prompt", "chosen", and "rejected" keys and let the individual dataset helpers create these dicts. Then I believe everything should work with the DPO collator... let me send you a PR for review? It is your first suggestion but I believe the change is only in the docs and in the individual dataset helper, and not in the DPOTrainer... let me check Thanks both! 👍
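To make the agreed-upon format concrete, here is a small sketch of a dataset in the flat `prompt` / `chosen` / `rejected` layout that the discussion (and the test patch above) converges on; the example rows are made up.
```python
from datasets import Dataset

# Hypothetical toy preference data: exactly one chosen/rejected pair per row.
train_dataset = Dataset.from_dict(
    {
        "prompt": ["What is your name?", "Which is the best programming language?"],
        "chosen": ["My name is Mary", "Python"],
        "rejected": ["I dont have a name", "Java"],
    }
)
```
With this layout each row is one preference pair, so nothing has to be expanded or dropped at collation time.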
2023-07-19T10:47:42
huggingface/trl
555
huggingface__trl-555
[ "415" ]
170d58ffcede84b3bc822294317fc2bb6df85865
diff --git a/examples/research_projects/stack_llama/scripts/rl_training.py b/examples/research_projects/stack_llama/scripts/rl_training.py --- a/examples/research_projects/stack_llama/scripts/rl_training.py +++ b/examples/research_projects/stack_llama/scripts/rl_training.py @@ -92,6 +92,7 @@ class ScriptArguments: train_dataset = load_dataset("lvwerra/stack-exchange-paired", data_dir="data/rl", split="train") train_dataset = train_dataset.select(range(100000)) + # We then define the arguments to pass to the sentiment analysis pipeline. # We set `return_all_scores` to True to get the sentiment score for each token. sent_kwargs = { @@ -207,9 +208,9 @@ def collator(data): optimizer=optimizer, ) -# We then build the sentiment analysis pipeline, passing the model name and the -# sentiment analysis pipeline arguments. Let's also make sure to set the device -# to the same device as the PPOTrainer. +# We then build the sentiment analysis pipeline using our reward model, passing the +# model name and the sentiment analysis pipeline arguments. Let's also make sure to +# set the device to the same device as the PPOTrainer. device = ppo_trainer.accelerator.device if ppo_trainer.accelerator.num_processes == 1: device = 0 if torch.cuda.is_available() else "cpu" # to avoid a ` pipeline` bug @@ -251,7 +252,7 @@ def collator(data): ) batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True) - # Compute sentiment score + # Compute reward score (using the sentiment analysis pipeline) texts = [q + r for q, r in zip(batch["query"], batch["response"])] pipe_outputs = sentiment_pipe(texts, **sent_kwargs) rewards = [torch.tensor(output[0]["score"] - script_args.reward_baseline) for output in pipe_outputs]
StackLLAMA example uses sentiment model as reward model The script to train the StackLLAMA model uses a sentiment pipeline as the reward model, while it should use the model that was trained before. https://github.com/lvwerra/trl/blob/main/examples/stack_llama/scripts/rl_training.py#L216
The "sentiment" pipeline is not training but an inference pipeline that outputs the scalar reward. It does use the reward model trained from the previous step. Understood! Does it help if I add a comment to explain? Feel free to add a comment to clarify. Closing the issue.
2023-07-22T12:37:59
huggingface/trl
566
huggingface__trl-566
[ "558" ]
cdde7f71d75151982ff8094577667a1d21b1495a
diff --git a/examples/scripts/reward_trainer.py b/examples/scripts/reward_trainer.py --- a/examples/scripts/reward_trainer.py +++ b/examples/scripts/reward_trainer.py @@ -43,8 +43,13 @@ class ScriptArguments: dataset_name: Optional[str] = field(default="Anthropic/hh-rlhf", metadata={"help": "the model name"}) dataset_text_field: Optional[str] = field(default="text", metadata={"help": "the text field of the dataset"}) log_with: Optional[str] = field(default=None, metadata={"help": "use 'wandb' to log with wandb"}) + logging_steps: Optional[int] = field(default=500, metadata={"help": "the number of update steps between two logs"}) + eval_split: Optional[str] = field( + default="none", metadata={"help": "the dataset split to evaluate on; default to 'none' (no evaluation)"} + ) learning_rate: Optional[float] = field(default=1.41e-5, metadata={"help": "the learning rate"}) batch_size: Optional[int] = field(default=64, metadata={"help": "the batch size"}) + num_train_epochs: Optional[int] = field(default=1, metadata={"help": "the number of training epochs"}) seq_length: Optional[int] = field(default=512, metadata={"help": "Input sequence length"}) gradient_accumulation_steps: Optional[int] = field( default=16, metadata={"help": "the number of gradient accumulation steps"} @@ -77,11 +82,14 @@ class ScriptArguments: quantization_config=quantization_config, device_map=device_map, trust_remote_code=script_args.trust_remote_code, + num_labels=1, ) # Step 2: Load the dataset and pre-process it tokenizer = AutoTokenizer.from_pretrained(script_args.model_name) -dataset = load_dataset(script_args.dataset_name, split="train") +train_dataset = load_dataset(script_args.dataset_name, split="train") + + # Turn the dataset into pairs of post + summaries, where text_j is the preferred question + answer and text_k is the other. # Then tokenize the dataset. 
# Adapt this section to your needs for custom datasets @@ -107,8 +115,7 @@ def preprocess_function(examples): # preprocess the dataset and filter out QAs that are longer than script_args.max_length -original_columns = dataset.column_names -train_dataset = dataset.map( +train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=4, @@ -118,13 +125,34 @@ def preprocess_function(examples): and len(x["input_ids_rejected"]) <= script_args.seq_length ) +if script_args.eval_split == "none": + eval_dataset = None +else: + eval_dataset = load_dataset(script_args.dataset_name, split=script_args.eval_split) + + eval_dataset = eval_dataset.map( + preprocess_function, + batched=True, + num_proc=4, + ) + eval_dataset = eval_dataset.filter( + lambda x: len(x["input_ids_chosen"]) <= script_args.seq_length + and len(x["input_ids_rejected"]) <= script_args.seq_length + ) + # Step 3: Define the training arguments training_args = TrainingArguments( output_dir=script_args.output_dir, per_device_train_batch_size=script_args.batch_size, + num_train_epochs=script_args.num_train_epochs, gradient_accumulation_steps=script_args.gradient_accumulation_steps, learning_rate=script_args.learning_rate, + report_to="wandb" if script_args.log_with == "wandb" else "tensorboard", + remove_unused_columns=False, + optim="adamw_torch", + logging_steps=script_args.logging_steps, + evaluation_strategy="steps" if script_args.eval_split != "none" else "no", ) # Step 4: Define the LoraConfig @@ -139,6 +167,7 @@ def preprocess_function(examples): tokenizer=tokenizer, args=training_args, train_dataset=train_dataset, + eval_dataset=eval_dataset, peft_config=peft_config, max_length=script_args.seq_length, )
Potential bug of model output dim in reward trainer example Hi! I've been exploring the `examples/scripts/reward_trainer.py` and believe I've found a potential bug that I'd like to address. ## Potential bug. To my understanding, the reward model should produce a scalar output. However, when we load the pre-trained model in the [example script](https://github.com/lvwerra/trl/blob/main/examples/scripts/reward_trainer.py#L75-L80), the model will be a binary classifier by default, with a 2-dim logit output: ```python import torch inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") model = model.cpu() with torch.no_grad(): out = model(**inputs) print(out.logits.shape) # torch.Size([1, 2]) ``` During training, this issue was hidden by the [the loss computation part](https://github.com/lvwerra/trl/blob/main/trl/trainer/reward_trainer.py#L169-L176) of the reward trainer: ```python rewards_chosen = model( input_ids=inputs["input_ids_chosen"], attention_mask=inputs["attention_mask_chosen"], )[0] rewards_rejected = model( input_ids=inputs["input_ids_rejected"], attention_mask=inputs["attention_mask_rejected"], )[0] loss = -torch.nn.functional.logsigmoid(rewards_chosen - rewards_rejected).mean() ``` There, the `rewards_rejected` and `rewards_chosen` are dim [sample, 2] matrices, which I think should be dim [sample, 1] instead. The `.mean()` operator hides the issue as it returns a scalar value. Here is a [colab](https://colab.research.google.com/drive/1szzGC8H1DQ7329_idVnnOnPIPE6D7J4j?usp=sharing) that shows the dimensionality problem. ## Proposed fix To resolve this, in the model training example, we can use: ```python model = AutoModelForSequenceClassification.from_pretrained( script_args.model_name, quantization_config=quantization_config, device_map=device_map, trust_remote_code=script_args.trust_remote_code, num_labels=1 # <---- make sure that the model output is a scalar ) ``` I'll submit a pull request if it is OK. In addition, while exploring `examples/scripts/reward_trainer.py`, I noticed a few other places that can be improved. For example, there is currently no validation steps that show the genearlization performance of the reward model; the `log_with` option in `ScriptArguments` is currently not being used anywhere and therefore has no effect. I'm happy to include the validation and fix the logging glitch in my pull request as well. Tianlin
Hi @liutianlin0121 Thanks for the issue, I went through the shared materials and details. I have also checked with @edbeeching and we think that you are right, the proposed fix seems to be the correct one. Would you be happy opening a PR for the fix? Thanks! Yes, I'll submit a PR soon :-)
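As a quick sanity check of the agreed fix, a sketch (using a small placeholder model rather than the script's `script_args.model_name`) showing that `num_labels=1` yields the scalar reward head:
```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "gpt2"  # placeholder; the example script uses script_args.model_name
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=1)

with torch.no_grad():
    out = model(**tokenizer("Hello, my dog is cute", return_tensors="pt"))
print(out.logits.shape)  # torch.Size([1, 1]) -> one scalar reward per sequence
```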
2023-07-24T15:29:24
huggingface/trl
570
huggingface__trl-570
[ "569", "568" ]
d78d91788017a34ba2536fc1dc5f6461e3533089
diff --git a/trl/trainer/dpo_trainer.py b/trl/trainer/dpo_trainer.py --- a/trl/trainer/dpo_trainer.py +++ b/trl/trainer/dpo_trainer.py @@ -13,7 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. import warnings -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from collections import defaultdict +from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union import torch import torch.nn as nn @@ -155,6 +156,8 @@ def __init__( self.beta = beta self.ref_model = ref_model + self._stored_metrics = defaultdict(lambda: defaultdict(list)) + super().__init__( model, args, @@ -304,7 +307,7 @@ def get_batch_metrics( self, model, batch: Dict[str, Union[List, torch.LongTensor]], - train_test: str = "train", + train_eval: Literal["train", "eval"] = "train", ): """Compute the DPO loss and other metrics for the given batch of inputs for train or test.""" metrics = {} @@ -331,17 +334,15 @@ def get_batch_metrics( ) reward_accuracies = (chosen_rewards > rejected_rewards).float() - metrics[f"rewards_{train_test}/chosen"] = chosen_rewards.cpu().numpy().mean() - metrics[f"rewards_{train_test}/rejected"] = rejected_rewards.cpu().numpy().mean() - metrics[f"rewards_{train_test}/accuracies"] = reward_accuracies.cpu().numpy().mean() - metrics[f"rewards_{train_test}/margins"] = (chosen_rewards - rejected_rewards).cpu().numpy().mean() - metrics[f"logps_{train_test}/rejected"] = policy_rejected_logps.detach().cpu().numpy().mean() - metrics[f"logps_{train_test}/chosen"] = policy_chosen_logps.detach().cpu().numpy().mean() - - metrics[f"logits_{train_test}/rejected"] = policy_rejected_logits.detach().cpu().numpy().mean() - metrics[f"logits_{train_test}/chosen"] = policy_chosen_logits.detach().cpu().numpy().mean() - - metrics[f"loss/{train_test}"] = losses.detach().cpu().numpy().mean() + prefix = "eval_" if train_eval == "eval" else "" + metrics[f"{prefix}rewards/chosen"] = chosen_rewards.cpu().numpy().mean() + metrics[f"{prefix}rewards/rejected"] = rejected_rewards.cpu().numpy().mean() + metrics[f"{prefix}rewards/accuracies"] = reward_accuracies.cpu().numpy().mean() + metrics[f"{prefix}rewards/margins"] = (chosen_rewards - rejected_rewards).cpu().numpy().mean() + metrics[f"{prefix}logps/rejected"] = policy_rejected_logps.detach().cpu().numpy().mean() + metrics[f"{prefix}logps/chosen"] = policy_chosen_logps.detach().cpu().numpy().mean() + metrics[f"{prefix}logits/rejected"] = policy_rejected_logits.detach().cpu().numpy().mean() + metrics[f"{prefix}logits/chosen"] = policy_chosen_logits.detach().cpu().numpy().mean() return losses.mean(), metrics @@ -356,11 +357,11 @@ def compute_loss( "compute_loss is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than " "DPODataCollatorWithPadding - you might see unexpected behavior. 
Alternatively, you can implement your own prediction_step method if you are using a custom data collator" ) - loss, metrics = self.get_batch_metrics(model, inputs, train_test="train") + loss, metrics = self.get_batch_metrics(model, inputs, train_eval="train") # force log the metrics if self.accelerator.is_main_process: - self.log_metrics("train", metrics) + self.store_metrics(metrics, train_eval="train") if return_outputs: return (loss, metrics) @@ -412,11 +413,11 @@ def prediction_step( ignore_keys = [] with torch.no_grad(): - loss, metrics = self.get_batch_metrics(model, inputs, train_test="test") + loss, metrics = self.get_batch_metrics(model, inputs, train_eval="eval") # force log the metrics if self.accelerator.is_main_process: - self.log_metrics("test", metrics) + self.store_metrics(metrics, train_eval="eval") if prediction_loss_only: return (loss.detach(), None, None) @@ -431,3 +432,23 @@ def prediction_step( labels = torch.zeros(logits.shape[0]) return (loss.detach(), logits, labels) + + def store_metrics(self, metrics: Dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None: + for key, value in metrics.items(): + self._stored_metrics[train_eval][key].append(value) + + def log(self, logs: Dict[str, float]) -> None: + """ + Log `logs` on the various objects watching training, including stored metrics. + + Args: + logs (`Dict[str, float]`): + The values to log. + """ + # logs either has 'loss' or 'eval_loss' + train_eval = "train" if "loss" in logs else "eval" + # Add averaged stored metrics to logs + for key, metrics in self._stored_metrics[train_eval].items(): + logs[key] = torch.tensor(metrics).mean().item() + del self._stored_metrics[train_eval] + return super().log(logs)
DPOTrainer logging too frequent Hello! Related to #568, discussed in https://github.com/lvwerra/trl/pull/540#issuecomment-1642307673 This tracks the issue that logging is: 1. too frequent and spammy, so it can't easily be tracked. 2. not propagated to downstream loggers like W&B or Tensorboard. I'll be working on resolving these issues. - Tom Aarsen support wandb log in dpo 1. DPO currently uses `self.log_metrics` to log metrics; please support a friendlier wandb log like PPOTrainer does. 2. DPO needs to load 2 models; can you support a LoRA model, so that only one model is loaded?
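The eventual patch (see the diff above) addresses the first point by buffering the per-step metrics and only flushing their averages when the underlying `Trainer.log` fires, so W&B/TensorBoard receive them at the normal logging cadence. A stripped-down sketch of that buffering pattern:
```python
from collections import defaultdict

import torch


class MetricBuffer:
    """Illustration of the store-then-flush pattern used in the patch above."""

    def __init__(self):
        self._stored = defaultdict(list)

    def store(self, metrics):
        for key, value in metrics.items():
            self._stored[key].append(value)

    def flush(self):
        averaged = {key: torch.tensor(values).mean().item() for key, values in self._stored.items()}
        self._stored.clear()
        return averaged
```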
2023-07-25T12:25:52
huggingface/trl
644
huggingface__trl-644
[ "643" ]
98120d6aeb104e2b9d4e998774dfa0518a9bd0fa
diff --git a/trl/trainer/utils.py b/trl/trainer/utils.py --- a/trl/trainer/utils.py +++ b/trl/trainer/utils.py @@ -106,14 +106,18 @@ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> D response_token_ids_start_idx = idx if response_token_ids_start_idx is None: - raise RuntimeError( - f'Could not find response key {self.response_token_ids} in token IDs {batch["labels"][i]}' + warnings.warn( + f"Could not find response key `{self.response_template}` in the " + f'following instance: {self.tokenizer.decode(batch["input_ids"][i])} ' + f"This instance will be ignored in loss calculation. " + f"Note, if this happens often, consider increasing the `max_seq_length`." ) + batch["labels"][i, :] = self.ignore_index + else: + response_token_ids_end_idx = response_token_ids_start_idx + len(self.response_token_ids) - response_token_ids_end_idx = response_token_ids_start_idx + len(self.response_token_ids) - - # Make pytorch loss function ignore all tokens up through the end of the response key - batch["labels"][i, :response_token_ids_end_idx] = self.ignore_index + # Make pytorch loss function ignore all tokens up through the end of the response key + batch["labels"][i, :response_token_ids_end_idx] = self.ignore_index else: for i in range(len(examples)): @@ -128,10 +132,14 @@ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> D ): response_token_ids_idxs.append(assistant_idx + len(self.response_token_ids)) - if len(self.response_token_ids) == 0: - raise RuntimeError( - f'Could not find response key {self.response_token_ids} in token IDs {batch["labels"][i]}' + if len(response_token_ids_idxs) == 0: + warnings.warn( + f"Could not find response key `{self.response_template}` in the " + f'following instance: {self.tokenizer.decode(batch["input_ids"][i])} ' + f"This instance will be ignored in loss calculation. " + f"Note, if this happens often, consider increasing the `max_seq_length`." ) + batch["labels"][i, :] = self.ignore_index human_token_ids = self.tokenizer.encode(self.instruction_template, add_special_tokens=False) for human_idx in np.where(batch["labels"][i] == human_token_ids[0])[0]: @@ -140,9 +148,13 @@ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> D human_token_ids_idxs.append(human_idx) if len(human_token_ids_idxs) == 0: - raise RuntimeError( - f'Could not find response key {human_token_ids} in token IDs {batch["labels"][i]}' + warnings.warn( + f"Could not find instruction key `{self.instruction_template}` in the " + f'following instance: {self.tokenizer.decode(batch["input_ids"][i])} ' + f"This instance will be ignored in loss calculation. " + f"Note, if this happens often, consider increasing the `max_seq_length`." ) + batch["labels"][i, :] = self.ignore_index for idx, (start, end) in enumerate(zip(human_token_ids_idxs, response_token_ids_idxs)): # Make pytorch loss function ignore all non response tokens
diff --git a/tests/test_data_collator_completion_only.py b/tests/test_data_collator_completion_only.py --- a/tests/test_data_collator_completion_only.py +++ b/tests/test_data_collator_completion_only.py @@ -13,6 +13,7 @@ # limitations under the License. import unittest +import torch from transformers import AutoTokenizer from trl import DataCollatorForCompletionOnlyLM @@ -20,16 +21,16 @@ class DataCollatorForCompletionOnlyLMTester(unittest.TestCase): def test_data_collator_finds_response_template_llama2_tokenizer(self): - self.tokenizer = AutoTokenizer.from_pretrained( - "upstage/Llama-2-70b-instruct-v2" - ) # Not using official one to avoid logging in + self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab") self.instruction = """### System: You are a helpful assistant. ### User: How much is 2+2? ### Assistant: 2+2 equals 4""" self.response_template = "\n### Assistant:" - # [29871, 13, 2277, 29937, 4007, 22137, 29901] -> [2277, 29937, 4007, 22137, 29901] + + # GPT2Tokenizer: [198, 21017, 15286, 25] -> [15286, 25] + # Llama2Tokenizer: [29871, 13, 2277, 29937, 4007, 22137, 29901] -> [2277, 29937, 4007, 22137, 29901] self.tokenized_response_w_context = self.tokenizer.encode(self.response_template, add_special_tokens=False)[2:] # Plain check on string @@ -40,3 +41,26 @@ def test_data_collator_finds_response_template_llama2_tokenizer(self): # Pass already tokenized (w context) and truncated response_template so token_ids are like in the instruction + response self.collator = DataCollatorForCompletionOnlyLM(self.tokenized_response_w_context, tokenizer=self.tokenizer) self.collator.torch_call([self.tokenized_instruction]) + + def test_data_collator_handling_of_long_sequences(self): + self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab") + self.instruction = """### System: You are a helpful assistant. + +### User: How much is 2+2? I'm asking because I'm not sure. And I'm not sure because I'm not good at math. +""" + self.response_template = "\n### Assistant:" + # check DataCollatorForCompletionOnlyLM using response template only + self.tokenized_instruction = self.tokenizer.encode(self.instruction, add_special_tokens=False) + self.collator = DataCollatorForCompletionOnlyLM(self.response_template, tokenizer=self.tokenizer) + encoded_instance = self.collator.torch_call([self.tokenized_instruction]) + result = torch.all(encoded_instance["labels"] == -100) + self.assertTrue(result, "Not all values in the tensor are -100.") + + # check DataCollatorForCompletionOnlyLM using response template and instruction template + self.instruction_template = "\n### User:" + self.collator = DataCollatorForCompletionOnlyLM( + self.response_template, self.instruction_template, tokenizer=self.tokenizer + ) + encoded_instance = self.collator.torch_call([self.tokenized_instruction]) + result = torch.all(encoded_instance["labels"] == -100) + self.assertTrue(result, "Not all values in the tensor are -100.")
Exception handling in DataCollatorForCompletionOnlyLM Hey, While testing out the new `DataCollatorForCompletionOnlyLM` on custom data, I had some issues relating to the exception handling of batch instances in which we fail to find the `response_template` or `instruction_template` (https://github.com/huggingface/trl/blob/3b2c820db68aecca36c2ff5d152de59deec8fe5c/trl/trainer/utils.py#L142C3-L142C3). It turned out that I had some exceptionally long responses in my training data which didn't actually contain both a valid `response_template` and `instruction_template`. **Possible fix:** My current work around was to replace the `RuntimeError` with a more informative `warning` and simply set the labels for a problematic instance to `self.ignore_index`, so that it will be ignored for the loss calculation but training will still run successfully (provided this doesn't happen for every instance 😉 ), e.g.: ```python if len(human_token_ids_idxs) == 0: warnings.warn( f'Could not find instruction key `{self.instruction_template}` in the following instance {self.tokenizer.decode(batch["input_ids"][i])} ' \ f'This instance will be ignored in loss calculation.' ) batch["labels"][i, :] = self.ignore_index ``` Would something like this be more suitable than the current exception handling?
Thanks for the investigation - that looks like a good fix to me! Would you like to open a PR?
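For context, a small usage sketch of the collator on an instance whose response template was truncated away; after the change discussed here, the whole instance is simply masked with the ignore index instead of raising (the tokenizer name is a stand-in):
```python
from transformers import AutoTokenizer
from trl import DataCollatorForCompletionOnlyLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # stand-in tokenizer
collator = DataCollatorForCompletionOnlyLM("\n### Assistant:", tokenizer=tokenizer)

# An instance that was truncated before the response template ever appears:
text = "### User: How much is 2+2? I'm asking because I'm not sure.\n"
batch = collator.torch_call([tokenizer.encode(text, add_special_tokens=False)])
print((batch["labels"] == -100).all())  # the whole instance is ignored in the loss
```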
2023-08-14T14:44:50
huggingface/trl
651
huggingface__trl-651
[ "648" ]
98120d6aeb104e2b9d4e998774dfa0518a9bd0fa
diff --git a/trl/trainer/ppo_config.py b/trl/trainer/ppo_config.py --- a/trl/trainer/ppo_config.py +++ b/trl/trainer/ppo_config.py @@ -21,6 +21,8 @@ import numpy as np import requests +from trl.trainer.utils import exact_div + from ..core import flatten_dict @@ -175,6 +177,15 @@ def __post_init__(self): ) self.mini_batch_size = self.forward_batch_size + self.backward_batch_size = self.mini_batch_size * self.gradient_accumulation_steps + exact_div( + self.batch_size, + self.backward_batch_size, + "`batch_size`", + "`mini_batch_size * gradient_accumulation_steps`", + "`batch_size` must be a multiple of `mini_batch_size * gradient_accumulation_steps`", + ) + # check if wandb is installed if self.log_with == "wandb": # raise error if wandb is not installed diff --git a/trl/trainer/utils.py b/trl/trainer/utils.py --- a/trl/trainer/utils.py +++ b/trl/trainer/utils.py @@ -592,3 +592,10 @@ def disable_dropout_in_model(model: torch.nn.Module) -> None: for module in model.modules(): if isinstance(module, torch.nn.Dropout): module.p = 0 + + +def exact_div(a, b, a_str, b_str, custom_error_message=""): + q = a // b + if a != q * b: + raise ValueError(f"{custom_error_message}, {a_str}={a}, {b_str}={b}, inexact division: {a} / {b} = {a / b}") + return q
diff --git a/tests/test_ppo_trainer.py b/tests/test_ppo_trainer.py --- a/tests/test_ppo_trainer.py +++ b/tests/test_ppo_trainer.py @@ -18,6 +18,7 @@ import tempfile import unittest +import pytest import torch from huggingface_hub import HfApi, HfFolder, delete_repo from parameterized import parameterized @@ -1193,3 +1194,7 @@ def test_push_to_hub_if_best_reward(self): # train model _ = ppo_trainer.step([q for q in query_tensor], [r for r in response_tensor], reward) break + + def test_batch_size_check(self): + with pytest.raises(ValueError): + PPOConfig(batch_size=2, mini_batch_size=2, gradient_accumulation_steps=2)
PPOTrainer breaks when gradient_accumulation_steps > 1 PPOTrainer throws the following error when passed argument --gradient_accumulation_steps >=2. ``` $ python trl/examples/scripts/sentiment_tuning.py --gradient_accumulation_steps 2 [2023-08-15 20:35:29,345] [INFO] [real_accelerator.py:133:get_accelerator] Setting ds_accelerator to cuda (auto detect) Using /data/.cache/torch_extensions/py38_cu117 as PyTorch extensions root... Detected CUDA files, patching ldflags Emitting ninja build file /data/.cache/torch_extensions/py38_cu117/cuda_kernel/build.ninja... Building extension module cuda_kernel... Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N) ninja: no work to do. Loading extension module cuda_kernel... 0it [00:00, ?it/s]You're using a GPT2TokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding. /home/ubuntu/envs/mpror/lib/python3.8/site-packages/transformers/pipelines/text_classification.py:104: UserWarning: `return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`. warnings.warn( 0it [00:14, ?it/s] Traceback (most recent call last): File "trl/examples/scripts/sentiment_tuning.py", line 203, in <module> stats = ppo_trainer.step(query_tensors, response_tensors, rewards) File "/usr/lib/python3.8/contextlib.py", line 75, in inner return func(*args, **kwds) File "/home/ubuntu/wdir/cold_reinforce/trl/trl/trainer/ppo_trainer.py", line 739, in step logprobs, logits, vpreds, _ = self.batched_forward_pass( File "/usr/lib/python3.8/contextlib.py", line 75, in inner return func(*args, **kwds) File "/home/ubuntu/wdir/cold_reinforce/trl/trl/trainer/ppo_trainer.py", line 967, in batched_forward_pass torch.cat(all_logprobs), RuntimeError: torch.cat(): expected a non-empty list of Tensors ``` TRL version: 0.5.1.dev0 ``` $ pip show trl Name: trl Version: 0.5.1.dev0 Summary: A Pytorch implementation of Proximal Policy Optimization for transfomer language models. Home-page: https://github.com/huggingface/trl Author: Leandro von Werra Author-email: [email protected] License: Apache 2.0 Location: /home/ubuntu/wdir/cold_reinforce/trl Requires: accelerate, datasets, numpy, torch, transformers Required-by: ``` Related issue here: https://github.com/huggingface/trl/issues/614
On further debugging, it seems like this happens when `self.config.backward_batch_size > batch_size`. This can happen when `mini_batch_size * gradient_accumulation_steps > batch_size`. This would result in a scenario where `mini_batch_start` in [this line](https://github.com/huggingface/trl/blob/98120d6aeb104e2b9d4e998774dfa0518a9bd0fa/trl/trainer/ppo_trainer.py#L721) (the for loop in the snippet below) would be greater than the batch_size, resulting in an empty `mini_batch_inds` slice, which would result in a mini_batch_dict with empty entries, i.e., `mini_batch_dict['queries']` would be an empty tensor.
```
for mini_batch_start in range(0, self.config.backward_batch_size, self.config.mini_batch_size):
    mini_batch_end = mini_batch_start + self.config.mini_batch_size
    mini_batch_inds = backward_batch_inds[mini_batch_start:mini_batch_end]
    mini_batch_dict = {
        "logprobs": batch_dict["logprobs"][mini_batch_inds],
        "values": batch_dict["values"][mini_batch_inds],
        "masks": batch_dict["masks"][mini_batch_inds],
```
@vwxyzjn Seems like this was introduced in the refactoring that was done in #546. Ah, thanks for the catch. In this case, we should add a check guarding against `self.config.backward_batch_size > batch_size`. Gonna prepare a PR tomorrow for this.
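The fix above turns this silent failure into an upfront configuration error: `batch_size` has to be an exact multiple of `mini_batch_size * gradient_accumulation_steps`. A small sketch of the constraint, mirroring the new test:
```python
import pytest

from trl import PPOConfig

# Fine: the backward batch (32 * 4 = 128) divides the batch size exactly.
PPOConfig(batch_size=128, mini_batch_size=32, gradient_accumulation_steps=4)

# Rejected after the fix: 2 is not a multiple of 2 * 2 = 4.
with pytest.raises(ValueError):
    PPOConfig(batch_size=2, mini_batch_size=2, gradient_accumulation_steps=2)
```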
2023-08-16T13:12:43
huggingface/trl
660
huggingface__trl-660
[ "629" ]
029f961b7ced7ad7daf315620effecfdd057ec2b
diff --git a/examples/research_projects/stack_llama_2/scripts/sft_llama2.py b/examples/research_projects/stack_llama_2/scripts/sft_llama2.py --- a/examples/research_projects/stack_llama_2/scripts/sft_llama2.py +++ b/examples/research_projects/stack_llama_2/scripts/sft_llama2.py @@ -35,7 +35,8 @@ class ScriptArguments: gradient_checkpointing: Optional[bool] = field( default=True, metadata={"help": "whether to use gradient checkpointing"} ) - group_by_length: Optional[bool] = field(default=True, metadata={"help": "whether to group by length"}) + group_by_length: Optional[bool] = field(default=False, metadata={"help": "whether to group by length"}) + packing: Optional[bool] = field(default=True, metadata={"help": "whether to use packing for SFTTrainer"}) lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"}) lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"}) @@ -54,6 +55,9 @@ class ScriptArguments: parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] +if script_args.group_by_length and script_args.packing: + raise ValueError("Cannot use both packing and group by length") + def chars_token_ratio(dataset, tokenizer, nb_examples=400): """ @@ -189,7 +193,7 @@ def create_datasets(tokenizer, args): train_dataset=train_dataset, eval_dataset=eval_dataset, peft_config=peft_config, - packing=True, + packing=script_args.packing, max_seq_length=None, tokenizer=tokenizer, args=training_args,
sft_llama2 error Hello! I made no changes to the sample code or the dataset; I simply wanted to run the stacked llama2 example. However, I get this error: UserWarning: The passed formatting_func has more than one argument. Usually that function should have a single argument `example` which corresponds to the dictionary returned by each element of the dataset. Make sure you know what you are doing. in __init__ raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset") ValueError: the `--group_by_length` option is only available for `Dataset`, not `IterableDataset (Sorry, I must omit certain parts of the error message since I am working on an institution's server). Thank you!
Can someone please just follow the example in https://github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama_2/scripts ? (and don't change the code or the dataset) I am sure this error is reproducible. Thank you! Are you using the constant length dataset? That's indeed incompatible with `--group_by_length` as this requires globally sorting the dataset whereas with the constant length dataset the dataset is generated on the fly and thus not globally sortable. @lvwerra Thank you for your quick reply! Honestly, I did not change a single line of code in https://github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama_2/scripts (which is a very new tutorial published 2 days ago). I am using the default "lvwerra/stack-exchange-paired" as the dataset. I checked sft_llama2.py and I see that it is indeed using constant length dataset (https://github.com/huggingface/trl/blob/3b2c820db68aecca36c2ff5d152de59deec8fe5c/examples/research_projects/stack_llama_2/scripts/sft_llama2.py#L118). However, the default flag for --group_by_length is also set to "True" (https://github.com/huggingface/trl/blob/3b2c820db68aecca36c2ff5d152de59deec8fe5c/examples/research_projects/stack_llama_2/scripts/sft_llama2.py#L38C5-L38C34) Does it work if you set `--group_by_length` to `False`? It is training now after I set --group_by_length to False. Hopefully this is how it is supposed to work! Thank you so much @lvwerra . Sorry, I was working on another project recently, so I was not checking my email linked to my GitHub @younesbelkada I think we should catch that combination of args and throw an error before we start training. What do you think? Sounds good, will propose something!
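Because packing streams a constant-length iterable dataset that cannot be globally sorted by length, the two options are mutually exclusive; the patch above makes the script fail fast with an explicit error instead. A sketch of the guard:
```python
# Mirrors the guard added in the patch above: packing and length-grouping conflict,
# since group_by_length needs a map-style Dataset it can sort globally.
if script_args.group_by_length and script_args.packing:
    raise ValueError("Cannot use both packing and group by length")
```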
2023-08-18T10:27:24
huggingface/trl
675
huggingface__trl-675
[ "671" ]
c837fbe5b984ff333bfc870bd2a55a8556bbf647
diff --git a/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py b/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py --- a/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py +++ b/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py @@ -164,6 +164,7 @@ def return_prompt_and_responses(samples) -> Dict[str, str]: # 4. initialize training arguments: training_args = TrainingArguments( per_device_train_batch_size=script_args.per_device_train_batch_size, + per_device_eval_batch_size=script_args.per_device_eval_batch_size, max_steps=script_args.max_steps, logging_steps=script_args.logging_steps, save_steps=script_args.save_steps,
Tutorial script missing eval_batch_size Following this tutorial https://huggingface.co/blog/dpo-trl, I ran into out-of-memory issues with the DPO script. The problem was that the script missed copying `per_device_eval_batch_size` to `TrainingArguments` [here](https://github.com/huggingface/trl/blob/main/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py#L166C12-L166C12). This makes it use the default eval batch size of 8 instead of the 1 set in this script. The addition below fixes the issue.
```
training_args = TrainingArguments(
    per_device_train_batch_size=script_args.per_device_train_batch_size,
--> per_device_eval_batch_size=script_args.per_device_eval_batch_size,
```
Would you mind opening a PR? Happy to do it, but I am getting a permission error. This is my first time using github (though I am a long time git user). I created a personal access token (classic) on github and used that as the password (with my username), but that didn't work. Do I need a specific access token for the trl repo? ``` git push -u origin rahuljha/fix_arg_setting Username for 'https://github.com': rahuljha Password for 'https://[email protected]': remote: Permission to huggingface/trl.git denied to rahuljha. fatal: unable to access 'https://github.com/huggingface/trl.git/': The requested URL returned error: 403 ``` Reading through some more docs, looks like I need to first fork the repo. Will do this and try creating a pull request.
2023-08-22T19:40:32
huggingface/trl
687
huggingface__trl-687
[ "669" ]
d3bbee3ab84d4d5dcb413003bf93541df5b4e54c
diff --git a/trl/trainer/dpo_trainer.py b/trl/trainer/dpo_trainer.py --- a/trl/trainer/dpo_trainer.py +++ b/trl/trainer/dpo_trainer.py @@ -205,7 +205,12 @@ def __init__( "You are using a `peft` version that does not support `disable_adapter`. Please update your `peft` version to the latest version." ) else: - self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True) + if self.is_deepspeed_enabled: + # Read more about the issue in https://github.com/huggingface/trl/pull/687 + self.ref_model = self.accelerator._prepare_deepspeed(self.ref_model) + self.ref_model.eval() + else: + self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True) def concatenated_inputs(self, batch: Dict[str, Union[List, torch.LongTensor]]) -> Dict[str, torch.LongTensor]: """Concatenate the chosen and rejected inputs into a single tensor.
DeepSpeed Zero3 dpo accured embedding weight error i used `Open-Orca/OpenOrca-Platypus2-13B` and dpo error accured dataset is used `Anthropic/hh-rlhf` ``` python3 Traceback (most recent call last): File "/data/workspace/train/train_dpo.py", line 133, in <module> main(model_args=model_args, dataset_args=dataset_args, training_args=training_args) File "/data/workspace/train/train_dpo.py", line 119, in main dpo_trainer.train() File "/data/workspace/.bart_venv/lib/python3.10/site-packages/transformers/trainer.py", line 1539, in train return inner_training_loop( File "/data/workspace/.bart_venv/lib/python3.10/site-packages/transformers/trainer.py", line 1809, in _inner_training_loop tr_loss_step = self.training_step(model, inputs) File "/data/workspace/.bart_venv/lib/python3.10/site-packages/transformers/trainer.py", line 2654, in training_step loss = self.compute_loss(model, inputs) File "/data/workspace/.bart_venv/lib/python3.10/site-packages/trl/trainer/dpo_trainer.py", line 360, in compute_loss loss, metrics = self.get_batch_metrics(model, inputs, train_eval="train") File "/data/workspace/.bart_venv/lib/python3.10/site-packages/trl/trainer/dpo_trainer.py", line 327, in get_batch_metrics ) = self.concatenated_forward(self.ref_model, batch) File "/data/workspace/.bart_venv/lib/python3.10/site-packages/trl/trainer/dpo_trainer.py", line 290, in concatenated_forward Traceback (most recent call last): all_logits = model( File "/data/workspace/.bart_venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "/data/workspace/.bart_venv/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 806, in forward File "/data/workspace/train/train_dpo.py", line 133, in <module> main(model_args=model_args, dataset_args=dataset_args, training_args=training_args) File "/data/workspace/train/train_dpo.py", line 119, in main dpo_trainer.train() File "/data/workspace/.bart_venv/lib/python3.10/site-packages/transformers/trainer.py", line 1539, in train return inner_training_loop( outputs = self.model( File "/data/workspace/.bart_venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl File "/data/workspace/.bart_venv/lib/python3.10/site-packages/transformers/trainer.py", line 1809, in _inner_training_loop tr_loss_step = self.training_step(model, inputs) File "/data/workspace/.bart_venv/lib/python3.10/site-packages/transformers/trainer.py", line 2654, in training_step loss = self.compute_loss(model, inputs) File "/data/workspace/.bart_venv/lib/python3.10/site-packages/trl/trainer/dpo_trainer.py", line 360, in compute_loss loss, metrics = self.get_batch_metrics(model, inputs, train_eval="train") return forward_call(*args, **kwargs) File "/data/workspace/.bart_venv/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 646, in forward File "/data/workspace/.bart_venv/lib/python3.10/site-packages/trl/trainer/dpo_trainer.py", line 327, in get_batch_metrics ) = self.concatenated_forward(self.ref_model, batch) File "/data/workspace/.bart_venv/lib/python3.10/site-packages/trl/trainer/dpo_trainer.py", line 290, in concatenated_forward all_logits = model( inputs_embeds = self.embed_tokens(input_ids) File "/data/workspace/.bart_venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl File "/data/workspace/.bart_venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File 
"/data/workspace/.bart_venv/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 806, in forward outputs = self.model( File "/data/workspace/.bart_venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "/data/workspace/.bart_venv/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py", line 646, in forward inputs_embeds = self.embed_tokens(input_ids) File "/data/workspace/.bart_venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) return forward_call(*args, **kwargs) File "/data/workspace/.bart_venv/lib/python3.10/site-packages/torch/nn/modules/sparse.py", line 162, in forward return F.embedding( File "/data/workspace/.bart_venv/lib/python3.10/site-packages/torch/nn/modules/sparse.py", line 162, in forward return F.embedding( File "/data/workspace/.bart_venv/lib/python3.10/site-packages/torch/nn/functional.py", line 2210, in embedding return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse) File "/data/workspace/.bart_venv/lib/python3.10/site-packages/torch/nn/functional.py", line 2210, in embedding RuntimeError: 'weight' must be 2-D return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse) ``` if i changed model, error accured too. how can i use zero3? ``` json { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "zero_allow_untested_optimizer": true, "zero_force_ds_cpu_optimizer": false, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 5e8, "allgather_partitions": true, "allgather_bucket_size": 1e10, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e10, "stage3_max_reuse_distance": 1e10, "stage3_gather_16bit_weights_on_model_save": true }, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "gradient_accumulation_steps": "auto" } ```
also have the same problem with zero3 cc @vwxyzjn who is currently looking at DS i find that problem is accured by `ref_wav`'s set accelerate initialize.... ref_wav can not prepare for deepspeed. because using just `prepare()` not `prepare_model()` `prepare` is for inference, i think. ref_wav must be freezed but not inference. so that have to on deepspeed. i just modified, that line to `_prepare_deepspeed()`, it work. but, i have little knowledge on deepspeed. so, i don't know that is right solution. plz check and modify it.🤗🤗🤗 I also encountered a similar problem, can you describe your solution in detail
2023-08-24T13:39:43
huggingface/trl
703
huggingface__trl-703
[ "699" ]
d3bbee3ab84d4d5dcb413003bf93541df5b4e54c
diff --git a/trl/trainer/dpo_trainer.py b/trl/trainer/dpo_trainer.py --- a/trl/trainer/dpo_trainer.py +++ b/trl/trainer/dpo_trainer.py @@ -29,7 +29,7 @@ if is_peft_available(): - from peft import get_peft_model, prepare_model_for_int8_training + from peft import PeftModel, get_peft_model, prepare_model_for_int8_training class DPOTrainer(Trainer): @@ -113,7 +113,7 @@ def __init__( model = prepare_model_for_int8_training(model) model = get_peft_model(model, peft_config) - self.is_peft_model = getattr(model, "is_peft_model", False) + self.is_peft_model = is_peft_available() and isinstance(model, PeftModel) if ref_model: self.ref_model = ref_model @@ -197,10 +197,7 @@ def __init__( ) if self.ref_model is None: - if not hasattr( - self.accelerator.unwrap_model(self.model).pretrained_model, - "disable_adapter", - ): + if not hasattr(self.accelerator.unwrap_model(self.model), "disable_adapter"): raise ValueError( "You are using a `peft` version that does not support `disable_adapter`. Please update your `peft` version to the latest version." ) @@ -347,7 +344,7 @@ def get_batch_metrics( ) = self.concatenated_forward(model, batch) with torch.no_grad(): if self.ref_model is None: - with self.accelerator.unwrap_model(self.model).pretrained_model.disable_adapter(): + with self.accelerator.unwrap_model(self.model).disable_adapter(): ( reference_chosen_logps, reference_rejected_logps, @@ -415,7 +412,7 @@ def get_batch_samples(self, model, batch: Dict[str, torch.LongTensor]) -> Tuple[ ) if self.ref_model is None: - with self.accelerator.unwrap_model(self.model).pretrained_model.disable_adapter(): + with self.accelerator.unwrap_model(self.model).disable_adapter(): reference_output = self.model.generate( batch["prompt_input_ids"], attention_mask=batch["prompt_attention_mask"],
Unable to use `ref_model=None` with DPOTrainer Hi, While trying to run DPO with `ref_model=None` ([PR #640](https://github.com/huggingface/trl/pull/640)), I ran into a couple of issues. Following the Stack-Llama-2 example, `dpo_llama2.py` runs ok with all the default settings. However, this example loads the active and ref model separately and passes both to the `DPOTrainer`. If I understand correctly, setting `ref_model=None`, should avoid copying the model and instead disable the adapters to compute the reference model score. But it seems like [this check](https://github.com/huggingface/trl/blob/d3bbee3ab84d4d5dcb413003bf93541df5b4e54c/trl/trainer/dpo_trainer.py#L116) consistently fails and the model gets copied in any case. This also produces the following warning with no updates to the loss: ```python UserWarning: None of the inputs have requires_grad=True. Gradients will be None warnings.warn("None of the inputs have requires_grad=True. Gradients will be None") ``` To reproduce, replace https://github.com/huggingface/trl/blob/d3bbee3ab84d4d5dcb413003bf93541df5b4e54c/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py#L141C5-L146C6 with `model_ref=None`. --- Libraries: ``` accelerate 0.21.0 bitsandbytes 0.41.1 peft 0.5.0 torch 2.0.1 torchvision 0.15.2 transformers 4.33.0.dev0 trl 0.6.1.dev0 ``` Any insights on this would be greatly appreciated!
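For reference, a minimal usage sketch of the intended `ref_model=None` path described above, where the trainer attaches the adapter and computes reference scores by disabling it. The model id, LoRA settings and toy dataset are placeholders, not the configuration from this report:

```python
from datasets import Dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token

# Tiny toy dataset with the columns DPOTrainer expects.
train_dataset = Dataset.from_dict(
    {
        "prompt": ["hello", "how are you"],
        "chosen": ["hi, nice to meet you", "I am fine"],
        "rejected": ["leave me alone", "I am not fine"],
    }
)

peft_config = LoraConfig(
    r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM"
)

trainer = DPOTrainer(
    model=model,      # plain base model; the trainer attaches the adapter itself
    ref_model=None,   # reference log-probs come from running with the adapter disabled
    beta=0.1,
    args=TrainingArguments(
        output_dir="dpo-out",
        per_device_train_batch_size=2,
        remove_unused_columns=False,
        max_steps=3,
    ),
    train_dataset=train_dataset,
    tokenizer=tokenizer,
    peft_config=peft_config,
)
trainer.train()
```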
Hi @tannonk Thanks for the issue, I suspect the model does not set the input to `requires_grad=True` as expected; can you try to call: ```python model.enable_input_require_grads() ``` right before passing the model to the trainer? More details on what the method does: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L1224C16-L1224C16 Hi @younesbelkada Thanks for the response! I tried out your suggestion but unfortunately it doesn't seem to fix the issue. @tannonk so I can do the PR now, if that is ok?
2023-08-29T10:20:33
huggingface/trl
719
huggingface__trl-719
[ "718" ]
d603e7c52704054a9e7f306ae63acdafaa3d179a
diff --git a/trl/trainer/reward_trainer.py b/trl/trainer/reward_trainer.py --- a/trl/trainer/reward_trainer.py +++ b/trl/trainer/reward_trainer.py @@ -47,6 +47,9 @@ class RewardTrainer(Trainer): - `input_ids_rejected` - `attention_mask_rejected` + Optionally, you can also pass a `margin` entry to the dataset. This entry should contain the margin used to modulate the + loss of the reward model as outlined in https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/. + If you don't pass a margin, no margin will be used. """ def __init__( @@ -209,7 +212,12 @@ def compute_loss( input_ids=inputs["input_ids_rejected"], attention_mask=inputs["attention_mask_rejected"], )[0] - loss = -nn.functional.logsigmoid(rewards_chosen - rewards_rejected).mean() + # calculate loss, optionally modulate with margin + if "margin" in inputs: + loss = -nn.functional.logsigmoid(rewards_chosen - rewards_rejected - inputs["margin"]).mean() + else: + loss = -nn.functional.logsigmoid(rewards_chosen - rewards_rejected).mean() + if return_outputs: return loss, { "rewards_chosen": rewards_chosen, diff --git a/trl/trainer/utils.py b/trl/trainer/utils.py --- a/trl/trainer/utils.py +++ b/trl/trainer/utils.py @@ -195,6 +195,9 @@ class RewardDataCollatorWithPadding: def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: features_chosen = [] features_rejected = [] + margin = [] + # check if we have a margin. If we do, we need to batch it as well + has_margin = "margin" in features[0] for feature in features: # check if the keys are named as expected if ( @@ -219,6 +222,8 @@ def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: "attention_mask": feature["attention_mask_rejected"], } ) + if has_margin: + margin.append(feature["margin"]) batch_chosen = self.tokenizer.pad( features_chosen, padding=self.padding, @@ -240,6 +245,9 @@ def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: "attention_mask_rejected": batch_rejected["attention_mask"], "return_loss": True, } + if has_margin: + margin = torch.tensor(margin, dtype=torch.float) + batch["margin"] = margin return batch
diff --git a/tests/test_reward_trainer.py b/tests/test_reward_trainer.py --- a/tests/test_reward_trainer.py +++ b/tests/test_reward_trainer.py @@ -260,3 +260,55 @@ def test_reward_trainer_assert_value_error(self): tokenizer=self.tokenizer, train_dataset=dummy_dataset, ) + + def test_reward_trainer_margin(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = RewardConfig( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + evaluation_strategy="steps", + ) + + # fmt: off + dummy_dataset_dict = { + "input_ids_chosen": [ + torch.LongTensor([0, 1, 2,]), + ], + "attention_mask_chosen": [ + torch.LongTensor([1, 1, 1]), + ], + "input_ids_rejected": [ + torch.LongTensor([0, 2,]), + ], + "attention_mask_rejected": [ + torch.LongTensor([1, 1]), + ], + "margin": [ + torch.FloatTensor([1.0]), + ] + } + # fmt: on + dummy_dataset = Dataset.from_dict(dummy_dataset_dict) + + trainer = RewardTrainer( + model=self.model, + args=training_args, + tokenizer=self.tokenizer, + train_dataset=dummy_dataset, + eval_dataset=dummy_dataset, + ) + + batch = [dummy_dataset[0]] + batch = trainer.data_collator(batch) + loss, outputs = trainer.compute_loss(trainer.model, batch, return_outputs=True) + + self.assertAlmostEqual( + loss, + -torch.nn.functional.logsigmoid( + outputs["rewards_chosen"] - outputs["rewards_rejected"] - batch["margin"] + ).mean(), + )
Add margin to reward trainer, similar to LLAMA-2 In the LLAMA-2 paper, an additional margin term is added to the loss function of the reward model. This makes it possible to give a higher weight to pairs with a larger score difference. ![image](https://github.com/huggingface/trl/assets/9884254/6c74c655-d0a5-4bd6-a82d-fa05db9c5c3f) This should also be possible with the TRL RewardTrainer. Adding that functionality probably requires rebuilding the trainer and collator. Does this make sense?
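A standalone sketch of the margin-modulated pairwise loss described above, with made-up reward values; the patch for this record wires the same idea into `RewardTrainer.compute_loss`:

```python
import torch
import torch.nn.functional as F

# Rewards produced by a reward model for chosen/rejected completions,
# plus an optional per-pair margin (e.g. derived from annotator ratings).
rewards_chosen = torch.tensor([1.2, 0.3, 0.8])
rewards_rejected = torch.tensor([0.4, 0.5, -0.1])
margin = torch.tensor([1.0, 0.0, 0.5])

# Plain pairwise ranking loss ...
loss_plain = -F.logsigmoid(rewards_chosen - rewards_rejected).mean()
# ... and the LLAMA-2 variant, which demands a larger reward gap for pairs
# that carry a larger margin.
loss_margin = -F.logsigmoid(rewards_chosen - rewards_rejected - margin).mean()

print(loss_plain.item(), loss_margin.item())
```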
2023-08-31T13:15:49
huggingface/trl
743
huggingface__trl-743
[ "738" ]
453c4eca14007e2e13a2d75863683199de2c0861
diff --git a/trl/trainer/dpo_trainer.py b/trl/trainer/dpo_trainer.py --- a/trl/trainer/dpo_trainer.py +++ b/trl/trainer/dpo_trainer.py @@ -472,8 +472,8 @@ def prediction_step( # logits for the chosen and rejected samples from model logits_dict = { - "logits_test/chosen": metrics["logits_test/chosen"], - "logits_test/rejected": metrics["logits_test/rejected"], + "eval_logits/chosen": metrics["eval_logits/chosen"], + "eval_logits/rejected": metrics["eval_logits/rejected"], } logits = tuple(v for k, v in logits_dict.items() if k not in ignore_keys) logits = torch.stack(logits).mean(axis=1)
Bug in dpo_trainer.py, prediction_step function I see the following code in dpo_trainer.py's prediction_step method: ```python ... ... logits_dict = { "logits_test/chosen": metrics["logits_test/chosen"], "logits_test/rejected": metrics["logits_test/rejected"], } ... ... ``` But the keys and values are defined in get_batch_metrics function: ```python ... ... prefix = "eval_" if train_eval == "eval" else "" metrics[f"{prefix}rewards/chosen"] = chosen_rewards.cpu().numpy().mean() metrics[f"{prefix}rewards/rejected"] = rejected_rewards.cpu().numpy().mean() metrics[f"{prefix}rewards/accuracies"] = reward_accuracies.cpu().numpy().mean() metrics[f"{prefix}rewards/margins"] = (chosen_rewards - rejected_rewards).cpu().numpy().mean() metrics[f"{prefix}logps/rejected"] = policy_rejected_logps.detach().cpu().numpy().mean() metrics[f"{prefix}logps/chosen"] = policy_chosen_logps.detach().cpu().numpy().mean() metrics[f"{prefix}logits/rejected"] = policy_rejected_logits.detach().cpu().numpy().mean() metrics[f"{prefix}logits/chosen"] = policy_chosen_logits.detach().cpu().numpy().mean() ... ... ``` That is, the variable metrics doesn't have "logits_test/chosen"、"logits_test/rejected". I have run this code. For transformers framework, if the compute_metrics method is not defined, prediction_loss_only will be set to True and errors will be ignored. But if prediction_loss_only is forced to False, an error is indeed occurred. So should we use the keys defined in get_batch_metrics when constructing logits_dict?
Referring to @kashif for DPO related questions. Indeed @Emperorizzis, it looks like a typo bug; let me double check and make a PR
2023-09-06T14:50:52
huggingface/trl
840
huggingface__trl-840
[ "827" ]
ac0d5b726d5c91d0cec87d9678815aaa88bdeb3b
diff --git a/trl/models/modeling_base.py b/trl/models/modeling_base.py --- a/trl/models/modeling_base.py +++ b/trl/models/modeling_base.py @@ -22,6 +22,7 @@ from huggingface_hub import hf_hub_download from huggingface_hub.utils import EntryNotFoundError, HFValidationError, LocalEntryNotFoundError from transformers import PreTrainedModel +from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled from ..import_utils import is_peft_available @@ -519,6 +520,10 @@ def create_reference_model( Returns `PreTrainedModelWrapper` """ + if is_deepspeed_zero3_enabled(): + raise ValueError( + "DeepSpeed ZeRO-3 is enabled and is not compatible with `create_reference_model()`. Please instantiate your reference model directly with `AutoCausalLM.from_pretrained()`." + ) parameter_names = [n for n, _ in model.named_parameters()] ref_model = deepcopy(model)
PPO: DeepSpeed ZeRO 3 fails if reference model is created via `trl.create_reference_model()` Recent changes have added DeepSpeed ZeRO 3 support for `PPOTrainer`. This can be seen in the `sentiment_tuning.py` example. If we modify the script to create the reference model after the regular model via `trl.create_reference_model(model, num_shared_layers)`, we encounter the following error: ``` Traceback (most recent call last): File "/home/xxx/repos/llm-training/trl-latest/examples/scripts/sentiment_tuning.py", line 210, in <module> stats = ppo_trainer.step(query_tensors, response_tensors, rewards) File "/home/xxx/.conda/envs/trl/lib/python3.10/contextlib.py", line 79, in inner return func(*args, **kwds) File "/home/xxx/repos/llm-training/trl-latest/trl/trainer/ppo_trainer.py", line 696, in step ref_logprobs, ref_logits_or_none, _, _ = self.batched_forward_pass( File "/home/xxx/.conda/envs/trl/lib/python3.10/contextlib.py", line 79, in inner return func(*args, **kwds) File "/home/xxx/repos/llm-training/trl-latest/trl/trainer/ppo_trainer.py", line 955, in batched_forward_pass logits, _, values = model(**input_kwargs) File "/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/deepspeed/utils/nvtx.py", line 15, in wrapped_fn ret_val = func(*args, **kwargs) File "/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/deepspeed/runtime/engine.py", line 1801, in forward loss = self.module(*inputs, **kwargs) File "/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1538, in _call_impl result = forward_call(*args, **kwargs) File "/home/xxx/repos/llm-training/trl-latest/trl/models/modeling_value_head.py", line 165, in forward base_model_output = self.pretrained_model( File "/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1538, in _call_impl result = forward_call(*args, **kwargs) File "/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 1076, in forward transformer_outputs = self.transformer( File "/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1538, in _call_impl result = forward_call(*args, **kwargs) File "/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/transformers/models/gpt2/modeling_gpt2.py", line 843, in forward inputs_embeds = self.wte(input_ids) File "/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl result = hook(self, args) File "/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/deepspeed/utils/nvtx.py", line 15, in wrapped_fn ret_val = func(*args, **kwargs) File "/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/deepspeed/runtime/zero/parameter_offload.py", line 392, in _pre_forward_module_hook self.pre_sub_module_forward_function(module) File "/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/deepspeed/runtime/zero/parameter_offload.py", line 505, in pre_sub_module_forward_function param_coordinator.fetch_sub_module(sub_module, forward=prev_grad_state) File "/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/deepspeed/utils/nvtx.py", line 15, in wrapped_fn ret_val = func(*args, **kwargs) File "/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context return func(*args, **kwargs) File 
"/home/xxx/.conda/envs/trl/lib/python3.10/site-packages/deepspeed/runtime/zero/partitioned_param_coordinator.py", line 310, in fetch_sub_module assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary() AssertionError: {'id': 151, 'status': 'NOT_AVAILABLE', 'numel': 0, 'ds_numel': 0, 'shape': (0,), 'ds_shape': (0,), 'requires_grad': True, 'grad_shape': None, 'persist': True, 'active_sub_modules': {171}, 'ds_tensor.shape': torch.Size([0])} ``` This occurs with the following changes to `sentiment_tuning.py`. Note that it occurs when setting `num_shared_layers=0` as well. ``` diff --git a/trl-latest/examples/scripts/sentiment_tuning.py b/trl-latest/examples/scripts/sentiment_tuning.py index df0576d..f13d3f4 100644 --- a/trl-latest/examples/scripts/sentiment_tuning.py +++ b/trl-latest/examples/scripts/sentiment_tuning.py @@ -23,7 +23,8 @@ from peft import LoraConfig from tqdm import tqdm from transformers import AutoTokenizer, pipeline -from trl import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, PPOConfig, PPOTrainer, set_seed +from trl import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, PPOConfig, PPOTrainer, set_seed, \ + create_reference_model from trl.core import LengthSampler @@ -121,24 +122,33 @@ def collator(data): # set seed before initializing value head for deterministic eval set_seed(args.ppo_config.seed) print(f"Args: {args}") -# Now let's build the model, the reference model, and the tokenizer. -if not args.use_peft: - ref_model = trl_model_class.from_pretrained(args.ppo_config.model_name, trust_remote_code=True) - device_map = None - peft_config = None -else: - peft_config = args.peft_config - ref_model = None - # Copy the model to each device - device_map = {"": Accelerator().local_process_index} model = trl_model_class.from_pretrained( args.ppo_config.model_name, trust_remote_code=True, - device_map=device_map, - peft_config=peft_config, + device_map=None, + peft_config=None, ) +ref_model = create_reference_model(model, num_shared_layers=5) +# # Now let's build the model, the reference model, and the tokenizer. +# if not args.use_peft: +# ref_model = trl_model_class.from_pretrained(args.ppo_config.model_name, trust_remote_code=True) +# device_map = None +# peft_config = None +# else: +# peft_config = args.peft_config +# ref_model = None +# # Copy the model to each device +# device_map = {"": Accelerator().local_process_index} + +# model = trl_model_class.from_pretrained( +# args.ppo_config.model_name, +# trust_remote_code=True, +# device_map=device_map, +# peft_config=peft_config, +# ) + ``` I am launching the script via ` CUDA_VISIBLE_DEVICES=0,1,2 && accelerate launch --config_file=examples/accelerate_configs/deepspeed_zero3.yaml --main_process_port 25678 examples/scripts/sentiment_tuning.py` **Expected behavior:** The script should either function as it does when instantiating the reference model before the regular model, or, if creating the reference model this way is somehow incompatible with ZeRO 3, it should fail early with an appropriate error message. @lewtun
Indeed, I think sharing the same LLM backbone for the reference and active model could be a challenge. @lewtun have you tried this configuration before? No I haven't tried this and I am not sure it would be easy to handle in DeepSpeed without some low-level changes. For now I'll open a PR to raise an error if this combination of shared layers + ZeRO-3 is used
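The patch for this record implements exactly that fail-fast check; a standalone sketch of the idea, assuming a recent `transformers` that exposes `is_deepspeed_zero3_enabled` under `transformers.integrations.deepspeed`:

```python
from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled


def assert_reference_model_supported() -> None:
    # Sharing (part of) the backbone between the active and reference model
    # cannot work when parameters are sharded by ZeRO-3, so refuse early.
    if is_deepspeed_zero3_enabled():
        raise ValueError(
            "DeepSpeed ZeRO-3 is enabled and is not compatible with "
            "`create_reference_model()`; instantiate the reference model "
            "directly with `from_pretrained()` instead."
        )
```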
2023-10-06T18:37:12
huggingface/trl
911
huggingface__trl-911
[ "670" ]
6ff0fac2c118250e1a14a0a793e96c5c3f654885
diff --git a/trl/models/modeling_value_head.py b/trl/models/modeling_value_head.py --- a/trl/models/modeling_value_head.py +++ b/trl/models/modeling_value_head.py @@ -33,10 +33,14 @@ def __init__(self, config, **kwargs): self.dropout = nn.Dropout(summary_dropout_prob) if summary_dropout_prob else nn.Identity() # some models such as OPT have a projection layer before the word embeddings - e.g. OPT-350m + if hasattr(config, "hidden_size"): + hidden_size = config.hidden_size if hasattr(config, "word_embed_proj_dim"): hidden_size = config.word_embed_proj_dim - else: - hidden_size = config.hidden_size + elif hasattr(config, "is_encoder_decoder"): + if config.is_encoder_decoder and hasattr(config, "decoder"): + if hasattr(config.decoder, "hidden_size"): + hidden_size = config.decoder.hidden_size self.summary = nn.Linear(hidden_size, 1)
Error while loading a pretrained EncoderDecoder model Hi all, I've trained an `EncoderDecoder` model but I cannot load it via `AutoModelForSeq2SeqLMWithValueHead.from_pretrained`. My `EncoderDecoder` model has been defined by tying together two RoBERTa model, as follows: ```python from transformers import EncoderDecoderModel pretrained_encoder = "seyonec/ChemBERTa-zinc-base-v1" bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained( pretrained_encoder, pretrained_encoder, tie_encoder_decoder=True, ) ``` Transformers and TRL versions: ``` trl.__version__: 0.5.0 transformers.__version__: 4.31.0 ``` Code to reproduce the issue: ```python import torch from transformers import ( EncoderDecoderConfig, AutoTokenizer, ) from trl import ( AutoModelForSeq2SeqLMWithValueHead, PPOConfig, PPOTrainer, create_reference_model, ) pretrained_model = "ribesstefano/ChemBERTa2ChemBERTa-58M" model = AutoModelForSeq2SeqLMWithValueHead.from_pretrained(pretrained_model) model_ref = create_reference_model(model, num_shared_layers=6) tokenizer = AutoTokenizer.from_pretrained(pretrained_model) tokenizer.pad_token = tokenizer.eos_token ``` Error message: ``` --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) [<ipython-input-45-7d8719dacb7f>](https://localhost:8080/#) in <cell line: 22>() 20 # print(bert2bert.config.hidden_size) 21 ---> 22 model = AutoModelForSeq2SeqLMWithValueHead.from_pretrained(pretrained_model) 23 # # model_ref = AutoModelForSeq2SeqLMWithValueHead.from_pretrained(pretrained_model, device_map="auto", load_in_8bit=True) 24 # model_ref = create_reference_model(model, num_shared_layers=6) [/usr/local/lib/python3.10/dist-packages/trl/models/modeling_base.py](https://localhost:8080/#) in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs) 230 raise ValueError("PromptLearningConfig is not supported for PPO training.") 231 # Then, create the full model by instantiating the wrapper class --> 232 model = cls(pretrained_model, **trl_model_args) 233 234 # if resume_training, load the state_dict again - this is ok since the [/usr/local/lib/python3.10/dist-packages/trl/models/modeling_value_head.py](https://localhost:8080/#) in __init__(self, pretrained_model, **kwargs) 289 raise ValueError("The model does not have a language model head, please use a model that has one.") 290 --> 291 self.v_head = ValueHead(self.pretrained_model.config, **v_head_kwargs) 292 293 self._init_weights(**v_head_kwargs) [/usr/local/lib/python3.10/dist-packages/trl/models/modeling_value_head.py](https://localhost:8080/#) in __init__(self, config, **kwargs) 37 hidden_size = config.word_embed_proj_dim 38 else: ---> 39 hidden_size = config.hidden_size 40 41 self.summary = nn.Linear(hidden_size, 1) [/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py](https://localhost:8080/#) in __getattribute__(self, key) 259 if key != "attribute_map" and key in super().__getattribute__("attribute_map"): 260 key = super().__getattribute__("attribute_map")[key] --> 261 return super().__getattribute__(key) 262 263 def __init__(self, **kwargs): AttributeError: 'EncoderDecoderConfig' object has no attribute 'hidden_size' ``` From what I can guess from the source code, `ValueHead` only inspect the model's top config, expecting an `hidden_size`. However, that information is in the `model.decoder.config.hidden_size` for an `EncoderDecoder` class. Does it sound like a bug or something to update in the code? 
Or is there any workaround I can try? Thanks in advance
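A standalone sketch of the config lookup being discussed, purely illustrative; the patch for this record integrates the same idea into `ValueHead`:

```python
def resolve_hidden_size(config):
    """Resolve the value-head input size from a (possibly composite) config."""
    hidden_size = None
    if hasattr(config, "hidden_size"):
        hidden_size = config.hidden_size
    if hasattr(config, "word_embed_proj_dim"):
        # e.g. OPT-350m projects word embeddings before the decoder
        hidden_size = config.word_embed_proj_dim
    elif getattr(config, "is_encoder_decoder", False) and hasattr(config, "decoder"):
        # EncoderDecoderConfig nests the sizes inside its child configs
        hidden_size = getattr(config.decoder, "hidden_size", hidden_size)
    if hidden_size is None:
        raise ValueError("Could not infer a hidden size from the given config.")
    return hidden_size
```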
Hmm I think the `AutoModelForSeq2SeqLMWithValueHead` class does not support `EncoderDecoder` class as it requires some patches. Indeed, the solution would be to look for the child configs, we can do that by passing a flag `is_composition_model=True` in `from_pretrained`. Would you be happy to propose a patch for this? Happy to guide you in more details! Thanks for the support! I wrote a quick fix. The following code seems to work now: ```python pretrained_model = "ribesstefano/ChemBERTa2ChemBERTa-58M" model = AutoModelForSeq2SeqLMWithValueHead.from_pretrained( pretrained_model, is_composition_model=True) model_ref = create_reference_model(model) # model_ref = create_reference_model(model, num_shared_layers=6) ``` but `create_reference_model(model, num_shared_layers=6)` still breaks when specifying the `num_shared_layers` argument. This is what I did: in the `from_pretrained` method of the `PreTrainedModelWrapper` I added the `is_composition_model` to the `trl_model_args`: ```python if kwargs is not None: peft_config = kwargs.pop("peft_config", None) reward_adapter = kwargs.pop("reward_adapter", None) is_trainable = kwargs.pop("is_trainable", False) is_composition_model = kwargs.pop("is_composition_model", False) trl_model_args, pretrained_kwargs, peft_quantization_kwargs = cls._split_kwargs(kwargs) trl_model_args["is_composition_model"] = is_composition_model token = pretrained_kwargs.get("token", None) else: peft_config = None is_trainable = False trl_model_args = {} pretrained_kwargs = {} peft_quantization_kwargs = {} token = None is_composition_model = False ``` Then, in the `AutoModelForSeq2SeqLMWithValueHead` `init` method I added `is_composition_model` to the `v_head_kwargs`: ```python self.is_composition_model = kwargs.pop("is_composition_model", False) v_head_kwargs["is_composition_model"] = self.is_composition_model ``` Finally, in the `ValueHead` `init` method I do: ```python is_composition_model = kwargs.pop("is_composition_model", False) if is_composition_model: hidden_size = config.decoder.hidden_size elif hasattr(config, "word_embed_proj_dim"): hidden_size = config.word_embed_proj_dim else: hidden_size = config.hidden_size ``` Does this make sense? According to the pull request guidelines, I shall write a test beforehand. Could give me some suggestions on that too please? Thanks very much @ribesstefano the solution sounds great ! I have a better workaround, instead of passing `is_composition_model=True` we can maybe do `is_composition_model = isinstance(model, transformers.EncoderDecoderModel)` - in any case let's see in the PR ! For the tests I have just pushed a tiny model on the Hub : https://huggingface.co/trl-internal-testing/tiny-Roberta-EncoderDecoderModel I would say you can just add that model id here: https://github.com/huggingface/trl/blob/main/tests/test_modeling_value_head.py#L54 and see if the tests pass Let me know how it goes! @younesbelkada I have a similar error when trying to load `Salesforce/codet5p-16b`. I was wondering if the `hidden_size` I need is referenced [here](https://huggingface.co/Salesforce/codet5p-16b/blob/d19a75e34b9fd56b260557468de793b112855f1a/configuration_codet5p.py#L17)? If so, I'm happy to make a PR for the fix if you would offer some guidance. In the meantime I'm looking at the solution posted above. hi @dshvimer Thanks! `hidden_size` should be properly defined in that model, e.g.: https://huggingface.co/Salesforce/codet5p-16b/blob/d19a75e34b9fd56b260557468de793b112855f1a/modeling_codet5p.py#L92 So I would expect your script to work. 
Can you confirm just running: ```python model = AutoModelForSeq2SeqLMWithValueHead.from_pretrained("Salesforce/codet5p-16b") ``` Leads to an error for you? @younesbelkada Here is an example [notebook](https://colab.research.google.com/drive/1PfblyCEqZQUkC-xye1gDqohqIJr8vgE3?usp=sharing) with the larger model failing. From my testing <= 2b works with trl but >2b does not Thanks for sharing the notebook! From what I can see it seems that this related to the fact the second model uses classic T5: https://huggingface.co/Salesforce/codet5p-220m/blob/main/config.json that has does not use composition in the configuration object as you can see here: https://huggingface.co/Salesforce/codet5p-6b/blob/main/configuration_codet5p.py#L71 As this is a model that uses a model on the Hub feature, I am not sure if we should support all scenarios as this will be challenging. One thing you could do is to push a copy of `Salesforce/codet5p-6b` under your namespace and manually add `self.hidden_size = self.encoder.hidden_size` here: https://huggingface.co/Salesforce/codet5p-6b/blob/main/configuration_codet5p.py#L90 @younesbelkada Thank you for the direction. I am thinking this will unblock me for my own use case. To make sure I understand things, could I also set the hidden_size from the `trl` side, similar to what is posted above? (If I create a subclass or similar) ``` is_composition_model = kwargs.pop("is_composition_model", False) if is_composition_model: hidden_size = config.decoder.hidden_size elif hasattr(config, "word_embed_proj_dim"): hidden_size = config.word_embed_proj_dim else: hidden_size = config.hidden_size ``` Yes the changes you shared make sense! @ribesstefano let us know if you have managed to start the PR so that we could jump over potential improvements, let us know also if you need help for it! :D @younesbelkada I've made a way simpler modification, as you suggested, and just changed the `ValueHead` init in file `trl/models/modeling_value_head.py`: ```python class ValueHead(nn.Module): r""" The ValueHead class implements a head for GPT2 that returns a scalar for each output token. """ def __init__(self, config, **kwargs): super().__init__() if not hasattr(config, "summary_dropout_prob"): summary_dropout_prob = kwargs.pop("summary_dropout_prob", 0.1) else: summary_dropout_prob = config.summary_dropout_prob self.dropout = nn.Dropout(summary_dropout_prob) if summary_dropout_prob else nn.Identity() # some models such as OPT have a projection layer before the word embeddings - e.g. OPT-350m if hasattr(config, "hidden_size"): hidden_size = config.hidden_size if hasattr(config, "word_embed_proj_dim"): hidden_size = config.word_embed_proj_dim elif hasattr(config, "is_encoder_decoder"): if config.is_encoder_decoder and hasattr(config, "decoder"): if hasattr(config.decoder, "hidden_size"): hidden_size = config.decoder.hidden_size self.summary = nn.Linear(hidden_size, 1) self.flatten = nn.Flatten() ``` After all, the `config`s that the `ValueHead` is expecting are from the pretrained model, so it is sufficient to check those only. 
However, when running tests, to which I've added the https://huggingface.co/trl-internal-testing/tiny-Roberta-EncoderDecoderModel model, it breaks for exactly that added `tiny-Roberta-EncoderDecoderModel` model (it passes for all the other models): ``` ===================================================================================================================== FAILURES ===================================================================================================================== ____________________________________________________________________________________________ Seq2SeqValueHeadModelTester.test_transformers_bf16_kwargs _____________________________________________________________________________________________ [gw0] linux -- Python 3.10.8 /opt/conda/bin/python self = <tests.test_modeling_value_head.Seq2SeqValueHeadModelTester testMethod=test_transformers_bf16_kwargs> def test_transformers_bf16_kwargs(self): r""" Test if the transformers kwargs are correctly passed Here we check that loading a model in half precision works as expected, i.e. the weights of the `pretrained_model` attribute is loaded in half precision and you can run a dummy forward pass without any issue. """ for model_name in self.all_model_names: trl_model = self.trl_model_class.from_pretrained(model_name, torch_dtype=torch.bfloat16) lm_head_namings = self.trl_model_class.lm_head_namings if model_name == "trl-internal-testing/tiny-random-FSMTForConditionalGeneration": # skip the test for FSMT as it does not support mixed-prec continue > self.assertTrue( any(hasattr(trl_model.pretrained_model, lm_head_naming) for lm_head_naming in lm_head_namings) ) E AssertionError: False is not true tests/test_modeling_value_head.py:430: AssertionError ``` Do you think the error is related to the modifications I've done in `ValueHead`? Thanks for reporting and for your hardwork ! Regarding the failing test since this is an edge case you can skip it and add a condition similarly as FSMT together with a comment. Let me know once you open the PR!
2023-10-24T08:42:35
huggingface/trl
934
huggingface__trl-934
[ "926" ]
2bbd594ec50f8d17232b4344e82b4e73f7b1e3b3
diff --git a/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py b/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py --- a/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py +++ b/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py @@ -5,8 +5,8 @@ import torch from datasets import Dataset, load_dataset -from peft import AutoPeftModelForCausalLM, LoraConfig -from transformers import AutoTokenizer, HfArgumentParser, TrainingArguments +from peft import LoraConfig +from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, TrainingArguments from trl import DPOTrainer @@ -124,7 +124,7 @@ def return_prompt_and_responses(samples) -> Dict[str, str]: script_args = parser.parse_args_into_dataclasses()[0] # 1. load a pretrained model - model = AutoPeftModelForCausalLM.from_pretrained( + model = AutoModelForCausalLM.from_pretrained( script_args.model_name_or_path, low_cpu_mem_usage=True, torch_dtype=torch.float16, @@ -138,7 +138,7 @@ def return_prompt_and_responses(samples) -> Dict[str, str]: name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool ] - model_ref = AutoPeftModelForCausalLM.from_pretrained( + model_ref = AutoModelForCausalLM.from_pretrained( script_args.model_name_or_path, low_cpu_mem_usage=True, torch_dtype=torch.float16,
dpo_llama2.py: peft on top of peft? https://github.com/huggingface/trl/blob/main/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py#L127 https://github.com/huggingface/trl/blob/main/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py#L186 It looks like the model for DPO training is a PeftModel built upon another PeftModel without weight merging. Is this expected? ``` (Pdb) PeftModelForCausalLM( (base_model): LoraModel( (model): PeftModelForCausalLM( (base_model): LoraModel( (model): LlamaForCausalLM( (model): LlamaModel( (embed_tokens): Embedding(32000, 4096) (layers): ModuleList( ...... ```
cc @kashif @younesbelkada Hi @ZHZisZZ Indeed, this is not expected; all instances of `AutoPeftModelForCausalLM` should in fact be `AutoModelForCausalLM` here. I think what happened is that we first fine-tuned with SFT + PEFT and then directly loaded that un-merged peft model for DPO and attached new adapters on top of it.
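A minimal sketch of the corrected loading path for the script: load the SFT checkpoint as a plain causal LM so that `DPOTrainer` plus `peft_config` attaches a single fresh adapter instead of nesting LoRA on top of LoRA. The checkpoint path is a placeholder:

```python
import torch
from transformers import AutoModelForCausalLM

# "sft/final_checkpoint" is a placeholder path for the merged SFT model.
model = AutoModelForCausalLM.from_pretrained(
    "sft/final_checkpoint",
    low_cpu_mem_usage=True,
    torch_dtype=torch.float16,
)
model_ref = AutoModelForCausalLM.from_pretrained(
    "sft/final_checkpoint",
    low_cpu_mem_usage=True,
    torch_dtype=torch.float16,
)
```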
2023-10-31T14:59:15
huggingface/trl
956
huggingface__trl-956
[ "742" ]
6ff0fac2c118250e1a14a0a793e96c5c3f654885
diff --git a/trl/trainer/dpo_trainer.py b/trl/trainer/dpo_trainer.py --- a/trl/trainer/dpo_trainer.py +++ b/trl/trainer/dpo_trainer.py @@ -137,6 +137,10 @@ def __init__( "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models" ) elif is_peft_available() and peft_config is not None: + # if model is a peft model and we have a peft_config, we merge and unload it first + if isinstance(model, PeftModel): + model = model.merge_and_unload() + if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False): _support_gc_kwargs = hasattr( args, "gradient_checkpointing_kwargs" @@ -160,7 +164,10 @@ def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) + + # get peft model with the given config model = get_peft_model(model, peft_config) + # For models that use gradient_checkpoiting, we need to attach a hook that enables input # to explicitly have `requires_grad=True`, otherwise training will either silently # fail or completely fail.
diff --git a/tests/test_dpo_trainer.py b/tests/test_dpo_trainer.py --- a/tests/test_dpo_trainer.py +++ b/tests/test_dpo_trainer.py @@ -240,3 +240,57 @@ def test_dpo_trainer_generate_during_eval_no_wandb(self): eval_dataset=dummy_dataset, generate_during_eval=True, ) + + @require_peft + @mark.peft_test + def test_dpo_lora_save(self): + from peft import LoraConfig, get_peft_model + + lora_config = LoraConfig( + r=16, + lora_alpha=32, + lora_dropout=0.05, + bias="none", + task_type="CAUSAL_LM", + ) + + # lora model + model = AutoModelForCausalLM.from_pretrained(self.model_id) + model_peft = get_peft_model(model, lora_config) + + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = TrainingArguments( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=4, + learning_rate=9e-1, + evaluation_strategy="steps", + ) + + dummy_dataset = self._init_dummy_dataset() + + # dpo train lora model with a lora config + trainer = DPOTrainer( + model=model_peft, + ref_model=None, + beta=0.1, + args=training_args, + tokenizer=self.tokenizer, + train_dataset=dummy_dataset, + eval_dataset=dummy_dataset, + peft_config=lora_config, + ) + + # train the model + trainer.train() + + # save peft adapter + trainer.save_model() + + # assert that the model is loaded without giving OSError + try: + AutoModelForCausalLM.from_pretrained(tmp_dir) + except OSError: + self.fail("Loading the saved peft adapter failed")
dpo training with Lora can not save fine-tuned weights ### issue The following script manage to train and save. However, the saved weights are incorrect. ### Training script ``` def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() # load config and tokenziers config = LlamaConfig.from_pretrained(model_args.model_name_or_path) config.use_cache = False tokenizer = LlamaTokenizer.from_pretrained(model_args.model_name_or_path, truncation_side='left') # initialize modules model = LlamaForCausalLM.from_pretrained(model_args.model_name_or_path, config=config) model.enable_input_require_grads() # add pad token in tokenizer if needed if tokenizer.pad_token is None: tokenizer.add_special_tokens({"pad_token":"<pad>"}) tokenizer.pad_token_id = 0 # Setup seed set_seed(training_args.seed) embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) # Setup Trainer training_args = training_args.to_dict() training_args |= {'remove_unused_columns': False} training_args = TrainingArguments(**training_args) peft_config = LoraConfig( r=model_args.lora_r, lora_alpha=model_args.lora_alpha, lora_dropout=model_args.lora_dropout, target_modules=[ "q_proj", "v_proj", "k_proj", "out_proj", "fc_in", "fc_out", "wte", ], bias="none", task_type="CAUSAL_LM", ) model_peft = get_peft_model(model, peft_config) trainer = DPOTrainer( model=model_peft, ref_model=None, beta=0.1, # DPO temprature train_dataset=prepared_dataset["train"], eval_dataset=prepared_dataset["eval"], tokenizer=tokenizer, args=training_args, peft_config=peft_config, max_length=data_args.model_max_length, max_prompt_length=int(data_args.model_max_length) * 3 // 4, ) # Training train_result = trainer.train() trainer.save_state() trainer.save_model() ``` ### Training output ``` {'loss': 0.6934, 'learning_rate': 0.0, 'rewards/chosen': 0.0, 'rewards/rejected': 0.0, 'rewards/accuracies': 0.0, 'rewards/margins': 0.0, 'logps/rejected': -63.125, 'logps/chosen': -84.625, 'logits/rejected': -1.3544921875, 'logits/chosen': -1.353515625, 'epoch': 0.0} {'loss': 0.6933, 'learning_rate': 0.0005, 'rewards/chosen': 0.0263671875, 'rewards/rejected': 0.0164031982421875, 'rewards/accuracies': 0.5625, 'rewards/margins': 0.00995635986328125, 'logps/rejected': -55.625, 'logps/chosen': -38.59375, 'logits/rejected': -1.24609375, 'logits/chosen': -1.2490234375, 'epoch': 0.0} {'loss': 0.6938, 'learning_rate': 0.0005, 'rewards/chosen': 0.0035152435302734375, 'rewards/rejected': 0.006542205810546875, 'rewards/accuracies': 0.5, 'rewards/margins': -0.0030307769775390625, 'logps/rejected': -66.75, 'logps/chosen': -65.0625, 'logits/rejected': -1.296875, 'logits/chosen': -1.2890625, 'epoch': 0.0} {'loss': 0.6814, 'learning_rate': 0.0003333333333333333, 'rewards/chosen': 0.05535888671875, 'rewards/rejected': -0.007030487060546875, 'rewards/accuracies': 0.75, 'rewards/margins': 0.062408447265625, 'logps/rejected': -70.9375, 'logps/chosen': -40.875, 'logits/rejected': -1.255859375, 'logits/chosen': -1.259765625, 'epoch': 0.01} {'loss': 0.6477, 'learning_rate': 0.00016666666666666666, 'rewards/chosen': -0.0174407958984375, 'rewards/rejected': -0.07177734375, 'rewards/accuracies': 0.5, 'rewards/margins': 0.054351806640625, 'logps/rejected': -65.75, 'logps/chosen': -54.28125, 'logits/rejected': -1.2783203125, 'logits/chosen': -1.28515625, 'epoch': 0.01} ``` ### Weight check when doing merge ``` 
# initialize modules model = LlamaForCausalLM.from_pretrained(script_args.base_model_name, config=config) print("-" * 20 + "Weight before merge" + "-" * 20) print(model.get_output_embeddings().weight) # Load the Lora model model = PeftModel.from_pretrained(model, script_args.adapter_model_name) model.eval() # check Lora weights print("-" * 20 + "Check Lora Weights" + "-" * 20) print(model.model.model.layers[0].self_attn.q_proj.lora_B.default.weight) # merge lora weight and base model model = model.merge_and_unload() print("-" * 20 + "Weight after merge" + "-" * 20) print(model.get_output_embeddings().weight) ``` ### Weight check outputs ``` --------------------Weight before merge-------------------- Parameter containing: tensor([[-0.0027, 0.0020, -0.0072, ..., 0.0034, -0.0074, 0.0074], [-0.0315, 0.0452, -0.0030, ..., -0.0226, 0.0144, 0.0317], [-0.0127, 0.0016, 0.0189, ..., -0.0264, 0.0157, -0.0071], ..., [ 0.0199, 0.0242, 0.0271, ..., 0.0052, -0.0103, -0.0067], [ 0.0074, -0.0048, 0.0076, ..., -0.0273, -0.0171, 0.0308], [ 0.0192, 0.0271, 0.0170, ..., -0.0015, -0.0046, -0.0046]], requires_grad=True) --------------------Check Lora Weights-------------------- Parameter containing: tensor([[0., 0., 0., ..., 0., 0., 0.], [0., 0., 0., ..., 0., 0., 0.], [0., 0., 0., ..., 0., 0., 0.], ..., [0., 0., 0., ..., 0., 0., 0.], [0., 0., 0., ..., 0., 0., 0.], [0., 0., 0., ..., 0., 0., 0.]]) --------------------Weight after merge-------------------- Parameter containing: tensor([[-0.0027, 0.0020, -0.0072, ..., 0.0034, -0.0074, 0.0074], [-0.0315, 0.0452, -0.0030, ..., -0.0226, 0.0144, 0.0317], [-0.0127, 0.0016, 0.0189, ..., -0.0264, 0.0157, -0.0071], ..., [ 0.0199, 0.0242, 0.0271, ..., 0.0052, -0.0103, -0.0067], [ 0.0074, -0.0048, 0.0076, ..., -0.0273, -0.0171, 0.0308], [ 0.0192, 0.0271, 0.0170, ..., -0.0015, -0.0046, -0.0046]]) ``` I think even if the Lora fine-tuning is not convergent, the saved adpater value should not be zero?
I have the same question~ Maybe @younesbelkada and @kashif could have a look. @LuJunru did you try to save the model via: ``` model_peft.save_pretrained("peft_checkpoint") ``` @kashif In fact, I had the same problem when training DPO (without accelerate). I believe the code here is what you described, but it doesn't work ![image](https://github.com/huggingface/trl/assets/44131880/19853a83-272e-41a9-a3e0-bc35f35fc8c9) self.model.save_pretrained(output_dir) @kashif @Moyhub @lvwerra Hi Guys, thank you for the feedback. When i directly check the weights in adapter bin, i found the weights was saved. However, the weight key there was base_model.model.base_model.model.xxxx, this maybe related to the deepspeed wrapper. I tried to rename the key to base_model.model.xxxx, the merging succeeded. You may have a check as well. @younesbelkada something we could also test with #724. It seems like when you feed a `PEFTModel` and a `peft_config` to the `DPOTrainer`, the model to be trained gets nested into 2 `PEFTModel` so this is the reason why we get `base_model.model.base_model.model.xxx` as keys in the saved model and not `base_model.model.xxx`. In that case, to load the saved DPO model we must create a PEFTModel on top of the PEFTModel that was used as a reference model. Doing this, I managed to load the weights correctly. Also, when merging the weights, you must call it twice as follows: `merged_model = dpo_model.merge_and_unload().merge_and_unload()` > It seems like when you feed a `PEFTModel` and a `peft_config` to the `DPOTrainer`, the model to be trained gets nested into 2 `PEFTModel` so this is the reason why we get `base_model.model.base_model.model.xxx` as keys in the saved model and not `base_model.model.xxx`. In that case, to load the saved DPO model we must create a PEFTModel on top of the PEFTModel that was used as a reference model. Doing this, I managed to load the weights correctly. Also, when merging the weights, you must call it twice as follows: `merged_model = dpo_model.merge_and_unload().merge_and_unload()` Can you provide a code example for your solution? Seeing same issue. Model performance of training same as original model due to this even after dpo training. Any conclusion for this bug? @lvwerra can you point to any other similar thread which has a solution for this ? Same issue here > @kashif @Moyhub @lvwerra Hi Guys, thank you for the feedback. When i directly check the weights in adapter bin, i found the weights was saved. However, the weight key there was base_model.model.base_model.model.xxxx, this maybe related to the deepspeed wrapper. I tried to rename the key to base_model.model.xxxx, the merging succeeded. You may have a check as well. Could you share the example code? Thanks.
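The patch for this record addresses the nesting by merging an incoming `PeftModel` before attaching the new adapter; a standalone sketch of that idea using real `peft` APIs, simplified for illustration:

```python
from peft import LoraConfig, PeftModel, get_peft_model


def prepare_policy_model(model, peft_config: LoraConfig):
    # If the SFT checkpoint is still an un-merged PeftModel, fold its adapter
    # into the base weights first, so the saved state dict never ends up with
    # `base_model.model.base_model.model...` keys.
    if isinstance(model, PeftModel):
        model = model.merge_and_unload()
    # Now attach exactly one fresh adapter for DPO training.
    return get_peft_model(model, peft_config)
```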
2023-11-05T13:40:48
huggingface/trl
971
huggingface__trl-971
[ "972" ]
2f726ce4e88a99b5d20eca3b5482954851d91ef6
diff --git a/trl/trainer/utils.py b/trl/trainer/utils.py --- a/trl/trainer/utils.py +++ b/trl/trainer/utils.py @@ -543,8 +543,7 @@ def __init__( self.formatting_func = formatting_func if formatting_func is not None: - formatting_func_signature = formatting_func.__code__.co_varnames - if len(formatting_func_signature) > 1: + if formatting_func.__code__.co_argcount > 1: warnings.warn( "The passed formatting_func has more than one argument. Usually that function should have a single argument `example`" " which corresponds to the dictionary returned by each element of the dataset. Make sure you know what you are doing."
Wrong trigger for warning "UserWarning: The passed formatting_func has more than one argument." In `trl/trainer/utils.py` the code `func.__code__.co_varnames` is being used to check if the user passed a **formatting_func** with more than 1 parameter. This code actually counts the function variables rather than function parameters. For instance ``` def add_v1(a): return a + 1 def add_v2(a): b = a + 1 return b print("Number of parameters for add_v1 is", len(add_v1.__code__.co_varnames)) print("Number of parameters for add_v2 is", len(add_v2.__code__.co_varnames)) ``` outputs ``` Number of parameters for add_v1 is 1 Number of parameters for add_v2 is 2 ``` The proposed fix is to change the following: ```python formatting_func_signature = formatting_func.__code__.co_varnames if len(formatting_func_signature) > 1: warnings.warn( "The passed formatting_func has more than one argument. Usually that function should have a single argument `example`" " which corresponds to the dictionary returned by each element of the dataset. Make sure you know what you are doing." ) ``` to: ```python if formatting_func.__code__.co_argcount > 1: warnings.warn( "The passed formatting_func has more than one argument. Usually that function should have a single argument `example`" " which corresponds to the dictionary returned by each element of the dataset. Make sure you know what you are doing." ) ``` Tested on python Python 2.7.5 and Python 3.6.8
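For completeness, a tiny check reusing the functions from the example above, showing that `co_argcount` counts only parameters, which is why the proposed fix no longer warns on `add_v2`:

```python
def add_v1(a):
    return a + 1

def add_v2(a):
    b = a + 1
    return b

# co_varnames counts locals as well as parameters ...
print(len(add_v1.__code__.co_varnames), len(add_v2.__code__.co_varnames))  # 1 2
# ... whereas co_argcount counts parameters only.
print(add_v1.__code__.co_argcount, add_v2.__code__.co_argcount)            # 1 1
```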
2023-11-08T21:49:13
huggingface/trl
979
huggingface__trl-979
[ "1008" ]
9e9f024399b76842ece3552884bbc4f304fd4153
diff --git a/trl/trainer/sft_trainer.py b/trl/trainer/sft_trainer.py --- a/trl/trainer/sft_trainer.py +++ b/trl/trainer/sft_trainer.py @@ -20,6 +20,8 @@ import torch import torch.nn as nn from datasets import Dataset +from datasets.arrow_writer import SchemaInferenceError +from datasets.builder import DatasetGenerationError from transformers import ( AutoModelForCausalLM, AutoTokenizer, @@ -128,7 +130,7 @@ def __init__( packing: Optional[bool] = False, formatting_func: Optional[Callable] = None, max_seq_length: Optional[int] = None, - infinite: Optional[bool] = False, + infinite: Optional[bool] = None, num_of_sequences: Optional[int] = 1024, chars_per_token: Optional[float] = 3.6, dataset_num_proc: Optional[int] = None, @@ -141,6 +143,11 @@ def __init__( elif not isinstance(model, str): raise ValueError("You passed model_kwargs to the SFTTrainer. But your model is already instantiated.") + if infinite is not None: + warnings.warn( + "The `infinite` argument is deprecated and will be removed in a future version of TRL. Use `TrainingArguments.max_steps` or `TrainingArguments.num_train_epochs` instead to control training length." + ) + if isinstance(model, str): warnings.warn( "You passed a model_id to the SFTTrainer. This will automatically create an " @@ -226,7 +233,6 @@ def __init__( dataset_text_field, max_seq_length, formatting_func, - infinite, num_of_sequences, chars_per_token, ) @@ -238,7 +244,6 @@ def __init__( dataset_text_field, max_seq_length, formatting_func, - infinite, num_of_sequences, chars_per_token, ) @@ -301,7 +306,6 @@ def _prepare_dataset( dataset_text_field, max_seq_length, formatting_func, - infinite, num_of_sequences, chars_per_token, ): @@ -317,30 +321,19 @@ def _prepare_dataset( tokenizer, dataset, dataset_text_field, max_seq_length, formatting_func ) - if dataset_text_field is not None or formatting_func is not None: - if tokenizer is None: - raise ValueError( - "You need to pass a tokenizer when using the SFT Trainer when passing a `dataset_text_field`." - ) - - return ConstantLengthDataset( + else: + return self._prepare_packed_dataloader( tokenizer, dataset, - dataset_text_field=dataset_text_field, - formatting_func=formatting_func, - seq_length=max_seq_length, - infinite=infinite, - num_of_sequences=num_of_sequences, - chars_per_token=chars_per_token, - eos_token_id=tokenizer.eos_token_id, + dataset_text_field, + max_seq_length, + num_of_sequences, + chars_per_token, + formatting_func, ) - raise ValueError( - "You need to pass a `dataset_text_field` or `formatting_func` argument to the SFTTrainer if you want to use the `ConstantLengthDataset`." 
- ) - def _prepare_non_packed_dataloader( - self, tokenizer, dataset, dataset_text_field, max_seq_len, formatting_func=None + self, tokenizer, dataset, dataset_text_field, max_seq_length, formatting_func=None ): use_formatting_func = formatting_func is not None and dataset_text_field is None self._dataset_sanity_checked = False @@ -351,7 +344,7 @@ def tokenize(element): element[dataset_text_field] if not use_formatting_func else formatting_func(element), truncation=True, padding=False, - max_length=max_seq_len, + max_length=max_seq_length, return_overflowing_tokens=False, return_length=False, ) @@ -376,6 +369,50 @@ def tokenize(element): return tokenized_dataset + def _prepare_packed_dataloader( + self, + tokenizer, + dataset, + dataset_text_field, + max_seq_length, + num_of_sequences, + chars_per_token, + formatting_func=None, + ): + if dataset_text_field is not None or formatting_func is not None: + if tokenizer is None: + raise ValueError("You need to pass a tokenizer when using `dataset_text_field` with `SFTTrainer`.") + + constant_length_iterator = ConstantLengthDataset( + tokenizer, + dataset, + dataset_text_field=dataset_text_field, + formatting_func=formatting_func, + seq_length=max_seq_length, + infinite=False, + num_of_sequences=num_of_sequences, + chars_per_token=chars_per_token, + eos_token_id=tokenizer.eos_token_id, + ) + + def data_generator(constant_length_iterator): + for i in constant_length_iterator: + yield i + + try: + packed_dataset = Dataset.from_generator( + data_generator, gen_kwargs={"constant_length_iterator": constant_length_iterator} + ) + except (DatasetGenerationError, SchemaInferenceError): + raise ValueError( + "Error occurred while packing the dataset. Make sure that your dataset has enough samples to at least yield one packed sequence." + ) + return packed_dataset + else: + raise ValueError( + "You need to pass a `dataset_text_field` or `formatting_func` argument to the SFTTrainer if you want to use the `ConstantLengthDataset`." + ) + def _trl_activate_neftune(self, model): r""" Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper: https://arxiv.org/abs/2310.05914
diff --git a/tests/test_sft_trainer.py b/tests/test_sft_trainer.py --- a/tests/test_sft_trainer.py +++ b/tests/test_sft_trainer.py @@ -178,9 +178,21 @@ def test_sft_trainer_uncorrect_data(self): args=training_args, train_dataset=self.dummy_dataset, formatting_func=formatting_prompts_func, + max_seq_length=32, # make sure there is at least 1 packed sequence packing=True, ) + with self.assertRaises(ValueError): + # This should not work because not enough data for one sample + _ = SFTTrainer( + model=self.model, + args=training_args, + train_dataset=self.dummy_dataset, + formatting_func=formatting_prompts_func, + max_seq_length=1024, # make sure there is NOT at least 1 packed sequence + packing=True, + ) + # This should not work as well with self.assertRaises(ValueError): _ = SFTTrainer( @@ -191,7 +203,7 @@ def test_sft_trainer_uncorrect_data(self): packing=False, ) - # but this shpuld work + # but this should work _ = SFTTrainer( model=self.model, args=training_args,
SFTTrainer training stops early? It seems this just started happening today. Has anyone else seen training stop early? For example, it should have run 1000 steps but stops after 300 steps, giving an output like: `TrainOutput(global_step=100, training_loss=1.2991660499572755, metrics={'train_runtime': 855.9696, 'train_samples_per_second': 1.869, 'train_steps_per_second': 0.117, 'total_flos': 6.50352940548096e+16, 'train_loss': 1.2991660499572755, 'epoch': 0.16})` I did my training on Colab, and I don't know if it is an internet issue, but my previous training never hit this kind of issue.
2023-11-10T14:51:15
huggingface/trl
1,045
huggingface__trl-1045
[ "1044" ]
6d9ea38ae18c7e266f797b62de4a68a12a13aba4
diff --git a/trl/trainer/ppo_config.py b/trl/trainer/ppo_config.py --- a/trl/trainer/ppo_config.py +++ b/trl/trainer/ppo_config.py @@ -104,7 +104,7 @@ class PPOConfig: """Number of optimisation epochs per batch of samples""" max_grad_norm: Optional[float] = None """Maximum gradient norm for gradient clipping""" - optimize_cuda_cache: bool = False + optimize_cuda_cache: Optional[bool] = None """DEPRECATED: use `optimize_device_cache` instead, which does the same thing.""" optimize_device_cache: Optional[bool] = False """Optimize device cache for slightly more memory-efficient training"""
Deprecated option `optimize_cuda_cache` warning on import of trl When you just import trl, we get the following warning: ``` $ python3 Python 3.10.8 (main, Nov 24 2022, 14:13:03) [GCC 11.2.0] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import trl /opt/conda/lib/python3.10/site-packages/trl/trainer/ppo_config.py:141: UserWarning: The `optimize_cuda_cache` arguement will be deprecated soon, please use `optimize_device_cache` instead. warnings.warn( ``` Unless this is left on purpose, I believe that this line https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_config.py#L107, should be changed from: ``` optimize_cuda_cache: bool = False ``` to: ``` optimize_cuda_cache: Optional[bool] = None ``` to get rid of the warning. I can open a PR if this warning is not needed on default config creation.
Indeed, that's correct! A PR would be great, thank you!
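For context, the one-line patch above works because a `None` default lets the config distinguish "never set" from "explicitly set". A minimal sketch of that sentinel pattern, assuming a `__post_init__`-style check like the one the warning comes from (this is an illustration, not TRL's actual `PPOConfig`):

```python
import warnings
from dataclasses import dataclass
from typing import Optional


@dataclass
class Config:
    # None acts as a sentinel for "not set by the caller"
    optimize_cuda_cache: Optional[bool] = None
    optimize_device_cache: Optional[bool] = False

    def __post_init__(self):
        if self.optimize_cuda_cache is not None:
            # warn only on explicit use of the deprecated name
            warnings.warn("`optimize_cuda_cache` is deprecated, use `optimize_device_cache` instead.")
            self.optimize_device_cache = self.optimize_cuda_cache


Config()                           # silent: plain construction no longer warns
Config(optimize_cuda_cache=True)   # warns once and forwards the value
```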
2023-11-30T15:04:47
huggingface/trl
1,075
huggingface__trl-1075
[ "1072" ]
7d0a8eea4e01dd4d3247ea3608dec2ec8be10b34
diff --git a/trl/trainer/dpo_trainer.py b/trl/trainer/dpo_trainer.py --- a/trl/trainer/dpo_trainer.py +++ b/trl/trainer/dpo_trainer.py @@ -65,9 +65,9 @@ class DPOTrainer(Trainer): beta (`float`, defaults to 0.1): The beta factor in DPO loss. Higher beta means less divergence from the initial policy. For the IPO loss, beta is the regularization parameter denoted by tau in the paper. label_smoothing (`float`, defaults to 0): - The robust DPO label smoothing parameter that should be between 0 and 0.5. + The robust DPO label smoothing parameter from the [cDPO](https://ericmitchell.ai/cdpo.pdf) report that should be between 0 and 0.5. loss_type (`str`, defaults to `"sigmoid"`): - The type of DPO loss to use. Either `"sigmoid"` the default DPO loss,`"hinge"` loss from SLiC paper or `"ipo"` from IPO paper. + The type of DPO loss to use. Either `"sigmoid"` the default DPO loss,`"hinge"` loss from [SLiC](https://arxiv.org/abs/2305.10425) paper, `"ipo"` from [IPO](https://arxiv.org/abs/2310.12036) paper, or `"kto"` from the HALOs [report](https://github.com/ContextualAI/HALOs/blob/main/assets/report.pdf). args (`transformers.TrainingArguments`): The arguments to use for training. data_collator (`transformers.DataCollator`): @@ -123,7 +123,7 @@ def __init__( ref_model: Optional[Union[PreTrainedModel, nn.Module, str]] = None, beta: float = 0.1, label_smoothing: float = 0, - loss_type: Literal["sigmoid", "hinge", "ipo"] = "sigmoid", + loss_type: Literal["sigmoid", "hinge", "ipo", "kto"] = "sigmoid", args: TrainingArguments = None, data_collator: Optional[DataCollator] = None, label_pad_token_id: int = -100, @@ -311,7 +311,7 @@ def make_inputs_require_grad(module, input, output): self.label_pad_token_id = label_pad_token_id self.padding_value = padding_value - if loss_type in ["hinge", "ipo"] and label_smoothing > 0: + if loss_type in ["hinge", "ipo", "kto"] and label_smoothing > 0: warnings.warn( "You are using a loss type that does not support label smoothing. Ignoring label_smoothing parameter." ) @@ -465,8 +465,25 @@ def dpo_loss( elif self.loss_type == "ipo": # eqn (17) of the paper where beta is the regularization parameter for the IPO loss, denoted by tau in the paper. losses = (logits - 1 / (2 * self.beta)) ** 2 + elif self.loss_type == "kto": + # eqn (7) of the HALOs paper + chosen_KL = (policy_chosen_logps - reference_chosen_logps).mean().clamp(min=0) + rejected_KL = (policy_rejected_logps - reference_rejected_logps).mean().clamp(min=0) + + chosen_logratios = policy_chosen_logps - reference_chosen_logps + rejected_logratios = policy_rejected_logps - reference_rejected_logps + # As described in the KTO report, the KL term for chosen (rejected) is estimated using the rejected (chosen) half. + losses = torch.cat( + ( + 1 - F.sigmoid(self.beta * (chosen_logratios - rejected_KL)), + 1 - F.sigmoid(self.beta * (chosen_KL - rejected_logratios)), + ), + 0, + ) else: - raise ValueError(f"Unknown loss type: {self.loss_type}. Should be one of ['sigmoid', 'hinge', 'ipo']") + raise ValueError( + f"Unknown loss type: {self.loss_type}. Should be one of ['sigmoid', 'hinge', 'ipo', 'kto']" + ) chosen_rewards = self.beta * (policy_chosen_logps - reference_chosen_logps).detach() rejected_rewards = self.beta * (policy_rejected_logps - reference_rejected_logps).detach()
diff --git a/tests/test_dpo_trainer.py b/tests/test_dpo_trainer.py --- a/tests/test_dpo_trainer.py +++ b/tests/test_dpo_trainer.py @@ -74,7 +74,9 @@ def _init_dummy_dataset(self): # fmt: on return Dataset.from_dict(dummy_dataset_dict) - @parameterized.expand([["gpt2", "sigmoid"], ["t5", "hinge"], ["gpt2", "ipo"], ["t5", "ipo"]]) + @parameterized.expand( + [["gpt2", "sigmoid"], ["t5", "hinge"], ["gpt2", "ipo"], ["t5", "ipo"], ["gpt2", "kto"], ["t5", "kto"]] + ) def test_dpo_trainer(self, name, loss_type): with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments(
Implement KTO https://github.com/ContextualAI/HALOs/blob/main/assets/report.pdf
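A hedged usage sketch of the option this patch adds (the `gpt2` model id and the toy preference dataset are placeholders; the constructor kwargs mirror those used in `tests/test_dpo_trainer.py` above):

```python
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

model_id = "gpt2"  # placeholder model
model = AutoModelForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token

# tiny illustrative dataset with the prompt/chosen/rejected columns DPO expects
train_dataset = Dataset.from_dict(
    {
        "prompt": ["Hello", "How are you?"],
        "chosen": ["Hi, nice to meet you!", "I'm fine, thanks."],
        "rejected": ["Go away.", "No."],
    }
)

trainer = DPOTrainer(
    model=model,
    ref_model=None,      # a reference copy is created internally, as in the tests
    beta=0.1,
    loss_type="kto",     # the loss type introduced by this patch
    args=TrainingArguments(output_dir="dpo-kto-out", remove_unused_columns=False),
    tokenizer=tokenizer,
    train_dataset=train_dataset,
)
trainer.train()
```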
2023-12-08T10:18:37
huggingface/trl
1,081
huggingface__trl-1081
[ "1032" ]
94fa4b022b5078374f005b5bfcf0fea8c810c311
diff --git a/examples/research_projects/stack_llama_2/scripts/sft_llama2.py b/examples/research_projects/stack_llama_2/scripts/sft_llama2.py --- a/examples/research_projects/stack_llama_2/scripts/sft_llama2.py +++ b/examples/research_projects/stack_llama_2/scripts/sft_llama2.py @@ -4,12 +4,11 @@ from typing import Optional import torch -import tyro from accelerate import Accelerator from datasets import load_dataset from peft import AutoPeftModelForCausalLM, LoraConfig from tqdm import tqdm -from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TrainingArguments +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, TrainingArguments from trl import SFTTrainer from trl.import_utils import is_xpu_available @@ -19,7 +18,6 @@ @dataclass class ScriptArguments: model_name: Optional[str] = field(default="meta-llama/Llama-2-7b-hf", metadata={"help": "the model name"}) - dataset_name: Optional[str] = field(default="lvwerra/stack-exchange-paired", metadata={"help": "the dataset name"}) subset: Optional[str] = field(default="data/finetune", metadata={"help": "the subset to use"}) split: Optional[str] = field(default="train", metadata={"help": "the split to use"}) @@ -28,52 +26,31 @@ class ScriptArguments: shuffle_buffer: Optional[int] = field(default=5000, metadata={"help": "the shuffle buffer size"}) seq_length: Optional[int] = field(default=1024, metadata={"help": "the sequence length"}) num_workers: Optional[int] = field(default=4, metadata={"help": "the number of workers"}) - - training_args: TrainingArguments = field( - default_factory=lambda: TrainingArguments( - output_dir="./results", - max_steps=500, - logging_steps=10, - save_steps=10, - per_device_train_batch_size=4, - per_device_eval_batch_size=1, - gradient_accumulation_steps=2, - gradient_checkpointing=False, - group_by_length=False, - learning_rate=1e-4, - lr_scheduler_type="cosine", - warmup_steps=100, - weight_decay=0.05, - optim="paged_adamw_32bit", - bf16=True, - remove_unused_columns=False, - run_name="sft_llama2", - report_to="wandb", - ) - ) - packing: Optional[bool] = field(default=True, metadata={"help": "whether to use packing for SFTTrainer"}) - peft_config: LoraConfig = field( - default_factory=lambda: LoraConfig( - r=8, - lora_alpha=16, - lora_dropout=0.05, - target_modules=["q_proj", "v_proj"], - bias="none", - task_type="CAUSAL_LM", - ) - ) - - -script_args = tyro.cli(ScriptArguments) + # LoraConfig + lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"}) + lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"}) + lora_r: Optional[int] = field(default=8, metadata={"help": "the lora r parameter"}) + + +parser = HfArgumentParser((ScriptArguments, TrainingArguments)) +script_args, training_args = parser.parse_args_into_dataclasses() +peft_config = LoraConfig( + r=script_args.lora_r, + lora_alpha=script_args.lora_alpha, + lora_dropout=script_args.lora_dropout, + target_modules=["q_proj", "v_proj"], + bias="none", + task_type="CAUSAL_LM", +) -if script_args.training_args.group_by_length and script_args.packing: +if training_args.group_by_length and script_args.packing: raise ValueError("Cannot use both packing and group by length") # `gradient_checkpointing` was True by default until `1f3314`, but it's actually not used. # `gradient_checkpointing=True` will cause `Variable._execution_engine.run_backward`. 
-if script_args.training_args.gradient_checkpointing: +if training_args.gradient_checkpointing: raise ValueError("gradient_checkpointing not supported") @@ -171,14 +148,11 @@ def create_datasets(tokenizer, args): ) base_model.config.use_cache = False -peft_config = script_args.peft_config tokenizer = AutoTokenizer.from_pretrained(script_args.model_name, trust_remote_code=True) tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "right" # Fix weird overflow issue with fp16 training -training_args = script_args.training_args - train_dataset, eval_dataset = create_datasets(tokenizer, script_args) trainer = SFTTrainer( @@ -192,9 +166,9 @@ def create_datasets(tokenizer, args): args=training_args, ) trainer.train() -trainer.save_model(script_args.training_args.output_dir) +trainer.save_model(training_args.output_dir) -output_dir = os.path.join(script_args.training_args.output_dir, "final_checkpoint") +output_dir = os.path.join(training_args.output_dir, "final_checkpoint") trainer.model.save_pretrained(output_dir) # Free memory for merging weights @@ -207,5 +181,5 @@ def create_datasets(tokenizer, args): model = AutoPeftModelForCausalLM.from_pretrained(output_dir, device_map="auto", torch_dtype=torch.bfloat16) model = model.merge_and_unload() -output_merged_dir = os.path.join(script_args.training_args.output_dir, "final_merged_checkpoint") +output_merged_dir = os.path.join(training_args.output_dir, "final_merged_checkpoint") model.save_pretrained(output_merged_dir, safe_serialization=True)
Error when run `trl/examples/research_projects/stack_llama_2/scripts /sft_llama2.py` I tried to run the stack_llama_2 project and encountered the following error. The command I used: ``` accelerate launch sft_llama2.py --training_args.output_dir="sft" ``` The error is as folloing: ``` [2023-11-24 03:28:10,125] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect) /usr/local/lib/python3.10/dist-packages/trl/trainer/ppo_config.py:141: UserWarning: The `optimize_cuda_cache` arguement will be deprecated soon, please use `optimize_device_cache` instead. warnings.warn( /usr/local/lib/python3.10/dist-packages/tyro/_resolver.py:311: UserWarning: <class 'int'> does not match any type in Union: [<class 'float'>, <class 'NoneType'>] warnings.warn( /usr/local/lib/python3.10/dist-packages/tyro/_resolver.py:311: UserWarning: <class 'dict'> does not match any type in Union: [<class 'str'>, <class 'NoneType'>] warnings.warn( Traceback (most recent call last): File "/home/trl/examples/research_projects/stack_llama_2/scripts/sft_llama2.py", line 69, in <module> script_args = tyro.cli(ScriptArguments) File "/usr/local/lib/python3.10/dist-packages/tyro/_cli.py", line 187, in cli output = _cli_impl( File "/usr/local/lib/python3.10/dist-packages/tyro/_cli.py", line 374, in _cli_impl parser_spec = _parsers.ParserSpecification.from_callable_or_type( File "/usr/local/lib/python3.10/dist-packages/tyro/_parsers.py", line 106, in from_callable_or_type field_out = handle_field( File "/usr/local/lib/python3.10/dist-packages/tyro/_parsers.py", line 320, in handle_field return ParserSpecification.from_callable_or_type( File "/usr/local/lib/python3.10/dist-packages/tyro/_parsers.py", line 106, in from_callable_or_type field_out = handle_field( File "/usr/local/lib/python3.10/dist-packages/tyro/_parsers.py", line 312, in handle_field if _fields.is_nested_type(field.type_or_callable, field.default): File "/usr/local/lib/python3.10/dist-packages/tyro/_unsafe_cache.py", line 33, in wrapped_f out = f(*args, **kwargs) File "/usr/local/lib/python3.10/dist-packages/tyro/_fields.py", line 224, in is_nested_type _try_field_list_from_callable(typ, default_instance), File "/usr/local/lib/python3.10/dist-packages/tyro/_fields.py", line 348, in _try_field_list_from_callable return _field_list_from_dict(f, default_instance) File "/usr/local/lib/python3.10/dist-packages/tyro/_fields.py", line 734, in _field_list_from_dict if default_instance in MISSING_SINGLETONS or len(cast(dict, default_instance)) == 0: TypeError: object of type 'NoneType' has no len() Traceback (most recent call last): File "/usr/local/bin/accelerate", line 8, in <module> sys.exit(main()) File "/usr/local/lib/python3.10/dist-packages/accelerate/commands/accelerate_cli.py", line 47, in main args.func(args) File "/usr/local/lib/python3.10/dist-packages/accelerate/commands/launch.py", line 994, in launch_command simple_launcher(args) File "/usr/local/lib/python3.10/dist-packages/accelerate/commands/launch.py", line 636, in simple_launcher raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) subprocess.CalledProcessError: Command '['/usr/bin/python', 'sft_llama2.py', '--training_args.output_dir=sft']' returned non-zero exit status 1. ``` It seems that line 69 of sft_llama2.py `script_args = tyro.cli(ScriptArguments)` leads to the error `TypeError: object of type 'NoneType' has no len()`. I am not familiar with tyro, could anyone help me with this problem? Any suggestions would be appreciated!
cc @vwxyzjn Please update the `transformers` (`pip install --upgrade transformers`). This is a duplicate issue of https://github.com/huggingface/trl/pull/906#issuecomment-1815312980. I still have no idea why to import 'tyro'... It throws another error: File "/home/nus_ids_user3/Projects/ultramarine/src/sft_train.py", line 71, in <module> script_args = tyro.cli(ScriptArguments) File "/mnt/dataDisk1/miniconda3/envs/mercury/lib/python3.9/site-packages/tyro/_cli.py", line 187, in cli output = _cli_impl( File "/mnt/dataDisk1/miniconda3/envs/mercury/lib/python3.9/site-packages/tyro/_cli.py", line 374, in _cli_impl parser_spec = _parsers.ParserSpecification.from_callable_or_type( File "/mnt/dataDisk1/miniconda3/envs/mercury/lib/python3.9/site-packages/tyro/_parsers.py", line 110, in from_callable_or_type field_out = handle_field( File "/mnt/dataDisk1/miniconda3/envs/mercury/lib/python3.9/site-packages/tyro/_parsers.py", line 322, in handle_field if _fields.is_nested_type(field.type_or_callable, field.default): File "/mnt/dataDisk1/miniconda3/envs/mercury/lib/python3.9/site-packages/tyro/_unsafe_cache.py", line 33, in wrapped_f out = f(*args, **kwargs) File "/mnt/dataDisk1/miniconda3/envs/mercury/lib/python3.9/site-packages/tyro/_fields.py", line 249, in is_nested_type _try_field_list_from_callable(typ, default_instance), File "/mnt/dataDisk1/miniconda3/envs/mercury/lib/python3.9/site-packages/tyro/_fields.py", line 412, in _try_field_list_from_callable return field_list_from_class(cls, default_instance) File "/mnt/dataDisk1/miniconda3/envs/mercury/lib/python3.9/site-packages/tyro/_fields.py", line 559, in _field_list_from_dataclass for dc_field in filter(lambda field: field.init, _resolver.resolved_fields(cls)): File "/mnt/dataDisk1/miniconda3/envs/mercury/lib/python3.9/site-packages/tyro/_unsafe_cache.py", line 33, in wrapped_f out = f(*args, **kwargs) File "/mnt/dataDisk1/miniconda3/envs/mercury/lib/python3.9/site-packages/tyro/_resolver.py", line 97, in resolved_fields annotations = get_type_hints(cls, include_extras=True) File "/mnt/dataDisk1/miniconda3/envs/mercury/lib/python3.9/site-packages/typing_extensions.py", line 1266, in get_type_hints hint = typing.get_type_hints( File "/mnt/dataDisk1/miniconda3/envs/mercury/lib/python3.9/typing.py", line 1459, in get_type_hints value = _eval_type(value, base_globals, localns) File "/mnt/dataDisk1/miniconda3/envs/mercury/lib/python3.9/typing.py", line 292, in _eval_type return t._evaluate(globalns, localns, recursive_guard) File "/mnt/dataDisk1/miniconda3/envs/mercury/lib/python3.9/typing.py", line 554, in _evaluate eval(self.__forward_code__, globalns, localns), File "<string>", line 1, in <module> TypeError: unsupported operand type(s) for |: 'type' and '_LiteralGenericAlias'
2023-12-11T16:10:44
huggingface/trl
1,125
huggingface__trl-1125
[ "1107" ]
2aff709144fab147e35a8591dc0985ce42a04ab0
diff --git a/trl/trainer/dpo_trainer.py b/trl/trainer/dpo_trainer.py --- a/trl/trainer/dpo_trainer.py +++ b/trl/trainer/dpo_trainer.py @@ -490,6 +490,9 @@ def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoa name="reference_rejected_logps", column=all_reference_rejected_logps ) + # Save calculated reference_chosen_logps and reference_rejected_logps to the eval_dataset for subsequent runs + if self.eval_dataset is not None: + self.eval_dataset = eval_dataset self._precomputed_eval_ref_log_probs = True return super().get_eval_dataloader(eval_dataset=eval_dataset)
Eval dataset issue in DPOTrainer when precompute_ref_log_probs=True and ref_model=None when `precompute_ref_log_probs=True`, `reference_chosen_logps` and `reference_rejected_logps` was not saved to `self.eval_dataset`. When `ref_model=None`, subsequent evaluations will use `self.model` to recalculate, resulting in eval/acc is always zero (because the policy and reference are using the same model). https://github.com/huggingface/trl/blob/d708ec272f292ded00a4d0f14b5bd214741b87f3/trl/trainer/dpo_trainer.py#L448 Perhaps it should be modified like this: ```python def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: """ Returns the evaluation [`~torch.utils.data.DataLoader`]. Subclass of transformers.src.transformers.trainer.get_eval_dataloader to precompute `ref_log_probs`. Args: eval_dataset (`torch.utils.data.Dataset`, *optional*): If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. It must implement `__len__`. """ if eval_dataset is None and self.eval_dataset is None: raise ValueError("Trainer: evaluation requires an eval_dataset.") eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset if self.precompute_ref_log_probs and not self._precomputed_eval_ref_log_probs: dataloader_params = { "batch_size": self.args.per_device_eval_batch_size, "collate_fn": self.data_collator, "num_workers": self.args.dataloader_num_workers, "pin_memory": self.args.dataloader_pin_memory, "shuffle": False, } # prepare dataloader data_loader = self.accelerator.prepare(DataLoader(eval_dataset, **dataloader_params)) reference_chosen_logps = [] reference_rejected_logps = [] for padded_batch in tqdm(iterable=data_loader, desc="Eval dataset reference log probs"): reference_chosen_logp, reference_rejected_logp = self.compute_reference_log_probs(padded_batch) reference_chosen_logp, reference_rejected_logp = self.accelerator.gather_for_metrics( (reference_chosen_logp, reference_rejected_logp) ) reference_chosen_logps.append(reference_chosen_logp.cpu()) reference_rejected_logps.append(reference_rejected_logp.cpu()) all_reference_chosen_logps = torch.cat(reference_chosen_logps).float().numpy() all_reference_rejected_logps = torch.cat(reference_rejected_logps).float().numpy() eval_dataset = eval_dataset.add_column(name="reference_chosen_logps", column=all_reference_chosen_logps) eval_dataset = eval_dataset.add_column( name="reference_rejected_logps", column=all_reference_rejected_logps ) #### Save calculated reference_chosen_logps and reference_rejected_logps ##### if self.eval_dataset is not None: self.eval_dataset = eval_dataset self._precomputed_eval_ref_log_probs = True return super().get_eval_dataloader(eval_dataset=eval_dataset) ```
tagging @kashif here :) @Sanster so i had assumed `super().get_eval_dataloader(eval_dataset=eval_dataset)` would then set the dataset... so you are saying that is not the case? ah no I see its because in the trainer we use the `self.eval_dataset` right? great catch @Sanster thanks!
2023-12-21T16:01:37
huggingface/trl
1,152
huggingface__trl-1152
[ "1149" ]
fa074e6a1565fb6dc4dc0035805a763c64b33c3d
diff --git a/trl/trainer/utils.py b/trl/trainer/utils.py --- a/trl/trainer/utils.py +++ b/trl/trainer/utils.py @@ -304,6 +304,12 @@ def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: to_pad = [torch.LongTensor(ex[k]) for ex in features] if (k.startswith("prompt")) and (k.endswith("input_ids")): + if self.pad_token_id is None: + raise ValueError( + "Padding is enabled, but the tokenizer is not configured with a padding token." + " Explicitly set `tokenizer.pad_token` (e.g. `tokenizer.pad_token = tokenizer.eos_token`)" + " before calling the trainer." + ) padding_value = self.pad_token_id elif k.endswith("_attention_mask"): padding_value = 0 @@ -319,6 +325,12 @@ def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: else: to_pad = [torch.LongTensor(ex[k]) for ex in features] if k.endswith("_input_ids"): + if self.pad_token_id is None: + raise ValueError( + "Padding is enabled, but the tokenizer is not configured with a padding token." + " Explicitly set `tokenizer.pad_token` (e.g. `tokenizer.pad_token = tokenizer.eos_token`)" + " before calling the trainer." + ) padding_value = self.pad_token_id elif k.endswith("_labels"): padding_value = self.label_pad_token_id
diff --git a/tests/test_dpo_trainer.py b/tests/test_dpo_trainer.py --- a/tests/test_dpo_trainer.py +++ b/tests/test_dpo_trainer.py @@ -228,6 +228,41 @@ def test_dpo_trainer_without_providing_ref_model_with_lora(self): if param.sum() != 0: self.assertFalse(torch.equal(param, new_param)) + def test_dpo_trainer_padding_token_is_none(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = TrainingArguments( + output_dir=tmp_dir, + per_device_train_batch_size=2, + max_steps=3, + remove_unused_columns=False, + gradient_accumulation_steps=1, + learning_rate=9e-1, + evaluation_strategy="steps", + ) + + dummy_dataset = self._init_dummy_dataset() + + tokenizer = AutoTokenizer.from_pretrained(self.model_id) + tokenizer.pad_token = None + + with self.assertRaisesRegex( + ValueError, + expected_regex=r"Padding is enabled, but the tokenizer is not configured with a padding token." + r" Explicitly set `tokenizer.pad_token` \(e.g. `tokenizer.pad_token = tokenizer.eos_token`\)" + r" before calling the trainer.", + ): + trainer = DPOTrainer( + model=self.model, + ref_model=None, + beta=0.1, + args=training_args, + tokenizer=tokenizer, + train_dataset=dummy_dataset, + eval_dataset=dummy_dataset, + ) + + trainer.train() + @require_no_wandb def test_dpo_trainer_generate_during_eval_no_wandb(self): with tempfile.TemporaryDirectory() as tmp_dir:
Suggestion (DPO): throw an error/warn if `tokenizer.pad_token` is `None` ## Description trl/trainer/utils.py uses `tokenizer.pad_token` to pad the input sequence. This is problematic when `tokenizer.pad_token` is `None`, which results in a rather cryptic `TypeError`: https://github.com/huggingface/trl/blob/911d3658e23786740c29b8042474bc05ce20b466/trl/trainer/utils.py#L314-L315 https://github.com/huggingface/trl/blob/911d3658e23786740c29b8042474bc05ce20b466/trl/trainer/utils.py#L323 ``` ... [/usr/local/lib/python3.10/dist-packages/trl/trainer/utils.py](https://localhost:8080/#) in __call__(self, features) 320 raise ValueError(f"Unexpected key in batch '{k}'") 321 --> 322 padded_batch[k] = pad_sequence(to_pad, batch_first=True, padding_value=padding_value) 323 # for the prompt, flip back so padding is on left side 324 if "prompt" in k: [/usr/local/lib/python3.10/dist-packages/torch/nn/utils/rnn.py](https://localhost:8080/#) in pad_sequence(sequences, batch_first, padding_value) 398 # assuming trailing dimensions and type of all the Tensors 399 # in sequences are same and fetching those from sequences[0] --> 400 return torch._C._nn.pad_sequence(sequences, batch_first, padding_value) 401 402 TypeError: pad_sequence(): argument 'padding_value' (position 3) must be float, not NoneType ``` Perhaps checking whether `tokenizer.pad_token == None` beforehand and raising an error would help users debug the issue. Any thoughts on this suggestion? Thanks!
Sounds good to me, feel free to open a PR! :)
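For reference, the workaround that the new error message points users to is a one-line tokenizer change before constructing the trainer; a minimal sketch (the model id is a placeholder, chosen only because it ships without a pad token):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # gpt2 has no pad token by default

if tokenizer.pad_token is None:
    # exactly what the suggested ValueError recommends
    tokenizer.pad_token = tokenizer.eos_token

# the padding_value handed to pad_sequence is now an int instead of None
print(tokenizer.pad_token_id)
```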
2023-12-28T12:02:57
huggingface/trl
1,174
huggingface__trl-1174
[ "1122" ]
20428c48ba651b2bc11b2a73e9eb9568b1af3f96
diff --git a/trl/trainer/sft_trainer.py b/trl/trainer/sft_trainer.py --- a/trl/trainer/sft_trainer.py +++ b/trl/trainer/sft_trainer.py @@ -402,7 +402,11 @@ def tokenize(element): else: self._dataset_sanity_checked = True - return {"input_ids": outputs["input_ids"], "attention_mask": outputs["attention_mask"]} + return { + "input_ids": outputs["input_ids"], + "labels": outputs["input_ids"], + "attention_mask": outputs["attention_mask"], + } tokenized_dataset = dataset.map( tokenize, diff --git a/trl/trainer/utils.py b/trl/trainer/utils.py --- a/trl/trainer/utils.py +++ b/trl/trainer/utils.py @@ -452,6 +452,7 @@ def __iter__(self): yield { "input_ids": torch.LongTensor(example), "labels": torch.LongTensor(example), + "attention_mask": torch.ones(len(example)), }
`_prepare_packed_dataloader` and `_prepare_non_packed_dataloader` have inconsistent keys `_prepare_packed_dataloader` outputs a `ConstantLengthDataset` with [keys](https://github.com/huggingface/trl/blob/830cadfc4c80bdced0d3753de392070e6760d1f5/trl/trainer/utils.py#L450) ``` 'input_ids`, 'labels' ``` `_prepare_non_packed_dataloader` outputs some type of dataset, with [keys](https://github.com/huggingface/trl/blob/830cadfc4c80bdced0d3753de392070e6760d1f5/trl/trainer/sft_trainer.py#L393) ``` 'input_ids`, 'attention_mask' ``` The lack of `'labels'` in the latter might be source of confusing bugs. If this is intended behavior, I would at least suggest to post a warning in the documentation of the argument `packing` of `SFTTrainer`
I agree it's a bit confusing but it shouldn't make a difference in practice: the `labels` in the first case are identical to `input_ids` which is the default value filled also when no `labels` are provided in the second case. Similarly for the `attention_mask` in the second case. If you indeed encountered weird behaviour let us know so we can fix. I think we can refactor this in the future to be a bit more coherent. cc @younesbelkada An example of undesired (or, at least, confusing) behavior is the following: ``` def do(packing, collator): # some code to prepare the dataset etc.... trainer = SFTTrainer( model=my_model, train_dataset=my_dataset, eval_dataset=eval_dataset, packing=packing, dataset_text_field='text', data_collator=collator, ) trainer.train() do(True, None) # works do(True, DefaultDataCollator()) # works do(False, None) # works do(False, DefaultDataCollator()) # does not work ``` Even just a warning not to use `DefaultDataCollator` I guess should be enough. By `does not work` I mean that the model only gets `input_ids` and not `labels`. Indeed, that's not ideal. I'll have a look at it hopefully soon, but if you find time in the meantime feel free to open a PR!
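As context for the maintainer's point above: causal-LM collators fill in `labels` from `input_ids` when the dataset has none, while `DefaultDataCollator` passes features through untouched, which is why the last combination in the snippet fails. A small sketch of that difference (illustration only, not the trainer internals):

```python
from transformers import AutoTokenizer, DataCollatorForLanguageModeling, DefaultDataCollator

tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token

features = [{"input_ids": tokenizer("hello world")["input_ids"]}]

lm_batch = DataCollatorForLanguageModeling(tokenizer, mlm=False)(features)
print("labels" in lm_batch)       # True: labels are copied from input_ids

default_batch = DefaultDataCollator()(features)
print("labels" in default_batch)  # False: nothing creates labels, so the model never gets any
```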
2024-01-03T17:51:16
huggingface/trl
1,177
huggingface__trl-1177
[ "1103" ]
be32d304db6ad7d19028c7359a0a91e1dd0af066
diff --git a/trl/trainer/ppo_trainer.py b/trl/trainer/ppo_trainer.py --- a/trl/trainer/ppo_trainer.py +++ b/trl/trainer/ppo_trainer.py @@ -1320,10 +1320,26 @@ def log_stats( rewards (`List[torch.FloatTensor]`): A tensor of rewards. """ + + # all gather stats if not isinstance(rewards, torch.Tensor): rewards = torch.tensor(rewards).to(self.current_device) rewards = self.accelerator.gather(rewards).flatten() + if self.config.log_with == "wandb": + import wandb + + if any([column_to_log not in batch.keys() for column_to_log in columns_to_log]): + raise ValueError(f"Columns to log {columns_to_log} are not present in the batch {batch.keys()}.") + + batch_list = [batch[column_to_log] for column_to_log in columns_to_log] + if self.is_distributed: + gathered_batch_list = [] + for b in batch_list: + flattened = gather_object(b) + gathered_batch_list.append(flattened) + batch_list = gathered_batch_list + # Log only if we are in the main process if self.accelerator.is_main_process: logs = {} @@ -1336,20 +1352,6 @@ def log_stats( "'response'. " ) elif self.config.log_with == "wandb": - import wandb - - if any([column_to_log not in batch.keys() for column_to_log in columns_to_log]): - raise ValueError(f"Columns to log {columns_to_log} are not present in the batch {batch.keys()}.") - - batch_list = [batch[column_to_log] for column_to_log in columns_to_log] - if self.is_distributed: - self.accelerator.wait_for_everyone() - gathered_batch_list = [] - for batch in batch_list: - flattened = gather_object(batch) - gathered_batch_list.append(flattened) - batch_list = gathered_batch_list - table_rows = [list(r) for r in zip(*batch_list, rewards.cpu().tolist())] logs.update({"game_log": wandb.Table(columns=[*columns_to_log, "reward"], rows=table_rows)})
PPO script hangs when logging to wandb in multi-gpu environments ## Description The ppo script hangs when logging to wandb in a multi-gpu setup. But, it works fine without wandb. ## Potential diagnosis It is caused when calling `log_stats` in [here](https://github.com/huggingface/trl/blob/d708ec272f292ded00a4d0f14b5bd214741b87f3/examples/scripts/ppo.py#L212), which probably triggers some error [here](https://github.com/huggingface/trl/blob/d708ec272f292ded00a4d0f14b5bd214741b87f3/trl/trainer/ppo_trainer.py#L1342) when the condition is true. It could be related to 481ef96293d9ecc68acc636287ce97489f1d23d4. ## Code to reproduce I am testing it on a machine with two 3090 GPUs. Packages: ``` accelerate==0.25.0 peft @ git+https://github.com/huggingface/peft@9c70468a3c9efcd1aadc106642f59d38fccb8a5c torch==2.1.0 transformers @ git+https://github.com/huggingface/transformers@df40edfb00715880f7432899b7a854aad7ae39d9 trl @ git+https://github.com/huggingface/trl@f06f357e9cb6618ed28bea547b0ef1014b18f38b wandb==0.16.1 ``` Command: `accelerate launch --main_process_port "${RANDOMPORT}" --multi_gpu --num_processes 2 main.py` Code: ```py from dataclasses import dataclass, field from typing import Optional import torch import tyro from accelerate import Accelerator from datasets import load_dataset from peft import LoraConfig from tqdm import tqdm from transformers import AutoTokenizer, pipeline from trl import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, PPOConfig, PPOTrainer, set_seed from trl.core import LengthSampler from trl.import_utils import is_xpu_available tqdm.pandas() @dataclass class ScriptArguments: ppo_config: PPOConfig = field( default_factory=lambda: PPOConfig( model_name="lvwerra/gpt2-imdb", query_dataset="imdb", reward_model="sentiment-analysis:lvwerra/distilbert-imdb", learning_rate=1.41e-5, log_with='wandb', mini_batch_size=1, batch_size=2, gradient_accumulation_steps=2, early_stopping=False, target_kl=6.0, kl_penalty="kl", seed=0, use_score_scaling=False, use_score_norm=False, score_clip=None, ) ) use_seq2seq: bool = False """whether to use seq2seq models""" use_peft: bool = True """whether to use peft""" peft_config: Optional[LoraConfig] = field( default_factory=lambda: LoraConfig( r=16, lora_alpha=16, bias="none", task_type="CAUSAL_LM", ), ) trust_remote_code: bool = field(default=True, metadata={"help": "Enable `trust_remote_code`"}) args = tyro.cli(ScriptArguments) # We then define the arguments to pass to the sentiment analysis pipeline. # We set `return_all_scores` to True to get the sentiment score for each token. sent_kwargs = {"return_all_scores": True, "function_to_apply": "none", "batch_size": 16} trl_model_class = AutoModelForCausalLMWithValueHead if not args.use_seq2seq else AutoModelForSeq2SeqLMWithValueHead # Below is an example function to build the dataset. In our case, we use the IMDB dataset # from the `datasets` library. One should customize this function to train the model on # its own dataset. def build_dataset(config, query_dataset, input_min_text_length=2, input_max_text_length=8): """ Build dataset for training. This builds the dataset from `load_dataset`, one should customize this function to train the model on its own dataset. Args: query_dataset (`str`): The name of the dataset to be loaded. Returns: dataloader (`torch.utils.data.DataLoader`): The dataloader for the dataset. 
""" tokenizer = AutoTokenizer.from_pretrained(config.model_name) tokenizer.pad_token = tokenizer.eos_token # load imdb with datasets ds = load_dataset(query_dataset, split="train") ds = ds.rename_columns({"text": "review"}) ds = ds.filter(lambda x: len(x["review"]) > 200, batched=False) input_size = LengthSampler(input_min_text_length, input_max_text_length) def tokenize(sample): sample["input_ids"] = tokenizer.encode(sample["review"])[: input_size()] sample["query"] = tokenizer.decode(sample["input_ids"]) return sample ds = ds.map(tokenize, batched=False) ds.set_format(type="torch") return ds # We retrieve the dataloader by calling the `build_dataset` function. dataset = build_dataset(args.ppo_config, args.ppo_config.query_dataset) def collator(data): return dict((key, [d[key] for d in data]) for key in data[0]) # set seed before initializing value head for deterministic eval set_seed(args.ppo_config.seed) # Now let's build the model, the reference model, and the tokenizer. if not args.use_peft: ref_model = trl_model_class.from_pretrained(args.ppo_config.model_name, trust_remote_code=args.trust_remote_code) device_map = None peft_config = None else: peft_config = args.peft_config ref_model = None # Copy the model to each device device_map = {"": Accelerator().local_process_index} model = trl_model_class.from_pretrained( args.ppo_config.model_name, trust_remote_code=args.trust_remote_code, device_map=device_map, peft_config=peft_config, ) tokenizer = AutoTokenizer.from_pretrained(args.ppo_config.model_name) # Some tokenizers like GPT-2's don't have a padding token by default, so we set one here. tokenizer.pad_token_id = tokenizer.eos_token_id # We then build the PPOTrainer, passing the model, the reference model, the tokenizer ppo_trainer = PPOTrainer(args.ppo_config, model, ref_model, tokenizer, dataset=dataset, data_collator=collator) # We then build the sentiment analysis pipeline, passing the model name and the # sentiment analysis pipeline arguments. Let's also make sure to set the device # to the same device as the PPOTrainer. device = ppo_trainer.accelerator.device if ppo_trainer.accelerator.num_processes == 1: if is_xpu_available(): device = "xpu:0" else: device = 0 if torch.cuda.is_available() else "cpu" # to avoid a `pipeline` bug ds_plugin = ppo_trainer.accelerator.state.deepspeed_plugin task, model_name = args.ppo_config.reward_model.split(":") if ds_plugin is not None and ds_plugin.is_zero3_init_enabled(): with ds_plugin.zero3_init_context_manager(enable=False): sentiment_pipe = pipeline(task, model=model_name, device=device) else: sentiment_pipe = pipeline(task, model=model_name, device=device) # Some tokenizers like GPT-2's don't have a padding token by default, so we set one here. if sentiment_pipe.tokenizer.pad_token_id is None: sentiment_pipe.tokenizer.pad_token_id = tokenizer.pad_token_id if sentiment_pipe.model.config.pad_token_id is None: sentiment_pipe.model.config.pad_token_id = tokenizer.pad_token_id # We then define the arguments to pass to the `generate` function. These arguments # are passed to the `generate` function of the PPOTrainer, which is a wrapper around # the `generate` function of the trained model. 
generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, "max_new_tokens": 32, } for epoch, batch in enumerate(tqdm(ppo_trainer.dataloader)): query_tensors = batch["input_ids"] # Get response from gpt2 response_tensors, ref_response_tensors = ppo_trainer.generate( query_tensors, return_prompt=False, generate_ref_response=True, **generation_kwargs ) batch["response"] = tokenizer.batch_decode(response_tensors) batch["ref_response"] = tokenizer.batch_decode(ref_response_tensors) # Compute sentiment score texts = [q + r for q, r in zip(batch["query"], batch["response"])] pipe_outputs = sentiment_pipe(texts, **sent_kwargs) rewards = [torch.tensor(output[1]["score"]) for output in pipe_outputs] ref_texts = [q + r for q, r in zip(batch["query"], batch["ref_response"])] ref_pipe_outputs = sentiment_pipe(ref_texts, **sent_kwargs) ref_rewards = [torch.tensor(output[1]["score"]) for output in ref_pipe_outputs] batch["ref_rewards"] = ref_rewards # Run PPO step stats = ppo_trainer.step(query_tensors, response_tensors, rewards) ppo_trainer.log_stats(stats, batch, rewards, columns_to_log=["query", "response", "ref_response", "ref_rewards"]) ```
Can you identify where exactly it hangs? Maybe @edbeeching or @vwxyzjn have tested this in #850. I have faced the same issue, it stuck at https://github.com/huggingface/trl/blob/20428c48ba651b2bc11b2a73e9eb9568b1af3f96/trl/trainer/ppo_trainer.py#L1347-L1351 I put a ValueError after L1346 and it does raise the error I put a ValueError after L1351 and it never raises the error Thanks! cc @lvwerra Thanks for raising the issue. I can reproduce this issue. I think the problem is `gather_object(batch)` function call. I remember it hangs with nested inputs, so I changed it to deal with flattened inputs, not sure why it stopped working again... Investigating further.
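The diagnosis above reflects a general rule that the eventual fix follows: collective calls such as `gather_object` have to run on every rank, and only the logging itself should be gated on the main process. A minimal standalone sketch of that pattern (not the trainer code), assuming it is launched with something like `accelerate launch --num_processes 2`:

```python
from accelerate import Accelerator
from accelerate.utils import gather_object

accelerator = Accelerator()
local_rows = [f"rank {accelerator.process_index} sample {i}" for i in range(2)]

# every process must reach this call; wrapping it in `if accelerator.is_main_process:`
# (as the pre-fix code effectively did) leaves the other ranks waiting forever
all_rows = gather_object(local_rows)

if accelerator.is_main_process:
    print(all_rows)  # only the main process logs the gathered rows
```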
2024-01-04T16:26:38
huggingface/trl
1,180
huggingface__trl-1180
[ "1161" ]
ec3d41b8797d6b7e389b07c222f42961ad2f4188
diff --git a/trl/trainer/ppo_trainer.py b/trl/trainer/ppo_trainer.py --- a/trl/trainer/ppo_trainer.py +++ b/trl/trainer/ppo_trainer.py @@ -733,11 +733,11 @@ def step( active_full_logprobs = logprobs_from_logits(logits_or_none, None, gather=False) ref_full_logprobs = logprobs_from_logits(ref_logits_or_none, None, gather=False) - rewards, non_score_reward = self.compute_rewards( + rewards, non_score_reward, kls = self.compute_rewards( scores, active_full_logprobs, ref_full_logprobs, masks ) else: - rewards, non_score_reward = self.compute_rewards(scores, all_logprobs, ref_logprobs, masks) + rewards, non_score_reward, kls = self.compute_rewards(scores, all_logprobs, ref_logprobs, masks) timing["time/ppo/compute_rewards"] = time.time() - t t = time.time() @@ -831,6 +831,7 @@ def step( masks=masks, queries=queries, responses=responses, + kls=kls, ) # Gather/Reduce stats from all processes if self.is_distributed: @@ -1091,11 +1092,17 @@ def compute_rewards( Log probabilities of the model, shape (`batch_size`, `response_length`) ref_logprobs (`torch.FloatTensor`): Log probabilities of the reference model, shape (`batch_size`, `response_length`) + + Returns: + `torch.FloatTensor`: Per token rewards, shape (`batch_size`, `response_length`) + `torch.FloatTensor`: Non score rewards, shape (`batch_size`, `response_length`) + `torch.FloatTensor`: KL penalty, shape (`batch_size`, `response_length`) """ - rewards, non_score_rewards = [], [] + rewards, non_score_rewards, kls = [], [], [] for score, logprob, ref_logprob, mask in zip(scores, logprobs, ref_logprobs, masks): # compute KL penalty (from difference in logprobs) kl = self._kl_penalty(logprob, ref_logprob) + kls.append(kl) non_score_reward = -self.kl_ctl.value * kl non_score_rewards.append(non_score_reward) reward = non_score_reward.clone() @@ -1104,7 +1111,7 @@ def compute_rewards( # reward is preference model score + KL penalty reward[last_non_masked_index] += score rewards.append(reward) - return torch.stack(rewards), torch.stack(non_score_rewards) + return torch.stack(rewards), torch.stack(non_score_rewards), torch.stack(kls) def _kl_penalty(self, logprob: torch.FloatTensor, ref_logprob: torch.FloatTensor) -> torch.FloatTensor: if self.config.kl_penalty == "kl": @@ -1256,7 +1263,8 @@ def record_step_stats(self, kl_coef: float, **data): """ mask = data.pop("masks") - kl_list = ((data["logprobs"] - data["ref_logprobs"]) * mask).sum(axis=-1) + kls = data.pop("kls") + kl_list = ((kls) * mask).sum(axis=-1) mean_kl = kl_list.mean() mean_entropy = (-data["logprobs"] * mask).sum(axis=-1).mean()
diff --git a/tests/test_ppo_trainer.py b/tests/test_ppo_trainer.py --- a/tests/test_ppo_trainer.py +++ b/tests/test_ppo_trainer.py @@ -579,7 +579,7 @@ def test_loss_trainer(self): logits = torch.exp(all_logprobs) vpreds = values + 0.1 - score, non_score = ppo_trainer.compute_rewards(dummy_scores, all_logprobs, ref_logprobs, mask) + score, non_score, kls = ppo_trainer.compute_rewards(dummy_scores, all_logprobs, ref_logprobs, mask) values, advantages, returns = ppo_trainer.compute_advantages(values, score, mask) # just make sure a dummy loss is computed
Negative KL warning even if KL='abs' and KL='full' I'm using PPOTrainer to RLHF a llama2 model, and I sometimes get negative KL in my experiments. I've noticed that PPOTrainer prints a warning about that, even if I set `kl_penalty="abs"` or `kl_penalty="full"`, even though (by my understanding) it should be impossible to get negative KL in those cases. Two questions: * Checking the code, it looks like the KL divergence that's reported to wandb, and that is also used to determine whether the "negative KL" warning is printed, is calculated separately from the KL that is used for training; and the reported KL is always using the standard estimate. Did I understand that correctly, and does that mean I can ignore the "negative KL" warning if I'm using "abs" or "full" KL? (I can see that the non-score reward is non-positive if I use abs or full KL, and if I'm reading the code correctly the does indicate that the KL used in training is likely positive.) * Conceptually, is there any issue or drawback with using "abs" or "full" instead of "kl"? Thank you so much!
Indeed, that might be a bug, would you like to open a PR to fix it? In general `full` should be fine but we have observed some issues with the KL becoming negative and then `abs` is safer. > Indeed, that might be a bug, would you like to open a PR to fix it? In general `full` should be fine but we have observed some issues with the KL becoming negative and then `abs` is safer. Yes, happy to open a PR! Give me a few days. And just to make sure my understanding of everything is correct: - With `kl` and negative KL, the real issue is that the negative KL leads to a positive non-score reward, and that opens up a loophole where the algorithm can get almost arbitrarily good returns just by driving down the KL into large-magnitude negative numbers. - Negative KL with `kl` means that the log-prob of the chosen tokens were on average less than the log-probs of those same tokens in the reference model. - That in itself (log-probs of chosen tokens being less than in the ref model) isn't necessarily an issue. It probably just means that the 'peak' of the token probability distribution decreased, i.e. the probability weight was spread out more across tokens, i.e. entropy increased. That could be indicative of an issue depending on what you're trying to do, but it doesn't *have* to be a problem. Is my intuition here roughly in line with how you are thinking about this? And as an aside, can you get negative `full` KL? I thought KL by definition is non-negative; or is `full` still just an estimate pf the real KL? Thank you so much!
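A small numeric sketch of the point above (toy numbers, not TRL's `_kl_penalty`): the old `record_step_stats` line in the diff computes the logged KL as `logprobs - ref_logprobs` regardless of `kl_penalty`, and that naive per-sample estimate can be negative, while an `abs`-style penalty cannot.

```python
import torch

logprob = torch.tensor([-2.0, -1.5, -3.0])      # policy log-probs of the sampled tokens
ref_logprob = torch.tensor([-1.0, -2.0, -2.5])  # reference log-probs of the same tokens

kl_estimate = (logprob - ref_logprob).sum()         # -1.0: the naive estimate can go negative
abs_estimate = (logprob - ref_logprob).abs().sum()  # 2.0: the "abs" variant is always >= 0
print(kl_estimate.item(), abs_estimate.item())
```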
2024-01-05T05:58:40
huggingface/trl
1,185
huggingface__trl-1185
[ "1184" ]
ec3d41b8797d6b7e389b07c222f42961ad2f4188
diff --git a/trl/trainer/utils.py b/trl/trainer/utils.py --- a/trl/trainer/utils.py +++ b/trl/trainer/utils.py @@ -176,6 +176,13 @@ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> D ) batch["labels"][i, :] = self.ignore_index + if ( + len(human_token_ids_idxs) > 0 + and len(response_token_ids_idxs) > 0 + and human_token_ids_idxs[0] > response_token_ids_idxs[0] + ): + human_token_ids_idxs = [0] + human_token_ids_idxs + for idx, (start, end) in enumerate(zip(human_token_ids_idxs, response_token_ids_idxs)): # Make pytorch loss function ignore all non response tokens if idx != 0:
diff --git a/tests/test_data_collator_completion_only.py b/tests/test_data_collator_completion_only.py --- a/tests/test_data_collator_completion_only.py +++ b/tests/test_data_collator_completion_only.py @@ -31,11 +31,14 @@ def test_data_collator_finds_response_template_llama2_tokenizer(self): self.instruction_template = "\n### User:" self.response_template = "\n### Assistant:" - # GPT2Tokenizer: [198, 21017, 11787, 25] -> [11787, 25] + # GPT2Tokenizer: [198, 21017, 11787, 25] -> [21017, 11787, 25] # Llama2Tokenizer: [29871, 13, 2277, 29937, 4911, 29901] -> [2277, 29937, 4911, 29901] + # Note: If this test is ever switched to Llama2Tokenizer, this should be double checked, + # and possibly switched back to [2:] instead of [1:]. + # With GPT2Tokenizer, [1:] is correct - we want the 21017 token included, which is ###. self.tokenized_instruction_w_context = self.tokenizer.encode( self.instruction_template, add_special_tokens=False - )[2:] + )[1:] # GPT2Tokenizer: [198, 21017, 15286, 25] -> [15286, 25] # Llama2Tokenizer: [29871, 13, 2277, 29937, 4007, 22137, 29901] -> [2277, 29937, 4007, 22137, 29901] @@ -57,6 +60,28 @@ def test_data_collator_finds_response_template_llama2_tokenizer(self): ) self.collator.torch_call([self.tokenized_instruction]) + # Test for PR #1185 + # We pass in a string where the first user template is different than the rest. + # Usually this would happen due to context-sensitive tokenization, but here we + # explicitly change the template to test the fix. + self.instruction = """## User: First instruction + +### Assistant: First response + +### User: Second instruction + +### Assistant: Second response""" + self.tokenized_instruction = self.tokenizer.encode(self.instruction, add_special_tokens=False) + self.collator = DataCollatorForCompletionOnlyLM( + self.tokenized_response_w_context, self.tokenized_instruction_w_context, tokenizer=self.tokenizer + ) + collator_output = self.collator.torch_call([self.tokenized_instruction]) + collator_text = self.tokenizer.decode( + collator_output["labels"][torch.where(collator_output["labels"] != -100)] + ) + expected_text = " First response\n\n Second response" "" + self.assertEqual(collator_text, expected_text) + def test_data_collator_handling_of_long_sequences(self): self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/dummy-GPT2-correct-vocab") self.instruction = """### System: You are a helpful assistant.
DataCollatorForCompletionOnlyLM instruction token masking fails if first occurrence of instruction is marked differently The current code in DataCollatorForCompletionOnlyLM assumes that the first detected occurrence of `instruction_template` comes before the first detected occurrence of `response_template`. This is reasonable, since in current applications conversations are initiated by the user, not the assistant. However, this can fail if the first instruction is marked differently from all the other instructions, which can happen if a context-sensitive tokenizer such as Llama-2 tokenizes the instruction_template differently at the start of a string than in the middle. In particular this happens in practice with TinyLlama: `<|user|>` gets tokenized as `529, 29989, 1792, 29989, 29958` at the start of a conversation, but as `29966, 29989, 1792, 29989, 29958` in later messages. Reproduction snippet: ``` from transformers import AutoTokenizer from trl import DataCollatorForCompletionOnlyLM tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0") chat = [ {"role": "user", "content": "Which is bigger, the moon or the sun?"}, {"role": "assistant", "content": "The sun."}, {"role": "user", "content": "Really?"}, {"role": "assistant", "content": "Yes."}, {"role": "user", "content": "I don't believe you."}, {"role": "assistant", "content": "That's okay."}, ] collator = DataCollatorForCompletionOnlyLM(response_template=[29966, 29989, 465, 22137, 29989, 29958, 13], instruction_template=[29871, 13, 29966, 29989, 1792, 29989, 29958], tokenizer=tokenizer) collator([tokenizer.apply_chat_template(chat)]) # This doesn't mask user messages 2 & 3. ``` PR #1185 fixes this, and makes the above snippet mask out all the user messages correctly.
2024-01-06T17:57:39
huggingface/trl
1,188
huggingface__trl-1188
[ "1186" ]
ec3d41b8797d6b7e389b07c222f42961ad2f4188
diff --git a/trl/trainer/sft_trainer.py b/trl/trainer/sft_trainer.py --- a/trl/trainer/sft_trainer.py +++ b/trl/trainer/sft_trainer.py @@ -258,6 +258,7 @@ def make_inputs_require_grad(module, input, output): formatting_func, num_of_sequences, chars_per_token, + remove_unused_columns=args.remove_unused_columns if args is not None else True, **dataset_kwargs, ) if eval_dataset is not None: @@ -273,6 +274,7 @@ def make_inputs_require_grad(module, input, output): formatting_func, num_of_sequences, chars_per_token, + remove_unused_columns=args.remove_unused_columns if args is not None else True, **dataset_kwargs, ) if not _multiple: @@ -348,6 +350,7 @@ def _prepare_dataset( formatting_func, num_of_sequences, chars_per_token, + remove_unused_columns=True, append_concat_token=True, add_special_tokens=True, ): @@ -360,7 +363,13 @@ def _prepare_dataset( if not packing: return self._prepare_non_packed_dataloader( - tokenizer, dataset, dataset_text_field, max_seq_length, formatting_func, add_special_tokens + tokenizer, + dataset, + dataset_text_field, + max_seq_length, + formatting_func, + add_special_tokens, + remove_unused_columns, ) else: @@ -377,7 +386,14 @@ def _prepare_dataset( ) def _prepare_non_packed_dataloader( - self, tokenizer, dataset, dataset_text_field, max_seq_length, formatting_func=None, add_special_tokens=True + self, + tokenizer, + dataset, + dataset_text_field, + max_seq_length, + formatting_func=None, + add_special_tokens=True, + remove_unused_columns=True, ): use_formatting_func = formatting_func is not None and dataset_text_field is None self._dataset_sanity_checked = False @@ -407,7 +423,7 @@ def tokenize(element): tokenized_dataset = dataset.map( tokenize, batched=True, - remove_columns=dataset.column_names, + remove_columns=dataset.column_names if remove_unused_columns else None, num_proc=self.dataset_num_proc, batch_size=self.dataset_batch_size, )
SFTTrainer removes dataset columns even when `remove_unused_columns = False` I've noticed that SFTTrainer removes dataset columns before passing samples to the data collator, even when `remove_unused_columns` is set to `False` in the training arguments. This happens here: https://github.com/huggingface/trl/blob/main/trl/trainer/sft_trainer.py#L410 Any objections to adding a check for `args.remove_unused_columns is True` here?
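A hedged sketch of the behaviour the proposed check enables (placeholder model id and toy dataset; assumes the check is in place): with `remove_unused_columns=False`, the extra column survives the tokenization `map` instead of being dropped.

```python
from datasets import Dataset
from transformers import TrainingArguments
from trl import SFTTrainer

train_dataset = Dataset.from_dict(
    {
        "text": ["first example", "second example"],
        "source": ["web", "books"],  # extra column a custom collator might rely on
    }
)

trainer = SFTTrainer(
    model="gpt2",  # placeholder model id; SFTTrainer instantiates it from the string
    args=TrainingArguments(output_dir="sft-out", remove_unused_columns=False),
    train_dataset=train_dataset,
    dataset_text_field="text",
    max_seq_length=32,
    packing=False,
)
print(trainer.train_dataset.column_names)  # "source" is kept alongside the tokenized fields
```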
2024-01-07T16:11:27
huggingface/trl
1,196
huggingface__trl-1196
[ "1195" ]
b181e401a73ee943382a9bdcdbc0a1000325b8e0
diff --git a/trl/trainer/reward_trainer.py b/trl/trainer/reward_trainer.py --- a/trl/trainer/reward_trainer.py +++ b/trl/trainer/reward_trainer.py @@ -220,11 +220,13 @@ def compute_loss( rewards_chosen = model( input_ids=inputs["input_ids_chosen"], attention_mask=inputs["attention_mask_chosen"], - )[0] + return_dict=True, + )["logits"] rewards_rejected = model( input_ids=inputs["input_ids_rejected"], attention_mask=inputs["attention_mask_rejected"], - )[0] + return_dict=True, + )["logits"] # calculate loss, optionally modulate with margin if "margin" in inputs: loss = -nn.functional.logsigmoid(rewards_chosen - rewards_rejected - inputs["margin"]).mean()
RewardTrainer fails with FSDP I've just run into an odd issue with FSDP & RewardTrainer. It seems that when using FSDP, the output of the (sequence classification) model's `forward` function isn't as expected. Normally, it returns a `SequenceClassifierOutputWithPast` where `logits` contains a tensor with the logits, and `loss` is empty or contains some sort of generator object. When using FSDP, I'm getting a `dict` inside the loss field (and oddly enough that dict again contains a single key `logits`, although that's not the issue). Not sure why this happens, but the net effect is that when the RewardTrainer tries to get the logits through `model(...)[0]` (see [here](https://github.com/huggingface/trl/blob/3267be0fcd424c8d224d7f28a45a64358c857ed0/trl/trainer/reward_trainer.py#L220)), in the non-FSDP case it gets the logits, while in the FSDP case it gets the dict from the now non-empty `loss` field, and then fails a few lines later. Two questions: 1. This is easily fixed by doing `model(...)["logits"]` instead. Any problem with doing that? 2. Purely out of curiosity, does anyone know why this behaves differently with FSDP? To reproduce: Run `examples/scripts/reward_modeling.py` with accelerate + FSDP. `forward` output in a single process: ``` SequenceClassifierOutputWithPast(loss=<generator object gather.<locals>.gather_map.<locals>.<genexpr> at 0x15360f993040>, logits=tensor([[...]], device='cuda:0', grad_fn=<GatherBackward>), past_key_values=None, hidden_states=None, attentions=None) ``` And in FSDP: ``` SequenceClassifierOutputWithPast(loss={'logits': tensor([[...]], device='cuda:1', grad_fn=<ToCopyBackward0>)}, logits=tensor([[...]], device='cuda:1', grad_fn=<ToCopyBackward0>), past_key_values=None, hidden_states=None, attentions=None) ```
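On question 1: indexing the output by name is the robust choice, because integer indexing on a `ModelOutput` returns the first *populated* field, which is exactly how the FSDP run ended up handing back the `loss` dict. A minimal sketch of the difference (no FSDP required, just the output container):

```python
import torch
from transformers.modeling_outputs import SequenceClassifierOutputWithPast

# single-process case: loss is empty, so [0] happens to be the logits
out = SequenceClassifierOutputWithPast(loss=None, logits=torch.randn(2, 1))
print(out["logits"].shape, out[0].shape)

# FSDP-like case observed above: loss is populated, so [0] is no longer the logits
out = SequenceClassifierOutputWithPast(loss={"logits": torch.randn(2, 1)}, logits=torch.randn(2, 1))
print(type(out[0]))         # dict -- what the trainer tripped over
print(out["logits"].shape)  # fetching by name still returns the logits
```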
2024-01-09T00:56:52
huggingface/trl
1,415
huggingface__trl-1415
[ "1409" ]
4d862da181620ccdf274138e94eff74f0c9b83be
diff --git a/trl/core.py b/trl/core.py --- a/trl/core.py +++ b/trl/core.py @@ -22,7 +22,7 @@ import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.rnn import pad_sequence -from transformers import top_k_top_p_filtering +from transformers.generation import TopKLogitsWarper, TopPLogitsWarper from .import_utils import is_npu_available, is_xpu_available @@ -36,6 +36,42 @@ WANDB_PADDING = -1 +def top_k_top_p_filtering( + logits: torch.FloatTensor, + top_k: int = 0, + top_p: float = 1.0, + filter_value: float = -float("Inf"), + min_tokens_to_keep: int = 1, +) -> torch.FloatTensor: + """ + Filter a distribution of logits using top-k and/or nucleus (top-p) filtering. + + Args: + logits: logits distribution shape (batch size, vocabulary size) + top_k (`int`, *optional*, defaults to 0): + If > 0, only keep the top k tokens with highest probability (top-k filtering) + top_p (`float`, *optional*, defaults to 1.0): + If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus + filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Minimumber of tokens we keep per batch example in the output. + + From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 + """ + + if top_k > 0: + logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)( + None, logits + ) + + if 0 <= top_p <= 1.0: + logits = TopPLogitsWarper(top_p=top_p, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)( + None, logits + ) + + return logits + + def flatten_dict(nested: Dict, sep: str = "/") -> Dict: """Flatten dictionary and concatenate nested keys with separator."""
Cannot import name top_k_top_p_filtering Hello everyone, I just came across an error: I cannot import trl because of the following: ```ImportError: cannot import name 'top_k_top_p_filtering' from 'transformers' (/usr/local/lib/python3.10/dist-packages/transformers/__init__.py)``` Although I upgraded the transformers library, it still does not work. https://github.com/huggingface/trl/blob/22b4f548f4954319ece6f17cab226a26e2db65be/trl/core.py#L25
Same here. I encountered the same error this week; last week I could import SFTTrainer without this error. Another developer also bumped into this error this week; see this closed [issue](https://github.com/huggingface/trl/issues/6) ![git](https://github.com/huggingface/trl/assets/8589224/97c935af-b310-4cb5-a614-71d3270e07f4) Make sure that you have transformers version 4.38.2. Then, importing 'top_k_top_p_filtering' from "transformers.generation.utils" should work. I would like to continue finetuning starcoder2, so I have to install transformers from source with !pip install git+https://github.com/huggingface/transformers.git I have already SFT-trained the model, but now I cannot import DPOTrainer. Using transformers 4.38.2 is not feasible as it doesn't recognize starcoder2. It does however solve the issue with the DPOTrainer import -- but I need the newest version, which I think is 4.39.* I tried to import top_k_top_p_filtering (I cannot import DPOTrainer because of this) as suggested above, but I get an error: --------------------------------------------------------------------------- ImportError Traceback (most recent call last) [<ipython-input-8-22d4c10675d7>](https://localhost:8080/#) in <cell line: 1>() ----> 1 from transformers.generation.utils import top_k_top_p_filtering ImportError: cannot import name 'top_k_top_p_filtering' from 'transformers.generation.utils' (/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py) --------------------------------------------------------------------------- NOTE: If your import is failing due to a missing package, you can manually install dependencies using either !pip or !apt. To view examples of installing some common dependencies, click the "Open Examples" button below. What is the workaround for installing DPOTrainer for the latest transformers version? Thanks
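For reference, a small usage sketch of the replacement that the patch above vendors into `trl.core` (the tensor values and sampling parameters here are arbitrary): the two logits warpers from `transformers.generation` reproduce what the removed `transformers.top_k_top_p_filtering` helper did.

```python
import torch
from transformers.generation import TopKLogitsWarper, TopPLogitsWarper

logits = torch.randn(1, 50257)  # (batch_size, vocab_size) scores from a language model

# Top-k filtering, then nucleus (top-p) filtering; input_ids are not needed by these warpers.
filtered = TopKLogitsWarper(top_k=50, filter_value=-float("Inf"))(None, logits)
filtered = TopPLogitsWarper(top_p=0.95, filter_value=-float("Inf"))(None, filtered)

probs = torch.softmax(filtered, dim=-1)
next_token = torch.multinomial(probs, num_samples=1)
print(next_token)
```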
2024-03-11T11:50:32
huggingface/trl
1,468
huggingface__trl-1468
[ "1455" ]
423991c204c4b36d4aea6955eeb53140fbbea9bb
diff --git a/trl/trainer/sft_trainer.py b/trl/trainer/sft_trainer.py --- a/trl/trainer/sft_trainer.py +++ b/trl/trainer/sft_trainer.py @@ -17,6 +17,7 @@ from functools import wraps from typing import Callable, Dict, List, Optional, Tuple, Union +import datasets import torch import torch.nn as nn from accelerate.state import PartialState @@ -401,7 +402,9 @@ def _prepare_dataset( raise ValueError("The dataset should not be None") # check if torch dataset / dataloader and do nothing - if isinstance(dataset, (torch.utils.data.IterableDataset, torch.utils.data.Dataset, ConstantLengthDataset)): + if isinstance( + dataset, (torch.utils.data.IterableDataset, torch.utils.data.Dataset, ConstantLengthDataset) + ) and not isinstance(dataset, datasets.IterableDataset): return dataset if not packing: @@ -513,6 +516,9 @@ def _prepare_packed_dataloader( add_special_tokens=add_special_tokens, ) + if isinstance(dataset, datasets.IterableDataset): + return constant_length_iterator + def data_generator(constant_length_iterator): yield from constant_length_iterator
Support streaming + packing in SFTTrainer Now that the alignment handbook has support for continued pretraining (using SFTTrainer under the hood), it'd be great to account for those cases where massive datasets are used. In such scenarios we want to use packing + streaming. However, in my experience that does not work well. From my experiments and digging I have uncovered the following. In a typical scenario we first load a dataset with a given text column e.g. `text` and with streaming enabled through load_dataset. The result is a `datasets.IterableDataset`. This is passed to the SFTTrainer. However, the first thing that goes wrong is here, when deciding how to prepare the dataset (packing/non-packing): https://github.com/huggingface/trl/blob/1705aebebafea53ac7ad3509ce6e5002e0f36240/trl/trainer/sft_trainer.py#L404 This if-statement will evaluate to true because a `datasets.IterableDataset` is [also a `torch.IterableDataset`](https://github.com/huggingface/datasets/blob/ca8409a8bec4508255b9c3e808d0751eb1005260/src/datasets/iterable_dataset.py#L1187-L1188). As such tokenization/packing never happens for "stream" (IterableDataset) datasets and the dataset remains as-is: a dataset with just a text field. The first suggestion therefore is to check explicitly that this is NOT a `datasets.IterableDataset`, so replace the if-statement by: ```python if isinstance(dataset, (torch.utils.data.IterableDataset, torch.utils.data.Dataset, ConstantLengthDataset)) and not isinstance(dataset, datasets.IterableDataset): return dataset ``` Unfortunately even after that change we run into undesired behavior. Because packing is enabled, we are preparing a `ConstantLengthDataset`. That's fine - its `__iter__` method is a nice generator that will not by itself process the whole massive streaming dataset. However, it seems that that behavior IS triggered here, where the `ConstantLengthDataset` is exhausted into a new, regular Dataset (which is not an `IterableDataset` anymore!) https://github.com/huggingface/trl/blob/1705aebebafea53ac7ad3509ce6e5002e0f36240/trl/trainer/sft_trainer.py#L520-L522 For small datasets that is not an issue, and I imagine that it is useful then to have all data preprocessed in-memory. However, for streaming datasets that is exactly what we do not want! Both in terms of memory and in terms of how long this preprocessing takes. This problem can be resolved by simply returning the `ConstantLengthDataset` itself, without transforming it into a regular Dataset. ```python if isinstance(dataset, IterableDataset): return constant_length_iterator ``` If you agree I can make a PR for these two changes. However, it is also possible that I missed something that motivates the reason why the current implementation does not deal well with streaming.
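A quick check of the subclassing claim above (this needs network access to open the stream; `imdb` is just an example dataset): a streaming `datasets.IterableDataset` is indeed also a `torch.utils.data.IterableDataset`, which is why the original isinstance guard returned the raw, untokenized dataset.

```python
import datasets
import torch.utils.data

stream = datasets.load_dataset("imdb", split="train", streaming=True)
print(isinstance(stream, datasets.IterableDataset))          # True
print(isinstance(stream, torch.utils.data.IterableDataset))  # also True when torch is installed
```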
--- To repro the original issue, you can install the alignment handbook ([from this branch](https://github.com/BramVanroy/alignment-handbook/tree/allow_streaming)), use the config file below ```yaml # Model arguments model_name_or_path: gpt2 model_revision: main torch_dtype: bfloat16 use_flash_attention_2: false # Data training arguments dataset_mixer: uonlp/CulturaX: 1.0 dataset_configs: - nl dataset_splits: - train use_streaming: true preprocessing_num_workers: 8 # SFT trainer config bf16: true do_eval: False evaluation_strategy: "no" gradient_accumulation_steps: 8 gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: False hub_model_id: gpt2-cpt-dutch learning_rate: 1.0e-05 weight_decay: 0.01 log_level: info logging_steps: 5 logging_strategy: steps lr_scheduler_type: cosine max_seq_length: 480 max_steps: 917130 output_dir: data/gpt2-cpt-dutch overwrite_output_dir: true per_device_train_batch_size: 4 remove_unused_columns: true save_strategy: "steps" save_steps: 100000 save_total_limit: 3 seed: 42 warmup_ratio: 0.01 ``` and launch with ```bash python scripts/run_cpt.py config.yaml ```
2024-03-22T09:49:14
huggingface/trl
1,478
huggingface__trl-1478
[ "1477" ]
2ce8e45bb222db68fe9eb92dc8b2a2f1339a2d75
diff --git a/trl/trainer/cpo_trainer.py b/trl/trainer/cpo_trainer.py --- a/trl/trainer/cpo_trainer.py +++ b/trl/trainer/cpo_trainer.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import inspect import random import warnings @@ -665,6 +666,7 @@ def concatenated_forward( outputs = model( concatenated_batch["concatenated_input_ids"], attention_mask=concatenated_batch["concatenated_attention_mask"], + use_cache=False, **model_kwargs, ) all_logits = outputs.logits diff --git a/trl/trainer/orpo_trainer.py b/trl/trainer/orpo_trainer.py --- a/trl/trainer/orpo_trainer.py +++ b/trl/trainer/orpo_trainer.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import inspect import random import warnings @@ -685,6 +686,7 @@ def concatenated_forward( outputs = model( concatenated_batch["concatenated_input_ids"], attention_mask=concatenated_batch["concatenated_attention_mask"], + use_cache=False, **model_kwargs, ) all_logits = outputs.logits
`ORPOTrainer` fails with `flash-attention2` ## Description The `ORPOTrainer` fails when training with `attn_implementation="flash_attention_2"`, since the cache is being used and the tokenizer falls back to the default configuration, i.e. `padding_side="right"`, in this case. ## Bug in code The forward call is missing `use_cache=False`, which would prevent the model from using the cache and avoid issues with Flash Attention 2. https://github.com/huggingface/trl/blob/2ce8e45bb222db68fe9eb92dc8b2a2f1339a2d75/trl/trainer/orpo_trainer.py#L685-L689
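A minimal sketch of the one-line fix in the patch above, isolated from the trainer: passing `use_cache=False` on a training-style forward pass skips building `past_key_values`. gpt2 is used here only so the snippet runs anywhere on CPU; the issue itself concerns models loaded with `attn_implementation="flash_attention_2"`.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
tok.pad_token = tok.eos_token
model = AutoModelForCausalLM.from_pretrained("gpt2")

batch = tok(["chosen response", "rejected response"], return_tensors="pt", padding=True)
out = model(**batch, use_cache=False)          # mirrors the added use_cache=False
print(out.logits.shape, out.past_key_values)   # no KV cache is returned
```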
2024-03-24T10:24:18
huggingface/trl
1,496
huggingface__trl-1496
[ "1495" ]
0ee349dcd43b0f4b3169449f16751c38ac4a609f
diff --git a/trl/trainer/utils.py b/trl/trainer/utils.py --- a/trl/trainer/utils.py +++ b/trl/trainer/utils.py @@ -746,8 +746,8 @@ class RichProgressCallback(TrainerCallback): """ def __init__(self): - self.training_bar = Progress() - self.prediction_bar = Progress() + self.training_bar = None + self.prediction_bar = None self.training_task_id = None self.prediction_task_id = None @@ -755,8 +755,14 @@ def __init__(self): self.rich_group = None self.rich_console = None + self.training_status = None + self.current_step = None + def on_train_begin(self, args, state, control, **kwargs): if state.is_world_process_zero: + self.training_bar = Progress() + self.prediction_bar = Progress() + self.rich_console = Console() self.training_status = self.rich_console.status("Nothing to log yet ...") @@ -764,9 +770,8 @@ def on_train_begin(self, args, state, control, **kwargs): self.rich_group = Live(Panel(Group(self.training_bar, self.prediction_bar, self.training_status))) self.rich_group.start() - # self.training_bar.start() self.training_task_id = self.training_bar.add_task("[blue]Training the model", total=state.max_steps) - self.current_step = 0 + self.current_step = 0 def on_step_end(self, args, state, control, **kwargs): if state.is_world_process_zero: @@ -775,25 +780,23 @@ def on_step_end(self, args, state, control, **kwargs): def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs): if state.is_world_process_zero and has_length(eval_dataloader): - if self.prediction_bar is None: - # self.prediction_bar.start() + if self.prediction_task_id is None: self.prediction_task_id = self.prediction_bar.add_task( - "[blue]Predicting on the evaluation dataset", total=state.max_steps + "[blue]Predicting on the evaluation dataset", total=len(eval_dataloader) ) self.prediction_bar.update(self.prediction_task_id, advance=1, update=True) def on_evaluate(self, args, state, control, **kwargs): if state.is_world_process_zero: - if self.prediction_bar is not None: - self.prediction_bar.close() - self.prediction_bar = None + if self.prediction_task_id is not None: + self.prediction_bar.remove_task(self.prediction_task_id) + self.prediction_task_id = None def on_predict(self, args, state, control, **kwargs): if state.is_world_process_zero: - if self.prediction_bar is not None: - self.prediction_bar.stop() + if self.prediction_task_id is not None: self.prediction_bar.remove_task(self.prediction_task_id) - self.prediction_bar = None + self.prediction_task_id = None def on_log(self, args, state, control, logs=None, **kwargs): if state.is_world_process_zero and self.training_bar is not None: @@ -802,6 +805,13 @@ def on_log(self, args, state, control, logs=None, **kwargs): def on_train_end(self, args, state, control, **kwargs): if state.is_world_process_zero: - self.training_bar.stop() self.rich_group.stop() + self.training_bar = None + self.prediction_bar = None + self.training_task_id = None + self.prediction_task_id = None + self.rich_group = None + self.rich_console = None + self.training_status = None + self.current_step = None
diff --git a/tests/test_rich_progress_callback.py b/tests/test_rich_progress_callback.py new file mode 100644 --- /dev/null +++ b/tests/test_rich_progress_callback.py @@ -0,0 +1,53 @@ +import tempfile +import unittest + +import torch +import torch.nn as nn +from datasets import Dataset +from transformers import Trainer, TrainingArguments + +from trl.trainer.utils import RichProgressCallback + + +class DummyModel(nn.Module): + def __init__(self): + super().__init__() + self.a = nn.Parameter(torch.tensor(1.0)) + + def forward(self, x): + return self.a * x + + +class TestRichProgressCallback(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.dummy_model = DummyModel() + cls.dummy_train_dataset = Dataset.from_list([{"x": 1.0, "y": 2.0}] * 5) + cls.dummy_val_dataset = Dataset.from_list([{"x": 1.0, "y": 2.0}] * 101) + + def test_rich_progress_callback_logging(self): + with tempfile.TemporaryDirectory() as tmp_dir: + training_args = TrainingArguments( + output_dir=tmp_dir, + per_device_eval_batch_size=2, + per_device_train_batch_size=2, + num_train_epochs=4, + evaluation_strategy="steps", + eval_steps=1, + logging_strategy="steps", + logging_steps=1, + save_strategy="no", + report_to="none", + disable_tqdm=True, + ) + callbacks = [RichProgressCallback()] + trainer = Trainer( + model=self.dummy_model, + train_dataset=self.dummy_train_dataset, + eval_dataset=self.dummy_val_dataset, + args=training_args, + callbacks=callbacks, + ) + + trainer.train() + trainer.train()
`RichProgressCallback` would break model evaluation and prediction Hi! It's awesome to have a CLI for `trl`. However, there seems to be a problem with the newly introduced `RichProgressCallback`. This issue affects both the evaluation and prediction stages. To reproduce the issue, simply run ``` trl sft --model_name_or_path facebook/opt-125m --dataset_name imdb --output_dir opt-sft-imdb --evaluation_strategy steps --eval_steps 1 ``` This leads to the following error message: ``` Traceback (most recent call last): File "**************/lib/python3.11/site-packages/trl/commands/scripts/sft.py", line 148, in <module> trainer.train() File "**************/lib/python3.11/site-packages/trl/trainer/sft_trainer.py", line 360, in train output = super().train(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "**************/lib/python3.11/site-packages/transformers/trainer.py", line 1780, in train return inner_training_loop( ^^^^^^^^^^^^^^^^^^^^ File "**************/lib/python3.11/site-packages/transformers/trainer.py", line 2193, in _inner_training_loop self._maybe_log_save_evaluate(tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval) File "**************/lib/python3.11/site-packages/transformers/trainer.py", line 2577, in _maybe_log_save_evaluate metrics = self.evaluate(ignore_keys=ignore_keys_for_eval) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "**************/lib/python3.11/site-packages/transformers/trainer.py", line 3365, in evaluate output = eval_loop( ^^^^^^^^^^ File "**************/lib/python3.11/site-packages/transformers/trainer.py", line 3586, in evaluation_loop self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "**************/lib/python3.11/site-packages/transformers/trainer_callback.py", line 410, in on_prediction_step return self.call_event("on_prediction_step", args, state, control) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "**************/lib/python3.11/site-packages/transformers/trainer_callback.py", line 414, in call_event result = getattr(callback, event)( ^^^^^^^^^^^^^^^^^^^^^^^^^ File "**************/lib/python3.11/site-packages/trl/trainer/utils.py", line 783, in on_prediction_step self.prediction_bar.update(self.prediction_task_id, advance=1, update=True) File "**************/lib/python3.11/site-packages/rich/progress.py", line 1425, in update task = self._tasks[task_id] ~~~~~~~~~~~^^^^^^^^^ KeyError: None ```
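A tiny reproduction of the failure mode at the bottom of that traceback, isolated from the trainer (rich is the only dependency): `Progress.update()` looks tasks up by id, so calling it with a task id that was never registered via `add_task()` raises `KeyError`, which is what happens when the callback reaches `on_prediction_step` before a prediction task was created.

```python
from rich.progress import Progress

bar = Progress()
missing_task_id = None  # the callback hits update() before add_task() ever ran
try:
    bar.update(missing_task_id, advance=1)
except KeyError as err:
    print("KeyError, as in the traceback:", err)
```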
I’m working on a PR to address this issue. For those with urgent needs, a simple workaround is to comment out this line. https://github.com/huggingface/trl/blob/0ee349dcd43b0f4b3169449f16751c38ac4a609f/trl/commands/cli.py#L45
2024-03-31T11:04:03
huggingface/trl
1,509
huggingface__trl-1509
[ "1400" ]
9a28b3fd0505aa38798f0122ab0ff3bb795384dd
diff --git a/trl/trainer/sft_trainer.py b/trl/trainer/sft_trainer.py --- a/trl/trainer/sft_trainer.py +++ b/trl/trainer/sft_trainer.py @@ -264,6 +264,12 @@ def make_inputs_require_grad(module, input, output): # check if dataset has ChatML format or instruction format and is supported # if not stays #None formatting_func = get_formatting_func_from_dataset(train_dataset, tokenizer) + # if a template is detected, we don't need to add special tokens again + if formatting_func is not None: + if dataset_kwargs is None: + dataset_kwargs = {"add_special_tokens": False} + else: + dataset_kwargs["add_special_tokens"] = False if not packing: if dataset_text_field is None and formatting_func is None:
Conversational data for SFTTrainer For SFTTrainer, if we load the dataset using a conversational form (ChatML format), the function `apply_chat_template` is used (https://github.com/huggingface/trl/blob/v0.7.11/trl/extras/dataset_formatting.py#L55) with `tokenize=False`. Later in SFTTrainer, the data is tokenized again with `add_special_tokens=True`. With tokenizers like LlamaTokenizer, there will be two BOS tokens at the very beginning: `<s><s> ...`, which is not intended. Maybe we should modify `dataset_kwargs` at this line https://github.com/huggingface/trl/blob/v0.7.11/trl/trainer/sft_trainer.py#L246 so that `dataset_kwargs['add_special_tokens']=False`?
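A minimal sketch of the duplicated-BOS effect, using the public test tokenizer `hf-internal-testing/llama-tokenizer` as a stand-in for a Llama-style tokenizer (an assumed, illustration-only choice): if the templated text already starts with the BOS token and is tokenized again with `add_special_tokens=True`, the BOS id shows up twice.

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")  # illustration-only tokenizer

templated = tok.bos_token + "Hello"  # what a chat template may already have emitted
ids = tok(templated, add_special_tokens=True)["input_ids"]
print(tok.bos_token_id, ids[:2])  # the BOS id appears twice at the start
```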
Yes, that would make sense; would you like to open a PR for the fix? cc @philschmid what do you think? Sure, I will do that. Good idea @edixiong, that's what I currently do manually. https://www.philschmid.de/fine-tune-llms-in-2024-with-trl#4-fine-tune-llm-using-trl-and-the-sfttrainer This probably should only be applied if the "chatml" or another template is detected.
2024-04-06T00:17:12
huggingface/trl
1,527
huggingface__trl-1527
[ "1469" ]
995f1174da89da4dc0ad04c45de11d67b6d06274
diff --git a/trl/__init__.py b/trl/__init__.py --- a/trl/__init__.py +++ b/trl/__init__.py @@ -51,7 +51,7 @@ "SFTTrainer", ], "commands": [], - "commands.utils": ["SftArgumentParser", "init_zero_verbose", "TrlParser", "DpoArgumentParser"], + "commands.cli_utils": ["init_zero_verbose", "SftScriptArguments", "DpoScriptArguments", "TrlParser"], "trainer.utils": ["get_kbit_device_map", "get_peft_config", "get_quantization_config", "RichProgressCallback"], "multitask_prompt_tuning": [ "MultitaskPromptEmbedding", @@ -115,7 +115,7 @@ SFTTrainer, ) from .trainer.utils import get_kbit_device_map, get_peft_config, get_quantization_config, RichProgressCallback - from .commands.utils import init_zero_verbose, SftScriptArguments, DpoScriptArguments, TrlParser + from .commands.cli_utils import init_zero_verbose, SftScriptArguments, DpoScriptArguments, TrlParser try: if not is_diffusers_available():
from trl import SFTTrainer getting issue when importing SFTTrainer from trl then getting below issue Same happening when i copy and paste the trl code from github --------------------------------------------------------------------------- TypeError Traceback (most recent call last) File /opt/conda/lib/python3.10/site-packages/trl/import_utils.py:172, in _LazyModule._get_module(self, module_name) 171 try: --> 172 return importlib.import_module("." + module_name, self.__name__) 173 except Exception as e: File /opt/conda/lib/python3.10/importlib/__init__.py:126, in import_module(name, package) 125 level += 1 --> 126 return _bootstrap._gcd_import(name[level:], package, level) File <frozen importlib._bootstrap>:1050, in _gcd_import(name, package, level) File <frozen importlib._bootstrap>:1027, in _find_and_load(name, import_) File <frozen importlib._bootstrap>:1006, in _find_and_load_unlocked(name, import_) File <frozen importlib._bootstrap>:688, in _load_unlocked(spec) File <frozen importlib._bootstrap_external>:883, in exec_module(self, module) File <frozen importlib._bootstrap>:241, in _call_with_frames_removed(f, *args, **kwds) File /opt/conda/lib/python3.10/site-packages/trl/trainer/sft_trainer.py:23 22 from accelerate.state import PartialState ---> 23 from datasets import Dataset 24 from datasets.arrow_writer import SchemaInferenceError File /opt/conda/lib/python3.10/site-packages/datasets/__init__.py:18 16 __version__ = "2.18.0" ---> 18 from .arrow_dataset import Dataset 19 from .arrow_reader import ReadInstruction File /opt/conda/lib/python3.10/site-packages/datasets/arrow_dataset.py:66 64 from tqdm.contrib.concurrent import thread_map ---> 66 from . import config 67 from .arrow_reader import ArrowReader File /opt/conda/lib/python3.10/site-packages/datasets/config.py:41 40 DILL_VERSION = version.parse(importlib.metadata.version("dill")) ---> 41 FSSPEC_VERSION = version.parse(importlib.metadata.version("fsspec")) 42 PANDAS_VERSION = version.parse(importlib.metadata.version("pandas")) File /opt/conda/lib/python3.10/site-packages/packaging/version.py:49, in parse(version) 48 try: ---> 49 return Version(version) 50 except InvalidVersion: File /opt/conda/lib/python3.10/site-packages/packaging/version.py:264, in Version.__init__(self, version) 261 def __init__(self, version: str) -> None: 262 263 # Validate the version and parse it into pieces --> 264 match = self._regex.search(version) 265 if not match: TypeError: expected string or bytes-like object The above exception was the direct cause of the following exception: RuntimeError Traceback (most recent call last) Cell In[4], line 4 2 import transformers 3 import torch ----> 4 from trl import SFTTrainer 5 from peft import LoraConfig 6 from transformers import AutoTokenizer, AutoModelForCausalLM File <frozen importlib._bootstrap>:1075, in _handle_fromlist(module, fromlist, import_, recursive) File /opt/conda/lib/python3.10/site-packages/trl/import_utils.py:163, in _LazyModule.__getattr__(self, name) 161 elif name in self._class_to_module.keys(): 162 module = self._get_module(self._class_to_module[name]) --> 163 value = getattr(module, name) 164 else: 165 raise AttributeError(f"module {self.__name__} has no attribute {name}") File /opt/conda/lib/python3.10/site-packages/trl/import_utils.py:162, in _LazyModule.__getattr__(self, name) 160 value = self._get_module(name) 161 elif name in self._class_to_module.keys(): --> 162 module = self._get_module(self._class_to_module[name]) 163 value = getattr(module, name) 164 else: File 
/opt/conda/lib/python3.10/site-packages/trl/import_utils.py:174, in _LazyModule._get_module(self, module_name) 172 return importlib.import_module("." + module_name, self.__name__) 173 except Exception as e: --> 174 raise RuntimeError( 175 f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its" 176 f" traceback):\n{e}" 177 ) from e RuntimeError: Failed to import trl.trainer.sft_trainer because of the following error (look up to see its traceback): expected string or bytes-like object
Hi @DimensionZer0 Thanks for the issue! just tried it locally and the import worked for me, it also works in our CI, can you try again on a fresh new env? ```bash > python Python 3.9.12 | packaged by conda-forge | (main, Mar 24 2022, 23:25:14) [Clang 12.0.1 ] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> from trl import SFTTrainer >>> ``` Can you share more details about your setup / env? I am using kaggle notebook @younesbelkada and still getting same issue .. version of python is 3.10.13 Till yesterday it was working for me on same env its working in my local but getting issue in kaggle notebook I can confirm, I am also receiving this error using Kaggle notebooks, on both 0.8.0 and 0.8.1 of trl. Trying 0.7.11 gave me #1409.
2024-04-12T10:05:20
huggingface/trl
1,531
huggingface__trl-1531
[ "1526" ]
aba4df02c128d661685b16773ad9425313cbfdfc
diff --git a/trl/trainer/cpo_trainer.py b/trl/trainer/cpo_trainer.py --- a/trl/trainer/cpo_trainer.py +++ b/trl/trainer/cpo_trainer.py @@ -738,7 +738,7 @@ def get_batch_loss_metrics( metrics[f"{prefix}logps/chosen"] = policy_chosen_logps.detach().mean().cpu() metrics[f"{prefix}logits/rejected"] = policy_rejected_logits.detach().mean().cpu() metrics[f"{prefix}logits/chosen"] = policy_chosen_logits.detach().mean().cpu() - metrics[f"{prefix}nll_loss"] = policy_nll_loss.cpu().mean() + metrics[f"{prefix}nll_loss"] = policy_nll_loss.detach().mean().cpu() return loss, metrics
A bug which leads to "CUDA: out of memory" in CPOTrainer (cpo_trainer.py), trl 0.8.1 and 0.8.2; please fix this bug. There is a bug in CPOTrainer: when running CPOTrainer, after several steps the GPU memory usage increases and it raises the out-of-memory exception. We found that the exception is caused by the missing "detach" in line 741 of CPOTrainer (cpo_trainer.py); please fix this bug. Line 741 of CPOTrainer (cpo_trainer.py): metrics[f"{prefix}nll_loss"] = policy_nll_loss.cpu().mean() -> metrics[f"{prefix}nll_loss"] = policy_nll_loss.detach().cpu().mean()
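A small, CPU-only illustration of why the missing `detach()` leaks memory (dummy tensors, not the trainer code): a tensor stored in a metrics dict without `detach()` stays attached to its autograd graph, keeping that graph and everything it references alive for as long as the dict entry lives.

```python
import torch

w = torch.randn(1024, 1024, requires_grad=True)
loss = (w ** 2).mean()

metrics = {
    "leaky": loss.cpu().mean(),           # still attached to the autograd graph
    "fixed": loss.detach().mean().cpu(),  # what the patch above switches to
}
print(metrics["leaky"].requires_grad, metrics["leaky"].grad_fn is not None)  # True True
print(metrics["fixed"].requires_grad, metrics["fixed"].grad_fn)              # False None
```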
2024-04-12T13:03:16
huggingface/trl
1,542
huggingface__trl-1542
[ "1430" ]
e823458a6a793778b959a1c134cd2ee3eaa9a9bd
diff --git a/trl/trainer/kto_trainer.py b/trl/trainer/kto_trainer.py --- a/trl/trainer/kto_trainer.py +++ b/trl/trainer/kto_trainer.py @@ -16,7 +16,7 @@ import random import warnings from collections import defaultdict -from contextlib import nullcontext +from contextlib import contextmanager, nullcontext from copy import deepcopy from functools import wraps from operator import itemgetter @@ -257,6 +257,10 @@ class KTOTrainer(Trainer): compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*): The function to use to compute the metrics. Must take a `EvalPrediction` and return a dictionary string to metric values. + model_adapter_name (`str`, defaults to `None`): + Name of the train target PEFT adapter, when using LoRA with multiple adapters. + ref_adapter_name (`str`, defaults to `None`): + Name of the reference PEFT adapter, when using LoRA with multiple adapters. """ _tag_names = ["trl", "kto"] @@ -276,6 +280,8 @@ def __init__( preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, peft_config: Optional[Dict] = None, compute_metrics: Optional[Callable[[EvalLoopOutput], Dict]] = None, + model_adapter_name: Optional[str] = None, + ref_adapter_name: Optional[str] = None, ): if type(args) == TrainingArguments: raise ValueError("Please use `KTOConfig` instead TrainingArguments.") @@ -392,6 +398,8 @@ def make_inputs_require_grad(module, input, output): self.is_encoder_decoder = args.is_encoder_decoder self.is_peft_model = is_peft_available() and isinstance(model, PeftModel) + self.model_adapter_name = model_adapter_name + self.ref_adapter_name = ref_adapter_name if ref_model: self.ref_model = ref_model @@ -677,6 +685,18 @@ def _prepare_deepspeed(self, model: PreTrainedModelWrapper): model.eval() return model + @contextmanager + def null_ref_context(self): + """Context manager for handling null reference model (that is, peft adapter manipulation).""" + with self.accelerator.unwrap_model( + self.model + ).disable_adapter() if self.is_peft_model and not self.ref_adapter_name else nullcontext(): + if self.ref_adapter_name: + self.model.set_adapter(self.ref_adapter_name) + yield + if self.ref_adapter_name: + self.model.set_adapter(self.model_adapter_name or "default") + def get_train_dataloader(self) -> DataLoader: """ Returns the training [`~torch.utils.data.DataLoader`]. @@ -775,9 +795,7 @@ def compute_reference_log_probs(self, padded_batch: Dict) -> Dict: """Computes log probabilities of the reference model for a single padded batch of a KTO specific dataset.""" with torch.no_grad(): if self.ref_model is None: - with self.accelerator.unwrap_model( - self.model - ).disable_adapter() if self.is_peft_model else nullcontext(): + with self.null_ref_context(): if self.is_encoder_decoder: completion_logits = self.model( padded_batch["prompt_input_ids"], @@ -1029,7 +1047,7 @@ def get_batch_loss_metrics( else: with torch.no_grad(): if self.ref_model is None: - with self.accelerator.unwrap_model(self.model).disable_adapter(): + with self.null_ref_context(): ( reference_chosen_logps, reference_rejected_logps,
KTO - support loading the adapter twice For DPOTrainer there exists the option to load the Adapter from SFT training twice, as in [Reference model considerations with PEFT - load-the-adapter-twice](https://huggingface.co/docs/trl/main/en/dpo_trainer#using-option-3---load-the-adapter-twice): ``` python # Initialize the trainer, without a ref_model param. dpo_trainer = DPOTrainer( model, ... model_adapter_name="train", ref_adapter_name="reference", ) ``` In DPOTrainer this allows me to use a larger batch size and speed up training. Would be cool to have this option in KTOTrainer as well. Anyone working on this already?
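A sketch (all paths and the training dataset are placeholders, so this is not runnable as-is) of what the equivalent KTOTrainer call could look like with the arguments added in the patch above, mirroring the DPO "load the adapter twice" recipe quoted in the issue:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import KTOConfig, KTOTrainer

base_path = "path/to/sft-base"        # placeholder
adapter_path = "path/to/sft-adapter"  # placeholder

tokenizer = AutoTokenizer.from_pretrained(base_path)
model = AutoModelForCausalLM.from_pretrained(base_path)
model = PeftModel.from_pretrained(model, adapter_path, adapter_name="train")
model.load_adapter(adapter_path, adapter_name="reference")

trainer = KTOTrainer(
    model,                      # no ref_model: the frozen "reference" adapter plays that role
    args=KTOConfig(output_dir="kto-output"),
    train_dataset=kto_dataset,  # placeholder: a dataset with prompt/completion/label columns
    tokenizer=tokenizer,
    model_adapter_name="train",
    ref_adapter_name="reference",
)
trainer.train()
```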
Hi @lewtun, we had a discussion about KTO. Are you already working on this, or should we come up with a PR? We would try to use the code from DPO and apply it to KTO to implement this. @younesbelkada @kashif, is this a desired feature for you as well? If yes, is someone already implementing it or should we come up with a PR? I believe it would be good to have, yes, and no one is working on it. That feature would be super useful @claralp. Thanks.
2024-04-16T14:02:55