Dataset schema (column, type, observed range):

  repo              string   (length 2 - 99)
  file              string   (length 13 - 225)
  code              string   (length 0 - 18.3M)
  file_length       int64    (0 - 18.3M)
  avg_line_length   float64  (0 - 1.36M)
  max_line_length   int64    (0 - 4.26M)
  extension_type    string   (1 distinct value)
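Before the rows themselves, here is a minimal sketch of consuming a dump with this schema. It assumes the rows are stored as JSON Lines with exactly the column names above; the file name `deepspeed_code.jsonl` and the length threshold are placeholders, not part of the dataset.

# Minimal sketch: load a JSONL dump with the schema above and filter it.
# Assumes one JSON object per line with keys: repo, file, code, file_length,
# avg_line_length, max_line_length, extension_type. The path is a placeholder.
import json

def load_rows(path="deepspeed_code.jsonl"):
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

def python_files_under(rows, max_chars=5_000):
    """Keep only .py rows whose source is shorter than max_chars."""
    return [r for r in rows
            if r["extension_type"] == "py" and r["file_length"] < max_chars]

if __name__ == "__main__":
    rows = python_files_under(load_rows())
    for r in rows[:5]:
        print(f'{r["repo"]}/{r["file"]}: {r["file_length"]} chars, '
              f'max line {r["max_line_length"]}')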
DeepSpeed
DeepSpeed-master/op_builder/cpu_adam.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os
from .builder import TorchCPUOpBuilder


class CPUAdamBuilder(TorchCPUOpBuilder):
    BUILD_VAR = "DS_BUILD_CPU_ADAM"
    NAME = "cpu_adam"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.adam.{self.NAME}_op'

    def sources(self):
        if self.build_for_cpu:
            return ['csrc/adam/cpu_adam.cpp']
        return ['csrc/adam/cpu_adam.cpp', 'csrc/common/custom_cuda_kernel.cu']

    def libraries_args(self):
        args = super().libraries_args()
        if self.build_for_cpu:
            return args
        if not self.is_rocm_pytorch():
            args += ['curand']
        return args

    def include_paths(self):
        import torch
        if self.build_for_cpu:
            CUDA_INCLUDE = []
        elif not self.is_rocm_pytorch():
            CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")]
        else:
            CUDA_INCLUDE = [
                os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include"),
                os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "rocrand"),
                os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "hiprand"),
            ]
        return ['csrc/includes'] + CUDA_INCLUDE
1,397
27.530612
89
py
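The numeric columns in each row look like simple per-file statistics. As an illustration only, the sketch below reconstructs them under the assumption (inferred from the numbers shown, not documented) that file_length is the total character count of the file and avg/max_line_length are computed over the newline-stripped lines; the example path is a placeholder.

# Hypothetical reconstruction of the per-file metrics shown in each row.
import os

def file_metrics(path):
    with open(path, encoding="utf-8") as f:
        code = f.read()
    lines = code.splitlines() or [""]
    return {
        "file": path,
        "code": code,
        "file_length": len(code),
        "avg_line_length": sum(len(l) for l in lines) / len(lines),
        "max_line_length": max(len(l) for l in lines),
        "extension_type": os.path.splitext(path)[1].lstrip("."),
    }

# Example usage (path is a placeholder):
# print(file_metrics("DeepSpeed-master/op_builder/cpu_adam.py"))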
DeepSpeed
DeepSpeed-master/op_builder/transformer.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import CUDAOpBuilder


class TransformerBuilder(CUDAOpBuilder):
    BUILD_VAR = "DS_BUILD_TRANSFORMER"
    NAME = "transformer"

    def __init__(self, name=None):
        name = self.NAME if name is None else name
        super().__init__(name=name)

    def absolute_name(self):
        return f'deepspeed.ops.transformer.{self.NAME}_op'

    def extra_ldflags(self):
        if not self.is_rocm_pytorch():
            return ['-lcurand']
        else:
            return []

    def sources(self):
        return [
            'csrc/transformer/ds_transformer_cuda.cpp', 'csrc/transformer/cublas_wrappers.cu',
            'csrc/transformer/transform_kernels.cu', 'csrc/transformer/gelu_kernels.cu',
            'csrc/transformer/dropout_kernels.cu', 'csrc/transformer/normalize_kernels.cu',
            'csrc/transformer/softmax_kernels.cu', 'csrc/transformer/general_kernels.cu'
        ]

    def include_paths(self):
        includes = ['csrc/includes']
        if self.is_rocm_pytorch():
            from torch.utils.cpp_extension import ROCM_HOME
            includes += ['{}/hiprand/include'.format(ROCM_HOME), '{}/rocrand/include'.format(ROCM_HOME)]
        return includes
1,294
31.375
104
py
DeepSpeed
DeepSpeed-master/op_builder/async_io.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import distutils.spawn import subprocess from .builder import OpBuilder class AsyncIOBuilder(OpBuilder): BUILD_VAR = "DS_BUILD_AIO" NAME = "async_io" def __init__(self): super().__init__(name=self.NAME) def absolute_name(self): return f'deepspeed.ops.aio.{self.NAME}_op' def sources(self): return [ 'csrc/aio/py_lib/deepspeed_py_copy.cpp', 'csrc/aio/py_lib/py_ds_aio.cpp', 'csrc/aio/py_lib/deepspeed_py_aio.cpp', 'csrc/aio/py_lib/deepspeed_py_aio_handle.cpp', 'csrc/aio/py_lib/deepspeed_aio_thread.cpp', 'csrc/aio/common/deepspeed_aio_utils.cpp', 'csrc/aio/common/deepspeed_aio_common.cpp', 'csrc/aio/common/deepspeed_aio_types.cpp', 'csrc/aio/py_lib/deepspeed_pin_tensor.cpp' ] def include_paths(self): return ['csrc/aio/py_lib', 'csrc/aio/common'] def cxx_args(self): # -O0 for improved debugging, since performance is bound by I/O CPU_ARCH = self.cpu_arch() SIMD_WIDTH = self.simd_width() return [ '-g', '-Wall', '-O0', '-std=c++14', '-shared', '-fPIC', '-Wno-reorder', CPU_ARCH, '-fopenmp', SIMD_WIDTH, '-laio', ] def extra_ldflags(self): return ['-laio'] def check_for_libaio_pkg(self): libs = dict( dpkg=["-l", "libaio-dev", "apt"], pacman=["-Q", "libaio", "pacman"], rpm=["-q", "libaio-devel", "yum"], ) found = False for pkgmgr, data in libs.items(): flag, lib, tool = data path = distutils.spawn.find_executable(pkgmgr) if path is not None: cmd = f"{pkgmgr} {flag} {lib}" result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) if result.wait() == 0: found = True else: self.warning(f"{self.NAME}: please install the {lib} package with {tool}") break return found def is_compatible(self, verbose=True): # Check for the existence of libaio by using distutils # to compile and link a test program that calls io_submit, # which is a function provided by libaio that is used in the async_io op. # If needed, one can define -I and -L entries in CFLAGS and LDFLAGS # respectively to specify the directories for libaio.h and libaio.so. aio_compatible = self.has_function('io_submit', ('aio', )) if verbose and not aio_compatible: self.warning(f"{self.NAME} requires the dev libaio .so object and headers but these were not found.") # Check for the libaio package via known package managers # to print suggestions on which package to install. self.check_for_libaio_pkg() self.warning( "If libaio is already installed (perhaps from source), try setting the CFLAGS and LDFLAGS environment variables to where it can be found." ) return super().is_compatible(verbose) and aio_compatible
3,333
34.468085
154
py
DeepSpeed
DeepSpeed-master/op_builder/all_ops.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os
import pkgutil
import importlib

try:
    # during installation time accelerator is visible, otherwise return deepspeed.accelerator
    from accelerator import get_accelerator
except ImportError:
    from deepspeed.accelerator import get_accelerator

# List of all available ops

# reflect all builder names into __op_builders__
op_builder_dir = get_accelerator().op_builder_dir()
op_builder_module = importlib.import_module(op_builder_dir)
__op_builders__ = []

for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(op_builder_module.__file__)]):
    # avoid self references
    if module_name != 'all_ops' and module_name != 'builder':
        module = importlib.import_module("{}.{}".format(op_builder_dir, module_name))
        for member_name in module.__dir__():
            if member_name.endswith('Builder'):
                # append builder to __op_builders__ list
                builder = get_accelerator().create_op_builder(member_name)
                __op_builders__.append(builder)

ALL_OPS = {op.name: op for op in __op_builders__ if op is not None}
1,180
34.787879
93
py
DeepSpeed
DeepSpeed-master/op_builder/__init__.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import sys
import os
import pkgutil
import importlib

from .builder import get_default_compute_capabilities, OpBuilder

# Do not remove, required for abstract accelerator to detect if we have a deepspeed or 3p op_builder
__deepspeed__ = True

# List of all available op builders from deepspeed op_builder
try:
    import deepspeed.ops.op_builder  # noqa: F401
    op_builder_dir = "deepspeed.ops.op_builder"
except ImportError:
    op_builder_dir = "op_builder"

__op_builders__ = []

this_module = sys.modules[__name__]


def builder_closure(member_name):
    if op_builder_dir == "op_builder":
        # during installation time cannot get builder due to torch not installed,
        # return closure instead
        def _builder():
            from deepspeed.accelerator import get_accelerator
            builder = get_accelerator().create_op_builder(member_name)
            return builder

        return _builder
    else:
        # during runtime, return op builder class directly
        from deepspeed.accelerator import get_accelerator
        builder = get_accelerator().get_op_builder(member_name)
        return builder


# reflect builder names and add builder closure, such as 'TransformerBuilder()' creates op builder wrt current accelerator
for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(this_module.__file__)]):
    if module_name != 'all_ops' and module_name != 'builder':
        module = importlib.import_module(f".{module_name}", package=op_builder_dir)
        for member_name in module.__dir__():
            if member_name.endswith('Builder') and member_name != "OpBuilder" and member_name != "CUDAOpBuilder":
                # assign builder name to variable with same name
                # the following is equivalent to i.e. TransformerBuilder = "TransformerBuilder"
                this_module.__dict__[member_name] = builder_closure(member_name)
1,990
35.87037
122
py
DeepSpeed
DeepSpeed-master/op_builder/fused_lamb.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import CUDAOpBuilder

import sys


class FusedLambBuilder(CUDAOpBuilder):
    BUILD_VAR = 'DS_BUILD_FUSED_LAMB'
    NAME = "fused_lamb"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.lamb.{self.NAME}_op'

    def sources(self):
        return ['csrc/lamb/fused_lamb_cuda.cpp', 'csrc/lamb/fused_lamb_cuda_kernel.cu']

    def include_paths(self):
        return ['csrc/includes']

    def cxx_args(self):
        args = super().cxx_args()
        return args + self.version_dependent_macros()

    def nvcc_args(self):
        nvcc_flags = ['-O3'] + self.version_dependent_macros()
        if self.is_rocm_pytorch():
            ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version()
            nvcc_flags += ['-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR, '-DROCM_VERSION_MINOR=%s' % ROCM_MINOR]
        else:
            nvcc_flags.extend(
                ['-allow-unsupported-compiler' if sys.platform == "win32" else '', '-lineinfo', '--use_fast_math'] +
                self.compute_capability_args())
        return nvcc_flags
1,216
28.682927
116
py
DeepSpeed
DeepSpeed-master/op_builder/fused_adam.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import CUDAOpBuilder

import sys


class FusedAdamBuilder(CUDAOpBuilder):
    BUILD_VAR = "DS_BUILD_FUSED_ADAM"
    NAME = "fused_adam"

    def __init__(self):
        super().__init__(name=self.NAME)

    def absolute_name(self):
        return f'deepspeed.ops.adam.{self.NAME}_op'

    def sources(self):
        return ['csrc/adam/fused_adam_frontend.cpp', 'csrc/adam/multi_tensor_adam.cu']

    def include_paths(self):
        return ['csrc/includes', 'csrc/adam']

    def cxx_args(self):
        args = super().cxx_args()
        return args + self.version_dependent_macros()

    def nvcc_args(self):
        nvcc_flags = ['-O3'] + self.version_dependent_macros()
        if not self.is_rocm_pytorch():
            nvcc_flags.extend(
                ['-allow-unsupported-compiler' if sys.platform == "win32" else '', '-lineinfo', '--use_fast_math'] +
                self.compute_capability_args())
        return nvcc_flags
1,044
26.5
116
py
DeepSpeed
DeepSpeed-master/op_builder/random_ltd.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import CUDAOpBuilder


class RandomLTDBuilder(CUDAOpBuilder):
    BUILD_VAR = "DS_BUILD_RANDOM_LTD"
    NAME = "random_ltd"

    def __init__(self, name=None):
        name = self.NAME if name is None else name
        super().__init__(name=name)

    def absolute_name(self):
        return f'deepspeed.ops.{self.NAME}_op'

    def extra_ldflags(self):
        if not self.is_rocm_pytorch():
            return ['-lcurand']
        else:
            return []

    def sources(self):
        return [
            'csrc/random_ltd/pt_binding.cpp', 'csrc/random_ltd/gather_scatter.cu',
            'csrc/random_ltd/slice_attn_masks.cu', 'csrc/random_ltd/token_sort.cu'
        ]

    def include_paths(self):
        includes = ['csrc/includes']
        if self.is_rocm_pytorch():
            from torch.utils.cpp_extension import ROCM_HOME
            includes += ['{}/hiprand/include'.format(ROCM_HOME), '{}/rocrand/include'.format(ROCM_HOME)]
        return includes
1,079
27.421053
104
py
DeepSpeed
DeepSpeed-master/op_builder/builder.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import os import sys import time import importlib from pathlib import Path import subprocess import shlex import shutil import tempfile import distutils.ccompiler import distutils.log import distutils.sysconfig from distutils.errors import CompileError, LinkError from abc import ABC, abstractmethod from typing import List YELLOW = '\033[93m' END = '\033[0m' WARNING = f"{YELLOW} [WARNING] {END}" DEFAULT_TORCH_EXTENSION_PATH = "/tmp/torch_extensions" DEFAULT_COMPUTE_CAPABILITIES = "6.0;6.1;7.0" try: import torch except ImportError: print(f"{WARNING} unable to import torch, please install it if you want to pre-compile any deepspeed ops.") else: TORCH_MAJOR = int(torch.__version__.split('.')[0]) TORCH_MINOR = int(torch.__version__.split('.')[1]) def installed_cuda_version(name=""): import torch.utils.cpp_extension cuda_home = torch.utils.cpp_extension.CUDA_HOME assert cuda_home is not None, "CUDA_HOME does not exist, unable to compile CUDA op(s)" # Ensure there is not a cuda version mismatch between torch and nvcc compiler output = subprocess.check_output([cuda_home + "/bin/nvcc", "-V"], universal_newlines=True) output_split = output.split() release_idx = output_split.index("release") release = output_split[release_idx + 1].replace(',', '').split(".") # Ignore patch versions, only look at major + minor cuda_major, cuda_minor = release[:2] return int(cuda_major), int(cuda_minor) def get_default_compute_capabilities(): compute_caps = DEFAULT_COMPUTE_CAPABILITIES import torch.utils.cpp_extension if torch.utils.cpp_extension.CUDA_HOME is not None and installed_cuda_version()[0] >= 11: if installed_cuda_version()[0] == 11 and installed_cuda_version()[1] == 0: # Special treatment of CUDA 11.0 because compute_86 is not supported. compute_caps += ";8.0" else: compute_caps += ";8.0;8.6" return compute_caps # list compatible minor CUDA versions - so that for example pytorch built with cuda-11.0 can be used # to build deepspeed and system-wide installed cuda 11.2 cuda_minor_mismatch_ok = { 10: [ "10.0", "10.1", "10.2", ], 11: ["11.0", "11.1", "11.2", "11.3", "11.4", "11.5", "11.6", "11.7", "11.8"], 12: ["12.0", "12.1"], } def assert_no_cuda_mismatch(name=""): cuda_major, cuda_minor = installed_cuda_version(name) sys_cuda_version = f'{cuda_major}.{cuda_minor}' torch_cuda_version = ".".join(torch.version.cuda.split('.')[:2]) # This is a show-stopping error, should probably not proceed past this if sys_cuda_version != torch_cuda_version: if (cuda_major in cuda_minor_mismatch_ok and sys_cuda_version in cuda_minor_mismatch_ok[cuda_major] and torch_cuda_version in cuda_minor_mismatch_ok[cuda_major]): print(f"Installed CUDA version {sys_cuda_version} does not match the " f"version torch was compiled with {torch.version.cuda} " "but since the APIs are compatible, accepting this combination") return True elif os.getenv("DS_SKIP_CUDA_CHECK", "0") == "1": print( f"{WARNING} DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the " f"version torch was compiled with {torch.version.cuda}." "Detected `DS_SKIP_CUDA_CHECK=1`: Allowing this combination of CUDA, but it may result in unexpected behavior." 
) return True raise Exception(f">- DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the " f"version torch was compiled with {torch.version.cuda}, unable to compile " "cuda/cpp extensions without a matching cuda version.") return True class OpBuilder(ABC): _rocm_version = None _is_rocm_pytorch = None def __init__(self, name): self.name = name self.jit_mode = False self.build_for_cpu = False self.enable_bf16 = False self.error_log = None @abstractmethod def absolute_name(self): ''' Returns absolute build path for cases where the op is pre-installed, e.g., deepspeed.ops.adam.cpu_adam will be installed as something like: deepspeed/ops/adam/cpu_adam.so ''' pass @abstractmethod def sources(self): ''' Returns list of source files for your op, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed) ''' pass def hipify_extension(self): pass @staticmethod def validate_torch_version(torch_info): install_torch_version = torch_info['version'] current_torch_version = ".".join(torch.__version__.split('.')[:2]) if install_torch_version != current_torch_version: raise RuntimeError("PyTorch version mismatch! DeepSpeed ops were compiled and installed " "with a different version than what is being used at runtime. " f"Please re-install DeepSpeed or switch torch versions. " f"Install torch version={install_torch_version}, " f"Runtime torch version={current_torch_version}") @staticmethod def validate_torch_op_version(torch_info): if not OpBuilder.is_rocm_pytorch(): current_cuda_version = ".".join(torch.version.cuda.split('.')[:2]) install_cuda_version = torch_info['cuda_version'] if install_cuda_version != current_cuda_version: raise RuntimeError("CUDA version mismatch! DeepSpeed ops were compiled and installed " "with a different version than what is being used at runtime. " f"Please re-install DeepSpeed or switch torch versions. " f"Install CUDA version={install_cuda_version}, " f"Runtime CUDA version={current_cuda_version}") else: current_hip_version = ".".join(torch.version.hip.split('.')[:2]) install_hip_version = torch_info['hip_version'] if install_hip_version != current_hip_version: raise RuntimeError("HIP version mismatch! DeepSpeed ops were compiled and installed " "with a different version than what is being used at runtime. " f"Please re-install DeepSpeed or switch torch versions. 
" f"Install HIP version={install_hip_version}, " f"Runtime HIP version={current_hip_version}") @staticmethod def is_rocm_pytorch(): if OpBuilder._is_rocm_pytorch is not None: return OpBuilder._is_rocm_pytorch _is_rocm_pytorch = False try: import torch except ImportError: pass else: if TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 5): _is_rocm_pytorch = hasattr(torch.version, 'hip') and torch.version.hip is not None if _is_rocm_pytorch: from torch.utils.cpp_extension import ROCM_HOME _is_rocm_pytorch = ROCM_HOME is not None OpBuilder._is_rocm_pytorch = _is_rocm_pytorch return OpBuilder._is_rocm_pytorch @staticmethod def installed_rocm_version(): if OpBuilder._rocm_version: return OpBuilder._rocm_version ROCM_MAJOR = '0' ROCM_MINOR = '0' if OpBuilder.is_rocm_pytorch(): from torch.utils.cpp_extension import ROCM_HOME rocm_ver_file = Path(ROCM_HOME).joinpath(".info/version-dev") if rocm_ver_file.is_file(): with open(rocm_ver_file, 'r') as file: ROCM_VERSION_DEV_RAW = file.read() elif "rocm" in torch.__version__: ROCM_VERSION_DEV_RAW = torch.__version__.split("rocm")[1] else: assert False, "Could not detect ROCm version" assert ROCM_VERSION_DEV_RAW != "", "Could not detect ROCm version" ROCM_MAJOR = ROCM_VERSION_DEV_RAW.split('.')[0] ROCM_MINOR = ROCM_VERSION_DEV_RAW.split('.')[1] OpBuilder._rocm_version = (int(ROCM_MAJOR), int(ROCM_MINOR)) return OpBuilder._rocm_version def include_paths(self): ''' Returns list of include paths, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed) ''' return [] def nvcc_args(self): ''' Returns optional list of compiler flags to forward to nvcc when building CUDA sources ''' return [] def cxx_args(self): ''' Returns optional list of compiler flags to forward to the build ''' return [] def is_compatible(self, verbose=True): ''' Check if all non-python dependencies are satisfied to build this op ''' return True def extra_ldflags(self): return [] def libraries_installed(self, libraries): valid = False check_cmd = 'dpkg -l' for lib in libraries: result = subprocess.Popen(f'dpkg -l {lib}', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) valid = valid or result.wait() == 0 return valid def has_function(self, funcname, libraries, verbose=False): ''' Test for existence of a function within a tuple of libraries. This is used as a smoke test to check whether a certain library is available. As a test, this creates a simple C program that calls the specified function, and then distutils is used to compile that program and link it with the specified libraries. Returns True if both the compile and link are successful, False otherwise. ''' tempdir = None # we create a temporary directory to hold various files filestderr = None # handle to open file to which we redirect stderr oldstderr = None # file descriptor for stderr try: # Echo compile and link commands that are used. if verbose: distutils.log.set_verbosity(1) # Create a compiler object. compiler = distutils.ccompiler.new_compiler(verbose=verbose) # Configure compiler and linker to build according to Python install. distutils.sysconfig.customize_compiler(compiler) # Create a temporary directory to hold test files. tempdir = tempfile.mkdtemp() # Define a simple C program that calls the function in question prog = "void %s(void); int main(int argc, char** argv) { %s(); return 0; }" % (funcname, funcname) # Write the test program to a file. 
filename = os.path.join(tempdir, 'test.c') with open(filename, 'w') as f: f.write(prog) # Redirect stderr file descriptor to a file to silence compile/link warnings. if not verbose: filestderr = open(os.path.join(tempdir, 'stderr.txt'), 'w') oldstderr = os.dup(sys.stderr.fileno()) os.dup2(filestderr.fileno(), sys.stderr.fileno()) # Workaround for behavior in distutils.ccompiler.CCompiler.object_filenames() # Otherwise, a local directory will be used instead of tempdir drive, driveless_filename = os.path.splitdrive(filename) root_dir = driveless_filename[0] if os.path.isabs(driveless_filename) else '' output_dir = os.path.join(drive, root_dir) # Attempt to compile the C program into an object file. cflags = shlex.split(os.environ.get('CFLAGS', "")) objs = compiler.compile([filename], output_dir=output_dir, extra_preargs=self.strip_empty_entries(cflags)) # Attempt to link the object file into an executable. # Be sure to tack on any libraries that have been specified. ldflags = shlex.split(os.environ.get('LDFLAGS', "")) compiler.link_executable(objs, os.path.join(tempdir, 'a.out'), extra_preargs=self.strip_empty_entries(ldflags), libraries=libraries) # Compile and link succeeded return True except CompileError: return False except LinkError: return False except: return False finally: # Restore stderr file descriptor and close the stderr redirect file. if oldstderr is not None: os.dup2(oldstderr, sys.stderr.fileno()) if filestderr is not None: filestderr.close() # Delete the temporary directory holding the test program and stderr files. if tempdir is not None: shutil.rmtree(tempdir) def strip_empty_entries(self, args): ''' Drop any empty strings from the list of compile and link flags ''' return [x for x in args if len(x) > 0] def cpu_arch(self): try: from cpuinfo import get_cpu_info except ImportError as e: cpu_info = self._backup_cpuinfo() if cpu_info is None: return "-march=native" try: cpu_info = get_cpu_info() except Exception as e: self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), " "falling back to `lscpu` to get this information.") cpu_info = self._backup_cpuinfo() if cpu_info is None: return "-march=native" if cpu_info['arch'].startswith('PPC_'): # gcc does not provide -march on PowerPC, use -mcpu instead return '-mcpu=native' return '-march=native' def is_cuda_enable(self): try: assert_no_cuda_mismatch(self.name) return '-D__ENABLE_CUDA__' except BaseException: print(f"{WARNING} {self.name} cuda is missing or is incompatible with installed torch, " "only cpu ops can be compiled!") return '-D__DISABLE_CUDA__' return '-D__DISABLE_CUDA__' def _backup_cpuinfo(self): # Construct cpu_info dict from lscpu that is similar to what py-cpuinfo provides if not self.command_exists('lscpu'): self.warning(f"{self.name} attempted to query 'lscpu' after failing to use py-cpuinfo " "to detect the CPU architecture. 
'lscpu' does not appear to exist on " "your system, will fall back to use -march=native and non-vectorized execution.") return None result = subprocess.check_output('lscpu', shell=True) result = result.decode('utf-8').strip().lower() cpu_info = {} cpu_info['arch'] = None cpu_info['flags'] = "" if 'genuineintel' in result or 'authenticamd' in result: cpu_info['arch'] = 'X86_64' if 'avx512' in result: cpu_info['flags'] += 'avx512,' elif 'avx512f' in result: cpu_info['flags'] += 'avx512f,' if 'avx2' in result: cpu_info['flags'] += 'avx2' elif 'ppc64le' in result: cpu_info['arch'] = "PPC_" return cpu_info def simd_width(self): try: from cpuinfo import get_cpu_info except ImportError as e: cpu_info = self._backup_cpuinfo() if cpu_info is None: return '-D__SCALAR__' try: cpu_info = get_cpu_info() except Exception as e: self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), " "falling back to `lscpu` to get this information.") cpu_info = self._backup_cpuinfo() if cpu_info is None: return '-D__SCALAR__' if cpu_info['arch'] == 'X86_64': if 'avx512' in cpu_info['flags'] or 'avx512f' in cpu_info['flags']: return '-D__AVX512__' elif 'avx2' in cpu_info['flags']: return '-D__AVX256__' return '-D__SCALAR__' def command_exists(self, cmd): if '|' in cmd: cmds = cmd.split("|") else: cmds = [cmd] valid = False for cmd in cmds: result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True) valid = valid or result.wait() == 0 if not valid and len(cmds) > 1: print(f"{WARNING} {self.name} requires one of the following commands '{cmds}', but it does not exist!") elif not valid and len(cmds) == 1: print(f"{WARNING} {self.name} requires the '{cmd}' command, but it does not exist!") return valid def warning(self, msg): self.error_log = f"{msg}" print(f"{WARNING} {msg}") def deepspeed_src_path(self, code_path): if os.path.isabs(code_path): return code_path else: return os.path.join(Path(__file__).parent.parent.absolute(), code_path) def builder(self): from torch.utils.cpp_extension import CppExtension return CppExtension(name=self.absolute_name(), sources=self.strip_empty_entries(self.sources()), include_dirs=self.strip_empty_entries(self.include_paths()), extra_compile_args={'cxx': self.strip_empty_entries(self.cxx_args())}, extra_link_args=self.strip_empty_entries(self.extra_ldflags())) def load(self, verbose=True): from deepspeed.git_version_info import installed_ops, torch_info if installed_ops.get(self.name, False): # Ensure the op we're about to load was compiled with the same # torch/cuda versions we are currently using at runtime. self.validate_torch_version(torch_info) if torch.cuda.is_available() and isinstance(self, CUDAOpBuilder): self.validate_torch_op_version(torch_info) return importlib.import_module(self.absolute_name()) else: return self.jit_load(verbose) def jit_load(self, verbose=True): if not self.is_compatible(verbose): raise RuntimeError( f"Unable to JIT load the {self.name} op due to it not being compatible due to hardware/software issue. 
{self.error_log}" ) try: import ninja # noqa: F401 except ImportError: raise RuntimeError(f"Unable to JIT load the {self.name} op due to ninja not being installed.") if isinstance(self, CUDAOpBuilder) and not self.is_rocm_pytorch(): try: assert_no_cuda_mismatch(self.name) self.build_for_cpu = False except BaseException: self.build_for_cpu = True self.jit_mode = True from torch.utils.cpp_extension import load start_build = time.time() sources = [self.deepspeed_src_path(path) for path in self.sources()] extra_include_paths = [self.deepspeed_src_path(path) for path in self.include_paths()] # Torch will try and apply whatever CCs are in the arch list at compile time, # we have already set the intended targets ourselves we know that will be # needed at runtime. This prevents CC collisions such as multiple __half # implementations. Stash arch list to reset after build. torch_arch_list = None if "TORCH_CUDA_ARCH_LIST" in os.environ: torch_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST") os.environ["TORCH_CUDA_ARCH_LIST"] = "" nvcc_args = self.strip_empty_entries(self.nvcc_args()) cxx_args = self.strip_empty_entries(self.cxx_args()) if isinstance(self, CUDAOpBuilder): if not self.build_for_cpu and self.enable_bf16: cxx_args.append("-DBF16_AVAILABLE") nvcc_args.append("-DBF16_AVAILABLE") op_module = load(name=self.name, sources=self.strip_empty_entries(sources), extra_include_paths=self.strip_empty_entries(extra_include_paths), extra_cflags=cxx_args, extra_cuda_cflags=nvcc_args, extra_ldflags=self.strip_empty_entries(self.extra_ldflags()), verbose=verbose) build_duration = time.time() - start_build if verbose: print(f"Time to load {self.name} op: {build_duration} seconds") # Reset arch list so we are not silently removing it for other possible use cases if torch_arch_list: os.environ["TORCH_CUDA_ARCH_LIST"] = torch_arch_list return op_module class CUDAOpBuilder(OpBuilder): def compute_capability_args(self, cross_compile_archs=None): """ Returns nvcc compute capability compile flags. 1. `TORCH_CUDA_ARCH_LIST` takes priority over `cross_compile_archs`. 2. If neither is set default compute capabilities will be used 3. Under `jit_mode` compute capabilities of all visible cards will be used plus PTX Format: - `TORCH_CUDA_ARCH_LIST` may use ; or whitespace separators. Examples: TORCH_CUDA_ARCH_LIST="6.1;7.5;8.6" pip install ... TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6+PTX" pip install ... - `cross_compile_archs` uses ; separator. 
""" ccs = [] if self.jit_mode: # Compile for underlying architectures since we know those at runtime for i in range(torch.cuda.device_count()): CC_MAJOR, CC_MINOR = torch.cuda.get_device_capability(i) cc = f"{CC_MAJOR}.{CC_MINOR}" if cc not in ccs: ccs.append(cc) ccs = sorted(ccs) ccs[-1] += '+PTX' else: # Cross-compile mode, compile for various architectures # env override takes priority cross_compile_archs_env = os.environ.get('TORCH_CUDA_ARCH_LIST', None) if cross_compile_archs_env is not None: if cross_compile_archs is not None: print( f"{WARNING} env var `TORCH_CUDA_ARCH_LIST={cross_compile_archs_env}` overrides `cross_compile_archs={cross_compile_archs}`" ) cross_compile_archs = cross_compile_archs_env.replace(' ', ';') else: if cross_compile_archs is None: cross_compile_archs = get_default_compute_capabilities() ccs = cross_compile_archs.split(';') ccs = self.filter_ccs(ccs) if len(ccs) == 0: raise RuntimeError( f"Unable to load {self.name} op due to no compute capabilities remaining after filtering") args = [] self.enable_bf16 = True for cc in ccs: num = cc[0] + cc[2] args.append(f'-gencode=arch=compute_{num},code=sm_{num}') if cc.endswith('+PTX'): args.append(f'-gencode=arch=compute_{num},code=compute_{num}') if int(cc[0]) <= 7: self.enable_bf16 = False return args def filter_ccs(self, ccs: List[str]): """ Prune any compute capabilities that are not compatible with the builder. Should log which CCs have been pruned. """ return ccs def version_dependent_macros(self): # Fix from apex that might be relevant for us as well, related to https://github.com/NVIDIA/apex/issues/456 version_ge_1_1 = [] if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0): version_ge_1_1 = ['-DVERSION_GE_1_1'] version_ge_1_3 = [] if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2): version_ge_1_3 = ['-DVERSION_GE_1_3'] version_ge_1_5 = [] if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4): version_ge_1_5 = ['-DVERSION_GE_1_5'] return version_ge_1_1 + version_ge_1_3 + version_ge_1_5 def is_compatible(self, verbose=True): return super().is_compatible(verbose) def builder(self): try: if not self.is_rocm_pytorch(): assert_no_cuda_mismatch(self.name) self.build_for_cpu = False except BaseException: self.build_for_cpu = True if self.build_for_cpu: from torch.utils.cpp_extension import CppExtension as ExtensionBuilder else: from torch.utils.cpp_extension import CUDAExtension as ExtensionBuilder compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())} if self.build_for_cpu else \ {'cxx': self.strip_empty_entries(self.cxx_args()), \ 'nvcc': self.strip_empty_entries(self.nvcc_args())} if not self.build_for_cpu and self.enable_bf16: compile_args['cxx'].append("-DBF16_AVAILABLE") cuda_ext = ExtensionBuilder(name=self.absolute_name(), sources=self.strip_empty_entries(self.sources()), include_dirs=self.strip_empty_entries(self.include_paths()), libraries=self.strip_empty_entries(self.libraries_args()), extra_compile_args=compile_args, extra_link_args=self.strip_empty_entries(self.extra_ldflags())) if self.is_rocm_pytorch(): # hip converts paths to absolute, this converts back to relative sources = cuda_ext.sources curr_file = Path(__file__).parent.parent # ds root for i in range(len(sources)): src = Path(sources[i]) if src.is_absolute(): sources[i] = str(src.relative_to(curr_file)) else: sources[i] = str(src) cuda_ext.sources = sources return cuda_ext def hipify_extension(self): if self.is_rocm_pytorch(): from torch.utils.hipify import hipify_python hipify_python.hipify( 
project_directory=os.getcwd(), output_directory=os.getcwd(), header_include_dirs=self.include_paths(), includes=[os.path.join(os.getcwd(), '*')], extra_files=[os.path.abspath(s) for s in self.sources()], show_detailed=True, is_pytorch_extension=True, hipify_extra_files_only=True, ) def cxx_args(self): if sys.platform == "win32": return ['-O2'] else: return ['-O3', '-std=c++17', '-g', '-Wno-reorder'] def nvcc_args(self): if self.build_for_cpu: return [] args = ['-O3'] if self.is_rocm_pytorch(): ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version() args += [ '-std=c++17', '-U__HIP_NO_HALF_OPERATORS__', '-U__HIP_NO_HALF_CONVERSIONS__', '-U__HIP_NO_HALF2_OPERATORS__', '-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR, '-DROCM_VERSION_MINOR=%s' % ROCM_MINOR ] else: cuda_major, _ = installed_cuda_version() args += [ '-allow-unsupported-compiler' if sys.platform == "win32" else '', '--use_fast_math', '-std=c++17' if cuda_major > 10 else '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__' ] if os.environ.get('DS_DEBUG_CUDA_BUILD', '0') == '1': args.append('--ptxas-options=-v') args += self.compute_capability_args() return args def libraries_args(self): if self.build_for_cpu: return [] if sys.platform == "win32": return ['cublas', 'curand'] else: return [] class TorchCPUOpBuilder(CUDAOpBuilder): def extra_ldflags(self): if self.build_for_cpu: return ['-fopenmp'] if not self.is_rocm_pytorch(): return ['-lcurand'] return [] def cxx_args(self): import torch args = [] if not self.build_for_cpu: if not self.is_rocm_pytorch(): CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.CUDA_HOME, "lib64") else: CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.ROCM_HOME, "lib") args += super().cxx_args() args += [ f'-L{CUDA_LIB64}', '-lcudart', '-lcublas', '-g', ] CPU_ARCH = self.cpu_arch() SIMD_WIDTH = self.simd_width() CUDA_ENABLE = self.is_cuda_enable() args += [ CPU_ARCH, '-fopenmp', SIMD_WIDTH, CUDA_ENABLE, ] return args
29,737
39.459864
147
py
DeepSpeed
DeepSpeed-master/op_builder/spatial_inference.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import CUDAOpBuilder, installed_cuda_version


class SpatialInferenceBuilder(CUDAOpBuilder):
    BUILD_VAR = "DS_BUILD_SPATIAL_INFERENCE"
    NAME = "spatial_inference"

    def __init__(self, name=None):
        name = self.NAME if name is None else name
        super().__init__(name=name)

    def absolute_name(self):
        return f'deepspeed.ops.spatial.{self.NAME}_op'

    def is_compatible(self, verbose=True):
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile inference kernels")
            return False

        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major
            if cuda_capability >= 8:
                if torch_cuda_major < 11 or sys_cuda_major < 11:
                    self.warning("On Ampere and higher architectures please use CUDA 11+")
                    cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay

    def sources(self):
        return [
            'csrc/spatial/csrc/opt_bias_add.cu',
            'csrc/spatial/csrc/pt_binding.cpp',
        ]

    def include_paths(self):
        return ['csrc/spatial/includes', 'csrc/includes']
1,534
32.369565
91
py
DeepSpeed
DeepSpeed-master/op_builder/sparse_attn.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from .builder import OpBuilder try: from packaging import version as pkg_version except ImportError: pkg_version = None class SparseAttnBuilder(OpBuilder): BUILD_VAR = "DS_BUILD_SPARSE_ATTN" NAME = "sparse_attn" def __init__(self): super().__init__(name=self.NAME) def absolute_name(self): return f'deepspeed.ops.sparse_attention.{self.NAME}_op' def sources(self): return ['csrc/sparse_attention/utils.cpp'] def cxx_args(self): return ['-O2', '-fopenmp'] def is_compatible(self, verbose=True): # Check to see if llvm and cmake are installed since they are dependencies #required_commands = ['llvm-config|llvm-config-9', 'cmake'] #command_status = list(map(self.command_exists, required_commands)) #deps_compatible = all(command_status) if self.is_rocm_pytorch(): self.warning(f'{self.NAME} is not compatible with ROCM') return False try: import torch except ImportError: self.warning(f"unable to import torch, please install it first") return False # torch-cpu will not have a cuda version if torch.version.cuda is None: cuda_compatible = False self.warning(f"{self.NAME} cuda is not available from torch") else: major, minor = torch.version.cuda.split('.')[:2] cuda_compatible = (int(major) == 10 and int(minor) >= 1) or (int(major) >= 11) if not cuda_compatible: self.warning(f"{self.NAME} requires CUDA version 10.1+") TORCH_MAJOR = int(torch.__version__.split('.')[0]) TORCH_MINOR = int(torch.__version__.split('.')[1]) torch_compatible = (TORCH_MAJOR == 1 and TORCH_MINOR >= 5) if not torch_compatible: self.warning( f'{self.NAME} requires a torch version >= 1.5 and < 2.0 but detected {TORCH_MAJOR}.{TORCH_MINOR}') try: import triton except ImportError: # auto-install of triton is broken on some systems, reverting to manual install for now # see this issue: https://github.com/microsoft/DeepSpeed/issues/1710 self.warning(f"please install triton==1.0.0 if you want to use sparse attention") return False if pkg_version: installed_triton = pkg_version.parse(triton.__version__) triton_mismatch = installed_triton != pkg_version.parse("1.0.0") else: installed_triton = triton.__version__ triton_mismatch = installed_triton != "1.0.0" if triton_mismatch: self.warning(f"using untested triton version ({installed_triton}), only 1.0.0 is known to be compatible") return False return super().is_compatible(verbose) and torch_compatible and cuda_compatible
2,994
35.084337
117
py
DeepSpeed
DeepSpeed-master/op_builder/quantizer.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import CUDAOpBuilder


class QuantizerBuilder(CUDAOpBuilder):
    BUILD_VAR = "DS_BUILD_QUANTIZER"
    NAME = "quantizer"

    def __init__(self, name=None):
        name = self.NAME if name is None else name
        super().__init__(name=name)

    def absolute_name(self):
        return f'deepspeed.ops.quantizer.{self.NAME}_op'

    def sources(self):
        return [
            'csrc/quantization/pt_binding.cpp',
            'csrc/quantization/fake_quantizer.cu',
            'csrc/quantization/quantize.cu',
            'csrc/quantization/dequantize.cu',
            'csrc/quantization/swizzled_quantize.cu',
            'csrc/quantization/quant_reduce.cu',
        ]

    def include_paths(self):
        return ['csrc/includes']

    def extra_ldflags(self):
        return ['-lcurand']
908
24.971429
56
py
DeepSpeed
DeepSpeed-master/op_builder/cpu/comm.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os

from .builder import CPUOpBuilder


class CCLCommBuilder(CPUOpBuilder):
    BUILD_VAR = "DS_BUILD_CCL_COMM"
    NAME = "deepspeed_ccl_comm"

    def __init__(self, name=None):
        name = self.NAME if name is None else name
        super().__init__(name=name)

    def absolute_name(self):
        return f'deepspeed.ops.comm.{self.NAME}_op'

    def sources(self):
        return ['csrc/cpu/comm/ccl.cpp']

    def include_paths(self):
        includes = ['csrc/cpu/includes']
        return includes

    def is_compatible(self, verbose=True):
        # TODO: add soft compatibility check for private binary release.
        # a soft check, as in we know it can be trivially changed.
        return super().is_compatible(verbose)

    def extra_ldflags(self):
        ccl_root_path = os.environ.get("CCL_ROOT")
        if ccl_root_path == None:
            raise ValueError(
                "Didn't find CCL_ROOT, install oneCCL from https://github.com/oneapi-src/oneCCL and source its environment variable"
            )
            return []
        else:
            return ['-lccl', f'-L{ccl_root_path}/lib']
1,225
28.190476
132
py
DeepSpeed
DeepSpeed-master/op_builder/cpu/__init__.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''

from .comm import CCLCommBuilder
from .no_impl import NotImplementedBuilder
217
23.222222
44
py
DeepSpeed
DeepSpeed-master/op_builder/cpu/builder.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

try:
    # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
    # if successful this also means we're doing a local install and not JIT compile path
    from op_builder import __deepspeed__  # noqa: F401
    from op_builder.builder import OpBuilder
except ImportError:
    from deepspeed.ops.op_builder.builder import OpBuilder


class CPUOpBuilder(OpBuilder):

    def builder(self):
        from torch.utils.cpp_extension import CppExtension as ExtensionBuilder

        compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())}

        cpp_ext = ExtensionBuilder(name=self.absolute_name(),
                                   sources=self.strip_empty_entries(self.sources()),
                                   include_dirs=self.strip_empty_entries(self.include_paths()),
                                   libraries=self.strip_empty_entries(self.libraries_args()),
                                   extra_compile_args=compile_args)

        return cpp_ext

    def cxx_args(self):
        return ['-O3', '-g', '-Wno-reorder']

    def libraries_args(self):
        return []
1,224
34
95
py
DeepSpeed
DeepSpeed-master/op_builder/cpu/no_impl.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .builder import CPUOpBuilder


class NotImplementedBuilder(CPUOpBuilder):
    BUILD_VAR = "DS_BUILD_NOT_IMPLEMENTED"
    NAME = "deepspeed_not_implemented"

    def __init__(self, name=None):
        name = self.NAME if name is None else name
        super().__init__(name=name)

    def absolute_name(self):
        return f'deepspeed.ops.comm.{self.NAME}_op'

    def load(self, verbose=True):
        raise ValueError("This op had not been implemented on CPU backend.")

    def sources(self):
        return []
616
23.68
76
py
DeepSpeed
DeepSpeed-master/scripts/check-torchcuda.py
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from __future__ import annotations
'''Copyright The Microsoft DeepSpeed Team'''
"""
Checks each file in sys.argv for the string "torch.cuda".
Modified from https://github.com/jlebar/pre-commit-hooks/blob/master/check_do_not_submit.py
"""

import subprocess
import sys


def err(s: str) -> None:
    print(s, file=sys.stderr)


# There are many ways we could search for the string "torch.cuda", but `git
# grep --no-index` is nice because
# - it's very fast (as compared to iterating over the file in Python)
# - we can reasonably assume it's available on all machines
# - unlike plain grep, which is slower and has different flags on MacOS versus
#   Linux, git grep is always the same.
res = subprocess.run(
    ["git", "grep", "-Hn", "--no-index", "-e", r"torch\.cuda", "--and", "--not", "-e", "#ignore-cuda", *sys.argv[1:]],
    capture_output=True,
)
if res.returncode == 0:
    err('Error: The string "torch.cuda" was found.\nPlease replace all calls to torch.cuda with "get_accelerator()" and add the following import line:\n\n from deepspeed.accelerator import get_accelerator\n\nIf your code is mean to be cuda specific, please add the following comment in the line with torch.cuda:\n\n #ignore-cuda\n'
        )
    err(res.stdout.decode("utf-8"))
    sys.exit(1)
elif res.returncode == 2:
    err(f"Error invoking grep on {', '.join(sys.argv[1:])}:")
    err(res.stderr.decode("utf-8"))
    sys.exit(2)

res = subprocess.run(
    ["git", "grep", "-Hn", "--no-index", r"\.cuda()", *sys.argv[1:]],
    capture_output=True,
)
if res.returncode == 0:
    err('Error: The string ".cuda()" was found. This implies convert a tensor to cuda tensor. Please replace all calls to tensor.cuda() with "tensor.to(get_accelerator().device_name())" and add the following import line:\nfrom deepspeed.accelerator import get_accelerator'
        )
    err(res.stdout.decode("utf-8"))
    sys.exit(1)
elif res.returncode == 2:
    err(f"Error invoking grep on {', '.join(sys.argv[1:])}:")
    err(res.stderr.decode("utf-8"))
    sys.exit(2)

files = []
for file in sys.argv[1:]:
    if not file.endswith(".cpp"):
        files.append(file)

res = subprocess.run(
    ["git", "grep", "-Hn", "--no-index", r"\.is_cuda", *files],
    capture_output=True,
)
if res.returncode == 0:
    err('''
Error: The string ".is_cuda" was found. This implies checking if a tensor is a cuda tensor.
Please replace all calls to "tensor.is_cuda" with "get_accelerator().on_accelerator(tensor)",
and add the following import line:
'from deepspeed.accelerator import get_accelerator'
''')
    err(res.stdout.decode("utf-8"))
    sys.exit(1)
elif res.returncode == 2:
    err(f"Error invoking grep on {', '.join(files)}:")
    err(res.stderr.decode("utf-8"))
    sys.exit(2)
2,895
36.128205
337
py
DeepSpeed
DeepSpeed-master/scripts/check-license.py
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from __future__ import annotations
'''Copyright The Microsoft DeepSpeed Team'''
"""
Modified from https://github.com/jlebar/pre-commit-hooks/blob/master/check_do_not_submit.py
"""

import subprocess
import sys


def err(s: str) -> None:
    print(s, file=sys.stderr)


COPYRIGHT = [
    r"^\(\/\/\|#\) Copyright (c) Microsoft Corporation.$", r"^\(\/\/\|#\) SPDX-License-Identifier: Apache-2.0$",
    r"^\(\/\/\|#\) DeepSpeed Team$"
]

success = True
failures = []
for f in sys.argv[1:]:
    for copyright_line in COPYRIGHT:
        if not success:
            break
        res = subprocess.run(["git", "grep", "--quiet", "-e", copyright_line, f], capture_output=True)
        if res.returncode == 1:
            success = False
            failures.append(f)
        elif res.returncode == 2:
            err(f"Error invoking grep on {', '.join(sys.argv[1:])}:")
            err(res.stderr.decode("utf-8"))
            sys.exit(2)

if not success:
    err(f'{failures}: Missing license at top of file')
    err(res.stdout.decode("utf-8"))
    sys.exit(1)
1,175
25.133333
112
py
DeepSpeed
DeepSpeed-master/scripts/replace_copyright.py
#!/usr/bin/env python3 # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ USAGE: $ python3 script/replace_copyright.py --repo_dir ./ """ import os import argparse NEW_COPYRIGHT = ("Copyright (c) Microsoft Corporation.", "SPDX-License-Identifier: Apache-2.0", "", "DeepSpeed Team") PY_SL_COMMENT = "#" PY_ML_SINGLE = "'''" PY_ML_DOUBLE = '"""' PY_COMMENTS = (PY_SL_COMMENT, PY_ML_SINGLE, PY_ML_DOUBLE) C_SL_COMMENT = "//" C_ML_OPEN = "/*" C_ML_CLOSE = "*/" C_COMMENTS = (C_SL_COMMENT, C_ML_OPEN, C_ML_CLOSE) BASH_SL_COMMENT = "#" BASH_COMMENTS = (BASH_SL_COMMENT, ) DELIM = "|/-\|/-\|BARRIER|/-\|/-\|" # noqa: W605 def parser_args(): parser = argparse.ArgumentParser() parser.add_argument("--repo_dir", type=str, help="Repository directory") parser.add_argument("--python_style_ext", type=str, nargs="+", default=[".py"], help="File types to process with python-style comments") parser.add_argument("--bash_style_ext", type=str, nargs="+", default=[".sh"], help="File types to process with bash-style comments") parser.add_argument("--c_style_ext", type=str, nargs="+", default=[ ".c", ".cpp", ".cu", ".h", ".hpp", ".cuh", ".cc", ".hip", ".tr", ], help="File types to process with C-style comments") args = parser.parse_args() return args # These get_header_* functions are ugly, but they work :) def get_header_py(fp): with open(fp, "r") as f: lines = iter(l for l in f.readlines()) header = [] rest = [] in_multiline = False multiline_type = None while (l := next(lines, None)) is not None: l = l.strip() if l.startswith(PY_ML_SINGLE) or l.startswith(PY_ML_DOUBLE): # Detected multiline comment if in_multiline and multiline_type == l[:3]: # Ended a multiline comment in_multiline = False else: # Started a multiline comment in_multiline = True multiline_type = l[:3] if l.endswith(multiline_type) and len(l) >= 6: # Opened and closed multiline comment on single line in_multiline = False elif in_multiline and l.endswith(multiline_type): # Ended a multiline comment in_multiline = False elif not (in_multiline or l.startswith(PY_SL_COMMENT) or l == ""): # Not in a comment rest += [l + "\n"] break header.append(l) rest += list(lines) return header, rest def get_header_c(fp): with open(fp, "r") as f: lines = iter(l for l in f.readlines()) header = [] rest = [] in_multiline = False while (l := next(lines, None)) is not None: l = l.strip() if l.startswith(C_ML_OPEN): # Detected multiline comment if not l.endswith(C_ML_CLOSE): # multiline comment not closed on same line in_multiline = True elif l.endswith(C_ML_CLOSE): # Ended a multiline comment in_multiline = False elif not in_multiline or l.startswith(C_SL_COMMENT) or l.isspace(): # Not in a comment rest += [l + "\n"] break header.append(l) rest += list(lines) return header, rest def get_header_bash(fp): with open(fp, "r") as f: lines = iter(l for l in f.readlines()) header = [] rest = [] while (l := next(lines, None)) is not None: l = l.strip() if not l.startswith(BASH_SL_COMMENT) or l.isspace(): # Not in a comment rest += [l + "\n"] break header.append(l) rest += list(lines) return header, rest def remove_comments(line, comment_strs): for cstr in comment_strs: line = line.replace(cstr, "") return line def format_multiline_comment(text, comment_type): if comment_type == PY_COMMENTS: text = f"\n{comment_type[2]}\n" + "\n".join(text) + f"{comment_type[2]}" if comment_type == C_COMMENTS: text = f"\n{comment_type[1]}\n" + "\n".join(text) + f"{comment_type[2]}" if comment_type == BASH_COMMENTS: text = 
"\n".join([f"{comment_type[0]}{l}" for l in text]) return text def modify_file_header(fp, file_header, rest_of_file, preserve_text_store, comment_type): header_text = "\n".join(file_header) if not (header_text.strip() == "" or header_text in preserve_text_store): # Unique header, need to get user input print("\n", DELIM, "\n") for idx, line in enumerate(file_header): print(f"{idx}: {line}") print("\n", DELIM, "\n") print("\nIndicate the FIRST line of the Header to KEEP") print("(shebang #! lines will be automatically processed and should not be included).") keep_idx = input("Enter number (or leave blank if no lines should be preserved): ") preserve_text_store[header_text] = file_header[int(keep_idx):] if keep_idx != "" else "" # Identify any shebang lines in the file shebang = "\n".join([l for l in file_header if l.startswith("#!")]) if shebang != "": shebang += "\n" # Get the text we should preserve in this file and process to remove comment characters text_to_preserve = preserve_text_store.get(header_text, [""]) text_to_preserve = [remove_comments(l, comment_type) for l in text_to_preserve] # Format the text we want to keep into a new multiline comment if "".join(text_to_preserve) == "": text_to_preserve = "" else: text_to_preserve = format_multiline_comment(text_to_preserve, comment_type) # Generate the copyright text we will be adding copyright_text = "\n".join([f"{comment_type[0]} {l}" if l != "" else l for l in NEW_COPYRIGHT]) # Assemble the new header new_header = shebang + copyright_text + text_to_preserve # Write out the new file new_file_contents = new_header + "\n" + "".join(rest_of_file) with open(fp, "w") as f: f.write(new_file_contents) return preserve_text_store # Return so we can reuse for future files def main(args): preserve_text_store = {} # Used to track header comments we should preserve for root, dirs, fnames in os.walk(args.repo_dir): # Walk across directory looking for all files with extensions we want to modify for ext in args.python_style_ext: fpaths = [os.path.join(root, fn) for fn in fnames if fn.endswith(ext)] for fp in fpaths: file_header, rest_of_file = get_header_py(fp) preserve_text_store = modify_file_header(fp, file_header, rest_of_file, preserve_text_store, PY_COMMENTS) for ext in args.c_style_ext: fpaths = [os.path.join(root, fn) for fn in fnames if fn.endswith(ext)] for fp in fpaths: file_header, rest_of_file = get_header_c(fp) preserve_text_store = modify_file_header(fp, file_header, rest_of_file, preserve_text_store, C_COMMENTS) for ext in args.bash_style_ext: fpaths = [os.path.join(root, fn) for fn in fnames if fn.endswith(ext)] for fp in fpaths: file_header, rest_of_file = get_header_bash(fp) preserve_text_store = modify_file_header(fp, file_header, rest_of_file, preserve_text_store, BASH_COMMENTS) if __name__ == "__main__": args = parser_args() main(args)
8,221
33.838983
117
py
DeepSpeed
DeepSpeed-master/scripts/check-torchdist.py
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from __future__ import annotations
'''Copyright The Microsoft DeepSpeed Team'''
"""
Checks each file in sys.argv for the string "torch.distributed".
Modified from https://github.com/jlebar/pre-commit-hooks/blob/master/check_do_not_submit.py
"""

import subprocess
import sys


def err(s: str) -> None:
    print(s, file=sys.stderr)


# There are many ways we could search for the string "torch.distributed", but `git
# grep --no-index` is nice because
# - it's very fast (as compared to iterating over the file in Python)
# - we can reasonably assume it's available on all machines
# - unlike plain grep, which is slower and has different flags on MacOS versus
#   Linux, git grep is always the same.
res = subprocess.run(
    ["git", "grep", "-Hn", "--no-index", r"torch\.distributed", *sys.argv[1:]],
    capture_output=True,
)
if res.returncode == 0:
    err('Error: The string "torch.distributed" was found. Please replace all calls to torch.distributed with "deepspeed.comm"'
        )
    err(res.stdout.decode("utf-8"))
    sys.exit(1)
elif res.returncode == 2:
    err(f"Error invoking grep on {', '.join(sys.argv[1:])}:")
    err(res.stderr.decode("utf-8"))
    sys.exit(2)
1,306
30.878049
126
py
DeepSpeed
DeepSpeed-master/tests/conftest.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team # tests directory-specific settings - this file is run automatically by pytest before any tests are run import sys import pytest import os from os.path import abspath, dirname, join import torch import warnings # Set this environment variable for the T5 inference unittest(s) (e.g. google/t5-v1_1-small) os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python' # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. git_repo_path = abspath(join(dirname(dirname(__file__)), "src")) sys.path.insert(1, git_repo_path) def pytest_configure(config): config.option.color = "yes" config.option.durations = 0 config.option.durations_min = 1 config.option.verbose = True def pytest_addoption(parser): parser.addoption("--torch_ver", default=None, type=str) parser.addoption("--cuda_ver", default=None, type=str) def validate_version(expected, found): version_depth = expected.count('.') + 1 found = '.'.join(found.split('.')[:version_depth]) return found == expected @pytest.fixture(scope="session", autouse=True) def check_environment(pytestconfig): expected_torch_version = pytestconfig.getoption("torch_ver") expected_cuda_version = pytestconfig.getoption("cuda_ver") if expected_torch_version is None: warnings.warn( "Running test without verifying torch version, please provide an expected torch version with --torch_ver") elif not validate_version(expected_torch_version, torch.__version__): pytest.exit( f"expected torch version {expected_torch_version} did not match found torch version {torch.__version__}", returncode=2) if expected_cuda_version is None: warnings.warn( "Running test without verifying cuda version, please provide an expected cuda version with --cuda_ver") elif not validate_version(expected_cuda_version, torch.version.cuda): pytest.exit( f"expected cuda version {expected_cuda_version} did not match found cuda version {torch.version.cuda}", returncode=2) # Override of pytest "runtest" for DistributedTest class # This hook is run before the default pytest_runtest_call @pytest.hookimpl(tryfirst=True) def pytest_runtest_call(item): # We want to use our own launching function for distributed tests if getattr(item.cls, "is_dist_test", False): dist_test_class = item.cls() dist_test_class(item._request) item.runtest = lambda: True # Dummy function so test is not run twice # We allow DistributedTest to reuse distributed environments. When the last # test for a class is run, we want to make sure those distributed environments # are destroyed. def pytest_runtest_teardown(item, nextitem): if getattr(item.cls, "reuse_dist_env", False) and not nextitem: dist_test_class = item.cls() for num_procs, pool in dist_test_class._pool_cache.items(): dist_test_class._close_pool(pool, num_procs, force=True) @pytest.hookimpl(tryfirst=True) def pytest_fixture_setup(fixturedef, request): if getattr(fixturedef.func, "is_dist_fixture", False): dist_fixture_class = fixturedef.func() dist_fixture_class(request)
3,397
37.613636
118
py
DeepSpeed
DeepSpeed-master/tests/benchmarks/flatten_bench.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

#!/usr/bin/env python

# run the benchmark under timeit (-t), cProfile (-c), line_profiler (-l)
#
# usage:
# ./flatten_bench.py -t
# ./flatten_bench.py -c
# kernprof -l flatten_bench.py -l; python -m line_profiler flatten_bench.py.lprof

import argparse
import gc
import torch
from torch._utils import _flatten_dense_tensors
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import UtilsBuilder
from apex_C import flatten as flatten_apex

util_ops = UtilsBuilder().load()
flatten = util_ops.flatten
unflatten = util_ops.unflatten

torch.manual_seed(0)

# emulate a small typical model weights
x = [
    torch.rand((512, 512)).to(get_accelerator().device_name()),
    torch.rand((512, 1024)).to(get_accelerator().device_name()),
    torch.rand((512, 30000)).to(get_accelerator().device_name())
]
t = x * 30

# warm up and check that the same output is produced
flat_py = _flatten_dense_tensors(t)
flat_cpp = flatten(t)
flat_apex = flatten_apex(t)
#numel = flat_cpp.numel()
assert torch.eq(flat_py, flat_cpp).all(), "both produce the same tensor"
assert torch.eq(flat_py, flat_apex).all(), "both produce the same tensor"

TIMES = 1000


# the programs being tested
def py():
    for i in range(TIMES):
        flat = _flatten_dense_tensors(t)


def cpp():
    for i in range(TIMES):
        flat = flatten(t)


def apex():
    for i in range(TIMES):
        flat = flatten_apex(t)


#### cProfile ####

import cProfile


def cprofileme():
    print("--------------- cProfile -----------------")
    print("py")
    cProfile.run("py()", sort=-1)
    gc.collect()
    get_accelerator().empty_cache()
    print("cpp")
    cProfile.run("cpp()", sort=-1)
    gc.collect()
    get_accelerator().empty_cache()
    print("apex")
    cProfile.run("apex()", sort=-1)
    gc.collect()
    get_accelerator().empty_cache()


#### timeit ####

import timeit


def timeme():
    print("--------------- timeit -----------------")
    print(f'py ={timeit.Timer("py()", globals=globals()).timeit(number=1)}')
    gc.collect()
    get_accelerator().empty_cache()
    print(f'cpp ={timeit.Timer("cpp()", globals=globals()).timeit(number=1)}')
    gc.collect()
    get_accelerator().empty_cache()
    print(f'apex={timeit.Timer("apex()", globals=globals()).timeit(number=1)}')
    gc.collect()
    get_accelerator().empty_cache()


#### line_profiler ####
# this one requires a special way to be called
# pip install line_profiler
# kernprof -l flatten_bench.py -l; python -m line_profiler flatten_bench.py.lprof


def line_profileme():
    print("--------------- line_profiler -----------------")
    print("py")
    profile(py)()  # noqa: F821
    gc.collect()
    get_accelerator().empty_cache()
    print("cpp")
    profile(cpp)()  # noqa: F821
    gc.collect()
    get_accelerator().empty_cache()
    print("apex")
    profile(apex)()  # noqa: F821
    gc.collect()
    get_accelerator().empty_cache()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-l", action='store_true')
    parser.add_argument("-c", action='store_true')
    parser.add_argument("-t", action='store_true')
    args = parser.parse_args()

    if args.l:
        line_profileme()
    elif args.c:
        cprofileme()
    elif args.t:
        timeme()
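A small sketch of the round-trip contract the benchmark assumes: flattening a list of tensors into one contiguous buffer and unflattening it again reproduces the originals. It uses only the stock `torch._utils` helpers, so it runs without the DeepSpeed or apex extensions loaded above; the shapes are arbitrary.

```python
import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors

tensors = [torch.rand(4, 4), torch.rand(8)]
flat = _flatten_dense_tensors(tensors)            # one contiguous 1-D buffer
views = _unflatten_dense_tensors(flat, tensors)   # tensors shaped like the inputs

assert flat.numel() == sum(t.numel() for t in tensors)
assert all(torch.equal(a, b) for a, b in zip(tensors, views))
```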
3,378
23.485507
82
py
DeepSpeed
DeepSpeed-master/tests/benchmarks/unflatten_bench.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

#!/usr/bin/env python

# run the benchmark under timeit (-t), cProfile (-c), line_profiler (-l)
#
# usage:
# ./unflatten_bench.py -t
# ./unflatten_bench.py -c
# kernprof -l unflatten_bench.py -l; python -m line_profiler unflatten_bench.py.lprof

import argparse
import gc
import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import UtilsBuilder
from apex_C import flatten as flatten_apex
from apex_C import unflatten as unflatten_apex

util_ops = UtilsBuilder().load()
flatten = util_ops.flatten
unflatten = util_ops.unflatten

torch.manual_seed(0)

# emulate a small typical model weights
x = [
    torch.rand((512, 512)).to(get_accelerator().device_name()),
    torch.rand((512, 1024)).to(get_accelerator().device_name()),
    torch.rand((512, 30000)).to(get_accelerator().device_name())
]
unflat_t = x * 30

# warm up and check that the same output is produced
flat_py = _flatten_dense_tensors(unflat_t)
flat_cpp = flatten(unflat_t)
flat_apex = flatten_apex(unflat_t)
#numel = flat_cpp.numel()
assert torch.eq(flat_py, flat_cpp).all(), "both produce the same tensor"
assert torch.eq(flat_py, flat_apex).all(), "both produce the same tensor"

flat_t = flat_py

unflat_py = _unflatten_dense_tensors(flat_py, unflat_t)
for i in range(len(unflat_t)):
    assert torch.eq(unflat_t[i], unflat_py[i]).all()

unflat_cpp = _unflatten_dense_tensors(flat_cpp, unflat_t)
for i in range(len(unflat_t)):
    assert torch.eq(unflat_t[i], unflat_cpp[i]).all()

unflat_apex = _unflatten_dense_tensors(flat_apex, unflat_t)
for i in range(len(unflat_t)):
    assert torch.eq(unflat_t[i], unflat_apex[i]).all()


# the programs being tested
def py():
    for i in range(1000):
        unflat = _unflatten_dense_tensors(flat_t, unflat_t)


def cpp():
    for i in range(1000):
        unflat = unflatten(flat_t, unflat_t)


def apex():
    for i in range(1000):
        unflat = unflatten_apex(flat_t, unflat_t)


#### cProfile ####

import cProfile


def cprofileme():
    print("--------------- cProfile -----------------")
    print("py")
    cProfile.run("py()", sort=-1)
    gc.collect()
    get_accelerator().empty_cache()
    print("cpp")
    cProfile.run("cpp()", sort=-1)
    gc.collect()
    get_accelerator().empty_cache()
    print("apex")
    cProfile.run("apex()", sort=-1)
    gc.collect()
    get_accelerator().empty_cache()


#### timeit ####

import timeit


def timeme():
    print("--------------- timeit -----------------")
    print(f'py ={timeit.Timer("py()", globals=globals()).timeit(number=1)}')
    gc.collect()
    get_accelerator().empty_cache()
    print(f'cpp ={timeit.Timer("cpp()", globals=globals()).timeit(number=1)}')
    gc.collect()
    get_accelerator().empty_cache()
    print(f'apex={timeit.Timer("apex()", globals=globals()).timeit(number=1)}')
    gc.collect()
    get_accelerator().empty_cache()


#### line_profiler ####
# this one requires a special way to be called
# pip install line_profiler
# kernprof -l unflatten_bench.py -l; python -m line_profiler unflatten_bench.py.lprof


def line_profileme():
    print("--------------- line_profiler -----------------")
    print("py")
    profile(py)()  # noqa: F821
    gc.collect()
    get_accelerator().empty_cache()
    print("cpp")
    profile(cpp)()  # noqa: F821
    gc.collect()
    get_accelerator().empty_cache()
    print("apex")
    profile(apex)()  # noqa: F821
    gc.collect()
    get_accelerator().empty_cache()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-l", action='store_true')
    parser.add_argument("-c", action='store_true')
    parser.add_argument("-t", action='store_true')
    args = parser.parse_args()

    if args.l:
        line_profileme()
    elif args.c:
        cprofileme()
    elif args.t:
        timeme()
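Note that `profile` in `line_profileme()` above is never imported: when the script is run via `kernprof -l`, line_profiler injects a global `profile` decorator, which is why the calls carry `# noqa: F821`. A common guard (not used in these benchmarks, shown here only as a sketch) makes the script also runnable without kernprof:

```python
# Fall back to a no-op decorator when the script is not running under kernprof.
try:
    profile  # noqa: F821  - injected into builtins by kernprof
except NameError:
    def profile(func):
        return func
```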
3,975
26.047619
86
py
DeepSpeed
DeepSpeed-master/tests/accelerator/test_ds_init.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator


class OneLayerNet(torch.nn.Module):

    def __init__(self, D_in, D_out):
        """
        In the constructor we instantiate a single nn.Linear module and assign it
        as a member variable.
        """
        super(OneLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, D_out)

    def forward(self, x):
        """
        In the forward function we accept a Variable of input data and we must return
        a Variable of output data. We can use Modules defined in the constructor as
        well as arbitrary operators on Variables.
        """
        h_relu = self.linear1(x).clamp(min=0)
        y_pred = self.linear1(h_relu)
        return y_pred


def test_literal_device():
    model = OneLayerNet(128, 128)

    os.environ['RANK'] = '0'
    os.environ['WORLD_SIZE'] = '1'
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '8088'
    os.environ['LOCAL_RANK'] = '0'

    deepspeed.init_distributed(get_accelerator().communication_backend_name())
    deepspeed.initialize(model=model, config='ds_config.json')
    string = get_accelerator().device_name()  #'xpu' or 'cuda'
    string0 = get_accelerator().device_name(0)  #'xpu:0' or 'cuda:0'
    string1 = get_accelerator().device_name(1)  #'xpu:1' or 'cuda:1'
    assert string == 'xpu' or string == 'cuda'
    assert string0 == 'xpu:0' or string0 == 'cuda:0'
    assert string1 == 'xpu:1' or string1 == 'cuda:1'
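A minimal sketch of how the `device_name()` strings checked above are typically consumed, assuming DeepSpeed and a supported accelerator are installed; it simply feeds the indexed name to `torch.device` to place a tensor.

```python
import torch
from deepspeed.accelerator import get_accelerator

# device_name() -> 'cuda' or 'xpu'; device_name(0) -> 'cuda:0' or 'xpu:0'
device = torch.device(get_accelerator().device_name(0))
x = torch.ones(2, 2, device=device)
print(x.device)
```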
1,610
31.877551
85
py
DeepSpeed
DeepSpeed-master/tests/hybrid_engine/hybrid_engine_test.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
from transformers import AutoModelForCausalLM
import deepspeed
import argparse
from deepspeed.accelerator import get_accelerator

deepspeed.runtime.utils.see_memory_usage('pre test', force=True)

model = AutoModelForCausalLM.from_pretrained('facebook/opt-350M').half().to(get_accelerator().device_name())
parser = argparse.ArgumentParser()
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args()

deepspeed.runtime.utils.see_memory_usage('post test', force=True)

m, _, _, _ = deepspeed.initialize(model=model, args=args, enable_hybrid_engine=True)

m.eval()
input = torch.ones(1, 16, device='cuda', dtype=torch.long)
out = m(input)

m.train()
out = m(input)
print(out['logits'], out['logits'].norm())
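The script expects a DeepSpeed config to arrive through the CLI flags added by `deepspeed.add_config_arguments` (e.g. `--deepspeed_config ds_config.json`). A minimal illustrative config of the kind it might be given is sketched below; the specific values are assumptions for a single-GPU smoke test, not taken from the repository.

```python
import json

# Illustrative only: a tiny fp16 config matching the half-precision model above.
ds_config = {
    "train_batch_size": 1,
    "fp16": {"enabled": True},
}

with open("ds_config.json", "w") as f:
    json.dump(ds_config, f, indent=2)
```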
831
25.83871
108
py
DeepSpeed
DeepSpeed-master/tests/unit/modeling.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from __future__ import absolute_import, division, print_function, unicode_literals # Copyright The Microsoft DeepSpeed Team # DeepSpeed note, code taken from commit 3d59216cec89a363649b4fe3d15295ba936ced0f # https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/modeling.py # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model.""" import copy import json import logging import math import os import shutil import tarfile import tempfile from io import open import torch from torch import nn from torch.nn import CrossEntropyLoss from torch.utils import checkpoint import deepspeed.comm as dist from torch.nn import Module import torch.nn.functional as F import torch.nn.init as init #from numba import cuda #from deepspeed_cuda import DeepSpeedSoftmaxConfig, DeepSpeedSoftmax from deepspeed.accelerator import get_accelerator logger = logging.getLogger(__name__) PRETRAINED_MODEL_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz", } CONFIG_NAME = 'bert_config.json' WEIGHTS_NAME = 'pytorch_model.bin' TF_WEIGHTS_NAME = 'model.ckpt' def load_tf_weights_in_bert(model, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model """ try: import re import numpy as np import tensorflow as tf except ImportError: print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(tf_checkpoint_path) print("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: print("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split('/') # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any(n in ["adam_v", "adam_m"] for n in name): print("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+_\d+', m_name): l = re.split(r'_(\d+)', m_name) else: l = [m_name] if l[0] == 'kernel' or l[0] == 'gamma': pointer = getattr(pointer, 'weight') elif l[0] == 'output_bias' or l[0] == 'beta': pointer = getattr(pointer, 'bias') elif l[0] == 'output_weights': pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model """ @torch.jit.script def f_gelu(x): return x * 0.5 * (1.0 + torch.erf(x / 1.41421)) @torch.jit.script def bias_gelu(bias, y): x = bias + y return x * 0.5 * (1.0 + torch.erf(x / 1.41421)) @torch.jit.script def bias_tanh(bias, y): x = bias + y return torch.tanh(x) """ def f_gelu(x): x_type = x.dtype x = x.float() x = x * 0.5 * (1.0 + torch.erf(x / 1.41421)) return x.to(x_type) def bias_gelu(bias, y): y_type = y.dtype x = bias.float() + y.float() x = x * 0.5 * (1.0 + torch.erf(x / 1.41421)) return x.to(y_type) def bias_tanh(bias, y): y_type = y.dtype x = bias.float() + y.float() x = torch.tanh(x) return x.to(y_type) def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return f_gelu(x) def swish(x): return x * torch.sigmoid(x) ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} class GPUTimer: def __init__(self): super().__init__() self.start = get_accelerator().Event() # noqa: F821 self.stop = get_accelerator().Event() # noqa: F821 def record(self): self.start.record() def elapsed(self): self.stop.record() self.stop.synchronize() return self.start.elapsed_time(self.stop) / 1000.0 class LinearActivation(Module): r"""Fused Linear and activation Module. 
""" __constants__ = ['bias'] def __init__(self, in_features, out_features, weights, biases, act='gelu', bias=True): super(LinearActivation, self).__init__() self.in_features = in_features self.out_features = out_features self.fused_gelu = False self.fused_tanh = False if isinstance(act, str): if bias and act == 'gelu': self.fused_gelu = True elif bias and act == 'tanh': self.fused_tanh = True else: self.act_fn = ACT2FN[act] else: self.act_fn = act #self.weight = Parameter(torch.Tensor(out_features, in_features)) self.weight = weights[5] self.bias = biases[5] #if bias: # self.bias = Parameter(torch.Tensor(out_features)) #else: # self.register_parameter('bias', None) #self.reset_parameters() def reset_parameters(self): init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) bound = 1 / math.sqrt(fan_in) init.uniform_(self.bias, -bound, bound) def forward(self, input): if self.fused_gelu: #timing = [] #t1 = GPUTimer() #t1.record() y = F.linear(input, self.weight, None) #timing.append(t1.elapsed()) #t1.record() bg = bias_gelu(self.bias, y) #timing.append(t1.elapsed()) return bg elif self.fused_tanh: return bias_tanh(self.bias, F.linear(input, self.weight, None)) else: return self.act_fn(F.linear(input, self.weight, self.bias)) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self.in_features, self.out_features, self.bias is not None) class BertConfig(object): """Configuration class to store the configuration of a `BertModel`. """ def __init__(self, vocab_size_or_config_json_file, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, batch_size=8, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, fp16=False): """Constructs BertConfig. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported. hidden_dropout_prob: The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `BertModel`. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. 
""" if isinstance(vocab_size_or_config_json_file, str): with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.vocab_size = vocab_size_or_config_json_file self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.batch_size = batch_size self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.fp16 = fp16 else: raise ValueError("First argument must be either a vocabulary size (int)" "or the path to a pretrained model config file (str)") @classmethod def from_dict(cls, json_object): """Constructs a `BertConfig` from a Python dictionary of parameters.""" config = BertConfig(vocab_size_or_config_json_file=-1) for key, value in json_object.items(): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with open(json_file, "r", encoding='utf-8') as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" try: import apex #apex.amp.register_half_function(apex.normalization.fused_layer_norm, 'FusedLayerNorm') import apex.normalization #apex.amp.register_float_function(apex.normalization.FusedLayerNorm, 'forward') BertLayerNorm = apex.normalization.FusedLayerNorm except ImportError: print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.") class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. 
""" def __init__(self, config): super(BertEmbeddings, self).__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids, token_type_ids=None): seq_length = input_ids.size(1) position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) words_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = words_embeddings + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, i, config, weights, biases): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError("The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.query.weight = weights[0] self.query.bias = biases[0] self.key = nn.Linear(config.hidden_size, self.all_head_size) self.key.weight = weights[1] self.key.bias = biases[1] self.value = nn.Linear(config.hidden_size, self.all_head_size) self.value.weight = weights[2] self.value.bias = biases[2] self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.softmax = nn.Softmax(dim=-1) #self.softmax_config = DeepSpeedSoftmaxConfig() #self.softmax_config.batch_size = config.batch_size #self.softmax_config.max_seq_length = config.max_position_embeddings #self.softmax_config.hidden_size = config.hidden_size #self.softmax_config.heads = config.num_attention_heads #self.softmax_config.softmax_id = i #self.softmax_config.fp16 = config.fp16 #self.softmax_config.prob_drop_out = 0.0 #self.softmax = DeepSpeedSoftmax(i, self.softmax_config) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def transpose_key_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 3, 1) def forward(self, hidden_states, attention_mask, grads=None): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_key_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer) attention_scores = attention_scores / math.sqrt(self.attention_head_size) attention_scores = 
attention_scores + attention_mask attention_probs = self.softmax(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer1 = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer1.size()[:-2] + (self.all_head_size, ) context_layer1 = context_layer1.view(*new_context_layer_shape) return context_layer1 class BertSelfOutput(nn.Module): def __init__(self, config, weights, biases): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dense.weight = weights[3] self.dense.bias = biases[3] self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def get_w(self): return self.dense.weight class BertAttention(nn.Module): def __init__(self, i, config, weights, biases): super(BertAttention, self).__init__() self.self = BertSelfAttention(i, config, weights, biases) self.output = BertSelfOutput(config, weights, biases) def forward(self, input_tensor, attention_mask): self_output = self.self(input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output def get_w(self): return self.output.get_w() class BertIntermediate(nn.Module): def __init__(self, config, weights, biases): super(BertIntermediate, self).__init__() self.dense_act = LinearActivation(config.hidden_size, config.intermediate_size, weights, biases, act=config.hidden_act) def forward(self, hidden_states): hidden_states = self.dense_act(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config, weights, biases): super(BertOutput, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dense.weight = weights[6] self.dense.bias = biases[6] self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, i, config, weights, biases): super(BertLayer, self).__init__() self.attention = BertAttention(i, config, weights, biases) self.intermediate = BertIntermediate(config, weights, biases) self.output = BertOutput(config, weights, biases) self.weight = weights self.biases = biases def forward(self, hidden_states, attention_mask, grads, collect_all_grads=False): attention_output = self.attention(hidden_states, attention_mask) intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) if collect_all_grads: # self.weight[0].register_hook(lambda x, self=self: grads.append([x,"Q_W"])) # self.biases[0].register_hook(lambda x, self=self: grads.append([x,"Q_B"])) # self.weight[1].register_hook(lambda x, self=self: grads.append([x,"K_W"])) # self.biases[1].register_hook(lambda x, self=self: grads.append([x,"K_B"])) self.weight[2].register_hook(lambda x, self=self: grads.append([x, "V_W"])) 
self.biases[2].register_hook(lambda x, self=self: grads.append([x, "V_B"])) self.weight[3].register_hook(lambda x, self=self: grads.append([x, "O_W"])) self.biases[3].register_hook(lambda x, self=self: grads.append([x, "O_B"])) self.attention.output.LayerNorm.weight.register_hook(lambda x, self=self: grads.append([x, "N2_W"])) self.attention.output.LayerNorm.bias.register_hook(lambda x, self=self: grads.append([x, "N2_B"])) self.weight[5].register_hook(lambda x, self=self: grads.append([x, "int_W"])) self.biases[5].register_hook(lambda x, self=self: grads.append([x, "int_B"])) self.weight[6].register_hook(lambda x, self=self: grads.append([x, "out_W"])) self.biases[6].register_hook(lambda x, self=self: grads.append([x, "out_B"])) self.output.LayerNorm.weight.register_hook(lambda x, self=self: grads.append([x, "norm_W"])) self.output.LayerNorm.bias.register_hook(lambda x, self=self: grads.append([x, "norm_B"])) return layer_output def get_w(self): return self.attention.get_w() class BertEncoder(nn.Module): def __init__(self, config, weights, biases): super(BertEncoder, self).__init__() #layer = BertLayer(config, weights, biases) self.FinalLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.layer = nn.ModuleList( [copy.deepcopy(BertLayer(i, config, weights, biases)) for i in range(config.num_hidden_layers)]) self.grads = [] self.graph = [] def get_grads(self): return self.grads # def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True): # all_encoder_layers = [] # for layer_module in self.layer: # hidden_states = layer_module(hidden_states, attention_mask) # if output_all_encoded_layers: # all_encoder_layers.append(hidden_states) # if not output_all_encoded_layers: # all_encoder_layers.append(hidden_states) # return all_encoder_layers def get_modules(self, big_node, input): for mdl in big_node.named_children(): self.graph.append(mdl) self.get_modules(self, mdl, input) def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, checkpoint_activations=False): all_encoder_layers = [] def custom(start, end): def custom_forward(*inputs): layers = self.layer[start:end] x_ = inputs[0] for layer in layers: x_ = layer(x_, inputs[1]) return x_ return custom_forward if checkpoint_activations: l = 0 num_layers = len(self.layer) chunk_length = math.ceil(math.sqrt(num_layers)) while l < num_layers: hidden_states = checkpoint.checkpoint(custom(l, l + chunk_length), hidden_states, attention_mask * 1) l += chunk_length # decoder layers else: for i, layer_module in enumerate(self.layer): hidden_states = layer_module(hidden_states, attention_mask, self.grads, collect_all_grads=True) hidden_states.register_hook(lambda x, i=i, self=self: self.grads.append([x, "hidden_state"])) #print("pytorch weight is: ", layer_module.get_w()) if output_all_encoded_layers: all_encoder_layers.append((hidden_states)) if not output_all_encoded_layers or checkpoint_activations: all_encoder_layers.append((hidden_states)) return all_encoder_layers #class BertEncoder(nn.Module): # def __init__(self, config): # super(BertEncoder, self).__init__() # layer = BertLayer(config) # self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)]) # # def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True): # all_encoder_layers = [] # for layer_module in self.layer: # hidden_states = layer_module(hidden_states, attention_mask) # if output_all_encoded_layers: # all_encoder_layers.append(hidden_states) # if not output_all_encoded_layers: # 
all_encoder_layers.append(hidden_states) # return all_encoder_layers class BertPooler(nn.Module): def __init__(self, config): super(BertPooler, self).__init__() self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act="tanh") def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense_act(first_token_tensor) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act=config.hidden_act) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) def forward(self, hidden_states): hidden_states = self.dense_act(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertLMPredictionHead, self).__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(bert_model_embedding_weights.size(1), bert_model_embedding_weights.size(0), bias=False) self.decoder.weight = bert_model_embedding_weights self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0))) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) get_accelerator().range_push("decoder input.size() = {}, weight.size() = {}".format( hidden_states.size(), self.decoder.weight.size())) hidden_states = self.decoder(hidden_states) + self.bias get_accelerator().range_pop() return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertOnlyMLMHead, self).__init__() self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super(BertOnlyNSPHead, self).__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertPreTrainingHeads, self).__init__() self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BertPreTrainedModel(nn.Module): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ def __init__(self, config, *inputs, **kwargs): super(BertPreTrainedModel, self).__init__() if not isinstance(config, BertConfig): raise ValueError("Parameter config in `{}(config)` should be an instance of class `BertConfig`. " "To create a model from a Google pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__)) self.config = config def init_bert_weights(self, module): """ Initialize the weights. 
""" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @classmethod def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs): """ Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `bert-base-uncased` . `bert-large-uncased` . `bert-base-cased` . `bert-large-cased` . `bert-base-multilingual-uncased` . `bert-base-multilingual-cased` . `bert-base-chinese` - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `model.chkpt` a TensorFlow checkpoint from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models *inputs, **kwargs: additional input for the specific Bert class (ex: num_labels for BertForSequenceClassification) """ if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] else: archive_file = pretrained_model_name_or_path if resolved_archive_file == archive_file: # noqa: F821 logger.info("loading archive file {}".format(archive_file)) else: logger.info("loading archive file {} from cache at {}".format(archive_file, resolved_archive_file)) # noqa: F821 tempdir = None if os.path.isdir(resolved_archive_file) or from_tf: # noqa: F821 serialization_dir = resolved_archive_file # noqa: F821 else: # Extract archive to temp dir tempdir = tempfile.mkdtemp() logger.info("extracting archive file {} to temp dir {}".format( resolved_archive_file, # noqa: F821 tempdir)) with tarfile.open(resolved_archive_file, 'r:gz') as archive: # noqa: F821 archive.extractall(tempdir) serialization_dir = tempdir # Load config config_file = os.path.join(serialization_dir, CONFIG_NAME) config = BertConfig.from_json_file(config_file) logger.info("Model config {}".format(config)) # Instantiate model. 
model = cls(config, *inputs, **kwargs) if state_dict is None and not from_tf: weights_path = os.path.join(serialization_dir, WEIGHTS_NAME) state_dict = torch.load(weights_path, map_location='cpu' if not get_accelerator().is_available() else None) if tempdir: # Clean up temp dir shutil.rmtree(tempdir) if from_tf: # Directly load from a TensorFlow checkpoint weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME) return load_tf_weights_in_bert(model, weights_path) # Load from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') start_prefix = '' if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()): start_prefix = 'bert.' load(model, prefix=start_prefix) if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( model.__class__.__name__, "\n\t".join(error_msgs))) return model class BertModel(BertPreTrainedModel): """BERT model ("Bidirectional Embedding Representations from a Transformer"). Params: config: a BertConfig class instance with the configuration to build a new model Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`. Outputs: Tuple of (encoded_layers, pooled_output) `encoded_layers`: controlled by `output_all_encoded_layers` argument: - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end of each attention block (i.e. 
12 full sequences for BERT-base, 24 for BERT-large), each encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size], - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding to the last attention block of shape [batch_size, sequence_length, hidden_size], `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a classifier pretrained on top of the hidden state associated to the first character of the input (`CLS`) to train on the Next-Sentence task (see BERT's paper). Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = modeling.BertModel(config=config) all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertModel, self).__init__(config) self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True, checkpoint_activations=False): if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 embedding_output = self.embeddings(input_ids, token_type_ids) encoded_layers = self.encoder(embedding_output, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers, checkpoint_activations=checkpoint_activations) sequence_output = encoded_layers[-1] pooled_output = self.pooler(sequence_output) if not output_all_encoded_layers: encoded_layers = encoded_layers[-1] return encoded_layers, pooled_output class BertForPreTraining(BertPreTrainedModel): """BERT model with pre-training heads. This module comprises the BERT model followed by the two pre-training heads: - the masked language modeling head, and - the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. 
Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. Outputs: if `masked_lm_labels` and `next_sentence_label` are not `None`: Outputs the total_loss which is the sum of the masked language modeling loss and the next sentence classification loss. if `masked_lm_labels` or `next_sentence_label` is `None`: Outputs a tuple comprising - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and - the next sentence classification logits of shape [batch_size, 2]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForPreTraining(config) masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, args): super(BertForPreTraining, self).__init__(config) self.summary_writer = None if dist.get_rank() == 0: self.summary_writer = args.summary_writer self.samples_per_step = dist.get_world_size() * args.train_batch_size self.sample_count = self.samples_per_step self.bert = BertModel(config) self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def log_summary_writer(self, logs: dict, base='Train'): if dist.get_rank() == 0: module_name = "Samples" #self._batch_module_name.get(batch_type, self._get_batch_type_error(batch_type)) for key, log in logs.items(): self.summary_writer.add_scalar(f'{base}/{module_name}/{key}', log, self.sample_count) self.sample_count += self.samples_per_step def forward(self, batch, log=True): #input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, checkpoint_activations=False): input_ids = batch[1] token_type_ids = batch[3] attention_mask = batch[2] masked_lm_labels = batch[5] next_sentence_label = batch[4] checkpoint_activations = False sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, 
checkpoint_activations=checkpoint_activations) prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) if masked_lm_labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) #print("loss is {} {}".format(masked_lm_loss, next_sentence_loss)) total_loss = masked_lm_loss + next_sentence_loss # if log: # self.log_summary_writer(logs={'train_loss': total_loss.item()}) return total_loss else: return prediction_scores, seq_relationship_score class BertForMaskedLM(BertPreTrainedModel): """BERT model with the masked language modeling head. This module comprises the BERT model followed by the masked language modeling head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] Outputs: if `masked_lm_labels` is not `None`: Outputs the masked language modeling loss. if `masked_lm_labels` is `None`: Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForMaskedLM(config) masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForMaskedLM, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, checkpoint_activations=False): sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) prediction_scores = self.cls(sequence_output) if masked_lm_labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) return masked_lm_loss else: return prediction_scores class BertForNextSentencePrediction(BertPreTrainedModel): """BERT model with next sentence prediction head. This module comprises the BERT model followed by the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. Outputs: if `next_sentence_label` is not `None`: Outputs the total_loss which is the sum of the masked language modeling loss and the next sentence classification loss. if `next_sentence_label` is `None`: Outputs the next sentence classification logits of shape [batch_size, 2]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForNextSentencePrediction(config) seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForNextSentencePrediction, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyNSPHead(config) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None, checkpoint_activations=False): _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) seq_relationship_score = self.cls(pooled_output) if next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) return next_sentence_loss else: return seq_relationship_score class BertForSequenceClassification(BertPreTrainedModel): """BERT model for classification. This module is composed of the BERT model with a linear layer on top of the pooled output. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_labels`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_labels]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_labels = 2 model = BertForSequenceClassification(config, num_labels) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_labels): super(BertForSequenceClassification, self).__init__(config) self.num_labels = num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False): _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return loss else: return logits class BertForMultipleChoice(BertPreTrainedModel): """BERT model for multiple choice tasks. This module is composed of the BERT model with a linear layer on top of the pooled output. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_choices`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_choices]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]]) input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]]) token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_choices = 2 model = BertForMultipleChoice(config, num_choices) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_choices): super(BertForMultipleChoice, self).__init__(config) self.num_choices = num_choices self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False): flat_input_ids = input_ids.view(-1, input_ids.size(-1)) flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, self.num_choices) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) return loss else: return reshaped_logits class BertForTokenClassification(BertPreTrainedModel): """BERT model for token-level classification. This module is composed of the BERT model with a linear layer on top of the full hidden state of the last layer. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_labels`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, ..., num_labels]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, sequence_length, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_labels = 2 model = BertForTokenClassification(config, num_labels) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_labels): super(BertForTokenClassification, self).__init__(config) self.num_labels = num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False): sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return loss else: return logits class BertForQuestionAnswering(BertPreTrainedModel): """BERT model for Question Answering (span extraction). This module is composed of the BERT model with a linear layer on top of the sequence output that computes start_logits and end_logits Params: `config`: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size]. Positions are clamped to the length of the sequence and position outside of the sequence are not taken into account for computing the loss. `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size]. Positions are clamped to the length of the sequence and position outside of the sequence are not taken into account for computing the loss. Outputs: if `start_positions` and `end_positions` are not `None`: Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions. 
if `start_positions` or `end_positions` is `None`: Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end position tokens of shape [batch_size, sequence_length]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForQuestionAnswering(config) start_logits, end_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForQuestionAnswering, self).__init__(config) self.bert = BertModel(config) # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version # self.dropout = nn.Dropout(config.hidden_dropout_prob) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None, checkpoint_activations=False): sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return total_loss else: return start_logits, end_logits
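The question-answering head above returns only per-token start and end logits; turning them into an answer span is left to the caller. A minimal decoding sketch (the `decode_best_span` helper is hypothetical, not part of this file), assuming the best span is simply the highest-scoring start/end pair with start <= end and a bounded answer length:

```python
import torch


def decode_best_span(start_logits, end_logits, max_answer_len=30):
    # start_logits / end_logits: [batch_size, seq_len], as returned by BertForQuestionAnswering.
    # Score every (start, end) pair and keep only pairs with start <= end < start + max_answer_len.
    scores = start_logits.unsqueeze(2) + end_logits.unsqueeze(1)  # [B, S, S]
    seq_len = scores.size(-1)
    valid = torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool, device=scores.device))
    valid &= ~torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool, device=scores.device),
                         diagonal=max_answer_len)
    scores = scores.masked_fill(~valid, float('-inf'))
    flat_idx = scores.view(scores.size(0), -1).argmax(dim=-1)
    start_idx = torch.div(flat_idx, seq_len, rounding_mode='floor')
    end_idx = flat_idx % seq_len
    return start_idx, end_idx
```

In practice one would also mask out question tokens and handle unanswerable examples, which this sketch omits.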
70,129
44.986885
141
py
DeepSpeed
DeepSpeed-master/tests/unit/alexnet_model.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import pytest
import os
import torch
import torch.nn as nn
import torch.nn.functional as F

import deepspeed
import deepspeed.comm as dist
import deepspeed.runtime.utils as ds_utils
from deepspeed.accelerator import get_accelerator
from deepspeed.runtime.pipe.module import PipelineModule, LayerSpec


class AlexNet(nn.Module):

    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=5),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.classifier = nn.Linear(256, num_classes)
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, x, y):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return self.loss_fn(x, y)


class AlexNetPipe(AlexNet):

    def to_layers(self):
        layers = [*self.features, lambda x: x.view(x.size(0), -1), self.classifier]
        return layers


class AlexNetPipeSpec(PipelineModule):

    def __init__(self, num_classes=10, **kwargs):
        self.num_classes = num_classes
        specs = [
            LayerSpec(nn.Conv2d, 3, 64, kernel_size=11, stride=4, padding=5),
            LayerSpec(nn.ReLU, inplace=True),
            LayerSpec(nn.MaxPool2d, kernel_size=2, stride=2),
            LayerSpec(nn.Conv2d, 64, 192, kernel_size=5, padding=2),
            F.relu,
            LayerSpec(nn.MaxPool2d, kernel_size=2, stride=2),
            LayerSpec(nn.Conv2d, 192, 384, kernel_size=3, padding=1),
            F.relu,
            LayerSpec(nn.Conv2d, 384, 256, kernel_size=3, padding=1),
            F.relu,
            LayerSpec(nn.Conv2d, 256, 256, kernel_size=3, padding=1),
            F.relu,
            LayerSpec(nn.MaxPool2d, kernel_size=2, stride=2),
            lambda x: x.view(x.size(0), -1),
            LayerSpec(nn.Linear, 256, self.num_classes),  # classifier
        ]
        super().__init__(layers=specs, loss_fn=nn.CrossEntropyLoss(), **kwargs)


# Define this here because we cannot pickle local lambda functions
def cast_to_half(x):
    return x.half()


def cifar_trainset(fp16=False):
    torchvision = pytest.importorskip("torchvision", minversion="0.5.0")
    import torchvision.transforms as transforms

    transform_list = [
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]
    if fp16:
        transform_list.append(torchvision.transforms.Lambda(cast_to_half))

    transform = transforms.Compose(transform_list)

    local_rank = get_accelerator().current_device()

    # Only one rank per machine downloads.
    dist.barrier()
    if local_rank != 0:
        dist.barrier()
    data_root = os.getenv("TEST_DATA_DIR", "/tmp/")
    trainset = torchvision.datasets.CIFAR10(root=os.path.join(data_root, "cifar10-data"),
                                            train=True,
                                            download=True,
                                            transform=transform)
    if local_rank == 0:
        dist.barrier()
    return trainset


def train_cifar(model, config, num_steps=400, average_dp_losses=True, fp16=True, seed=123):
    with get_accelerator().random().fork_rng(devices=[get_accelerator().current_device_name()]):
        ds_utils.set_random_seed(seed)

        # disable dropout
        model.eval()

        trainset = cifar_trainset(fp16=fp16)
        config['local_rank'] = dist.get_rank()

        # deepspeed_io defaults to creating a dataloader that uses a
        # multiprocessing pool. Our tests use pools and we cannot nest pools in
        # python. Therefore we're injecting this kwarg to ensure that no pools
        # are used in the dataloader.
        old_method = deepspeed.runtime.engine.DeepSpeedEngine.deepspeed_io

        def new_method(*args, **kwargs):
            kwargs["num_local_io_workers"] = 0
            return old_method(*args, **kwargs)

        deepspeed.runtime.engine.DeepSpeedEngine.deepspeed_io = new_method

        engine, _, _, _ = deepspeed.initialize(config=config,
                                               model=model,
                                               model_parameters=[p for p in model.parameters()],
                                               training_data=trainset)

        losses = []
        for step in range(num_steps):
            loss = engine.train_batch()
            losses.append(loss.item())
            if step % 50 == 0 and dist.get_rank() == 0:
                print(f'STEP={step} LOSS={loss.item()}')

    if average_dp_losses:
        loss_tensor = torch.tensor(losses).to(get_accelerator().device_name())
        dist.all_reduce(loss_tensor)
        loss_tensor /= dist.get_world_size()
        losses = loss_tensor.tolist()

    return losses
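A minimal sketch of how the helpers above are typically combined; the DeepSpeed config values and the two-stage pipeline split are illustrative assumptions, and the snippet presumes the distributed backend has already been initialized (for example by the test harness in `common.py`) with at least two ranks:

```python
# Illustrative config: batch sizes and optimizer settings are assumptions, not
# values taken from this file.
config = {
    "train_batch_size": 16,
    "train_micro_batch_size_per_gpu": 8,
    "steps_per_print": 20,
    "optimizer": {
        "type": "SGD",
        "params": {
            "lr": 0.001,
            "momentum": 0.9
        }
    },
    "fp16": {
        "enabled": True
    }
}

net = AlexNetPipeSpec(num_classes=10, num_stages=2)  # two pipeline stages
losses = train_cifar(net, config, num_steps=100, fp16=True)
```

`train_cifar` drives `engine.train_batch()`, so the model must be a `PipelineModule` (as `AlexNetPipeSpec` is) rather than a plain `nn.Module`.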
5,441
34.337662
96
py
DeepSpeed
DeepSpeed-master/tests/unit/simple_model.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import os import json import argparse import torch from deepspeed.pipe import PipelineModule, LayerSpec from deepspeed.moe.layer import MoE from deepspeed.accelerator import get_accelerator import deepspeed.comm as dist class SimpleModel(torch.nn.Module): def __init__(self, hidden_dim, empty_grad=False, nlayers=1): super(SimpleModel, self).__init__() self.linears = torch.nn.ModuleList([torch.nn.Linear(hidden_dim, hidden_dim) for i in range(nlayers)]) if empty_grad: self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim) self.cross_entropy_loss = torch.nn.CrossEntropyLoss() self.empty_grad = empty_grad def forward(self, x, y): if len(self.linears) == 1: x = self.linears[0](x) else: for i, l in enumerate(self.linears): x = self.linears[i // 2](x) + l(x) return self.cross_entropy_loss(x, y) class SimpleFrozenModel(torch.nn.Module): def __init__(self, hidden_dim, empty_grad=False): super(SimpleFrozenModel, self).__init__() self.linears = torch.nn.ModuleList([torch.nn.Linear(hidden_dim, hidden_dim) for i in range(2)]) if empty_grad: self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim) self.cross_entropy_loss = torch.nn.CrossEntropyLoss() self.empty_grad = empty_grad # Freeze first layer self.linears[0].weight.requires_grad = False self.linears[0].bias.requires_grad = False def forward(self, x, y): if len(self.linears) == 1: x = self.linears[0](x) else: for i, l in enumerate(self.linears): x = self.linears[i // 2](x) + l(x) return self.cross_entropy_loss(x, y) class Curriculum_SimpleModel(SimpleModel): def __init__(self, hidden_dim, empty_grad=False): super(Curriculum_SimpleModel, self).__init__(hidden_dim, empty_grad) def forward(self, x, y, **kwargs): seqlen = kwargs.get('curriculum_seqlen', None) loss = super(Curriculum_SimpleModel, self).forward(x, y) return loss, seqlen class SimpleMoEModel(torch.nn.Module): def __init__(self, hidden_dim, num_experts=4, ep_size=1, use_residual=False): super(SimpleMoEModel, self).__init__() self.linear1 = torch.nn.Linear(hidden_dim, hidden_dim) expert = torch.nn.Sequential(torch.nn.Linear(hidden_dim, hidden_dim), torch.nn.Linear(hidden_dim, hidden_dim)) # using two MoE layers to check implications of sharing a single storage self.moe_1 = MoE(hidden_size=hidden_dim, expert=expert, ep_size=ep_size, use_residual=use_residual, num_experts=num_experts, k=1) # interleaving MoE modules with dense to create an opportunity # for gradients to be merged in ZeRO stage 2 average_tensor reduce bucket self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim) self.moe_2 = MoE(hidden_size=hidden_dim, expert=expert, ep_size=ep_size, use_residual=use_residual, num_experts=num_experts, k=1) self.linear3 = torch.nn.Linear(hidden_dim, hidden_dim) self.cross_entropy_loss = torch.nn.CrossEntropyLoss() def forward(self, x, y): hidden_dim = self.linear1(x) output, _, _ = self.moe_1(hidden_dim) output = self.linear2(output) output, _, _ = self.moe_2(output) output = self.linear3(output) hidden_dim = hidden_dim + output sentence_embed = hidden_dim.mean(1) return self.cross_entropy_loss(sentence_embed, y) class SimplePRMoEModel(torch.nn.Module): def __init__(self, hidden_dim, num_experts=2, ep_size=1, use_residual=False): super(SimplePRMoEModel, self).__init__() self.linear = torch.nn.Linear(hidden_dim, hidden_dim) linear2 = torch.nn.Linear(hidden_dim, hidden_dim) self.linear2 = MoE(hidden_size=hidden_dim, expert=linear2, ep_size=ep_size, use_residual=use_residual, num_experts=num_experts, k=1) 
linear3 = torch.nn.Linear(hidden_dim, hidden_dim) self.linear3 = MoE(hidden_size=hidden_dim, expert=linear3, ep_size=ep_size, use_residual=use_residual, num_experts=int(2 * num_experts), k=1) self.cross_entropy_loss = torch.nn.CrossEntropyLoss() def forward(self, x, y): hidden_dim = x hidden_dim = self.linear(hidden_dim) output, _, _ = self.linear2(hidden_dim) output, _, _ = self.linear3(output) hidden_dim = hidden_dim + output sentence_embed = hidden_dim.mean(1) return self.cross_entropy_loss(sentence_embed, y) class UnusedParametersModel(SimpleModel): def __init__(self, hidden_dim, empty_grad=False): super().__init__(hidden_dim, empty_grad) self.unused_linear = torch.nn.Linear(hidden_dim, hidden_dim) class LinearStack(torch.nn.Module): def __init__(self, input_dim=128, hidden_dim=128, output_dim=128, num_layers=4): super().__init__() self.input_dim = input_dim self.output_dim = output_dim self.hidden_dim = hidden_dim self.input_layer = torch.nn.Linear(in_features=self.input_dim, out_features=self.hidden_dim) self.layers = torch.nn.ModuleList([ torch.nn.Linear(in_features=self.hidden_dim, out_features=self.hidden_dim, bias=False) for x in range(num_layers) ]) self.output_layer = torch.nn.Linear(in_features=self.hidden_dim, out_features=self.output_dim) self.cross_entropy_loss = torch.nn.CrossEntropyLoss() def forward(self, x, y): x = self.input_layer(x) for layer in self.layers: x = layer(x) x = self.output_layer(x) return x class LinearStackPipe(PipelineModule): def __init__(self, input_dim=128, hidden_dim=128, output_dim=128, num_layers=4, **kwargs): self.input_dim = input_dim self.output_dim = output_dim self.hidden_dim = hidden_dim self.num_layers = num_layers layers = [] layers.append(LayerSpec(torch.nn.Linear, self.input_dim, self.hidden_dim)) for x in range(self.num_layers): layers.append(LayerSpec(torch.nn.Linear, self.hidden_dim, self.hidden_dim, bias=False)) layers.append(lambda x: x) layers.append(LayerSpec(torch.nn.Linear, self.hidden_dim, self.output_dim)) super().__init__(layers=layers, loss_fn=torch.nn.CrossEntropyLoss(), **kwargs) class SimpleOptimizer(torch.optim.Optimizer): def __init__(self, params, lr=0.11072018): defaults = dict(lr=lr) super(SimpleOptimizer, self).__init__(params, defaults) def __setstate__(self, state): super(SimpleOptimizer, self).__setstate__(state) def step(self, closure=None): loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue d_p = p.grad.data p.data.add_(-group['lr'], d_p) return loss class HybridStateOptimizer(torch.optim.Optimizer): def __init__(self, params, lr=0.11072018): defaults = dict(lr=lr) super(HybridStateOptimizer, self).__init__(params, defaults) def __setstate__(self, state): super(HybridStateOptimizer, self).__setstate__(state) def step(self, closure=None): loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue state = self.state[p] if len(state) == 0: state['integer_step'] = 0 state['tensor_step'] = torch.zeros(1, device=p.device) d_p = p.grad.data p.data.add_(-group['lr'], d_p) state['integer_step'] += 1 state['tensor_step'] += 1 return loss class PLD_SimpleModel(SimpleModel): def __init__(self, hidden_dim, empty_grad=False): super(PLD_SimpleModel, self).__init__(hidden_dim, empty_grad) def forward(self, x, y, **kwargs): pld = kwargs.get('progressive_layer_drop', False) theta = kwargs.get('pld_theta', 1.0) hidden_dim = super(PLD_SimpleModel, self).forward(x, 
y) return hidden_dim def random_dataset(total_samples, hidden_dim, device, dtype=torch.half): train_data = torch.randn(total_samples, hidden_dim, device=device, dtype=dtype) train_label = torch.empty(total_samples, dtype=torch.long, device=device).random_(hidden_dim) train_dataset = torch.utils.data.TensorDataset(train_data, train_label) return train_dataset def random_dataloader(model, total_samples, hidden_dim, device, dtype=torch.half): batch_size = model.train_micro_batch_size_per_gpu() train_dataset = random_dataset(total_samples, hidden_dim, device, dtype=dtype) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size) return train_loader def sequence_dataloader(model, total_samples, hidden_dim, device, seq_len: int = 32, dtype=torch.half): batch_size = model.train_micro_batch_size_per_gpu() train_data = torch.randn(total_samples, seq_len, hidden_dim, device=device, dtype=dtype) train_label = torch.empty(total_samples, dtype=torch.long, device=device).random_(hidden_dim) train_dataset = torch.utils.data.TensorDataset(train_data, train_label) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size) return train_loader def create_config_from_dict(tmpdir, config_dict): config_path = os.path.join(tmpdir, 'temp_config.json') with open(config_path, 'w') as fd: json.dump(config_dict, fd) return config_path def create_deepspeed_args(): parser = argparse.ArgumentParser() args = parser.parse_args(args='') args.deepspeed = True if dist.is_initialized(): # We assume up to one full node executing unit tests assert dist.get_world_size() <= get_accelerator().device_count() args.local_rank = dist.get_rank() return args def args_from_dict(tmpdir, config_dict): args = create_deepspeed_args() config_path = create_config_from_dict(tmpdir, config_dict) args.deepspeed_config = config_path return args
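A minimal sketch of how these model and data helpers are usually wired together; the config dict and sample counts are illustrative assumptions, and distributed initialization is presumed to have happened already (for example via the harness in `common.py`):

```python
import deepspeed

# Illustrative config; the optimizer and batch settings are assumptions.
config_dict = {
    "train_micro_batch_size_per_gpu": 1,
    "gradient_accumulation_steps": 1,
    "optimizer": {
        "type": "Adam",
        "params": {
            "lr": 1e-3
        }
    },
    "fp16": {
        "enabled": True
    }
}

hidden_dim = 10
model = SimpleModel(hidden_dim)
engine, _, _, _ = deepspeed.initialize(config=config_dict,
                                       model=model,
                                       model_parameters=list(model.parameters()))
data_loader = random_dataloader(model=engine,
                                total_samples=16,
                                hidden_dim=hidden_dim,
                                device=engine.device)
for x, y in data_loader:
    loss = engine(x, y)
    engine.backward(loss)
    engine.step()
```

`random_dataloader` takes the engine rather than the raw model so that it can read `train_micro_batch_size_per_gpu()` for the batch size.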
11,029
35.523179
118
py
DeepSpeed
DeepSpeed-master/tests/unit/common.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import os import re import time import inspect import socket import subprocess from abc import ABC, abstractmethod from pathlib import Path import torch import torch.multiprocessing as mp import deepspeed from deepspeed.accelerator import get_accelerator import deepspeed.comm as dist import pytest from _pytest.outcomes import Skipped from _pytest.fixtures import FixtureLookupError, FixtureFunctionMarker # Worker timeout *after* the first worker has completed. DEEPSPEED_UNIT_WORKER_TIMEOUT = 120 # Worker timeout for tests that hang DEEPSPEED_TEST_TIMEOUT = 600 def is_rocm_pytorch(): return hasattr(torch.version, 'hip') and torch.version.hip is not None def get_xdist_worker_id(): xdist_worker = os.environ.get('PYTEST_XDIST_WORKER', None) if xdist_worker is not None: xdist_worker_id = xdist_worker.replace('gw', '') return int(xdist_worker_id) return None def get_master_port(): # Select a random open port with socket.socket() as s: s.bind(('', 0)) return str(s.getsockname()[1]) def set_accelerator_visible(): cuda_visible = os.environ.get("CUDA_VISIBLE_DEVICES", None) xdist_worker_id = get_xdist_worker_id() if xdist_worker_id is None: xdist_worker_id = 0 if cuda_visible is None: # CUDA_VISIBLE_DEVICES is not set, discover it using accelerator specific command instead if get_accelerator().device_name() == 'cuda': if is_rocm_pytorch(): rocm_smi = subprocess.check_output(['rocm-smi', '--showid']) gpu_ids = filter(lambda s: 'GPU' in s, rocm_smi.decode('utf-8').strip().split('\n')) num_accelerators = len(list(gpu_ids)) else: nvidia_smi = subprocess.check_output(['nvidia-smi', '--list-gpus']) num_accelerators = len(nvidia_smi.decode('utf-8').strip().split('\n')) elif get_accelerator().device_name() == 'xpu': clinfo = subprocess.check_output(['clinfo']) lines = clinfo.decode('utf-8').strip().split('\n') num_accelerators = 0 for line in lines: match = re.search('Device Type.*GPU', line) if match: num_accelerators += 1 else: assert get_accelerator().device_name() == 'cpu' cpu_sockets = int( subprocess.check_output('cat /proc/cpuinfo | grep "physical id" | sort -u | wc -l', shell=True)) num_accelerators = cpu_sockets cuda_visible = ",".join(map(str, range(num_accelerators))) # rotate list based on xdist worker id, example below # wid=0 -> ['0', '1', '2', '3'] # wid=1 -> ['1', '2', '3', '0'] # wid=2 -> ['2', '3', '0', '1'] # wid=3 -> ['3', '0', '1', '2'] dev_id_list = cuda_visible.split(",") dev_id_list = dev_id_list[xdist_worker_id:] + dev_id_list[:xdist_worker_id] os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(dev_id_list) class DistributedExec(ABC): """ Base class for distributed execution of functions/methods. Contains common methods needed for DistributedTest and DistributedFixture. """ world_size = 2 backend = get_accelerator().communication_backend_name() init_distributed = True set_dist_env = True requires_cuda_env = True reuse_dist_env = False _pool_cache = {} @abstractmethod def run(self): ... 
def __call__(self, request=None): self._fixture_kwargs = self._get_fixture_kwargs(request, self.run) world_size = self.world_size if self.requires_cuda_env and not get_accelerator().is_available(): pytest.skip("only supported in accelerator environments.") if isinstance(world_size, int): world_size = [world_size] for procs in world_size: self._launch_procs(procs) def _get_fixture_kwargs(self, request, func): if not request: return {} # Grab fixture / parametrize kwargs from pytest request object fixture_kwargs = {} params = inspect.getfullargspec(func).args params.remove("self") for p in params: try: fixture_kwargs[p] = request.getfixturevalue(p) except FixtureLookupError: pass # test methods can have kwargs that are not fixtures return fixture_kwargs def _launch_procs(self, num_procs): # Verify we have enough accelerator devices to run this test if get_accelerator().is_available() and get_accelerator().device_count() < num_procs: pytest.skip( f"Skipping test because not enough GPUs are available: {num_procs} required, {get_accelerator().device_count()} available" ) # Set start method to `forkserver` (or `fork`) mp.set_start_method('forkserver', force=True) # Create process pool or use cached one master_port = None if self.reuse_dist_env: if num_procs not in self._pool_cache: self._pool_cache[num_procs] = mp.Pool(processes=num_procs) master_port = get_master_port() pool = self._pool_cache[num_procs] else: pool = mp.Pool(processes=num_procs) master_port = get_master_port() # Run the test args = [(local_rank, num_procs, master_port) for local_rank in range(num_procs)] skip_msgs_async = pool.starmap_async(self._dist_run, args) try: skip_msgs = skip_msgs_async.get(DEEPSPEED_TEST_TIMEOUT) except mp.TimeoutError: # Shortcut to exit pytest in the case of a hanged test. This # usually means an environment error and the rest of tests will # hang (causing super long unit test runtimes) pytest.exit("Test hanged, exiting", returncode=0) # Tear down distributed environment and close process pools self._close_pool(pool, num_procs) # If we skipped a test, propagate that to this process if any(skip_msgs): assert len(set(skip_msgs)) == 1, "Multiple different skip messages received" pytest.skip(skip_msgs[0]) def _dist_run(self, local_rank, num_procs, master_port): skip_msg = '' if not dist.is_initialized(): """ Initialize deepspeed.comm and execute the user function. 
""" if self.set_dist_env: os.environ['MASTER_ADDR'] = '127.0.0.1' os.environ['MASTER_PORT'] = str(master_port) os.environ['LOCAL_RANK'] = str(local_rank) # NOTE: unit tests don't support multi-node so local_rank == global rank os.environ['RANK'] = str(local_rank) os.environ['WORLD_SIZE'] = str(num_procs) # turn off NCCL logging if set os.environ.pop('NCCL_DEBUG', None) if get_accelerator().is_available(): set_accelerator_visible() if self.init_distributed: deepspeed.init_distributed(dist_backend=self.backend) dist.barrier() if get_accelerator().is_available(): get_accelerator().set_device(local_rank) try: self.run(**self._fixture_kwargs) except BaseException as e: if isinstance(e, Skipped): skip_msg = e.msg else: raise e return skip_msg def _dist_destroy(self): if (dist is not None) and dist.is_initialized(): dist.barrier() dist.destroy_process_group() def _close_pool(self, pool, num_procs, force=False): if force or not self.reuse_dist_env: msg = pool.starmap(self._dist_destroy, [() for _ in range(num_procs)]) pool.close() pool.join() class DistributedFixture(DistributedExec): """ Implementation that extends @pytest.fixture to allow for distributed execution. This is primarily meant to be used when a test requires executing two pieces of code with different world sizes. There are 2 parameters that can be modified: - world_size: int = 2 -- the number of processes to launch - backend: Literal['nccl','mpi','gloo'] = 'nccl' -- which backend to use Features: - able to call pytest.skip() inside fixture - can be reused by multiple tests - can accept other fixtures as input Limitations: - cannot use @pytest.mark.parametrize - world_size cannot be modified after definition and only one world_size value is accepted - any fixtures used must also be used in the test that uses this fixture (see example below) - return values cannot be returned. Passing values to a DistributedTest object can be achieved using class_tmpdir and writing to file (see example below) Usage: - must implement a run(self, ...) method - fixture can be used by making the class name input to a test function Example: @pytest.fixture(params=[10,20]) def regular_pytest_fixture(request): return request.param class distributed_fixture_example(DistributedFixture): world_size = 4 def run(self, regular_pytest_fixture, class_tmpdir): assert int(os.environ["WORLD_SIZE"]) == self.world_size local_rank = os.environ["LOCAL_RANK"] print(f"Rank {local_rank} with value {regular_pytest_fixture}") with open(os.path.join(class_tmpdir, f"{local_rank}.txt"), "w") as f: f.write(f"{local_rank},{regular_pytest_fixture}") class TestExample(DistributedTest): world_size = 1 def test(self, distributed_fixture_example, regular_pytest_fixture, class_tmpdir): assert int(os.environ["WORLD_SIZE"]) == self.world_size for rank in range(4): with open(os.path.join(class_tmpdir, f"{rank}.txt"), "r") as f: assert f.read() == f"{rank},{regular_pytest_fixture}" """ is_dist_fixture = True # These values are just placeholders so that pytest recognizes this as a fixture _pytestfixturefunction = FixtureFunctionMarker(scope="function", params=None) __name__ = "" def __init__(self): assert isinstance(self.world_size, int), "Only one world size is allowed for distributed fixtures" self.__name__ = type(self).__name__ _pytestfixturefunction = FixtureFunctionMarker(scope="function", params=None, name=self.__name__) class DistributedTest(DistributedExec): """ Implementation for running pytest with distributed execution. 
There are 2 parameters that can be modified: - world_size: Union[int,List[int]] = 2 -- the number of processes to launch - backend: Literal['nccl','mpi','gloo'] = 'nccl' -- which backend to use Features: - able to call pytest.skip() inside tests - works with pytest fixtures, parametrize, mark, etc. - can contain multiple tests (each of which can be parametrized separately) - class methods can be fixtures (usable by tests in this class only) - world_size can be changed for individual tests using @pytest.mark.world_size(world_size) - class_tmpdir is a fixture that can be used to get a tmpdir shared among all tests (including DistributedFixture) Usage: - class name must start with "Test" - must implement one or more test*(self, ...) methods Example: @pytest.fixture(params=[10,20]) def val1(request): return request.param @pytest.mark.fast @pytest.mark.parametrize("val2", [30,40]) class TestExample(DistributedTest): world_size = 2 @pytest.fixture(params=[50,60]) def val3(self, request): return request.param def test_1(self, val1, val2, str1="hello world"): assert int(os.environ["WORLD_SIZE"]) == self.world_size assert all(val1, val2, str1) @pytest.mark.world_size(1) @pytest.mark.parametrize("val4", [70,80]) def test_2(self, val1, val2, val3, val4): assert int(os.environ["WORLD_SIZE"]) == 1 assert all(val1, val2, val3, val4) """ is_dist_test = True # Temporary directory that is shared among test methods in a class @pytest.fixture(autouse=True, scope="class") def class_tmpdir(self, tmpdir_factory): fn = tmpdir_factory.mktemp(self.__class__.__name__) return fn def run(self, **fixture_kwargs): self._current_test(**fixture_kwargs) def __call__(self, request): self._current_test = self._get_current_test_func(request) self._fixture_kwargs = self._get_fixture_kwargs(request, self._current_test) if self.requires_cuda_env and not get_accelerator().is_available(): pytest.skip("only supported in accelerator environments.") # Catch world_size override pytest mark for mark in getattr(request.function, "pytestmark", []): if mark.name == "world_size": world_size = mark.args[0] break else: world_size = self.world_size if isinstance(world_size, int): world_size = [world_size] for procs in world_size: self._launch_procs(procs) time.sleep(0.5) def _get_current_test_func(self, request): # DistributedTest subclasses may have multiple test methods func_name = request.function.__name__ return getattr(self, func_name) def get_test_path(filename): curr_path = Path(__file__).parent return str(curr_path.joinpath(filename))
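One detail the docstring examples above do not show: `world_size` may be a list as well as an int, in which case `__call__` launches the test once per entry, and a per-test `@pytest.mark.world_size(...)` overrides the class attribute. A minimal sketch with trivial assertion bodies (the class and test names are illustrative):

```python
import os
import pytest


class TestWorldSizeVariants(DistributedTest):
    # The harness runs each test below once with 1 process and once with 2.
    world_size = [1, 2]

    def test_runs_at_each_size(self):
        assert int(os.environ["WORLD_SIZE"]) in (1, 2)

    @pytest.mark.world_size(4)
    def test_forced_to_four_ranks(self):
        # The mark overrides the class-level world_size for this method only.
        assert int(os.environ["WORLD_SIZE"]) == 4
```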
14,004
36.851351
138
py
DeepSpeed
DeepSpeed-master/tests/unit/util.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import pytest
import torch
import deepspeed
from deepspeed.git_version_info import torch_info


def skip_on_arch(min_arch=7):
    if deepspeed.accelerator.get_accelerator().device_name() == 'cuda':
        if torch.cuda.get_device_capability()[0] < min_arch:  #ignore-cuda
            pytest.skip(f"needs higher compute capability than {min_arch}")
    else:
        assert deepspeed.accelerator.get_accelerator().device_name() == 'xpu'
        return


def skip_on_cuda(valid_cuda):
    split_version = lambda x: map(int, x.split('.')[:2])
    if deepspeed.accelerator.get_accelerator().device_name() == 'cuda':
        CUDA_MAJOR, CUDA_MINOR = split_version(torch_info['cuda_version'])
        CUDA_VERSION = (CUDA_MAJOR * 10) + CUDA_MINOR
        if valid_cuda.count(CUDA_VERSION) == 0:
            pytest.skip(f"requires cuda versions {valid_cuda}")
    else:
        assert deepspeed.accelerator.get_accelerator().device_name() == 'xpu'
        return


def required_torch_version():
    TORCH_MAJOR = int(torch.__version__.split('.')[0])
    TORCH_MINOR = int(torch.__version__.split('.')[1])

    # torch >= 1.8 is required; compare (major, minor) as a pair so that a
    # release such as torch 2.0 is not rejected just because its minor
    # version is smaller than 8.
    if TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 8):
        return True
    else:
        return False


def bf16_required_version_check(accelerator_check=True):
    split_version = lambda x: map(int, x.split('.')[:2])
    TORCH_MAJOR, TORCH_MINOR = split_version(torch_info['version'])
    NCCL_MAJOR, NCCL_MINOR = split_version(torch_info['nccl_version'])
    CUDA_MAJOR, CUDA_MINOR = split_version(torch_info['cuda_version'])

    # Sometimes bf16 tests are runnable even if not natively supported by accelerator
    if accelerator_check:
        accelerator_pass = torch_info['bf16_support']
    else:
        accelerator_pass = True

    if (TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10)) and (CUDA_MAJOR >= 11) and (
            NCCL_MAJOR > 2 or (NCCL_MAJOR == 2 and NCCL_MINOR >= 10)) and accelerator_pass:
        return True
    else:
        return False


def required_minimum_torch_version(major_version, minor_version):
    TORCH_MAJOR = int(torch.__version__.split('.')[0])
    TORCH_MINOR = int(torch.__version__.split('.')[1])

    if TORCH_MAJOR < major_version:
        return False

    return TORCH_MAJOR > major_version or TORCH_MINOR >= minor_version


def required_maximum_torch_version(major_version, minor_version):
    TORCH_MAJOR = int(torch.__version__.split('.')[0])
    TORCH_MINOR = int(torch.__version__.split('.')[1])

    if TORCH_MAJOR > major_version:
        return False

    return TORCH_MAJOR < major_version or TORCH_MINOR <= minor_version


def required_amp_check():
    from importlib.util import find_spec
    if find_spec('apex') is None:
        return False
    else:
        return True
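These version helpers are normally used as guards at the top of a test. A minimal sketch of the usual pattern, with a placeholder body (the test name and skip message are illustrative assumptions):

```python
import pytest


def test_some_bf16_feature():
    # Skip unless torch >= 1.10, CUDA >= 11, NCCL >= 2.10 and the accelerator
    # reports bf16 support -- exactly the conditions bf16_required_version_check() verifies.
    if not bf16_required_version_check():
        pytest.skip("this test requires torch >= 1.10, CUDA >= 11, NCCL >= 2.10 "
                    "and a bf16-capable accelerator")
    ...  # actual test body would go here
```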
2,840
31.284091
97
py
DeepSpeed
DeepSpeed-master/tests/unit/modelingpreln.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from __future__ import absolute_import, division, print_function, unicode_literals # Copyright The Microsoft DeepSpeed Team # DeepSpeed note, code taken from commit 3d59216cec89a363649b4fe3d15295ba936ced0f # https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/modeling.py # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model.""" import copy import json import logging import math import os import shutil import tarfile import tempfile from io import open import torch from torch import nn from torch.nn import CrossEntropyLoss from torch.utils import checkpoint import deepspeed.comm as dist from torch.nn import Module import torch.nn.functional as F import torch.nn.init as init from deepspeed.accelerator import get_accelerator #from numba import cuda #from deepspeed_cuda import DeepSpeedSoftmaxConfig, DeepSpeedSoftmax logger = logging.getLogger(__name__) PRETRAINED_MODEL_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz", } CONFIG_NAME = 'bert_config.json' WEIGHTS_NAME = 'pytorch_model.bin' TF_WEIGHTS_NAME = 'model.ckpt' def load_tf_weights_in_bert(model, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model """ try: import re import numpy as np import tensorflow as tf except ImportError: print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(tf_checkpoint_path) print("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: print("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split('/') # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any(n in ["adam_v", "adam_m"] for n in name): print("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+_\d+', m_name): l = re.split(r'_(\d+)', m_name) else: l = [m_name] if l[0] == 'kernel' or l[0] == 'gamma': pointer = getattr(pointer, 'weight') elif l[0] == 'output_bias' or l[0] == 'beta': pointer = getattr(pointer, 'bias') elif l[0] == 'output_weights': pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model """ @torch.jit.script def f_gelu(x): return x * 0.5 * (1.0 + torch.erf(x / 1.41421)) @torch.jit.script def bias_gelu(bias, y): x = bias + y return x * 0.5 * (1.0 + torch.erf(x / 1.41421)) @torch.jit.script def bias_tanh(bias, y): x = bias + y return torch.tanh(x) """ def f_gelu(x): x_type = x.dtype x = x.float() x = x * 0.5 * (1.0 + torch.erf(x / 1.41421)) return x.to(x_type) def bias_gelu(bias, y): y_type = y.dtype x = bias.float() + y.float() x = x * 0.5 * (1.0 + torch.erf(x / 1.41421)) return x.to(y_type) def bias_tanh(bias, y): y_type = y.dtype x = bias.float() + y.float() x = torch.tanh(x) return x.to(y_type) def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return f_gelu(x) def swish(x): return x * torch.sigmoid(x) ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} class GPUTimer: def __init__(self): super().__init__() self.start = get_accelerator().Event() # noqa: F821 self.stop = get_accelerator().Event() # noqa: F821 def record(self): self.start.record() def elapsed(self): self.stop.record() self.stop.synchronize() return self.start.elapsed_time(self.stop) / 1000.0 class LinearActivation(Module): r"""Fused Linear and activation Module. 
""" __constants__ = ['bias'] def __init__(self, in_features, out_features, weights, biases, act='gelu', bias=True): super(LinearActivation, self).__init__() self.in_features = in_features self.out_features = out_features self.fused_gelu = False self.fused_tanh = False if isinstance(act, str): if bias and act == 'gelu': self.fused_gelu = True elif bias and act == 'tanh': self.fused_tanh = True else: self.act_fn = ACT2FN[act] else: self.act_fn = act #self.weight = Parameter(torch.Tensor(out_features, in_features)) self.weight = weights[5] self.bias = biases[5] #if bias: # self.bias = Parameter(torch.Tensor(out_features)) #else: # self.register_parameter('bias', None) #self.reset_parameters() def reset_parameters(self): init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) bound = 1 / math.sqrt(fan_in) init.uniform_(self.bias, -bound, bound) def forward(self, input): if self.fused_gelu: #timing = [] #t1 = GPUTimer() #t1.record() y = F.linear(input, self.weight, None) #timing.append(t1.elapsed()) #t1.record() bg = bias_gelu(self.bias, y) #timing.append(t1.elapsed()) return bg elif self.fused_tanh: return bias_tanh(self.bias, F.linear(input, self.weight, None)) else: return self.act_fn(F.linear(input, self.weight, self.bias)) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self.in_features, self.out_features, self.bias is not None) class BertConfig(object): """Configuration class to store the configuration of a `BertModel`. """ def __init__(self, vocab_size_or_config_json_file, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, batch_size=8, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, fp16=False): """Constructs BertConfig. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported. hidden_dropout_prob: The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `BertModel`. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. 
""" if isinstance(vocab_size_or_config_json_file, str): with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.vocab_size = vocab_size_or_config_json_file self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.batch_size = batch_size self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.fp16 = fp16 else: raise ValueError("First argument must be either a vocabulary size (int)" "or the path to a pretrained model config file (str)") @classmethod def from_dict(cls, json_object): """Constructs a `BertConfig` from a Python dictionary of parameters.""" config = BertConfig(vocab_size_or_config_json_file=-1) for key, value in json_object.items(): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with open(json_file, "r", encoding='utf-8') as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" try: import apex #apex.amp.register_half_function(apex.normalization.fused_layer_norm, 'FusedLayerNorm') import apex.normalization #apex.amp.register_float_function(apex.normalization.FusedLayerNorm, 'forward') BertLayerNorm = apex.normalization.FusedLayerNorm except ImportError: print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.") class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): pdtype = x.dtype x = x.float() u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x.to(pdtype) + self.bias #def forward(self, x): # u = x.mean(-1, keepdim=True) # s = (x - u).pow(2).mean(-1, keepdim=True) # x = (x - u) / torch.sqrt(s + self.variance_epsilon) # return self.weight * x + self.bias class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. 
""" def __init__(self, config): super(BertEmbeddings, self).__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids, token_type_ids=None): seq_length = input_ids.size(1) position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) words_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = words_embeddings + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, i, config, weights, biases): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError("The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.query.weight = weights[0] self.query.bias = biases[0] self.key = nn.Linear(config.hidden_size, self.all_head_size) self.key.weight = weights[1] self.key.bias = biases[1] self.value = nn.Linear(config.hidden_size, self.all_head_size) self.value.weight = weights[2] self.value.bias = biases[2] self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.softmax = nn.Softmax(dim=-1) #self.softmax_config = DeepSpeedSoftmaxConfig() #self.softmax_config.batch_size = config.batch_size #self.softmax_config.max_seq_length = config.max_position_embeddings #self.softmax_config.hidden_size = config.hidden_size #self.softmax_config.heads = config.num_attention_heads #self.softmax_config.softmax_id = i #self.softmax_config.fp16 = config.fp16 #self.softmax_config.prob_drop_out = 0.0 #self.softmax = DeepSpeedSoftmax(i, self.softmax_config) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def transpose_key_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 3, 1) def forward(self, hidden_states, attention_mask, grads=None): #timing = [] #t1 = GPUTimer() #t1.record() mixed_query_layer = self.query(hidden_states) #timing.append(t1.elapsed()) #print("Query elapsed: %s" % (time.clock() - start)) #t1.record() mixed_key_layer = self.key(hidden_states) #timing.append(t1.elapsed()) #print("Key elapsed: %s" % (time.clock() - start)) #t1.record() mixed_value_layer = self.value(hidden_states) #timing.append(t1.elapsed()) #print("Value elapsed: %s" % (time.clock() - start)) #t1.record() 
query_layer = self.transpose_for_scores(mixed_query_layer) # print(query_layer) #timing.append(t1.elapsed()) #print("Query-Transform elapsed: %s" % (time.clock() - start)) #t1.record() key_layer = self.transpose_key_for_scores(mixed_key_layer) # print(key_layer) #timing.append(t1.elapsed()) #print("Key-Transform elapsed: %s" % (time.clock() - start)) #t1.record() value_layer = self.transpose_for_scores(mixed_value_layer) #print(value_layer) #timing.append(t1.elapsed()) #print("Value-Transform elapsed: %s" % (time.clock() - start)) # Take the dot product between "query" and "key" to get the raw attention scores. #t1.record() #print(query_layer.shape) #print(key_layer.shape) attention_scores = torch.matmul(query_layer, key_layer) #print(attention_scores.shape) attention_scores = attention_scores / math.sqrt(self.attention_head_size) #print("Pytorch: ", attention_scores) #timing.append(t1.elapsed()) #print("Attention-Score elapsed: %s" % (time.clock() - start)) # Apply the attention mask is (precomputed for all layers in BertModel forward() function) #t1.record() # context_layer = self.softmax(query_layer, key_layer, value_layer, attention_mask) #print("context shape is :", context_layer.shape) #print("Cuda-ext:, ", attention_scores1) # Normalize the attention scores to probabilities. ####attention_probs = self.softmax(attention_scores) #timing.append(t1.elapsed()) #print("Softmax elapsed: %s" % (time.clock() - start)) #t1 = GPUTimer() #t1.record() attention_scores = attention_scores + attention_mask attention_probs = self.softmax(attention_scores) #attention_scores = self.softmax(attention_scores, attention_mask) #print("Softmax elapse {0:8.2f} ms", t1.elapsed() * 1000) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) #t1.record() context_layer = torch.matmul(attention_probs, value_layer) #timing.append(t1.elapsed()) #print("Context elapsed: %s" % (time.clock() - start)) #t1.record() #context_layer1 = context_layer.permute( # 0, 1, 3, 2, 4).contiguous() #if grads is not None: # context_layer.register_hook(lambda x, self = self : grads.append([x, "Context"])) context_layer1 = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer1.size()[:-2] + (self.all_head_size, ) context_layer1 = context_layer1.view(*new_context_layer_shape) #timing.append(t1.elapsed()) #print("Context-Transform elapsed: %s" % (time.clock() - start)) if grads is not None: query_layer.register_hook(lambda x, self=self: grads.append([x, "Query"])) key_layer.register_hook(lambda x, self=self: grads.append([x, "Key"])) value_layer.register_hook(lambda x, self=self: grads.append([x, "Value"])) return context_layer1 class BertSelfOutput(nn.Module): def __init__(self, config, weights, biases): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dense.weight = weights[3] self.dense.bias = biases[3] self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): #timing = [] #t1 = GPUTimer() #t1.record() hidden_states = self.dense(hidden_states) #timing.append(t1.elapsed()) #print("Attention Output elapsed: %s" % (time.clock() - start)) hidden_states = self.dropout(hidden_states) #t1.record() #hidden_states = self.LayerNorm(hidden_states + input_tensor) #timing.append(t1.elapsed()) #print("LayerNorm elapsed: %s" % (time.clock() - start)) return hidden_states def get_w(self): return self.dense.weight class BertAttention(nn.Module): def __init__(self, i, config, weights, biases): super(BertAttention, self).__init__() self.self = BertSelfAttention(i, config, weights, biases) self.output = BertSelfOutput(config, weights, biases) def forward(self, input_tensor, attention_mask): self_output = self.self(input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output def get_w(self): return self.output.get_w() class BertIntermediate(nn.Module): def __init__(self, config, weights, biases): super(BertIntermediate, self).__init__() self.dense_act = LinearActivation(config.hidden_size, config.intermediate_size, weights, biases, act=config.hidden_act) def forward(self, hidden_states): hidden_states = self.dense_act(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config, weights, biases): super(BertOutput, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dense.weight = weights[6] self.dense.bias = biases[6] self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): #timing = [] #t1 = GPUTimer() #t1.record() #print (hidden_states) #print (self.dense.weight) hidden_states = self.dense(hidden_states) #timing.append(t1.elapsed()) #print("FF2 elapsed: %s" % (time.clock() - start)) hidden_states = self.dropout(hidden_states) #t1.record() #hidden_states = self.LayerNorm(hidden_states + input_tensor) #timing.append(t1.elapsed()) #print("LayerNorm elapsed: %s" % (time.clock() - start)) return hidden_states class BertLayer(nn.Module): def __init__(self, i, config, weights, biases): super(BertLayer, 
self).__init__() self.attention = BertAttention(i, config, weights, biases) self.PreAttentionLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.PostAttentionLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.intermediate = BertIntermediate(config, weights, biases) self.output = BertOutput(config, weights, biases) self.weight = weights self.biases = biases def forward(self, hidden_states, attention_mask, grads, collect_all_grads=False): input_layer_norm = self.PreAttentionLayerNorm(hidden_states) attention_output = self.attention(input_layer_norm, attention_mask) #print ("hidden shape is :", hidden_states.shape) intermediate_input = hidden_states + attention_output intermediate_layer_norm = self.PostAttentionLayerNorm(intermediate_input) intermediate_output = self.intermediate(intermediate_layer_norm) layer_output = self.output(intermediate_output, attention_output) #attention_output = self.attention(hidden_states, attention_mask) #intermediate_output = self.intermediate(attention_output) #layer_output = self.output(intermediate_output, attention_output) if collect_all_grads: # self.weight[0].register_hook(lambda x, self=self: grads.append([x,"Q_W"])) # self.biases[0].register_hook(lambda x, self=self: grads.append([x,"Q_B"])) # self.weight[1].register_hook(lambda x, self=self: grads.append([x,"K_W"])) # self.biases[1].register_hook(lambda x, self=self: grads.append([x,"K_B"])) self.weight[2].register_hook(lambda x, self=self: grads.append([x, "V_W"])) self.biases[2].register_hook(lambda x, self=self: grads.append([x, "V_B"])) self.weight[3].register_hook(lambda x, self=self: grads.append([x, "O_W"])) self.biases[3].register_hook(lambda x, self=self: grads.append([x, "O_B"])) self.PostAttentionLayerNorm.weight.register_hook(lambda x, self=self: grads.append([x, "N2_W"])) self.PostAttentionLayerNorm.bias.register_hook(lambda x, self=self: grads.append([x, "N2_B"])) self.weight[5].register_hook(lambda x, self=self: grads.append([x, "int_W"])) self.biases[5].register_hook(lambda x, self=self: grads.append([x, "int_B"])) self.weight[6].register_hook(lambda x, self=self: grads.append([x, "out_W"])) self.biases[6].register_hook(lambda x, self=self: grads.append([x, "out_B"])) self.PreAttentionLayerNorm.weight.register_hook(lambda x, self=self: grads.append([x, "norm_W"])) self.PreAttentionLayerNorm.bias.register_hook(lambda x, self=self: grads.append([x, "norm_B"])) return layer_output + intermediate_input def get_w(self): return self.attention.get_w() class BertEncoder(nn.Module): def __init__(self, config, weights, biases): super(BertEncoder, self).__init__() #layer = BertLayer(config, weights, biases) self.FinalLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.layer = nn.ModuleList( [copy.deepcopy(BertLayer(i, config, weights, biases)) for i in range(config.num_hidden_layers)]) self.grads = [] self.graph = [] def get_grads(self): return self.grads # def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True): # all_encoder_layers = [] # for layer_module in self.layer: # hidden_states = layer_module(hidden_states, attention_mask) # if output_all_encoded_layers: # all_encoder_layers.append(hidden_states) # if not output_all_encoded_layers: # all_encoder_layers.append(hidden_states) # return all_encoder_layers def get_modules(self, big_node, input): for mdl in big_node.named_children(): self.graph.append(mdl) self.get_modules(self, mdl, input) def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, 
checkpoint_activations=False): all_encoder_layers = [] def custom(start, end): def custom_forward(*inputs): layers = self.layer[start:end] x_ = inputs[0] for layer in layers: x_ = layer(x_, inputs[1]) return x_ return custom_forward if checkpoint_activations: l = 0 num_layers = len(self.layer) chunk_length = math.ceil(math.sqrt(num_layers)) while l < num_layers: hidden_states = checkpoint.checkpoint(custom(l, l + chunk_length), hidden_states, attention_mask * 1) l += chunk_length # decoder layers else: for i, layer_module in enumerate(self.layer): hidden_states = layer_module(hidden_states, attention_mask, self.grads, collect_all_grads=True) hidden_states.register_hook(lambda x, i=i, self=self: self.grads.append([x, "hidden_state"])) #print("pytorch weight is: ", layer_module.get_w()) if output_all_encoded_layers: all_encoder_layers.append((hidden_states)) if not output_all_encoded_layers or checkpoint_activations: hidden_states = self.FinalLayerNorm(hidden_states) all_encoder_layers.append((hidden_states)) return all_encoder_layers #class BertEncoder(nn.Module): # def __init__(self, config): # super(BertEncoder, self).__init__() # layer = BertLayer(config) # self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)]) # # def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True): # all_encoder_layers = [] # for layer_module in self.layer: # hidden_states = layer_module(hidden_states, attention_mask) # if output_all_encoded_layers: # all_encoder_layers.append(hidden_states) # if not output_all_encoded_layers: # all_encoder_layers.append(hidden_states) # return all_encoder_layers class BertPooler(nn.Module): def __init__(self, config): super(BertPooler, self).__init__() self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act="tanh") def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense_act(first_token_tensor) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act=config.hidden_act) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) def forward(self, hidden_states): hidden_states = self.dense_act(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertLMPredictionHead, self).__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.decoder = nn.Linear(bert_model_embedding_weights.size(1), bert_model_embedding_weights.size(0), bias=False) self.decoder.weight = bert_model_embedding_weights self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0))) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) get_accelerator().range_push("decoder input.size() = {}, weight.size() = {}".format( hidden_states.size(), self.decoder.weight.size())) hidden_states = self.decoder(hidden_states) + self.bias get_accelerator().range_pop() return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertOnlyMLMHead, self).__init__() self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super(BertOnlyNSPHead, self).__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertPreTrainingHeads, self).__init__() self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BertPreTrainedModel(nn.Module): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ def __init__(self, config, *inputs, **kwargs): super(BertPreTrainedModel, self).__init__() if not isinstance(config, BertConfig): raise ValueError("Parameter config in `{}(config)` should be an instance of class `BertConfig`. " "To create a model from a Google pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__)) self.config = config def init_bert_weights(self, module): """ Initialize the weights. """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @classmethod def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs): """ Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `bert-base-uncased` . `bert-large-uncased` . `bert-base-cased` . `bert-large-cased` . `bert-base-multilingual-uncased` . `bert-base-multilingual-cased` . `bert-base-chinese` - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . 
`pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `model.chkpt` a TensorFlow checkpoint from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models *inputs, **kwargs: additional input for the specific Bert class (ex: num_labels for BertForSequenceClassification) """ if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] else: archive_file = pretrained_model_name_or_path if resolved_archive_file == archive_file: # noqa: F821 logger.info("loading archive file {}".format(archive_file)) else: logger.info("loading archive file {} from cache at {}".format(archive_file, resolved_archive_file)) # noqa: F821 tempdir = None if os.path.isdir(resolved_archive_file) or from_tf: # noqa: F821 serialization_dir = resolved_archive_file # noqa: F821 else: # Extract archive to temp dir tempdir = tempfile.mkdtemp() logger.info("extracting archive file {} to temp dir {}".format( resolved_archive_file, # noqa: F821 tempdir)) with tarfile.open(resolved_archive_file, 'r:gz') as archive: # noqa: F821 archive.extractall(tempdir) serialization_dir = tempdir # Load config config_file = os.path.join(serialization_dir, CONFIG_NAME) config = BertConfig.from_json_file(config_file) logger.info("Model config {}".format(config)) # Instantiate model. model = cls(config, *inputs, **kwargs) if state_dict is None and not from_tf: weights_path = os.path.join(serialization_dir, WEIGHTS_NAME) state_dict = torch.load(weights_path, map_location='cpu' if not get_accelerator().is_available() else None) if tempdir: # Clean up temp dir shutil.rmtree(tempdir) if from_tf: # Directly load from a TensorFlow checkpoint weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME) return load_tf_weights_in_bert(model, weights_path) # Load from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') start_prefix = '' if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()): start_prefix = 'bert.' 
load(model, prefix=start_prefix) if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( model.__class__.__name__, "\n\t".join(error_msgs))) return model class BertModel(BertPreTrainedModel): """BERT model ("Bidirectional Embedding Representations from a Transformer"). Params: config: a BertConfig class instance with the configuration to build a new model Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`. Outputs: Tuple of (encoded_layers, pooled_output) `encoded_layers`: controlled by `output_all_encoded_layers` argument: - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size], - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding to the last attention block of shape [batch_size, sequence_length, hidden_size], `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a classifier pretrained on top of the hidden state associated to the first character of the input (`CLS`) to train on the Next-Sentence task (see BERT's paper). 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = modeling.BertModel(config=config) all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertModel, self).__init__(config) self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True, checkpoint_activations=False): if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 embedding_output = self.embeddings(input_ids, token_type_ids) encoded_layers = self.encoder(embedding_output, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers, checkpoint_activations=checkpoint_activations) sequence_output = encoded_layers[-1] pooled_output = self.pooler(sequence_output) if not output_all_encoded_layers: encoded_layers = encoded_layers[-1] return encoded_layers, pooled_output class BertForPreTraining(BertPreTrainedModel): """BERT model with pre-training heads. This module comprises the BERT model followed by the two pre-training heads: - the masked language modeling head, and - the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. 
`masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. Outputs: if `masked_lm_labels` and `next_sentence_label` are not `None`: Outputs the total_loss which is the sum of the masked language modeling loss and the next sentence classification loss. if `masked_lm_labels` or `next_sentence_label` is `None`: Outputs a tuple comprising - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and - the next sentence classification logits of shape [batch_size, 2]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForPreTraining(config) masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, args): super(BertForPreTraining, self).__init__(config) self.summary_writer = None if dist.get_rank() == 0: self.summary_writer = args.summary_writer self.samples_per_step = dist.get_world_size() * args.train_batch_size self.sample_count = self.samples_per_step self.bert = BertModel(config) self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def log_summary_writer(self, logs: dict, base='Train'): if dist.get_rank() == 0: module_name = "Samples" #self._batch_module_name.get(batch_type, self._get_batch_type_error(batch_type)) for key, log in logs.items(): self.summary_writer.add_scalar(f'{base}/{module_name}/{key}', log, self.sample_count) self.sample_count += self.samples_per_step def forward(self, batch, log=True): #input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, checkpoint_activations=False): input_ids = batch[1] token_type_ids = batch[3] attention_mask = batch[2] masked_lm_labels = batch[5] next_sentence_label = batch[4] checkpoint_activations = False sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, checkpoint_activations=checkpoint_activations) prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) if masked_lm_labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) #print("loss is {} {}".format(masked_lm_loss, next_sentence_loss)) total_loss = masked_lm_loss + next_sentence_loss # if log: # self.log_summary_writer(logs={'train_loss': total_loss.item()}) return total_loss else: return prediction_scores, seq_relationship_score class BertForMaskedLM(BertPreTrainedModel): """BERT model with the masked language modeling head. 
This module comprises the BERT model followed by the masked language modeling head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] Outputs: if `masked_lm_labels` is not `None`: Outputs the masked language modeling loss. if `masked_lm_labels` is `None`: Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForMaskedLM(config) masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForMaskedLM, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, checkpoint_activations=False): sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) prediction_scores = self.cls(sequence_output) if masked_lm_labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) return masked_lm_loss else: return prediction_scores class BertForNextSentencePrediction(BertPreTrainedModel): """BERT model with next sentence prediction head. This module comprises the BERT model followed by the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). 
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. Outputs: if `next_sentence_label` is not `None`: Outputs the total_loss which is the sum of the masked language modeling loss and the next sentence classification loss. if `next_sentence_label` is `None`: Outputs the next sentence classification logits of shape [batch_size, 2]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForNextSentencePrediction(config) seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForNextSentencePrediction, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyNSPHead(config) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None, checkpoint_activations=False): _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) seq_relationship_score = self.cls(pooled_output) if next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) return next_sentence_loss else: return seq_relationship_score class BertForSequenceClassification(BertPreTrainedModel): """BERT model for classification. This module is composed of the BERT model with a linear layer on top of the pooled output. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_labels`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_labels]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_labels = 2 model = BertForSequenceClassification(config, num_labels) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_labels): super(BertForSequenceClassification, self).__init__(config) self.num_labels = num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False): _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return loss else: return logits class BertForMultipleChoice(BertPreTrainedModel): """BERT model for multiple choice tasks. This module is composed of the BERT model with a linear layer on top of the pooled output. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_choices`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_choices]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]]) input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]]) token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_choices = 2 model = BertForMultipleChoice(config, num_choices) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_choices): super(BertForMultipleChoice, self).__init__(config) self.num_choices = num_choices self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False): flat_input_ids = input_ids.view(-1, input_ids.size(-1)) flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, self.num_choices) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) return loss else: return reshaped_logits class BertForTokenClassification(BertPreTrainedModel): """BERT model for token-level classification. This module is composed of the BERT model with a linear layer on top of the full hidden state of the last layer. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_labels`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, ..., num_labels]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, sequence_length, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_labels = 2 model = BertForTokenClassification(config, num_labels) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_labels): super(BertForTokenClassification, self).__init__(config) self.num_labels = num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False): sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return loss else: return logits class BertForQuestionAnswering(BertPreTrainedModel): """BERT model for Question Answering (span extraction). This module is composed of the BERT model with a linear layer on top of the sequence output that computes start_logits and end_logits Params: `config`: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size]. Positions are clamped to the length of the sequence and position outside of the sequence are not taken into account for computing the loss. `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size]. Positions are clamped to the length of the sequence and position outside of the sequence are not taken into account for computing the loss. Outputs: if `start_positions` and `end_positions` are not `None`: Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions. 
if `start_positions` or `end_positions` is `None`: Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end position tokens of shape [batch_size, sequence_length]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForQuestionAnswering(config) start_logits, end_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForQuestionAnswering, self).__init__(config) self.bert = BertModel(config) # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version # self.dropout = nn.Dropout(config.hidden_dropout_prob) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None, checkpoint_activations=False): sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return total_loss else: return start_logits, end_logits
74,569
45.030864
141
py
DeepSpeed
DeepSpeed-master/tests/unit/megatron_model.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
import os
import sys
import math

from .common import get_test_path
from deepspeed.pipe import PipelineModule, LayerSpec
from deepspeed.accelerator import get_accelerator


def get_megatron_version():
    p = os.popen("pip list --format=columns | grep megatron-lm")
    pip_list = p.read()
    assert 'megatron-lm' in pip_list, 'Please install Megatron-LM before getting its version'
    ver_str = pip_list.split()[1]
    return float(ver_str[0])


def get_gpt2_model(args_others, mp_size=1):
    from megatron.model import GPT2Model
    from megatron.initialize import initialize_megatron

    args_defaults = {
        'vocab_file': get_test_path('gpt2-vocab.json'),
        'merge_file': get_test_path('gpt2-merges.txt'),
        'tokenizer_type': 'GPT2BPETokenizer',
    }

    args_defaults.update(args_others)

    # setting "make-vocab-size-divisible-by" to avoid word-embedding size change in resizing testing.
    sys.argv.extend(['--model-parallel-size', str(mp_size), '--make-vocab-size-divisible-by', str(1)])

    initialize_megatron(args_defaults=args_defaults, ignore_unknown_args=True)
    model = GPT2Model(num_tokentypes=0, parallel_output=False)
    model.to(get_accelerator().device_name())
    from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
    from megatron import mpu
    i = get_accelerator().current_device_name()
    model = torchDDP(model, device_ids=[i], output_device=i, process_group=mpu.get_data_parallel_group())

    return model


class MockGPT2ModelPipe(PipelineModule):

    def __init__(self, num_layers, mp_size, args_others, topo, **kwargs):
        from megatron.initialize import initialize_megatron

        args_defaults = {
            'vocab_file': get_test_path('gpt2-vocab.json'),
            'merge_file': get_test_path('gpt2-merges.txt'),
            'tokenizer_type': 'GPT2BPETokenizer',
        }

        args_defaults.update(args_others)

        # setting "make-vocab-size-divisible-by" to avoid word-embedding size change in resizing testing.
        sys.argv.extend(['--model-parallel-size', str(mp_size), '--make-vocab-size-divisible-by', str(1)])

        initialize_megatron(args_defaults=args_defaults, ignore_unknown_args=True)

        from megatron.model.transformer import ParallelTransformerLayer

        class ParallelTransformerLayerPipe(ParallelTransformerLayer):

            def forward(self, args):
                # hardcode attn mask for testing, PP requires the attn_mask to be stashed
                attention_mask = torch.tensor([[True]], device=get_accelerator().current_device_name())
                return super().forward(args, attention_mask)

        layers = []
        for x in range(num_layers):
            layers.append(
                LayerSpec(ParallelTransformerLayerPipe, self.gpt2_attention_mask_func, self.init_method_normal(0.02),
                          self.scaled_init_method_normal(0.02, num_layers), x))
        super().__init__(layers=layers, loss_fn=torch.nn.CrossEntropyLoss(), topology=topo, **kwargs)

    def gpt2_attention_mask_func(self, attention_scores, ltor_mask):
        attention_scores.masked_fill_(ltor_mask, -10000.0)
        return attention_scores

    def init_method_normal(self, sigma):
        """Init method based on N(0, sigma)."""

        def init_(tensor):
            return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)

        return init_

    def scaled_init_method_normal(self, sigma, num_layers):
        """Init method based on N(0, sigma/sqrt(2*num_layers)."""
        std = sigma / math.sqrt(2.0 * num_layers)

        def init_(tensor):
            return torch.nn.init.normal_(tensor, mean=0.0, std=std)

        return init_
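# --- Illustrative usage sketch (editor-added, assumption-laden) ---
# A minimal way the helper above might be driven from a Megatron-LM test. The
# option names and values in `_example_args` are illustrative guesses at
# Megatron-LM arguments, not values taken from the tests in this repository,
# and a Megatron-LM install plus an initialized distributed backend is assumed.
def _example_get_tiny_gpt2():
    _example_args = {
        'num_layers': 2,              # assumed Megatron option name
        'hidden_size': 64,            # assumed Megatron option name
        'num_attention_heads': 2,     # assumed Megatron option name
        'max_position_embeddings': 128,
        'seq_length': 128,
    }
    # model-parallel size 1 keeps the sketch runnable on a single device
    return get_gpt2_model(_example_args, mp_size=1)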
3,816
35.701923
117
py
DeepSpeed
DeepSpeed-master/tests/unit/__init__.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
140
22.5
44
py
DeepSpeed
DeepSpeed-master/tests/unit/multi_output_model.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch


class MultiOutputModel(torch.nn.Module):

    def __init__(self, hidden_dim, weight_value):
        super(MultiOutputModel, self).__init__()
        self.linear = torch.nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.linear.weight.data.fill_(weight_value)
        self.cross_entropy_loss = torch.nn.CrossEntropyLoss()

    def forward(self, inputs, targets):
        losses = []
        for x, y in zip(inputs, targets):
            hidden_dim = self.linear(x)
            loss = self.cross_entropy_loss(hidden_dim, y)
            losses.append(loss)
        return tuple(losses)


def multi_output_dataloader(model, total_samples, hidden_dim, device, inputs, targets):
    assert len(inputs) == len(targets)
    batch_size = model.train_micro_batch_size_per_gpu()

    train_data = [
        torch.full(size=(total_samples, hidden_dim),
                   fill_value=x,
                   device=device,
                   dtype=torch.half,
                   requires_grad=True) for x in inputs
    ]

    train_label = [torch.empty(total_samples, device=device, dtype=torch.long).fill_(y) for y in targets]

    train_dataset = torch.utils.data.TensorDataset(*train_data, *train_label)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size)

    return train_loader
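# --- Illustrative usage sketch (editor-added, assumption-laden) ---
# How the helpers above are typically combined once a MultiOutputModel has been
# wrapped by deepspeed.initialize, which is what provides
# train_micro_batch_size_per_gpu(), backward() and step(). The constants below
# are illustrative and not taken from any specific test.
def _example_multi_output_step(engine, device):
    inputs = [1.0, 2.0]   # constant fill values, one per input stream
    targets = [1, 2]      # constant labels, one per output head
    loader = multi_output_dataloader(model=engine,
                                     total_samples=4,
                                     hidden_dim=10,
                                     device=device,
                                     inputs=inputs,
                                     targets=targets)
    for batch in loader:
        micro_inputs = batch[:len(inputs)]
        micro_targets = batch[len(inputs):]
        losses = engine(micro_inputs, micro_targets)  # tuple of per-stream losses
        engine.backward(sum(losses))
        engine.step()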
1,352
32.825
119
py
DeepSpeed
DeepSpeed-master/tests/unit/checkpoint/test_zero_optimizer.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import deepspeed from deepspeed.ops.op_builder import CPUAdamBuilder from deepspeed.checkpoint.utils import clone_tensors_for_torch_save from deepspeed.accelerator import get_accelerator from unit.common import DistributedTest, DistributedFixture from unit.simple_model import * from unit.util import required_minimum_torch_version from unit.checkpoint.common import * import pytest class TestZeROCheckpoint(DistributedTest): world_size = 2 @pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer', [(1, False, 'Adam'), (2, False, 'Adam'), (2, True, 'deepspeed_adam'), (3, False, 'Adam'), (3, True, 'deepspeed_adam')]) def test_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": 'Adam', "params": { "lr": 0.00015, "betas": [0.8, 0.999], "eps": 1e-8, "weight_decay": 3e-7 } }, "fp16": { "enabled": True, "initial_scale_power": 8 }, "wall_clock_breakdown": True, "zero_optimization": { "stage": zero_stage, "cpu_offload": use_cpu_offload } } hidden_dim = 10 if zero_stage == 3: with deepspeed.zero.Init(): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=True) @pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer', [(1, False, "Adam"), (2, False, "Adam"), (2, True, 'deepspeed_adam'), (3, False, 'Adam'), (3, True, 'deepspeed_adam')]) def test_not_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": 'Adam', "params": { "lr": 0.00015, "betas": [0.8, 0.999], "eps": 1e-8, "weight_decay": 3e-7 } }, "fp16": { "enabled": True }, "zero_optimization": { "stage": zero_stage, "cpu_offload": use_cpu_offload } } hidden_dim = 10 if zero_stage == 3: global DeepSpeedZeroOptimizer_Stage3 from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 with deepspeed.zero.Init(): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False) @pytest.mark.parametrize('zero_stage', [1, 2]) def test_hybrid_optimizer_state(self, tmpdir, zero_stage): config_dict = { "train_micro_batch_size_per_gpu": 2, "gradient_accumulation_steps": 2, "steps_per_print": 1, "zero_optimization": { "stage": zero_stage }, "zero_allow_untested_optimizer": True, "fp16": { "enabled": True, "initial_scale_power": 8 } } hidden_dim = 10 models = [SimpleModel(hidden_dim=hidden_dim) for _ in range(2)] optimizers = [HybridStateOptimizer(model.parameters()) for model in models] checkpoint_correctness_verification(config_dict, models=models, base_optimizers=optimizers, hidden_dim=hidden_dim, tmpdir=tmpdir, load_optimizer_states=True) @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) def test_load_module_only(self, tmpdir, zero_stage): config_dict = { "train_batch_size": 2, 
"optimizer": { "type": 'Adam' }, "fp16": { "enabled": True, "initial_scale_power": 8 }, "zero_optimization": { "stage": zero_stage, } } hidden_dim = 10 if zero_stage == 3: with deepspeed.zero.Init(): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True) class ws4_model_checkpoint(DistributedFixture): world_size = 4 def run(self, class_tmpdir, elastic_save, load_optim): ds_config = { "train_batch_size": 4, "optimizer": { "type": 'Adam' }, "fp16": { "enabled": True, "initial_scale_power": 8 }, "zero_optimization": { "stage": 2, "elastic_checkpoint": elastic_save } } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=8, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() if load_optim: torch.save(model.optimizer.optimizer.state_dict(), os.path.join(class_tmpdir, 'opt-state-dict')) model.save_checkpoint(class_tmpdir) @pytest.mark.parametrize("elastic_save", [True, False]) @pytest.mark.parametrize("elastic_load", [True, False]) @pytest.mark.parametrize("load_optim", [True, False]) class TestZeROElasticCheckpoint(DistributedTest): world_size = 2 def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, load_optim): ds_config = { "train_batch_size": 2, "optimizer": { "type": 'Adam' }, "fp16": { "enabled": True, "initial_scale_power": 8 }, "zero_optimization": { "stage": 2, "elastic_checkpoint": elastic_save } } hidden_dim = 10 # torch 1.2.* stores raw tensor id numbers in checkpoint state which leads to # false positive mismatches in checkpoint state comparisons. # Newer torch versions store tensor ids as 0, 1, 2, ... 
expected_mismatch_keys = [] if required_minimum_torch_version(1, 4) else ['params'] models = [SimpleModel(hidden_dim) for _ in range(2)] model, _, _, _ = deepspeed.initialize(config=ds_config, model=models[0], model_parameters=models[0].parameters()) data_loader = random_dataloader(model=model, total_samples=8, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() if load_optim: torch.save(model.optimizer.optimizer.state_dict(), os.path.join(tmpdir, 'opt-state-dict')) model.save_checkpoint(tmpdir) ds_config["zero_optimization"]["elastic_checkpoint"] = elastic_load model, _, _, _ = deepspeed.initialize(config=ds_config, model=models[1], model_parameters=models[1].parameters()) model.load_checkpoint(tmpdir, load_optimizer_states=load_optim) if load_optim: saved_sd = torch.load(os.path.join(tmpdir, 'opt-state-dict')) curr_sd = model.optimizer.optimizer.state_dict() for curr_param_group, saved_param_group in zip(curr_sd['param_groups'], saved_sd['param_groups']): compare_state_dicts(curr_param_group, saved_param_group, expected_mismatch_keys) data_loader = random_dataloader(model=model, total_samples=8, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() def test_elastic_checkpoint_change_dp(self, ws4_model_checkpoint, class_tmpdir, elastic_save, elastic_load, load_optim): ds_config = { "train_batch_size": 4, "optimizer": { "type": 'Adam' }, "fp16": { "enabled": True, "initial_scale_power": 8 }, "zero_optimization": { "stage": 2, "elastic_checkpoint": elastic_load } } hidden_dim = 10 model = SimpleModel(hidden_dim) # Load checkpoint with dp world size = 2 model, _, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model.parameters()) if load_optim: with pytest.raises(deepspeed.runtime.zero.utils.ZeRORuntimeException): model.load_checkpoint(class_tmpdir, load_optimizer_states=load_optim) else: model.load_checkpoint(class_tmpdir, load_optimizer_states=load_optim) class TestZeROSaveLoadEdgeCase(DistributedTest): world_size = 2 @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) def test_immediate_save_load(self, tmpdir, zero_stage): config_dict = { "train_batch_size": 4, "optimizer": { "type": 'Adam' }, "fp16": { "enabled": True, "initial_scale_power": 8 }, "zero_optimization": { "stage": zero_stage, } } hidden_dim = 10 model = SimpleModel(hidden_dim) ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None) ds_model.save_checkpoint(tmpdir) ds_model.load_checkpoint(tmpdir, load_optimizer_states=False, load_lr_scheduler_states=False, load_module_only=False) @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) def test_load_immediate_save(self, tmpdir, zero_stage): config_dict = { "train_batch_size": 4, "optimizer": { "type": 'Adam' }, "fp16": { "enabled": True, "initial_scale_power": 8 }, "zero_optimization": { "stage": zero_stage, } } hidden_dim = 10 model = SimpleModel(hidden_dim) # 1. pretrain a model and save it dtype = torch.half ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None) data_loader = random_dataloader(model=ds_model, total_samples=1, hidden_dim=hidden_dim, device=ds_model.device, dtype=dtype) for _, batch in enumerate(data_loader): loss = ds_model(batch[0], batch[1]) ds_model.backward(loss) ds_model.step() ds_model.empty_partition_cache() ds_model.save_checkpoint(tmpdir) # 2. 
load and immediately save a model with a fresh ds engine ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None) ds_model.load_checkpoint(tmpdir, load_optimizer_states=False, load_lr_scheduler_states=False, load_module_only=False) ds_model.save_checkpoint(tmpdir) @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage): config_dict = { "optimizer": { "type": 'Adam' }, "fp16": { "enabled": True, "initial_scale_power": 8 }, "zero_optimization": { "stage": zero_stage, "stage3_gather_fp16_weights_on_model_save": True, }, "gradient_accumulation_steps": 2, "train_micro_batch_size_per_gpu": 1, "train_batch_size": 4, } hidden_dim = 10 model = SimpleModel(hidden_dim) # This test reproduces a bug where one tries to retrieve a 16bit model before grad_accum # cycle was completed. # So we config grad_accum=2 and step only once and save_16bit_model ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None) data_loader = random_dataloader(model=ds_model, total_samples=2, hidden_dim=hidden_dim, device=ds_model.device, dtype=torch.half) batch = next(iter(data_loader)) loss = ds_model(batch[0], batch[1]) ds_model.backward(loss) ds_model.step() ds_model.empty_partition_cache() # we stepped only once, and now save 16bit model before gradient_accumulation_steps=2 is complete ds_model.save_16bit_model(tmpdir, "model.pt") # let's test just as well that we can save the checkpoint too ds_model.save_checkpoint(tmpdir) class TestZeROCheckpointFrozenWeights(DistributedTest): world_size = 2 @pytest.mark.parametrize('zero_stage', [1, 2, 3]) def test_load_optimizer_state(self, tmpdir, zero_stage): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": 'Adam', "params": { "lr": 0.00015, "betas": [0.8, 0.999], "eps": 1e-8, "weight_decay": 3e-7 } }, "fp16": { "enabled": True, "initial_scale_power": 8 }, "wall_clock_breakdown": True, "zero_optimization": { "stage": zero_stage } } hidden_dim = 10 with deepspeed.zero.Init(enabled=zero_stage == 3): models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)] checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=True) @pytest.mark.parametrize('zero_stage', [1, 2, 3]) def test_not_load_optimizer_state(self, tmpdir, zero_stage): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": 'Adam', "params": { "lr": 0.00015, "betas": [0.8, 0.999], "eps": 1e-8, "weight_decay": 3e-7 } }, "fp16": { "enabled": True }, "zero_optimization": { "stage": zero_stage } } hidden_dim = 10 with deepspeed.zero.Init(enabled=zero_stage == 3): models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)] checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False) @pytest.mark.parametrize('zero_stage', [1, 2, 3]) def test_load_module_only(self, tmpdir, zero_stage): config_dict = { "train_batch_size": 2, "optimizer": { "type": 'Adam' }, "fp16": { "enabled": True, "initial_scale_power": 8 }, "zero_optimization": { "stage": zero_stage, } } hidden_dim = 10 with deepspeed.zero.Init(enabled=zero_stage == 3): models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)] checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True) class TestSaveTensorClone(DistributedTest): world_size = 1 @pytest.mark.parametrize('zero_stage', [1, 2]) 
@pytest.mark.parametrize('use_cpu_device', [True, False]) def test_save_tensor_clone(self, tmpdir, zero_stage, use_cpu_device): ds_config = { "optimizer": { "type": "AdamW", }, "zero_optimization": { "stage": zero_stage }, "train_batch_size": 1, "train_micro_batch_size_per_gpu": 1 } hidden_dim = 1024 model = SimpleModel(hidden_dim, nlayers=4).half() ref_model_state_dict = model.state_dict() ds_engine, _, _, _ = deepspeed.initialize(model=model, config_params=ds_config) clone_device = torch.device('cpu') if use_cpu_device else get_accelerator().current_device() clone_state_dict = clone_tensors_for_torch_save(ds_engine.module.state_dict()) compare_state_dicts(ref_model_state_dict, clone_state_dict) ref_ckpt_file = os.path.join(tmpdir, 'ref_ckpt.pt') torch.save(ref_model_state_dict, ref_ckpt_file) clone_ckpt_file = os.path.join(tmpdir, 'clone_ckpt.pt') torch.save(clone_state_dict, clone_ckpt_file) compare_state_dicts(torch.load(ref_ckpt_file), torch.load(clone_ckpt_file))
19,385
37.161417
116
py
DeepSpeed
DeepSpeed-master/tests/unit/checkpoint/test_other_optimizer.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import deepspeed
from deepspeed.ops.op_builder import FusedLambBuilder

from unit.common import DistributedTest
from unit.simple_model import *
from unit.checkpoint.common import checkpoint_correctness_verification

import pytest


class TestOtherOptimizerCheckpoint(DistributedTest):
    world_size = 2

    @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible")
    def test_checkpoint_unfused_optimizer(self, tmpdir):
        config_dict = {
            "train_batch_size": 2,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Lamb",
                "params": {
                    "lr": 0.00015
                }
            },
            "gradient_clipping": 1.0,
            "fp16": {
                "enabled": True
            },
            "scheduler": {
                "type": "OneCycle",
                "params": {
                    "cycle_first_step_size": 1000,
                    "cycle_first_stair_count": 500,
                    "cycle_second_step_size": 1000,
                    "cycle_second_stair_count": 500,
                    "decay_step_size": 1000,
                    "cycle_min_lr": 0.0001,
                    "cycle_max_lr": 0.0010,
                    "decay_lr_rate": 0.001,
                    "cycle_min_mom": 0.85,
                    "cycle_max_mom": 0.99,
                    "decay_mom_rate": 0.0
                }
            }
        }
        args = args_from_dict(tmpdir, config_dict)
        hidden_dim = 10
        models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]

        # Load & verify optimizer states
        checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim,
                                            tmpdir=tmpdir, load_optimizer_states=True)

        # Ignore optimizer states
        checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim,
                                            tmpdir=tmpdir, load_optimizer_states=False)

    def test_checkpoint_fused_optimizer(self, tmpdir):
        config_dict = {
            "train_batch_size": 2,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 0.00015,
                    "betas": [0.8, 0.999],
                    "eps": 1e-8,
                    "weight_decay": 3e-7
                }
            },
            "fp16": {
                "enabled": True
            }
        }
        args = args_from_dict(tmpdir, config_dict)
        hidden_dim = 10
        models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]

        # Load & verify optimizer states
        checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim,
                                            tmpdir=tmpdir, load_optimizer_states=True)

        # Ignore optimizer states
        checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim,
                                            tmpdir=tmpdir, load_optimizer_states=False)

    def test_checkpoint_fp32_optimizer(self, tmpdir):
        config_dict = {
            "train_batch_size": 2,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 0.00015,
                    "betas": [0.8, 0.999],
                    "eps": 1e-8,
                    "weight_decay": 3e-7
                }
            },
            "fp16": {
                "enabled": False
            }
        }
        args = args_from_dict(tmpdir, config_dict)
        hidden_dim = 10
        models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]

        checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim,
                                            tmpdir=tmpdir, fp16=False)
4,750
34.721805
117
py
DeepSpeed
DeepSpeed-master/tests/unit/checkpoint/test_mics_optimizer.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

import deepspeed

from unit.common import DistributedTest
from unit.simple_model import *
from unit.checkpoint.common import *

import pytest


class TestMiCSCheckpoint(DistributedTest):
    world_size = 4

    def _toy_model_config(self, shard_size):
        config_dict = {
            "train_micro_batch_size_per_gpu": 2,
            "steps_per_print": 1,
            "optimizer": {
                "type": 'Adam',
                "params": {
                    "lr": 0.00015,
                    "betas": [0.8, 0.999],
                    "eps": 1e-8,
                    "weight_decay": 3e-7
                }
            },
            "fp16": {
                "enabled": True,
                "initial_scale_power": 8
            },
            "wall_clock_breakdown": True,
            "zero_optimization": {
                "stage": 3,
                "mics_shard_size": shard_size
            }
        }

        hidden_dim = 10
        with deepspeed.zero.MiCS_Init(config_dict_or_path=config_dict):
            models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]

        return config_dict, hidden_dim, models

    @pytest.mark.parametrize('shard_size', [1, 2, 4])
    def test_load_optimizer_state(self, tmpdir, shard_size):
        config_dict, hidden_dim, models = self._toy_model_config(shard_size)
        checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=True)

    @pytest.mark.parametrize('shard_size', [1, 2, 4])
    def test_not_load_optimizer_state(self, tmpdir, shard_size):
        config_dict, hidden_dim, models = self._toy_model_config(shard_size)
        checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False)

    @pytest.mark.parametrize('shard_size', [1, 2, 4])
    def test_load_module_only(self, tmpdir, shard_size):
        config_dict, hidden_dim, models = self._toy_model_config(shard_size)
        checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True)
2,265
32.820896
113
py
DeepSpeed
DeepSpeed-master/tests/unit/checkpoint/test_pipeline.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine from unit.common import DistributedTest from unit.simple_model import * from unit.checkpoint.common import checkpoint_correctness_verification from unit.util import skip_on_arch import pytest class TestPipelineCheckpoint(DistributedTest): world_size = 4 @pytest.mark.parametrize("zero_stage", [0, 1]) def test_checkpoint_pipe_engine(self, zero_stage, tmpdir): skip_on_arch(min_arch=7) config_dict = { "train_batch_size": 2, "train_micro_batch_size_per_gpu": 1, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 1e-5 } }, "zero_optimization": { "stage": zero_stage }, "fp16": { "enabled": zero_stage > 0 }, "scheduler": { "type": "OneCycle", "params": { "cycle_first_step_size": 1000, "cycle_first_stair_count": 500, "cycle_second_step_size": 1000, "cycle_second_stair_count": 500, "decay_step_size": 1000, "cycle_min_lr": 0.0001, "cycle_max_lr": 0.0010, "decay_lr_rate": 0.001, "cycle_min_mom": 0.85, "cycle_max_mom": 0.99, "decay_mom_rate": 0.0 } } } models = [LinearStackPipe(num_stages=2) for _ in range(2)] checkpoint_correctness_verification(config_dict=config_dict, models=models, hidden_dim=models[0].hidden_dim, tmpdir=tmpdir, fp16=config_dict['fp16']['enabled'], load_optimizer_states=True, load_lr_scheduler_states=True, train_batch=True) @pytest.mark.parametrize( "base_topo,test_topo", [ #(PipeTopo(num_pp=1, # num_dp=4), # PipeTopo(num_pp=4, # num_dp=1)), #(PipeTopo(num_pp=2, # num_dp=2), # PipeTopo(num_pp=2, # num_dp=2)), #(PipeTopo(num_pp=4, # num_dp=1), # PipeTopo(num_pp=2, # num_dp=2)), ]) def test_checkpoint_pipe_module(self, base_topo, test_topo, tmpdir): checkpoint_engine = TorchCheckpointEngine() base_model = LinearStackPipe(topology=base_topo) base_model.save_state_dict(tmpdir, checkpoint_engine=checkpoint_engine) dist.barrier() test_model = LinearStackPipe(topology=test_topo) test_model.load_state_dir(tmpdir, checkpoint_engine=checkpoint_engine) # Base and test can have different lengths, so make sure we map from the # smaller to larger model if len(base_model.forward_funcs) < len(test_model.forward_funcs): A = base_model B = test_model else: A = test_model B = base_model # Compare layers individually since partitions are different for idx, A_layer in enumerate(A.forward_funcs): if not hasattr(A_layer, 'parameters'): # Skip functionals, etc. continue # Find the corresponding layer in B global_idx = idx + A._local_start B_local_idx = global_idx - B._local_start B_layer = B.forward_funcs[B_local_idx] # Compare layer parameters for p0, p1 in zip(A_layer.parameters(), B_layer.parameters()): assert torch.allclose(p0, p1, atol=1e-07), f"Model state {p0} is not equal to {p1}"
4,207
35.591304
99
py
DeepSpeed
DeepSpeed-master/tests/unit/checkpoint/test_lr_scheduler.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import deepspeed from deepspeed.ops.op_builder import CPUAdamBuilder from unit.common import DistributedTest from unit.simple_model import * from unit.checkpoint.common import checkpoint_correctness_verification import pytest @pytest.mark.parametrize('zero_stage, use_cpu_offload', [(0, False), (1, False), (2, False), (2, True), (3, False), (3, True)]) class TestLRSchedulerCheckpoint(DistributedTest): world_size = 2 def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": 'Adam', "params": { "lr": 0.00015, "betas": [0.8, 0.999], "eps": 1e-8, "weight_decay": 3e-7 } }, "fp16": { "enabled": True }, "zero_optimization": { "stage": zero_stage, "cpu_offload": use_cpu_offload }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": 0, "warmup_max_lr": 0.001, "warmup_num_steps": 1000 } } } hidden_dim = 10 if zero_stage == 3: global DeepSpeedZeroOptimizer_Stage3 from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 with deepspeed.zero.Init(): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False, load_lr_scheduler_states=True) def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": 'Adam', "params": { "lr": 1e-5 } }, "fp16": { "enabled": True }, "zero_optimization": { "stage": zero_stage, "cpu_offload": use_cpu_offload }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": 0, "warmup_max_lr": 0.001, "warmup_num_steps": 1000 } }, } hidden_dim = 10 if zero_stage == 3: with deepspeed.zero.Init(): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False, load_lr_scheduler_states=False)
3,998
34.078947
115
py
DeepSpeed
DeepSpeed-master/tests/unit/checkpoint/test_tag_validation.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import deepspeed

from unit.common import DistributedTest
from unit.simple_model import *

import pytest


class TestCheckpointValidationTag(DistributedTest):
    world_size = 2

    @pytest.mark.parametrize('valid_mode', ["FAIL", "WARN", "IGNORE"])
    def test_checkpoint_unique_tag(self, tmpdir, valid_mode):
        config_dict = {
            "train_batch_size": 2,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 0.00015
                }
            },
            "checkpoint": {
                "tag_validation": valid_mode
            }
        }
        hidden_dim = 10
        model = SimpleModel(hidden_dim)

        model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
        if valid_mode == "FAIL":
            with pytest.raises(AssertionError):
                model.save_checkpoint(save_dir=tmpdir, tag=f"tag-{dist.get_rank()}")
        else:
            model.save_checkpoint(save_dir=tmpdir, tag=f"tag-{dist.get_rank()}")

    def test_checkpoint_unknown_tag_validation(self, tmpdir):
        config_dict = {
            "train_batch_size": 2,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 0.00015
                }
            },
            "checkpoint": {
                "tag_validation": "foo"
            }
        }
        hidden_dim = 10
        args = args_from_dict(tmpdir, config_dict)
        model = SimpleModel(hidden_dim)

        with pytest.raises(deepspeed.DeepSpeedConfigError):
            model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
1,893
29.063492
119
py
DeepSpeed
DeepSpeed-master/tests/unit/checkpoint/test_reshape_checkpoint.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from deepspeed.checkpoint import model_3d_desc


def _do_reshape(src_3d, tgt_3d):
    assert src_3d.can_reshape(tgt_3d)
    new_3d_map = src_3d.reshape(tgt_3d)

    assert len(new_3d_map) == tgt_3d.dp_degree
    for new_2d_map in new_3d_map:
        assert new_2d_map.pp_degree == tgt_3d.pp_degree
        assert new_2d_map.tp_degree == tgt_3d.tp_degree

    return new_3d_map


# Specify 3d shape as pp/tp/dp
def test_reshape_222_to_111():
    src_3d = model_3d_desc(pp_degree=2, tp_degree=2, dp_degree=2)
    tgt_3d = model_3d_desc(pp_degree=1, tp_degree=1, dp_degree=1)

    new_3d_map = _do_reshape(src_3d, tgt_3d)

    assert new_3d_map[0].get_data(pp_index=0, tp_index=0) == [0, 4, 1, 5, 2, 6, 3, 7]


def test_reshape_222_to_121():
    src_3d = model_3d_desc(pp_degree=2, tp_degree=2, dp_degree=2)
    tgt_3d = model_3d_desc(pp_degree=1, tp_degree=2, dp_degree=1)

    new_3d_map = _do_reshape(src_3d, tgt_3d)

    assert new_3d_map[0].get_data(pp_index=0, tp_index=0) == [0, 4, 2, 6]
    assert new_3d_map[0].get_data(pp_index=0, tp_index=1) == [1, 5, 3, 7]


def test_reshape_222_to_122():
    src_3d = model_3d_desc(pp_degree=2, tp_degree=2, dp_degree=2)
    tgt_3d = model_3d_desc(pp_degree=1, tp_degree=2, dp_degree=2)

    new_3d_map = _do_reshape(src_3d, tgt_3d)

    assert new_3d_map[0].get_data(pp_index=0, tp_index=0) == [0, 4]
    assert new_3d_map[0].get_data(pp_index=0, tp_index=1) == [1, 5]
    assert new_3d_map[1].get_data(pp_index=0, tp_index=0) == [2, 6]
    assert new_3d_map[1].get_data(pp_index=0, tp_index=1) == [3, 7]


def test_reshape_222_to_211():
    src_3d = model_3d_desc(pp_degree=2, tp_degree=2, dp_degree=2)
    tgt_3d = model_3d_desc(pp_degree=2, tp_degree=1, dp_degree=1)

    new_3d_map = _do_reshape(src_3d, tgt_3d)

    assert new_3d_map[0].get_data(pp_index=0, tp_index=0) == [0, 4, 1, 5]
    assert new_3d_map[0].get_data(pp_index=1, tp_index=0) == [2, 6, 3, 7]
2,006
31.901639
85
py
DeepSpeed
DeepSpeed-master/tests/unit/checkpoint/test_latest_checkpoint.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import deepspeed

from unit.common import DistributedTest
from unit.simple_model import *
from unit.checkpoint.common import checkpoint_correctness_verification


class TestLatestCheckpoint(DistributedTest):
    world_size = 1

    def test_existing_latest(self, tmpdir):
        config_dict = {
            "train_batch_size": 2,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 0.00015
                }
            }
        }
        hidden_dim = 10
        models = [SimpleModel(hidden_dim=hidden_dim) for _ in range(2)]
        checkpoint_correctness_verification(config_dict=config_dict,
                                            models=models,
                                            hidden_dim=hidden_dim,
                                            tmpdir=tmpdir,
                                            load_optimizer_states=True,
                                            load_lr_scheduler_states=False,
                                            fp16=False,
                                            empty_tag=True)

    def test_missing_latest(self, tmpdir):
        config_dict = {
            "train_batch_size": 2,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 0.00015
                }
            }
        }
        hidden_dim = 10
        model = SimpleModel(hidden_dim)
        model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
        # should be no-op, since latest doesn't exist
        model.load_checkpoint(tmpdir)
1,811
31.945455
115
py
DeepSpeed
DeepSpeed-master/tests/unit/checkpoint/test_sparse.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import deepspeed from unit.common import DistributedTest from unit.simple_model import * import pytest class TestSparseCheckpoint(DistributedTest): world_size = 2 @pytest.mark.parametrize(["to_save_model_has_embedding", "to_save_model_sparse"], [ [False, False], [True, False], [True, True], ]) @pytest.mark.parametrize(["destination_has_embedding", "destination_sparse"], [ [False, False], [True, False], [True, True], ]) def test_non_strict_load_sparse(self, tmpdir, to_save_model_has_embedding, to_save_model_sparse, destination_has_embedding, destination_sparse): class ModelNoEmbedding(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(3, 1) def forward(self, x): return self.linear(x) class ModelEmbedding(torch.nn.Module): def __init__(self): super().__init__() self.emb = torch.nn.Embedding(10, 3) self.linear = torch.nn.Linear(3, 1) def forward(self, x, offsets): return self.linear(self.emb(x, offsets)) if to_save_model_has_embedding: model_to_save = ModelEmbedding() else: model_to_save = ModelNoEmbedding() if destination_has_embedding: model_destination = ModelEmbedding() else: model_destination = ModelNoEmbedding() engine_to_save, _, _, _ = deepspeed.initialize(model=model_to_save, config={ "train_batch_size": 2, "sparse_gradients": to_save_model_sparse }) engine_destination, _, _, _ = deepspeed.initialize(model=model_destination, config={ "train_batch_size": 2, "sparse_gradients": destination_sparse }) save_folder = os.path.join(tmpdir, 'saved_checkpoint') save_tag = '1' engine_to_save.save_checkpoint(save_folder, tag=save_tag) is_sparse_destination = isinstance(model_destination, ModelEmbedding) and destination_sparse if isinstance(model_destination, ModelEmbedding) and model_destination.emb.sparse: assert "emb.weight" in engine_destination.sparse_tensor_module_names engine_destination.load_checkpoint(save_folder, tag=save_tag, load_module_strict=False, load_optimizer_states=False, load_lr_scheduler_states=False, load_module_only=False) if isinstance(model_destination, ModelEmbedding) and isinstance(model_to_save, ModelEmbedding): assert engine_destination.sparse_tensor_module_names == engine_to_save.sparse_tensor_module_names elif isinstance(model_destination, ModelEmbedding): assert not is_sparse_destination or "emb.weight" in engine_destination.sparse_tensor_module_names else: assert len(engine_destination.sparse_tensor_module_names) == 0
3,721
40.820225
109
py
DeepSpeed
DeepSpeed-master/tests/unit/checkpoint/common.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import os import torch import numbers import deepspeed from deepspeed.runtime.zero.stage_1_and_2 import DeepSpeedZeroOptimizer from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus from unit.simple_model import * def compare_deepspeed_states(saved_model, loaded_model): # These are compared in more depth in other places assert hasattr(loaded_model, 'module') assert saved_model.sparse_tensor_module_names == loaded_model.sparse_tensor_module_names assert saved_model.skipped_steps == loaded_model.skipped_steps assert saved_model.global_steps == loaded_model.global_steps def zero3_params_to_fetch(param_list): return [p for p in param_list if hasattr(p, 'ds_id') and p.ds_status == ZeroParamStatus.NOT_AVAILABLE] def compare_model_states(saved_model, loaded_model, compare_optimizer=True, load_module_only=False): if not load_module_only: compare_deepspeed_states(saved_model, loaded_model) params_to_fetch = zero3_params_to_fetch( list(saved_model.module.named_parameters()) + list(loaded_model.module.named_parameters())) enable_gather = len(params_to_fetch) > 0 with deepspeed.zero.GatheredParameters(params_to_fetch, enabled=enable_gather): for p0, p1 in zip(saved_model.module.named_parameters(), loaded_model.module.named_parameters()): np0, p0 = p0 np1, p1 = p1 if 'deepspeed_moe.gate.wg' in np0: # these params are converted to float at runtime, cast to half for comparison p1 = p1.half() p0 = p0.half() assert id(p0) != id(p1), f'Comparing fp16 model state tensor against itself : {id(p0)} <====> {id(p1)}' try: assert torch.allclose(p0, p1, atol=1e-07), f"FP16 model state {p0} is not equal to {p1}, names:{np0}, {np1}" except RuntimeError as err: print(f"FP16 model state {p0} is not equal to {p1}, names:{np0}, {np1}") raise err if not compare_optimizer: return if DeepSpeedZeroOptimizer_Stage3 is not None and isinstance(saved_model.optimizer, DeepSpeedZeroOptimizer_Stage3): for p0, p1 in zip(saved_model.optimizer.fp32_partitioned_groups_flat, loaded_model.optimizer.fp32_partitioned_groups_flat): assert torch.allclose(p0, p1, atol=1e-07), f"Fp32 model states {p0} is not equal to {p1}" elif isinstance(saved_model.optimizer, DeepSpeedZeroOptimizer): for p0, p1 in zip(saved_model.optimizer.single_partition_of_fp32_groups, loaded_model.optimizer.single_partition_of_fp32_groups): assert id(p0) != id(p1), f'Comparing fp32 model state tensor against itself: {id(p0)} <====> {id(p1)}' assert torch.allclose(p0, p1, atol=1e-07), f"Fp32 model states {p0} is not equal to {p1}" elif isinstance(saved_model.optimizer, FP16_Optimizer): for p0, p1 in zip(saved_model.optimizer.fp32_groups_flat, loaded_model.optimizer.fp32_groups_flat): assert id(p0) != id(p1), f'Comparing fp32 model state tensor against itself: {id(p0)} <====> {id(p1)}' assert torch.allclose(p0, p1, atol=1e-07), f"FP32 model states {p0} is not equal to {p1}" elif isinstance(saved_model.optimizer, FP16_UnfusedOptimizer): for params0, params1 in zip(saved_model.optimizer.fp32_groups, loaded_model.optimizer.fp32_groups): for p0, p1 in zip(params0, params1): assert id(p0) != id(p1), f'Comparing fp32 model state tensor against itself: {id(p0)} <====> {id(p1)}' assert torch.allclose(p0, p1, atol=1e-07), f"FP32 model states {p0} is not equal to 
{p1}" elif isinstance(saved_model.optimizer, torch.optim.Optimizer): pass else: assert False, f'Unexpected Optimizer Type: {saved_model.optimizer}' def compare_state_dicts(state0, state1, expected_mismatch_keys=[]): for (k0, s0), (k1, s1) in zip(state0.items(), state1.items()): assert k0 == k1, f'failure due to key mismatch {k0} != {k1}' if k0 in expected_mismatch_keys: continue if isinstance(s0, torch.Tensor) and isinstance(s1, torch.Tensor): assert id(s0) != id(s1), f'Comparing optimizer state tensor against itself: {id(s0)} <====> {id(s1)}' assert torch.equal(s0.to('cpu'), s1.to('cpu')) else: assert s0 == s1, f'failures with keys = {k0}, {k1}, values = {type(s0[0])} and {type(s1[0])}' def compare_optimizer_states(saved_model, loaded_model, hidden_dim, fp16=True): saved_optimizer = saved_model.optimizer.optimizer if fp16 else saved_model.optimizer loaded_optimizer = loaded_model.optimizer.optimizer if fp16 else loaded_model.optimizer for state0, state1 in zip(saved_optimizer.state.values(), loaded_optimizer.state.values()): compare_state_dicts(state0, state1) def compare_lr_scheduler_states(saved_model, loaded_model): assert hasattr(saved_model, 'lr_scheduler') assert hasattr(loaded_model, 'lr_scheduler') saved_scheduler = saved_model.lr_scheduler loaded_scheduler = loaded_model.lr_scheduler assert hasattr(saved_scheduler, 'state_dict') assert hasattr(loaded_scheduler, 'state_dict') saved_sd = saved_scheduler.state_dict() loaded_sd = loaded_scheduler.state_dict() print(f"saved_sd = {saved_sd}") print(f"loaded_sd = {loaded_sd}") assert saved_sd.keys() == loaded_sd.keys() for state0, state1 in zip(saved_sd.values(), loaded_sd.values()): if isinstance(state0, numbers.Number) and isinstance(state1, numbers.Number): assert state0 == state1 # following mixture-of-experts.md def create_moe_param_groups(model): from deepspeed.moe.utils import split_params_into_different_moe_groups_for_optimizer parameters = {'params': [p for p in model.parameters()], 'name': 'parameters'} return split_params_into_different_moe_groups_for_optimizer(parameters) def create_deepspeed_model(config_dict, model, base_optimizer): ds_model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=create_moe_param_groups(model), optimizer=base_optimizer) ds_model.empty_partition_cache() return ds_model def checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False, load_lr_scheduler_states=False, fp16=True, train_batch=False, base_optimizers=[None, None], empty_tag=False, seq_dataloader=False, load_module_only=False): dtype = torch.half if fp16 else torch.float32 ds_model = create_deepspeed_model(config_dict=config_dict, model=models[0], base_optimizer=base_optimizers[0]) if seq_dataloader: data_loader = sequence_dataloader(model=ds_model, total_samples=50, hidden_dim=hidden_dim, device=ds_model.device, dtype=dtype) else: data_loader = random_dataloader(model=ds_model, total_samples=50, hidden_dim=hidden_dim, device=ds_model.device, dtype=dtype) if train_batch: ds_model.set_dataloader(data_loader) for _, batch in enumerate(data_loader): loss = ds_model.train_batch() else: for _, batch in enumerate(data_loader): loss = ds_model(batch[0], batch[1]) ds_model.backward(loss) ds_model.step() # Flush zero stage 3 cache ds_model.empty_partition_cache() trained_model = ds_model save_folder = os.path.join(tmpdir, 'saved_checkpoint') save_tag = None if empty_tag else '1' trained_model.save_checkpoint(save_folder, tag=save_tag) dist.barrier() for root, _, files in 
os.walk(save_folder): for f in files: if "_expert_" in f and "_model_states" in f: expert = torch.load(os.path.join(root, f)) needed, storages = 0, {} for name, tensor in expert.items(): needed += tensor.size().numel() storage = tensor.storage() # some storage can be shared within an expert's checkpoint storages[storage.data_ptr()] = storage.size() stored = sum(v for _, v in storages.items()) assert needed == stored, f"MoE expert checkpoint uses more storage than required: {f}" loaded_model = create_deepspeed_model(config_dict=config_dict, model=models[1], base_optimizer=base_optimizers[1]) assert list(trained_model.parameters())[0].dtype == list(loaded_model.parameters())[0].dtype loaded_model.load_checkpoint(save_folder, tag=save_tag, load_optimizer_states=load_optimizer_states, load_lr_scheduler_states=load_lr_scheduler_states, load_module_only=load_module_only) compare_model_states(trained_model, loaded_model, compare_optimizer=load_optimizer_states, load_module_only=load_module_only) if load_optimizer_states: compare_optimizer_states(trained_model, loaded_model, hidden_dim, fp16) if load_lr_scheduler_states: compare_lr_scheduler_states(trained_model, loaded_model)
10,525
45.166667
118
py
DeepSpeed
DeepSpeed-master/tests/unit/checkpoint/test_moe_checkpoint.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from deepspeed.moe.utils import split_params_into_different_moe_groups_for_optimizer from unit.common import DistributedTest from unit.simple_model import * from unit.util import required_torch_version from unit.checkpoint.common import checkpoint_correctness_verification import pytest class TestMoECheckpoint(DistributedTest): world_size = 4 @pytest.mark.parametrize("ep_size", [4]) def test_checkpoint_moe(self, tmpdir, ep_size): if not required_torch_version(): pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly") config_dict = {"train_batch_size": 8, "steps_per_print": 1, "fp16": {"enabled": True}} hidden_dim = 16 models = [SimpleMoEModel(hidden_dim=hidden_dim, num_experts=ep_size, ep_size=ep_size) for _ in range(2)] optimizers = [torch.optim.AdamW(params=model.parameters()) for model in models] checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, load_optimizer_states=True, load_lr_scheduler_states=False, fp16=config_dict["fp16"]["enabled"], empty_tag=True, base_optimizers=optimizers, seq_dataloader=True) @pytest.mark.parametrize("ep_size, load_optim_states", [(4, True), (4, False), (2, True), (2, False)]) def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states): if not required_torch_version(): pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly") config_dict = { "train_batch_size": 8, "steps_per_print": 1, "optimizer": { "type": 'Adam', "params": { "lr": 0.00015, "betas": [0.8, 0.999], "eps": 1e-8, "weight_decay": 3e-7 } }, "fp16": { "enabled": True, "initial_scale_power": 8 }, "zero_optimization": { "stage": 2, } } hidden_dim = 16 models = [SimpleMoEModel(hidden_dim=hidden_dim, num_experts=ep_size, ep_size=ep_size) for _ in range(2)] # param group must have a random unique name (for now) # TODO: clean-up this requirement, the unique name should not be required here param_groups = [{'params': [p for p in model.parameters()], 'name': 'random-unique-name'} for model in models] params = [split_params_into_different_moe_groups_for_optimizer(group) for group in param_groups] optimizers = [torch.optim.AdamW(params=param) for param in params] checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, load_optimizer_states=load_optim_states, load_lr_scheduler_states=False, fp16=config_dict["fp16"]["enabled"], empty_tag=True, base_optimizers=optimizers, seq_dataloader=True)
3,817
44.452381
118
py
DeepSpeed
DeepSpeed-master/tests/unit/hybrid_engine/test_he_all.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import os import torch import pytest import deepspeed from deepspeed.ops.op_builder import OpBuilder from unit.common import DistributedTest from transformers import (AutoConfig, AutoTokenizer, AutoModelForCausalLM) rocm_version = OpBuilder.installed_rocm_version() if rocm_version != (0, 0): pytest.skip("skip inference tests on rocm for now", allow_module_level=True) @pytest.mark.seq_inference @pytest.mark.parametrize("batch_size", [1, 2], ids=["bsz=1", "bsz=2"]) @pytest.mark.parametrize("model_name", ["EleutherAI/gpt-neo-1.3B", "facebook/opt-1.3b"]) class TestHybridEngineTextGen(DistributedTest): world_size = 1 def _generate(self, model, tokenizer, prompt): local_rank = int(os.getenv("LOCAL_RANK", "0")) tokens = tokenizer.batch_encode_plus(prompt, return_tensors="pt", padding=True) for t in tokens: if torch.is_tensor(tokens[t]): tokens[t] = tokens[t].to(f'cuda:{local_rank}') output = model.generate(**tokens, do_sample=False, max_length=100) outputs = tokenizer.batch_decode(output, skip_special_tokens=True) return outputs def get_model(self, model_name): local_rank = int(os.getenv("LOCAL_RANK", "0")) model_config = AutoConfig.from_pretrained(model_name) model_config.dropout = 0.0 model = AutoModelForCausalLM.from_pretrained(model_name, config=model_config) model = model.half() model = model.to(f'cuda:{local_rank}') return model def get_tokenizer(self, model_name): tokenizer = AutoTokenizer.from_pretrained(model_name) tokenizer.pad_token = tokenizer.eos_token return tokenizer def get_prompt(self, batch_size): if batch_size == 1: prompt = ["Microsoft is in Washington"] elif batch_size == 2: prompt = ["DeepSpeed is", "Microsoft is in Washington"] else: raise NotImplementedError(f"batch_size {batch_size} not implemented") return prompt def test_correctness(self, batch_size, model_name): pytest.skip("skip test for now, will fix in follow-up PR") model = self.get_model(model_name) tokenizer = self.get_tokenizer(model_name) prompt = self.get_prompt(batch_size) base_out = self._generate(model, tokenizer, prompt) ds_config = {"train_batch_size": 1, "fp16": {"enabled": True}, "hybrid_engine": {"enabled": True}} model, *_ = deepspeed.initialize(model=model, config=ds_config) model.eval() ds1_out = self._generate(model, tokenizer, prompt) assert base_out == ds1_out, f"base_out: {base_out}, ds1_out: {ds1_out}" model.train() model.eval() ds2_out = self._generate(model, tokenizer, prompt) assert base_out == ds2_out def test_functionality(self, batch_size, model_name): model = self.get_model(model_name) tokenizer = self.get_tokenizer(model_name) prompt = self.get_prompt(batch_size) ds_config = {"train_batch_size": 1, "fp16": {"enabled": True}, "hybrid_engine": {"enabled": True}} model, *_ = deepspeed.initialize(model=model, config=ds_config) model.eval() ds1_out = self._generate(model, tokenizer, prompt) model.train() model.eval() ds2_out = self._generate(model, tokenizer, prompt) assert ds1_out == ds2_out, f"ds1_out: {ds1_out}, ds2_out: {ds2_out}"
3,557
36.452632
106
py
DeepSpeed
DeepSpeed-master/tests/unit/hybrid_engine/test_he_lora.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import os import math import torch import torch.nn.functional as F import pytest import deepspeed from deepspeed.runtime.zero import GatheredParameters from deepspeed.ops.op_builder import OpBuilder from deepspeed.utils import safe_get_full_grad import numpy.testing as npt from unit.common import DistributedTest from transformers import (AutoConfig, AutoTokenizer, AutoModelForCausalLM) rocm_version = OpBuilder.installed_rocm_version() if rocm_version != (0, 0): pytest.skip("skip inference tests on rocm for now", allow_module_level=True) def to_device(batch, device): output = {} for k, v in batch.items(): try: output[k] = v.to(device) except: output[k] = v return output def convert_linear_layer_to_lora(model, part_module_name, lora_dim=0, lora_scaling=1, lora_droppout=0): from deepspeed.compression.helper import recursive_getattr, recursive_setattr repalce_name = [] for name, module in model.named_modules(): if isinstance(module, torch.nn.Linear) and part_module_name in name: repalce_name.append(name) for name in repalce_name: module = recursive_getattr(model, name) tmp = LinearLayer_LoRA(module.weight, lora_dim, lora_scaling, lora_droppout, module.bias).to(module.weight.device).to(module.weight.dtype) recursive_setattr(model, name, tmp) return model class LinearLayer_LoRA(torch.nn.Module): # an simple implementation of LoRA # for now only support Linear Layer def __init__(self, weight, lora_dim=0, lora_scaling=1, lora_droppout=0, bias=None): super(LinearLayer_LoRA, self).__init__() self.weight = weight self.bias = bias if lora_dim <= 0: raise ValueError("You are training to use LoRA, whose reduced dim should be larger than 1") try: # for zero stage 3 rows, columns = weight.ds_shape except: rows, columns = weight.shape self.lora_right_weight = torch.nn.Parameter(torch.zeros( columns, lora_dim)) # apply transpose so in forward we do not need to transpose again self.lora_left_weight = torch.nn.Parameter(torch.zeros(lora_dim, rows)) self.lora_scaling = lora_scaling / lora_dim if lora_droppout > 0: self.lora_dropout = torch.nn.Dropout(lora_droppout) else: self.lora_dropout = torch.nn.Identity() self.reset_parameters() # disable the original weight gradient self.weight.requires_grad = False # fuse LoRA to the original weight self.fuse_lora = False def eval(self): self.lora_dropout.eval() def train(self, mode=True): self.lora_dropout.train(mode) def reset_parameters(self): torch.nn.init.kaiming_uniform_(self.lora_right_weight, a=math.sqrt(5)) torch.nn.init.zeros_(self.lora_left_weight) def forward(self, input): if self.fuse_lora: return F.linear(input, self.weight, self.bias) else: return F.linear(input, self.weight, self.bias) + ( self.lora_dropout(input) @ self.lora_right_weight @ self.lora_left_weight) * self.lora_scaling def only_optimize_lora_parameters(model): # turn off the gradient of all the parameters except the LoRA parameters for name, param in model.named_parameters(): if "lora_right_weight" in name or "lora_left_weight" in name: param.requires_grad = True else: param.requires_grad = False return model @pytest.mark.seq_inference @pytest.mark.parametrize("batch_size", [1], ids=["bsz=1"]) @pytest.mark.parametrize("zero_stage", [2, 3], ids=["zero_stage=2", "zero_stage=3"]) @pytest.mark.parametrize("model_name", ["EleutherAI/gpt-neo-125m", "facebook/opt-350m", "bigscience/bloom-560m"]) @pytest.mark.parametrize("offload_device", ["none", "cpu"]) class TestHybridEngineLoRA(DistributedTest): 
world_size = 1 def get_model(self, model_name): local_rank = int(os.getenv("LOCAL_RANK", "0")) model_config = AutoConfig.from_pretrained(model_name) model_config.dropout = 0.0 model = AutoModelForCausalLM.from_pretrained(model_name, config=model_config) model = model.half() model = model.to(f'cuda:{local_rank}') return model def get_tokenizer(self, model_name): tokenizer = AutoTokenizer.from_pretrained(model_name) tokenizer.pad_token = tokenizer.eos_token return tokenizer def get_train_sentences(self, batch_size): sentences = [ r"\n\nHuman: I am trying to write a fairy tale. What is the most popular plot?\n\n" r"Assistant: The most popular plot might be a princess goes to a faraway land, falls in love", r"\n\nHuman: What flowers should I grow to attract bees?\n\nAssistant: The reason you want bees " r"in your garden is to attract pollinators and get more fruit or vegetable production." ] if batch_size <= 2: return sentences[:batch_size] else: raise NotImplementedError(f"batch_size {batch_size} not implemented") def test_lora(self, batch_size, model_name, zero_stage, offload_device): local_rank = int(os.getenv("LOCAL_RANK", "0")) model = self.get_model(model_name) tokenizer = self.get_tokenizer(model_name) train_sentences = self.get_train_sentences(batch_size) # Inject LoRA model = convert_linear_layer_to_lora(model, "", 8) model = only_optimize_lora_parameters(model) ds_config = { "optimizer": { "type": "Adam", "params": { "lr": 1.0, "betas": [0.9, 0.95] } }, "train_batch_size": batch_size, "fp16": { "enabled": True, "initial_scale_power": 12 }, "hybrid_engine": { "enabled": True, "pin_parameters": True }, "zero_optimization": { "stage": zero_stage, "offload_optimizer": { "device": offload_device } } } model, *_ = deepspeed.initialize(model=model, config=ds_config) # Verify gradient norm is larger than 0 before_grad_update_layer0_params = [ ele.detach().cpu().float().numpy() for ele in model.layer_params[0] if ele is not None and len(ele.shape) > 1 ] model.train() batch = tokenizer(train_sentences, max_length=16, padding="max_length", truncation=True, return_tensors="pt") batch = to_device(batch, f'cuda:{local_rank}') batch["labels"] = batch["input_ids"] outputs = model(**batch, use_cache=False) loss = outputs.loss model.backward(loss) grad_norm_dict = dict() for name, param in model.named_parameters(): if param.requires_grad is True: grad_norm_dict[name] = torch.norm(safe_get_full_grad(param)) model.step() grad_norm = sum([ele.detach().cpu().numpy() for ele in grad_norm_dict.values()]) assert grad_norm > 1E-5 # Verify parameter remains the same after_grad_update_layer0_params = [ ele.detach().cpu().float().numpy() for ele in model.layer_params[0] if ele is not None and len(ele.shape) > 1 ] for lhs, rhs in zip(before_grad_update_layer0_params, after_grad_update_layer0_params): npt.assert_allclose(lhs, rhs, 1E-5, 1E-5) # Verify fuse will mutate layer_params model.eval() with GatheredParameters(model.parameters()): model.fuse_lora_weight() after_grad_update_layer0_params_lora_fused = [ ele.detach().cpu().float().numpy() for ele in model.layer_params[0] if ele is not None and len(ele.shape) > 1 ] for lhs, rhs in zip(before_grad_update_layer0_params, after_grad_update_layer0_params_lora_fused): with pytest.raises(AssertionError): npt.assert_allclose(lhs, rhs, 1E-5, 1E-5) with GatheredParameters(model.parameters()): model.unfuse_lora_weight()
8,503
36.298246
117
py
DeepSpeed
DeepSpeed-master/tests/unit/hybrid_engine/test_he_llama.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import os import torch import pytest import deepspeed from deepspeed.ops.op_builder import OpBuilder from unit.common import DistributedTest from transformers import (AutoConfig, AutoTokenizer, AutoModelForCausalLM) rocm_version = OpBuilder.installed_rocm_version() if rocm_version != (0, 0): pytest.skip("skip inference tests on rocm for now", allow_module_level=True) @pytest.mark.seq_inference @pytest.mark.parametrize("batch_size", [1, 2], ids=["bsz=1", "bsz=2"]) @pytest.mark.parametrize("model_name", ["huggyllama/llama-7b"]) class TestHybridEngineLlama(DistributedTest): world_size = 1 def _generate(self, model, tokenizer, prompt): local_rank = int(os.getenv("LOCAL_RANK", "0")) tokens = tokenizer.batch_encode_plus(prompt, return_tensors="pt", padding=True) for t in tokens: if torch.is_tensor(tokens[t]): tokens[t] = tokens[t].to(f'cuda:{local_rank}') #output = model.generate(**tokens, do_sample=False, max_length=100) output = model.generate(tokens.input_ids, do_sample=False, max_length=100) outputs = tokenizer.batch_decode(output, skip_special_tokens=True) return outputs def get_model(self, model_name): local_rank = int(os.getenv("LOCAL_RANK", "0")) model_config = AutoConfig.from_pretrained(model_name) model_config.dropout = 0.0 model = AutoModelForCausalLM.from_pretrained(model_name, config=model_config) # Make the model smaller so we can run it on a single GPU in CI _ = [model.model.layers.pop(-1) for _ in range(8)] model = model.half() model = model.to(f'cuda:{local_rank}') return model def get_tokenizer(self, model_name): tokenizer = AutoTokenizer.from_pretrained(model_name) tokenizer.pad_token = tokenizer.eos_token return tokenizer def get_prompt(self, batch_size): if batch_size == 1: prompt = ["Microsoft is in Washington"] elif batch_size == 2: prompt = ["DeepSpeed is", "Microsoft is in Washington"] else: raise NotImplementedError(f"batch_size {batch_size} not implemented") return prompt def test_correctness(self, batch_size, model_name): pytest.skip("skip test for now, will fix in follow-up PR") model = self.get_model(model_name) tokenizer = self.get_tokenizer(model_name) prompt = self.get_prompt(batch_size) base_out = self._generate(model, tokenizer, prompt) ds_config = {"train_batch_size": 1, "fp16": {"enabled": True}, "hybrid_engine": {"enabled": True}} model, *_ = deepspeed.initialize(model=model, config=ds_config) model.eval() ds1_out = self._generate(model, tokenizer, prompt) assert base_out == ds1_out, f"base_out: {base_out}, ds1_out: {ds1_out}" model.train() model.eval() ds2_out = self._generate(model, tokenizer, prompt) assert base_out == ds2_out def test_functionality(self, batch_size, model_name): model = self.get_model(model_name) tokenizer = self.get_tokenizer(model_name) prompt = self.get_prompt(batch_size) ds_config = {"train_batch_size": 1, "fp16": {"enabled": True}, "hybrid_engine": {"enabled": True}} model, *_ = deepspeed.initialize(model=model, config=ds_config) model.eval() ds1_out = self._generate(model, tokenizer, prompt) model.train() model.eval() ds2_out = self._generate(model, tokenizer, prompt) assert ds1_out == ds2_out, f"ds1_out: {ds1_out}, ds2_out: {ds2_out}"
3,745
37.22449
106
py
DeepSpeed
DeepSpeed-master/tests/unit/profiling/flops_profiler/test_flops_profiler.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
import pytest
import deepspeed
from deepspeed.profiling.flops_profiler import get_model_profile
from unit.simple_model import SimpleModel, random_dataloader
from unit.common import DistributedTest
from unit.util import required_minimum_torch_version

pytestmark = pytest.mark.skipif(not required_minimum_torch_version(major_version=1, minor_version=3),
                                reason='requires Pytorch version 1.3 or above')


def within_range(val, target, tolerance):
    return abs(val - target) / target < tolerance


TOLERANCE = 0.05


class LeNet5(torch.nn.Module):

    def __init__(self, n_classes):
        super(LeNet5, self).__init__()

        self.feature_extractor = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1),
            torch.nn.Tanh(),
            torch.nn.AvgPool2d(kernel_size=2),
            torch.nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1),
            torch.nn.Tanh(),
            torch.nn.AvgPool2d(kernel_size=2),
            torch.nn.Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1),
            torch.nn.Tanh(),
        )

        self.classifier = torch.nn.Sequential(
            torch.nn.Linear(in_features=120, out_features=84),
            torch.nn.Tanh(),
            torch.nn.Linear(in_features=84, out_features=n_classes),
        )

    def forward(self, x):
        x = self.feature_extractor(x)
        x = torch.flatten(x, 1)
        logits = self.classifier(x)
        probs = torch.nn.functional.softmax(logits, dim=1)
        return logits, probs


class TestFlopsProfiler(DistributedTest):
    world_size = 1

    def test(self):
        config_dict = {
            "train_batch_size": 1,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 0.001,
                }
            },
            "zero_optimization": {
                "stage": 0
            },
            "fp16": {
                "enabled": True,
            },
            "flops_profiler": {
                "enabled": True,
                "step": 1,
                "module_depth": -1,
                "top_modules": 3,
            },
        }
        hidden_dim = 10
        model = SimpleModel(hidden_dim, empty_grad=False)
        model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
        data_loader = random_dataloader(model=model,
                                        total_samples=50,
                                        hidden_dim=hidden_dim,
                                        device=model.device,
                                        dtype=torch.half)
        for n, batch in enumerate(data_loader):
            loss = model(batch[0], batch[1])
            model.backward(loss)
            model.step()
            if n == 3:
                break
        assert within_range(model.flops_profiler.flops, 200, tolerance=TOLERANCE)
        assert model.flops_profiler.params == 110

    def test_flops_profiler_in_inference(self):
        mod = LeNet5(10)
        batch_size = 1024
        input = torch.randn(batch_size, 1, 32, 32)
        flops, macs, params = get_model_profile(
            mod,
            tuple(input.shape),
            print_profile=True,
            detailed=True,
            module_depth=-1,
            top_modules=3,
            warm_up=1,
            as_string=False,
            ignore_modules=None,
        )
        print(flops, macs, params)
        assert within_range(flops, 866076672, TOLERANCE)
        assert within_range(macs, 426516480, TOLERANCE)
        assert params == 61706
3,815
31.338983
115
py
DeepSpeed
DeepSpeed-master/tests/unit/compression/test_compression.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch import pytest import random import numpy as np from unit.megatron_model import get_gpt2_model from deepspeed.compression.compress import init_compression from unit.modeling import BertConfig from unit.modelingpreln import BertEncoder as BertEncoderPreln from deepspeed.compression.basic_layer import LinearLayer_Compress, ColumnParallelLinear_Compress, RowParallelLinear_Compress from deepspeed.compression.helper import convert_conv1d_to_linear from deepspeed.accelerator import get_accelerator from unit.common import DistributedTest from unit.util import required_minimum_torch_version, required_maximum_torch_version pytestmark = pytest.mark.skipif(not required_minimum_torch_version(major_version=1, minor_version=5), reason='Megatron-LM package requires Pytorch version 1.5 or above') def reset_random(seed=1234): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) get_accelerator().manual_seed_all(seed) def create_bert_model(): hidden_size = 384 num_layers = 2 heads = 12 dropout_ratio = 0.1 bert_config = BertConfig(vocab_size_or_config_json_file=119547, hidden_size=hidden_size, num_hidden_layers=num_layers, num_attention_heads=heads, intermediate_size=hidden_size * 4, hidden_act="gelu", hidden_dropout_prob=dropout_ratio, attention_probs_dropout_prob=dropout_ratio, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.2) weights = [] biases = [] for i in range(4): weights.append(torch.nn.Parameter(torch.Tensor(hidden_size, hidden_size))) weights.append(torch.nn.Parameter(torch.Tensor(hidden_size))) weights.append(torch.nn.Parameter(torch.Tensor(hidden_size * 4, hidden_size))) weights.append(torch.nn.Parameter(torch.Tensor(hidden_size, hidden_size * 4))) weights.append(torch.nn.Parameter(torch.Tensor(hidden_size))) biases.append(torch.nn.Parameter(torch.Tensor(hidden_size))) for i in range(4): biases.append(torch.nn.Parameter(torch.Tensor(hidden_size))) biases.append(torch.nn.Parameter(torch.Tensor(hidden_size * 4))) biases.append(torch.nn.Parameter(torch.Tensor(hidden_size))) biases.append(torch.nn.Parameter(torch.Tensor(hidden_size))) return BertEncoderPreln(bert_config, weights, biases) class Conv1D(torch.nn.Module): """ 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). Basically works like a linear layer but the weights are transposed. Args: nf (`int`): The number of output features. nx (`int`): The number of input features. 
""" def __init__(self, nf, nx): super().__init__() self.nf = nf w = torch.empty(nx, nf) self.weight = torch.nn.Parameter(w) self.bias = torch.nn.Parameter(torch.zeros(nf)) def forward(self, x): size_out = x.size()[:-1] + (self.nf, ) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(size_out) return x def create_conv1d_model(): nf = 128 nx = 128 return torch.nn.ModuleList([Conv1D(nf, nx) for i in range(4)]) class TestCompression(DistributedTest): def setup_method(self, method): reset_random() def get_ds_config(self): ds_config_dict = { "train_micro_batch_size_per_gpu": 1, "optimizer": { "type": "Lamb", "params": { "lr": 0.00015 } }, "fp16": { "enabled": True }, "compression_training": { "weight_quantization": { "shared_parameters": { "enabled": True, "quantizer_kernel": False, "schedule_offset": 50, "quantize_groups": 1, "quantize_verbose": False, "quantization_type": "asymmetric", "rounding": "nearest", "fp16_mixed_quantize": { "enabled": False, "quantize_change_ratio": 0.001 } }, "different_groups": { "wq1": { "params": { "start_bits": 12, "target_bits": 8, "quantization_period": 50 }, "modules": ["attention.self", "intermediate"] }, "wq2": { "params": { "start_bits": 12, "target_bits": 4, "quantization_period": 50 }, "modules": ["attention.output"] } } }, "activation_quantization": { "shared_parameters": { "enabled": True, "quantization_type": "asymmetric", "range_calibration": "dynamic", "schedule_offset": 50 }, "different_groups": { "aq1": { "params": { "bits": 8 }, "modules": ["attention.output"] } } }, "sparse_pruning": { "shared_parameters": { "enabled": True, "schedule_offset": 30, "method": "l1" }, "different_groups": { "sp1": { "params": { "dense_ratio": 0.5 }, "modules": ["attention.self"] } } }, "row_pruning": { "shared_parameters": { "enabled": True, "schedule_offset": 20, "method": "topk" }, "different_groups": { "rp1": { "params": { "dense_ratio": 0.5 }, "modules": ["intermediate.dense"], "related_modules": [["layer.\\w+.output.dense"]] } } }, "head_pruning": { "shared_parameters": { "enabled": True, "schedule_offset": 10, "method": "topk", "num_heads": 12 }, "different_groups": { "rp1": { "params": { "dense_ratio": 0.5 }, "modules": ["attention.output.dense"], "related_modules": [["self.query", "self.key", "self.value"]] } } } } } return ds_config_dict def test_linear_layer_compress(self, tmpdir): model = create_bert_model() compressed_model = init_compression(model, self.get_ds_config()) assert isinstance(compressed_model.layer[0].attention.self.query, LinearLayer_Compress) assert isinstance(compressed_model.layer[0].attention.self.key, LinearLayer_Compress) assert isinstance(compressed_model.layer[0].attention.self.value, LinearLayer_Compress) @pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.") def test_mpu_compress(self, tmpdir): if not required_maximum_torch_version(major_version=1, minor_version=13): pytest.skip("megatron not compatible with torch >1.13") from megatron import mpu args_defaults = { 'num_layers': 2, 'hidden_size': 128, 'num_attention_heads': 8, 'max_position_embeddings': 128, } model = get_gpt2_model(args_defaults) compressed_model = init_compression(model, self.get_ds_config(), mpu=mpu) assert isinstance(compressed_model.module.language_model.transformer.layers[0].attention.query_key_value, ColumnParallelLinear_Compress) assert isinstance(compressed_model.module.language_model.transformer.layers[0].attention.dense, RowParallelLinear_Compress) assert 
isinstance(compressed_model.module.language_model.transformer.layers[0].mlp.dense_h_to_4h, ColumnParallelLinear_Compress) assert isinstance(compressed_model.module.language_model.transformer.layers[0].mlp.dense_4h_to_h, RowParallelLinear_Compress) def test_conv1d_convertion(self, tmpdir): model = create_conv1d_model() compressed_model = convert_conv1d_to_linear(model, Conv1D) assert isinstance(compressed_model[0], torch.nn.Linear) assert isinstance(compressed_model[1], torch.nn.Linear) assert isinstance(compressed_model[2], torch.nn.Linear) assert isinstance(compressed_model[3], torch.nn.Linear)
10,129
38.111969
125
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/test_pld.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import numpy as np
import deepspeed
import pytest
from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop
from unit.common import DistributedTest
from unit.simple_model import SimpleModel, PLD_SimpleModel, random_dataloader


@pytest.mark.parametrize('theta', [0, 0.1, 0.9, 1.0])
def test_pld_schedule(tmpdir, theta):
    gamma = 0.001

    pld_scheduler = ProgressiveLayerDrop(theta, gamma)
    for i in range(10):
        pld_scheduler.update_state(i)
        expected_theta = (1. - theta) * np.exp(-gamma * i) + theta
        actual_theta = pld_scheduler.get_theta()
        assert expected_theta == actual_theta


@pytest.mark.parametrize('theta', [0, 0.1, 0.9, 1.0])
class TestPLDModel(DistributedTest):
    world_size = 1

    def test_pld_model(self, theta):
        gamma = 0.001
        config_dict = {
            "train_batch_size": 1,
            "steps_per_print": 1,
            "optimizer": {
                "type": 'Adam',
                "params": {
                    "lr": 0.0001
                }
            },
            "fp16": {
                "enabled": True
            },
            "progressive_layer_drop": {
                "enabled": True,
                "theta": theta,
                "gamma": gamma
            }
        }
        hidden_dim = 10

        model = PLD_SimpleModel(hidden_dim, empty_grad=False)
        model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())

        data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)

        for i, batch in enumerate(data_loader):
            loss = model(batch[0], batch[1])
            model.backward(loss)
            model.step()

            expected_theta = (1. - theta) * np.exp(-gamma * i) + theta
            actual_theta = model.get_pld_theta()
            assert expected_theta == actual_theta


class TestNonPLDModel(DistributedTest):
    world_size = 1

    def test_non_pld_model(self):
        gamma = 0.001
        theta = 0.5
        config_dict = {
            "train_batch_size": 1,
            "steps_per_print": 1,
            "optimizer": {
                "type": 'Adam',
                "params": {
                    "lr": 0.0001
                }
            },
            "fp16": {
                "enabled": True
            },
            "progressive_layer_drop": {
                "enabled": True,
                "theta": theta,
                "gamma": gamma
            }
        }
        hidden_dim = 10

        model = SimpleModel(hidden_dim, empty_grad=False)
        model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())

        data_loader = random_dataloader(model=model, total_samples=1, hidden_dim=hidden_dim, device=model.device)

        for i, batch in enumerate(data_loader):
            with pytest.raises(TypeError):
                loss = model(batch[0], batch[1])
3,088
29.284314
115
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/test_data.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from deepspeed.utils import RepeatingLoader
import torch
import pytest
import deepspeed
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
from unit.simple_model import SimpleModel, random_dataset


def test_repeating_loader():
    loader = [1, 2, 3]
    loader = RepeatingLoader(loader)

    for idx in range(50):
        assert next(loader) == 1
        assert next(loader) == 2
        assert next(loader) == 3


@pytest.mark.parametrize('train_batch_size, drop_last', [(1, True), (4, True), (1, False), (4, False)])
class TestDataLoaderDropLast(DistributedTest):
    world_size = 1

    def test(self, train_batch_size, drop_last):
        config_dict = {"train_batch_size": train_batch_size, "dataloader_drop_last": drop_last, "steps_per_print": 1}
        hidden_dim = 10

        model = SimpleModel(hidden_dim)
        optimizer = torch.optim.AdamW(params=model.parameters())
        # TODO: no way to set DeepSpeedEngine.deepspeed_io params, need to use
        # pin_memory=False for cuda device
        train_dataset = random_dataset(total_samples=50,
                                       hidden_dim=hidden_dim,
                                       device=torch.device('cpu'),
                                       dtype=torch.float32)
        model, _, training_dataloader, _ = deepspeed.initialize(config=config_dict,
                                                                model=model,
                                                                training_data=train_dataset,
                                                                optimizer=optimizer)
        training_dataloader.num_local_io_workers = 0  # We can't do nested mp.pool
        for n, batch in enumerate(training_dataloader):
            x = batch[0].to(get_accelerator().current_device_name())
            y = batch[1].to(get_accelerator().current_device_name())
            loss = model(x, y)
            model.backward(loss)
            model.step()
2,092
39.25
117
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/test_runtime_utils.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
from torch._utils import _flatten_dense_tensors
import deepspeed.comm as dist
import pytest

import deepspeed.runtime.utils as ds_utils
import deepspeed.utils.groups as groups
from deepspeed.accelerator import get_accelerator

from unit.common import DistributedTest


def test_call_to_str():
    c2s = ds_utils.call_to_str

    assert c2s('int') == 'int()'
    assert c2s('int', 3) == 'int(3)'
    assert c2s('int', 3, 'jeff') == 'int(3, \'jeff\')'

    assert c2s('hello', val=3) == 'hello(val=3)'
    assert c2s('hello', 1138, val=3) == 'hello(1138, val=3)'


class TestClibGradNorm(DistributedTest):
    world_size = 2

    def test(self):
        param1 = torch.nn.Parameter(torch.Tensor([0]))
        param1.grad = torch.Tensor([1])
        param2 = torch.nn.Parameter(torch.Tensor([0]))
        param2.grad = torch.Tensor([dist.get_rank() + 1])
        # param2 is now MoE parameter
        param2.allreduce = False
        parameters = [param1, param2]

        groups._create_expert_and_data_parallel(2)

        norm = ds_utils.clip_grad_norm_(parameters, max_norm=0.1)
        norm = torch.Tensor([norm]).to(get_accelerator().device_name(dist.get_rank()))
        world_size = dist.get_world_size()
        gathered_norm = [torch.zeros(1).to(get_accelerator().device_name()) for i in range(world_size)]

        dist.all_gather(gathered_norm, norm)

        assert gathered_norm[0] == gathered_norm[1], "norm at rank 0 does not match the norm at rank 1"


@pytest.mark.parametrize("check_using_norm", [(False), (True)])
class TestCheckOverflow(DistributedTest):
    world_size = 2

    def test(self, check_using_norm):
        groups._create_expert_and_data_parallel(2)

        param1 = torch.nn.Parameter(torch.Tensor([0]))
        param1.grad = torch.Tensor([1])
        param2 = torch.nn.Parameter(torch.Tensor([0]))
        if dist.get_rank() == 0:
            param2.grad = torch.Tensor([1])
        else:
            param2.grad = torch.Tensor([float("inf")])
        param2.allreduce = False  # param2 is now MoE parameter
        parameters = [param1, param2]
        if check_using_norm:
            grads_group_flat = [_flatten_dense_tensors([p.grad for p in parameters])]
            norm = ds_utils.get_weight_norm(grads_group_flat)
            overflow_checker = ds_utils.CheckOverflow([parameters])
            overflow = overflow_checker.check_using_norm([norm], reduce_overflow=False)
        else:
            overflow_checker = ds_utils.CheckOverflow([parameters])
            overflow = overflow_checker.check()

        assert overflow
2,685
32.575
103
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/test_autocast.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import pytest
import torch
from deepspeed.runtime.zero.linear import LinearModuleForZeroStage3
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest


@pytest.mark.parametrize('half_op', [False, True])
class TestAutoCastDisable(DistributedTest):

    def test_missing_amp_autocast(self, half_op):
        hidden_dim = 4
        if half_op:
            input = torch.randn(hidden_dim).to(get_accelerator().device_name()).half()
            ds_linear = LinearModuleForZeroStage3(hidden_dim, hidden_dim).to(get_accelerator().device_name()).half()
        else:
            input = torch.randn(hidden_dim).to(get_accelerator().device_name())
            ds_linear = LinearModuleForZeroStage3(hidden_dim, hidden_dim).to(get_accelerator().device_name())

        output = ds_linear(input)
        assert output.dtype == ds_linear.weight.dtype

    def test_disable_autocast_linear(self, half_op):
        amp = get_accelerator().amp()

        hidden_dim = 4
        if half_op:
            input = torch.randn(hidden_dim).to(get_accelerator().device_name()).half()
            ds_linear = LinearModuleForZeroStage3(hidden_dim, hidden_dim).to(get_accelerator().device_name()).half()
        else:
            input = torch.randn(hidden_dim).to(get_accelerator().device_name())
            ds_linear = LinearModuleForZeroStage3(hidden_dim, hidden_dim).to(get_accelerator().device_name())

        with amp.autocast(False):
            output = ds_linear(input)
            assert output.dtype == ds_linear.weight.dtype


@pytest.mark.skipif(get_accelerator().amp() is None, reason='amp is not installed')
@pytest.mark.parametrize('half_input, half_weight', [(False, False), (False, True), (True, False), (True, True)])
class TestAutoCastEnable(DistributedTest):

    def test_autocast_linear(self, tmpdir, half_input, half_weight):
        amp = get_accelerator().amp()

        hidden_dim = 4
        input = torch.randn(hidden_dim).to(get_accelerator().device_name())
        ds_linear = LinearModuleForZeroStage3(hidden_dim, hidden_dim).to(get_accelerator().device_name())

        if half_input:
            input = input.half()

        if half_weight:
            ds_linear = ds_linear.half()

        with amp.autocast():
            output = ds_linear(input)
            assert output.dtype == torch.half or output.dtype == torch.bfloat16
2,472
37.640625
116
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/test_data_efficiency.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch import os import deepspeed from deepspeed.accelerator import get_accelerator from unit.common import DistributedTest from unit.simple_model import Curriculum_SimpleModel, SimpleModel, random_dataloader, random_dataset class MPU(): def __init__(self, tp_world_size): self.rank = deepspeed.comm.get_rank() self.world_size = deepspeed.comm.get_world_size() self.tp_world_size = tp_world_size for i in range(0, self.world_size, tp_world_size): ranks = range(i, i + tp_world_size) group = deepspeed.comm.new_group(ranks) if self.rank in ranks: self.tp_group = group for i in range(0, tp_world_size): ranks = range(i, self.world_size, tp_world_size) group = deepspeed.comm.new_group(ranks) if self.rank in ranks: self.dp_group = group def get_model_parallel_rank(self): return self.rank % self.tp_world_size def get_model_parallel_world_size(self): return self.tp_world_size def get_data_parallel_rank(self): return self.rank // self.tp_world_size def get_data_parallel_world_size(self): return self.world_size // self.tp_world_size def get_data_parallel_group(self): return self.dp_group def get_model_parallel_group(self): return self.tp_group class TestDataEfficiency(DistributedTest): world_size = 2 def test_curriculum_learning(self): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015, "weight_decay": 0.01 } }, "gradient_clipping": 1.0, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16 }, "data_efficiency": { "enabled": True, "seed": 1234, "data_sampling": { "enabled": True, "num_workers": 0, "curriculum_learning": { "enabled": True, "data_cluster_path": "/tmp", "curriculum_metrics": { "dummy_metric": { "index_to_sample_path": "dummy", "index_to_metric_path": "dummy", "difficulty_type": "value", "clustering_type": "single_cluster", "min_difficulty": 2, "max_difficulty": 10, "schedule_type": "fixed_root", "schedule_config": { "total_curriculum_step": 8, "difficulty_step": 2, "root_degree": 1 } } } } } } } def data_post_process(data, data_sampler_state_dict): assert 'dummy_metric' in data_sampler_state_dict['current_difficulties'] return data hidden_dim = 10 model = SimpleModel(hidden_dim) dataset = random_dataset(20, hidden_dim, torch.device('cpu'), dtype=torch.half) model, _, data_loader, _ = deepspeed.initialize(config=config_dict, model=model, training_data=dataset, model_parameters=model.parameters(), mpu=MPU(1)) if model.mpu.get_data_parallel_rank() == 0 and not os.path.exists('/tmp'): os.makedirs('/tmp') model.set_data_post_process_func(data_post_process) for n, batch in enumerate(data_loader): x = batch[0].to(get_accelerator().current_device_name()) y = batch[1].to(get_accelerator().current_device_name()) loss = model(x, y) model.backward(loss) model.step() if n >= 10: break class TestLegacyCurriculumScheduler(DistributedTest): world_size = 2 def test_fixed_discrete(self): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015, "weight_decay": 0.01 } }, "gradient_clipping": 1.0, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16 }, "curriculum_learning": { "enabled": True, "curriculum_type": "seqlen", "min_difficulty": 1, "max_difficulty": 5, "schedule_type": "fixed_discrete", "schedule_config": { "difficulty": [1, 2, 3, 4, 5], "max_step": [2, 4, 6, 8] } } } hidden_dim = 10 ground_truths = {1: 1, 2: 1, 3: 2, 4: 2, 5: 3, 6: 3, 7: 4, 
8: 4} model = Curriculum_SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=20, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss, seqlen = model(batch[0], batch[1]) model.backward(loss) model.step() true_seqlen = 5 if n + 1 in ground_truths: true_seqlen = ground_truths[n + 1] assert seqlen == true_seqlen, f"Incorrect curriculum schedule" def test_fixed_linear(self): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015, "weight_decay": 0.01 } }, "gradient_clipping": 1.0, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16 }, "curriculum_learning": { "enabled": True, "curriculum_type": "seqlen", "min_difficulty": 2, "max_difficulty": 10, "schedule_type": "fixed_linear", "schedule_config": { "total_curriculum_step": 8, "difficulty_step": 2 } } } hidden_dim = 10 ground_truths = {1: 2, 2: 4, 3: 4, 4: 6, 5: 6, 6: 8, 7: 8, 8: 10, 9: 10, 10: 10} model = Curriculum_SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=20, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss, seqlen = model(batch[0], batch[1]) model.backward(loss) model.step() if n + 1 in ground_truths: true_seqlen = ground_truths[n + 1] assert seqlen == true_seqlen, f"Incorrect curriculum schedule"
7,880
35.486111
115
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/test_lr_schedulers.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch import deepspeed import pytest from unit.common import DistributedTest from unit.simple_model import SimpleModel, random_dataloader from deepspeed.runtime.lr_schedules import LR_RANGE_TEST, LR_RANGE_TEST_MIN_LR, LR_RANGE_TEST_STEP_RATE, LR_RANGE_TEST_STEP_SIZE, LR_RANGE_TEST_STAIRCASE from deepspeed.runtime.lr_schedules import WARMUP_LR, WARMUP_MIN_LR, WARMUP_MAX_LR, WARMUP_NUM_STEPS, WARMUP_TYPE, WARMUP_LOG_RATE, WARMUP_LINEAR_RATE from deepspeed.runtime.lr_schedules import ONE_CYCLE, CYCLE_MIN_LR, CYCLE_MAX_LR, CYCLE_FIRST_STEP_SIZE, DECAY_LR_RATE, DECAY_STEP_SIZE from deepspeed.runtime.lr_schedules import CYCLE_MIN_MOM, CYCLE_MAX_MOM, DECAY_MOM_RATE from deepspeed.runtime.lr_schedules import WARMUP_DECAY_LR, TOTAL_NUM_STEPS def _verify_continuous_decrease(values): for i in range(len(values) - 1): assert values[i] > values[i + 1] def _verify_continuous_increase(values): for i in range(len(values) - 1): assert values[i] < values[i + 1] def _verify_staircase_increase(values, step_size): num_values = len(values) for i in range(0, num_values, step_size): j = min(i + step_size, num_values) assert all([values[i] == v for v in values[i:j]]) @pytest.mark.parametrize("scheduler_type,params", [(WARMUP_LR, {}), (WARMUP_DECAY_LR, { WARMUP_NUM_STEPS: 10, TOTAL_NUM_STEPS: 20 }), (ONE_CYCLE, { CYCLE_MIN_LR: 0, CYCLE_MAX_LR: 0.1 }), (LR_RANGE_TEST, {})]) class TestGetLrBeforeTrain(DistributedTest): world_size = 1 def test(self, scheduler_type, params): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 }, }, "scheduler": { "type": scheduler_type, "params": params }, "gradient_clipping": 1.0 } hidden_dim = 10 model = SimpleModel(hidden_dim, empty_grad=False) model, _, _, lr_scheduler = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device, dtype=torch.float) for n, batch in enumerate(data_loader): # get lr before training starts lr_scheduler.get_lr() loss = model(batch[0], batch[1]) model.backward(loss) model.step() @pytest.mark.parametrize("warmup_num_steps", [10, 15, 19, 33]) @pytest.mark.parametrize("warmup_type", [WARMUP_LOG_RATE, WARMUP_LINEAR_RATE]) class TestLrSchedule(DistributedTest): world_size = 1 def test_lr_warmup_schedule(self, warmup_num_steps, warmup_type): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 }, }, "scheduler": { "type": WARMUP_LR, "params": { WARMUP_MIN_LR: 0.1, WARMUP_MAX_LR: 0.2, WARMUP_NUM_STEPS: warmup_num_steps, WARMUP_TYPE: warmup_type, } }, "gradient_clipping": 1.0 } schedule_params = config_dict["scheduler"]["params"] total_num_steps = 2 * warmup_num_steps hidden_dim = 10 model = SimpleModel(hidden_dim, empty_grad=False) model, _, _, lr_scheduler = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=total_num_steps * 2, hidden_dim=hidden_dim, device=model.device, dtype=torch.float) step_lrs = [] for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() step_lrs.append(lr_scheduler.get_lr()) # Verify initial lr assert step_lrs[0] == [schedule_params[WARMUP_MIN_LR]] # Verify warmup completion warmup_num_steps = schedule_params[WARMUP_NUM_STEPS] 
warmup_max_lr = [schedule_params[WARMUP_MAX_LR]] assert step_lrs[warmup_num_steps] == warmup_max_lr # Verify post-warmup completion assert all([warmup_max_lr == lr for lr in step_lrs[warmup_num_steps:]]) def test_lr_warmup_decay_schedule(self, warmup_num_steps, warmup_type): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 }, }, "scheduler": { "type": WARMUP_DECAY_LR, "params": { WARMUP_MIN_LR: 0.1, WARMUP_MAX_LR: 0.2, WARMUP_NUM_STEPS: warmup_num_steps, TOTAL_NUM_STEPS: warmup_num_steps * 2, WARMUP_TYPE: warmup_type } }, "gradient_clipping": 1.0 } schedule_params = config_dict["scheduler"]["params"] total_num_steps = schedule_params[TOTAL_NUM_STEPS] hidden_dim = 10 model = SimpleModel(hidden_dim, empty_grad=False) model, _, _, lr_scheduler = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=total_num_steps * 2, hidden_dim=hidden_dim, device=model.device, dtype=torch.float) step_lrs = [] for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() step_lrs.append(lr_scheduler.get_lr()) # Verify initial lr assert step_lrs[0] == [schedule_params[WARMUP_MIN_LR]] # Verify lr at warmup completion warmup_num_steps = schedule_params[WARMUP_NUM_STEPS] warmup_max_lr = [schedule_params[WARMUP_MAX_LR]] assert step_lrs[warmup_num_steps] == warmup_max_lr # Verify decay phase previous_lr = warmup_max_lr for lr in step_lrs[warmup_num_steps + 1:]: assert lr < previous_lr previous_lr = lr @pytest.mark.parametrize("scheduler_type,params", [(WARMUP_LR, {}), (WARMUP_DECAY_LR, { WARMUP_NUM_STEPS: 5, TOTAL_NUM_STEPS: 10 }), (ONE_CYCLE, { CYCLE_MIN_LR: 0, CYCLE_MAX_LR: 0.1, CYCLE_FIRST_STEP_SIZE: 5, DECAY_STEP_SIZE: 5 }), (LR_RANGE_TEST, { LR_RANGE_TEST_MIN_LR: 1e-4, LR_RANGE_TEST_STEP_SIZE: 1 })]) class TestSchedulerOptimizerParity(DistributedTest): world_size = 1 def test(self, scheduler_type, params): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 }, }, "scheduler": { "type": scheduler_type, "params": params }, "gradient_clipping": 1.0 } hidden_dim = 10 model = SimpleModel(hidden_dim, empty_grad=False) model, _, _, lr_scheduler = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device, dtype=torch.float) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() assert lr_scheduler.get_lr() == model.get_lr() @pytest.mark.parametrize("min_lr, step_rate, step_size, staircase", [(1e-4, 1e-5, 1, True), (1e-5, 1e-5, 1, False), (1e-4, 1e-3, 10, True), (1e-3, 1e-3, 10, False), (1e-2, 1e-2, 19, True), (1e-2, 1e-2, 19, False) ])# yapf: disable class TestLrRange(DistributedTest): world_size = 1 def test(self, min_lr, step_rate, step_size, staircase): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 }, }, "scheduler": { "type": LR_RANGE_TEST, "params": { LR_RANGE_TEST_MIN_LR: min_lr, LR_RANGE_TEST_STEP_RATE: step_rate, LR_RANGE_TEST_STEP_SIZE: step_size, LR_RANGE_TEST_STAIRCASE: staircase } }, "gradient_clipping": 1.0 } hidden_dim = 10 model = SimpleModel(hidden_dim, empty_grad=False) model, _, _, lr_scheduler = deepspeed.initialize(config=config_dict, model=model, 
model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=max(50, step_size * 2), hidden_dim=hidden_dim, device=model.device, dtype=torch.float) step_lrs = [] for _, batch in enumerate(data_loader): step_lrs.extend(lr_scheduler.get_lr()) loss = model(batch[0], batch[1]) model.backward(loss) model.step() # Verify starting lr assert step_lrs[0] == min_lr if staircase: # Verify staircase increasing lr _verify_staircase_increase(step_lrs, step_size) else: # Verify continuous increasing lr _verify_continuous_increase(step_lrs) class TestOneCycle(DistributedTest): world_size = 1 @pytest.mark.parametrize("min_lr, max_lr, decay_rate, cycle_step_size, decay_step_size", [ (1e-5, 1e-2, 1e-3, 10, 10), (1e-3, 1e-1, 0, 21, 21), (1e-5, 1e-2, 1e-3, 10, 10), (1e-3, 1e-1, 1e-1, 21, 21), (1e-5, 1e-1, 0, 10, 0), ]) # yapf: disable def test_lr(self, min_lr, max_lr, decay_rate, cycle_step_size, decay_step_size): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 }, }, "scheduler": { "type": ONE_CYCLE, "params": { CYCLE_MIN_LR: min_lr, CYCLE_MAX_LR: max_lr, DECAY_LR_RATE: decay_rate, CYCLE_FIRST_STEP_SIZE: cycle_step_size, DECAY_STEP_SIZE: decay_step_size } }, "gradient_clipping": 1.0 } hidden_dim = 10 model = SimpleModel(hidden_dim, empty_grad=False) model, _, _, lr_scheduler = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=max(50, cycle_step_size * 3), hidden_dim=hidden_dim, device=model.device, dtype=torch.float) step_lrs = [] for _, batch in enumerate(data_loader): step_lrs.extend(lr_scheduler.get_lr()) loss = model(batch[0], batch[1]) model.backward(loss) model.step() # Verify starting lr assert step_lrs[0] == min_lr # Verify peak lr assert step_lrs[cycle_step_size] == max_lr # Verify increasing phase _verify_continuous_increase(step_lrs[:cycle_step_size]) # Verify decreasing phase _verify_continuous_decrease(step_lrs[cycle_step_size:(cycle_step_size * 2)]) # Verify decay phase if decay_rate > 0: _verify_continuous_decrease(step_lrs[(cycle_step_size * 2):]) @pytest.mark.parametrize("min_mom, max_mom, decay_rate, step_size", [ (0.08, 0.09, 1e-3, 10), (0.08, 0.09, 0, 21), (0.08, 0.09, 1e-3, 10), (0.08, 0.09, 0, 21), ]) # yapf: disable def test_mom(self, min_mom, max_mom, decay_rate, step_size): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 }, }, "scheduler": { "type": ONE_CYCLE, "params": { CYCLE_MIN_LR: 1e-3, CYCLE_MAX_LR: 1e-2, CYCLE_MIN_MOM: min_mom, CYCLE_MAX_MOM: max_mom, DECAY_MOM_RATE: decay_rate, CYCLE_FIRST_STEP_SIZE: step_size, DECAY_STEP_SIZE: step_size } }, "gradient_clipping": 1.0 } hidden_dim = 10 model = SimpleModel(hidden_dim, empty_grad=False) model, _, _, lr_scheduler = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=max(50, step_size * 3), hidden_dim=hidden_dim, device=model.device, dtype=torch.float) step_moms = [] for _, batch in enumerate(data_loader): step_moms.append(lr_scheduler.get_mom()) loss = model(batch[0], batch[1]) model.backward(loss) model.step() # Verify starting lr assert step_moms[0][0][0] == max_mom # Verify peak lr assert step_moms[step_size][0][0] == min_mom # Verify decreasing phase _verify_continuous_decrease(step_moms[:step_size]) # Verify increasing phase 
_verify_continuous_increase(step_moms[step_size:(step_size * 2)]) # Verify decay phase if decay_rate > 0: _verify_continuous_increase(step_moms[(step_size * 2):])
17,663
38.783784
153
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/test_ds_config_model.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import pytest
import os
import json
from pydantic import Field, ValidationError
from typing import List
from deepspeed.runtime import config as ds_config
from deepspeed.runtime.config_utils import DeepSpeedConfigModel


class SimpleConf(DeepSpeedConfigModel):
    param_1: int = 0
    param_2_old: str = Field(None, deprecated=True, new_param="param_2", new_param_fn=(lambda x: [x]))
    param_2: List[str] = None
    param_3: int = Field(0, alias="param_3_alias")


def test_only_required_fields(tmpdir):
    '''Ensure that config containing only the required fields is accepted. '''
    cfg_json = tmpdir.mkdir('ds_config_unit_test').join('minimal.json')

    with open(cfg_json, 'w') as f:
        required_fields = {'train_batch_size': 64}
        json.dump(required_fields, f)

    run_cfg = ds_config.DeepSpeedConfig(cfg_json)
    assert run_cfg is not None
    assert run_cfg.train_batch_size == 64
    assert run_cfg.train_micro_batch_size_per_gpu == 64
    assert run_cfg.gradient_accumulation_steps == 1


def test_config_duplicate_key(tmpdir):
    config_dict = '''
    {
        "train_batch_size": 24,
        "train_batch_size": 24,
    }
    '''
    config_path = os.path.join(tmpdir, 'temp_config.json')
    with open(config_path, 'w') as jf:
        jf.write("%s" % config_dict)

    with pytest.raises(ValueError):
        run_cfg = ds_config.DeepSpeedConfig(config_path)


def test_config_base():
    config = SimpleConf(**{"param_1": 42})
    assert config.param_1 == 42


def test_config_base_deprecatedfield():
    config = SimpleConf(**{"param_2_old": "DS"})
    assert config.param_2 == ["DS"]


def test_config_base_aliasfield():
    config = SimpleConf(**{"param_3": 10})
    assert config.param_3 == 10

    config = SimpleConf(**{"param_3_alias": 10})
    assert config.param_3 == 10


@pytest.mark.parametrize("config_dict", [{"param_1": "DS"}, {"param_2": "DS"}, {"param_1_typo": 0}])
def test_config_base_literalfail(config_dict):
    with pytest.raises(ValidationError):
        config = SimpleConf(**config_dict)


def test_config_base_deprecatedfail():
    with pytest.raises(AssertionError):
        config = SimpleConf(**{"param_2": ["DS"], "param_2_old": "DS"})
2,296
27.7125
102
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/test_multi_output_model.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch import deepspeed from pytest import approx from unit.common import DistributedTest from unit.multi_output_model import MultiOutputModel, multi_output_dataloader class TestTwoOutputModel(DistributedTest): world_size = 1 def test(self, tmpdir): grad_accumulation_steps = 2 micro_batch_size = 1 world_size = self.world_size config_dict = { "train_micro_batch_size_per_gpu": micro_batch_size, "gradient_accumulation_steps": grad_accumulation_steps, "train_batch_size": micro_batch_size * grad_accumulation_steps * world_size, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 } }, "fp16": { "enabled": True } } hidden_dim = 10 weight_value = 0.1 model = MultiOutputModel(hidden_dim, weight_value) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) total_samples = 4 data_loader = multi_output_dataloader(model=model, total_samples=total_samples, hidden_dim=hidden_dim, device=model.device, inputs=[1.0, 2.0], targets=[1, 2]) for n, batch in enumerate(data_loader): assert len(batch) % 2 == 0, \ f"multi_output_dataloader failed to return even number of data samples (input+target)" midpoint = len(batch) // 2 inputs, targets = batch[:midpoint], batch[midpoint:] loss_tuple = model(inputs, targets) expected_loss = torch.tensor(2.302734375, dtype=torch.half, device=model.device) for loss in loss_tuple: assert loss.shape == torch.Size([]) assert loss.item() == approx(expected_loss.item()) summed_loss = sum(loss_tuple) scaled_loss = model.backward(summed_loss) expected_scaled_loss = summed_loss.float() / grad_accumulation_steps assert scaled_loss.item() == approx(expected_scaled_loss.item()) model.step() class TestThreeOutputModel(DistributedTest): world_size = 1 def test(self, tmpdir): grad_accumulation_steps = 3 micro_batch_size = 1 world_size = 1 config_dict = { "train_micro_batch_size_per_gpu": micro_batch_size, "gradient_accumulation_steps": grad_accumulation_steps, "train_batch_size": micro_batch_size * grad_accumulation_steps * world_size, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 } }, "fp16": { "enabled": True } } hidden_dim = 10 weight_value = 0.1 model = MultiOutputModel(hidden_dim, weight_value) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) total_samples = grad_accumulation_steps * micro_batch_size * 2 data_loader = multi_output_dataloader(model=model, total_samples=total_samples, hidden_dim=hidden_dim, device=model.device, inputs=[1.0, 2.0, 3.0], targets=[1, 2, 3]) for n, batch in enumerate(data_loader): assert len(batch) % 2 == 0, \ f"multi_output_dataloader failed to return even number of data samples (input+target)" midpoint = len(batch) // 2 inputs, targets = batch[:midpoint], batch[midpoint:] loss_tuple = model(inputs, targets) assert len(loss_tuple) == 3 expected_loss = torch.tensor(2.302734375, dtype=torch.half, device=model.device) for loss in loss_tuple: assert loss.shape == torch.Size([]) assert loss.item() == approx(expected_loss.item()) summed_loss = sum(loss_tuple) scaled_loss = model.backward(summed_loss) expected_scaled_loss = summed_loss.float() / grad_accumulation_steps assert scaled_loss.item() == approx(expected_scaled_loss.item()) model.step()
4,821
37.269841
115
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/test_ds_config_dict.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team # A test on its own import os import pytest import json import hjson import argparse from deepspeed.runtime.zero.config import DeepSpeedZeroConfig from deepspeed.accelerator import get_accelerator from unit.common import DistributedTest, get_test_path from unit.simple_model import SimpleModel, create_config_from_dict, random_dataloader import deepspeed.comm as dist # A test on its own import deepspeed from deepspeed.runtime.config import DeepSpeedConfig, get_bfloat16_enabled class TestBasicConfig(DistributedTest): world_size = 1 def test_accelerator(self): assert (get_accelerator().is_available()) def test_check_version(self): assert hasattr(deepspeed, "__git_hash__") assert hasattr(deepspeed, "__git_branch__") assert hasattr(deepspeed, "__version__") assert hasattr(deepspeed, "__version_major__") assert hasattr(deepspeed, "__version_minor__") assert hasattr(deepspeed, "__version_patch__") @pytest.fixture def base_config(): config_dict = { "train_batch_size": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 } }, "fp16": { "enabled": True } } return config_dict def _run_batch_config(ds_config, train_batch=None, micro_batch=None, gas=None): ds_config.train_batch_size = train_batch ds_config.train_micro_batch_size_per_gpu = micro_batch ds_config.gradient_accumulation_steps = gas success = True try: ds_config._configure_train_batch_size() except AssertionError: success = False return success def _batch_assert(status, ds_config, batch, micro_batch, gas, success): if not success: assert not status print("Failed but All is well") return assert ds_config.train_batch_size == batch assert ds_config.train_micro_batch_size_per_gpu == micro_batch assert ds_config.gradient_accumulation_steps == gas print("All is well") #Tests different batch config provided in deepspeed json file @pytest.mark.parametrize('num_ranks,batch,micro_batch,gas,success', [(2,32,16,1,True), (2,32,8,2,True), (2,33,17,2,False), (2,32,18,1,False)]) # yapf: disable class TestBatchConfig(DistributedTest): world_size = 2 def test(self, num_ranks, batch, micro_batch, gas, success): assert dist.get_world_size() == num_ranks, \ 'The test assumes a world size of f{num_ranks}' ds_batch_config = get_test_path('ds_batch_config.json') ds_config = DeepSpeedConfig(ds_batch_config) #test cases when all parameters are provided status = _run_batch_config(ds_config, train_batch=batch, micro_batch=micro_batch, gas=gas) _batch_assert(status, ds_config, batch, micro_batch, gas, success) #test cases when two out of three parameters are provided status = _run_batch_config(ds_config, train_batch=batch, micro_batch=micro_batch) _batch_assert(status, ds_config, batch, micro_batch, gas, success) if success: #when gas is provided with one more parameter status = _run_batch_config(ds_config, train_batch=batch, gas=gas) _batch_assert(status, ds_config, batch, micro_batch, gas, success) status = _run_batch_config(ds_config, micro_batch=micro_batch, gas=gas) _batch_assert(status, ds_config, batch, micro_batch, gas, success) #test the case when only micro_batch or train_batch is provided if gas == 1: status = _run_batch_config(ds_config, micro_batch=micro_batch) _batch_assert(status, ds_config, batch, micro_batch, gas, success) status = _run_batch_config(ds_config, train_batch=batch) _batch_assert(status, ds_config, batch, micro_batch, gas, success) else: #when only gas is provided status = _run_batch_config(ds_config, gas=gas) _batch_assert(status, ds_config, 
batch, micro_batch, gas, success) #when gas is provided with something else and gas does not divide batch if gas != 1: status = _run_batch_config(ds_config, train_batch=batch, gas=gas) _batch_assert(status, ds_config, batch, micro_batch, gas, success) def test_temp_config_json(tmpdir): config_dict = { "train_batch_size": 1, } config_path = create_config_from_dict(tmpdir, config_dict) config_json = json.load(open(config_path, 'r')) assert 'train_batch_size' in config_json @pytest.mark.parametrize("gather_weights_key", ["stage3_gather_16bit_weights_on_model_save", "stage3_gather_fp16_weights_on_model_save"]) def test_gather_16bit_params_on_model_save(gather_weights_key): config_dict = { gather_weights_key: True, } config = DeepSpeedZeroConfig(**config_dict) assert config.gather_16bit_weights_on_model_save == True @pytest.mark.parametrize("bf16_key", ["bf16", "bfloat16"]) def test_get_bfloat16_enabled(bf16_key): cfg = { bf16_key: { "enabled": True, }, } assert get_bfloat16_enabled(cfg) == True class TestConfigLoad(DistributedTest): world_size = 1 def test_dict(self, base_config): hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=base_config, model=model, model_parameters=model.parameters()) def test_json(self, base_config, tmpdir): config_path = os.path.join(tmpdir, "config.json") with open(config_path, 'w') as fp: json.dump(base_config, fp) hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_path, model=model, model_parameters=model.parameters()) def test_hjson(self, base_config, tmpdir): config_path = os.path.join(tmpdir, "config.json") with open(config_path, 'w') as fp: hjson.dump(base_config, fp) hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_path, model=model, model_parameters=model.parameters()) class TestDeprecatedDeepScaleConfig(DistributedTest): world_size = 1 def test(self, base_config, tmpdir): config_path = create_config_from_dict(tmpdir, base_config) parser = argparse.ArgumentParser() args = parser.parse_args(args='') args.deepscale_config = config_path args.local_rank = 0 hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(args=args, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() class TestDistInit(DistributedTest): world_size = 1 def test(self, base_config): hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=base_config, model=model, model_parameters=model.parameters(), dist_init_required=True) data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() class TestInitNoOptimizer(DistributedTest): world_size = 1 def test(self, base_config): del base_config["optimizer"] hidden_dim = 10 model = SimpleModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=base_config, model=model) data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) with pytest.raises(AssertionError): model.backward(loss) with pytest.raises(AssertionError): model.step() class 
TestArgs(DistributedTest): world_size = 1 def test_none_args(self, base_config): model = SimpleModel(hidden_dim=10) model, _, _, _ = deepspeed.initialize(args=None, model=model, config=base_config) data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=10, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) def test_no_args(self, base_config): model = SimpleModel(hidden_dim=10) model, _, _, _ = deepspeed.initialize(model=model, config=base_config) data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=10, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) class TestNoModel(DistributedTest): world_size = 1 def test(self, base_config): model = SimpleModel(hidden_dim=10) with pytest.raises(AssertionError): model, _, _, _ = deepspeed.initialize(model=None, config=base_config) with pytest.raises(AssertionError): model, _, _, _ = deepspeed.initialize(model, config=base_config)
9,760
34.754579
115
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/test_ds_initialize.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import pytest from typing import Callable import torch from torch.optim import Optimizer, Adam, AdamW from torch.optim.lr_scheduler import _LRScheduler, LambdaLR from unit.simple_model import SimpleModel, random_dataloader from unit.common import DistributedTest from unit.util import required_torch_version, bf16_required_version_check, required_amp_check import deepspeed from deepspeed.ops.adam import FusedAdam from deepspeed.runtime.lr_schedules import WARMUP_LR, WarmupLR from deepspeed.runtime.config import ADAM_OPTIMIZER from deepspeed.runtime.utils import see_memory_usage @pytest.mark.parametrize('zero_stage', [0, 3]) class TestNoOptim(DistributedTest): world_size = 1 def test(self, zero_stage): if zero_stage == 3 and not required_torch_version(): pytest.skip("zero-3 param offload requires at least torch 1.8") ds_config = { 'train_batch_size': self.world_size, 'fp16': { 'enabled': True }, 'zero_optimization': { "stage": zero_stage, "offload_param": { "device": "cpu" } } } # 20B test #hidden_dim = 16 * 1024 hidden_dim = 4 with deepspeed.zero.Init(enabled=zero_stage == 3, config_dict_or_path=ds_config): model = SimpleModel(hidden_dim, nlayers=78) see_memory_usage('pre-init', force=True) model, _, _, _ = deepspeed.initialize(model=model, config=ds_config) see_memory_usage('post-init', force=True) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device, dtype=torch.half) for batch in data_loader: model(batch[0], batch[1]) see_memory_usage('post-fwds', force=True) @pytest.mark.parametrize('optimizer_type', [None, Optimizer, Callable]) class TestClientOptimizer(DistributedTest): world_size = 1 def test(self, optimizer_type): def _optimizer_callable(params) -> Optimizer: return AdamW(params=params) hidden_dim = 10 model = SimpleModel(hidden_dim) config_dict = {'train_batch_size': 1} if optimizer_type is None: client_optimizer = None config_dict['optimizer'] = {'type': ADAM_OPTIMIZER} elif optimizer_type is Optimizer: client_optimizer = Adam(model.parameters()) else: client_optimizer = _optimizer_callable _, ds_optimizer, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=list(model.parameters()), optimizer=client_optimizer) if client_optimizer is None: assert isinstance(ds_optimizer, FusedAdam) elif isinstance(client_optimizer, Optimizer): assert ds_optimizer == client_optimizer else: assert isinstance(ds_optimizer, AdamW) @pytest.mark.parametrize('client_parameters', [True, False]) class TestConfigOptimizer(DistributedTest): world_size = 1 def test(self, client_parameters): ds_config = {"train_batch_size": 1, "optimizer": {"type": "Adam", "params": {"lr": 0.001}}} hidden_dim = 10 model = SimpleModel(hidden_dim) if client_parameters: model_parameters = list(model.parameters()) else: model_parameters = None _, ds_optimizer, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model_parameters) assert isinstance(ds_optimizer, FusedAdam) @pytest.mark.parametrize('optimizer_extension', ['zero1', 'zero2', 'amp', None]) @pytest.mark.parametrize('model_dtype', ['fp16', 'bf16', 'fp32']) @pytest.mark.parametrize('grad_accum_dtype', [None, 'fp16', 'bf16', 'fp32']) class TestOptimizerImplementation(DistributedTest): world_size = 1 reuse_dist_env = True def test(self, optimizer_extension, model_dtype, grad_accum_dtype): if optimizer_extension == 'zero1': zero_stage = 1 elif optimizer_extension == 'zero2': 
zero_stage = 2 else: zero_stage = 0 amp = (optimizer_extension == 'amp') fp16 = (model_dtype == 'fp16') bf16 = (model_dtype == 'bf16') # Skip checks if bf16 and not bf16_required_version_check(): pytest.skip( "DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly" ) if amp and not required_amp_check(): pytest.skip("Amp is not installed can't run amp check") # Config declaration ds_config = { "train_batch_size": 1, 'fp16': { 'enabled': fp16 }, 'bf16': { 'enabled': bf16 }, 'amp': { 'enabled': amp }, 'zero_optimization': { "stage": zero_stage }, "data_types": { "grad_accum_dtype": grad_accum_dtype }, "optimizer": { "type": "Adam", "params": { "lr": 0.001 } } } key = (optimizer_extension, model_dtype, grad_accum_dtype) # Enumerate supported configurations is_supported = {} # ZeRO 1 Wrapper is_supported[('zero1', 'fp16', None)] = True is_supported[('zero1', 'fp16', 'fp16')] = True is_supported[('zero1', 'bf16', None)] = True is_supported[('zero1', 'bf16', 'bf16')] = True is_supported[('zero1', 'bf16', 'fp32')] = True is_supported[('zero1', 'fp32', None)] = True is_supported[('zero1', 'fp32', 'fp32')] = True # ZeRO 2 Wrapper is_supported[('zero2', 'fp16', None)] = True is_supported[('zero2', 'fp16', 'fp16')] = True is_supported[('zero2', 'bf16', None)] = True is_supported[('zero2', 'bf16', 'bf16')] = True is_supported[('zero2', 'fp32', None)] = True is_supported[('zero2', 'fp32', 'fp32')] = True # Amp Wrapper is_supported[('amp', 'fp32', None)] = True is_supported[('amp', 'fp32', 'fp32')] = True # FP16 Wrapper is_supported[(None, 'fp16', None)] = True is_supported[(None, 'fp16', 'fp16')] = True # BF16 Wrapper is_supported[(None, 'bf16', 'fp32')] = True is_supported[(None, 'bf16', None)] = True # No Wrapper is_supported[(None, 'fp32', None)] = True is_supported[(None, 'fp32', 'fp32')] = True hidden_dim = 10 model = SimpleModel(hidden_dim) model_parameters = list(model.parameters()) if key in is_supported: _, ds_optimizer, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model_parameters) assert True else: with pytest.raises(NotImplementedError): _, ds_optimizer, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model_parameters) @pytest.mark.parametrize("scheduler_type", [None, _LRScheduler, Callable]) @pytest.mark.parametrize("optimizer_type", [None, Optimizer, Callable]) class TestClientLrScheduler(DistributedTest): world_size = 1 def test(self, scheduler_type, optimizer_type): def _my_lambda(epoch): return epoch // 10 def _optimizer_callable(params) -> Optimizer: return torch.optim.AdamW(params=params) def _lr_scheduler_callable(optimizer) -> _LRScheduler: return LambdaLR(optimizer, _my_lambda) hidden_dim = 10 model = SimpleModel(hidden_dim) config_dict = {'train_batch_size': 1} client_optimizer = None client_scheduler = None if optimizer_type is None: config_dict['optimizer'] = {'type': ADAM_OPTIMIZER} elif optimizer_type is Optimizer: client_optimizer = torch.optim.Adam(model.parameters()) else: client_optimizer = _optimizer_callable if scheduler_type is None: config_dict['scheduler'] = {'type': WARMUP_LR, 'params': {}} elif scheduler_type == _LRScheduler: if isinstance(client_optimizer, Optimizer): client_scheduler = LambdaLR(client_optimizer, _my_lambda) else: # Verify invalid combination is correctly handled client_scheduler = LambdaLR(torch.optim.Adam(model.parameters()), _my_lambda) else: client_scheduler = _lr_scheduler_callable if isinstance(client_scheduler, 
_LRScheduler) and not isinstance(client_optimizer, Optimizer): with pytest.raises(AssertionError): _, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=list(model.parameters()), optimizer=client_optimizer, lr_scheduler=client_scheduler) else: _, _, _, ds_lr_scheduler = deepspeed.initialize(config=config_dict, model=model, model_parameters=list(model.parameters()), optimizer=client_optimizer, lr_scheduler=client_scheduler) if client_scheduler is None: assert isinstance(ds_lr_scheduler, WarmupLR) elif isinstance(client_scheduler, _LRScheduler): assert ds_lr_scheduler == client_scheduler else: assert isinstance(ds_lr_scheduler, LambdaLR)
10,634
37.813869
136
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/activation_checkpointing/test_activation_checkpointing.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team # TODO: add tests with model parallelism for activation partitioning and other features. import pytest import torch import deepspeed from deepspeed.accelerator import get_accelerator from copy import deepcopy from unit.common import DistributedTest ckpt = deepspeed.checkpointing.checkpoint def _compute(module, *inputs, do_checkpoint=False): if do_checkpoint: outputs = ckpt(module, *inputs) else: outputs = module(*inputs) if torch.is_tensor(outputs): outputs = (outputs, ) sum(o.sum() for o in outputs if torch.is_tensor(o) and o.requires_grad).backward() grads = [p.grad for p in module.parameters()] input_grads = [inp.grad for inp in inputs if torch.is_tensor(inp)] return { 'outputs': outputs, 'module_grads': grads, 'input_grads': input_grads, } def _prep_inputs(*inputs): _inputs = [] for inp in inputs: inp = deepcopy(inp) if torch.is_tensor(inp): inp = inp.to(get_accelerator().device_name()) _inputs.append(inp) return tuple(_inputs) def _match_outputs(ref, tgt): assert type(ref) == type(tgt) if type(ref) in [list, tuple]: for x, y in zip(ref, tgt): _match_outputs(x, y) elif not torch.is_tensor(ref): assert ref == tgt elif ref.is_floating_point(): assert torch.allclose(ref, tgt) else: assert torch.equal(ref, tgt) def _test_activation_checkpoint(module, *inputs): # Move to device module.to(get_accelerator().device_name()) # Get rid of dropouts until we fork the RNG between tests. module.eval() module_ = deepcopy(module) inputs_ = _prep_inputs(*inputs) base = _compute(module_, *inputs_, do_checkpoint=False) module_ = deepcopy(module) inputs_ = _prep_inputs(*inputs) test = _compute(module_, *inputs_, do_checkpoint=True) for group in base.keys(): for b, t in zip(base[group], test[group]): _match_outputs(b, t) def _test_activation_checkpoint_ordering(module, expected_ordering, *inputs): # Move to device module.to(get_accelerator().device_name()) # Get rid of dropouts until we fork the RNG between tests. 
module.eval() module_ = deepcopy(module) inputs_ = _prep_inputs(*inputs) test = _compute(module_, *inputs_, do_checkpoint=True) outputs = test['outputs'] test_ordering = [] for item in outputs: if type(item) in [list, tuple]: test_ordering += [torch.is_tensor(t) for t in item] else: test_ordering += [torch.is_tensor(item)] assert expected_ordering == test_ordering # # Helpers # class MaskedLinear(torch.nn.Linear): def forward(self, x, mask): out = super().forward(x) if mask.is_floating_point(): out = out * mask else: # must cast BoolTensor in older torch versions out = out * mask.type_as(out) return out class MaskedLinearSeq(MaskedLinear): """Tests pipeline modules by also returning the mask.""" def forward(self, x, mask): return super().forward(x, mask), mask class MaskedLinearSeqDup(MaskedLinearSeq): """MaskedLinearSeq, but with more outputs than inputs and in a different order.""" def forward(self, x, mask): dup = x.clone().detach() * 1.38 # just an arbitrary scaling x, mask = super().forward(x, mask) return dup, x, mask class DropMaskLinear(torch.nn.Linear): def forward(self, x, mask): return super().forward(x) class LinearNonTensorInput(torch.nn.Linear): def forward(self, x, non_tensor_input): return super().forward(x) class LinearNonTensorOutput(torch.nn.Linear): def __init__(self, non_tensor_output): super().__init__(HIDDEN_DIM, HIDDEN_DIM) self.non_tensor_output = non_tensor_output def forward(self, x): out = super().forward(x) return out, self.non_tensor_output HIDDEN_DIM = 20 def _mixed_mask(size=HIDDEN_DIM): entries = torch.randn(size) mask = torch.where(entries > 0, torch.ones(size), torch.zeros(size)) mask = mask.bool() return mask def _bool_to_float(btensor, dtype=torch.float32): """Converts a torch.BoolTensor to an equivalent dtype. """ ones = torch.ones(size=btensor.size(), dtype=dtype) zeros = torch.zeros(size=btensor.size(), dtype=dtype) return torch.where(btensor, ones, zeros) # # Tests # # both bool and float are important, as bool is not differentiable @pytest.mark.parametrize('mask', [ _mixed_mask(), _bool_to_float(_mixed_mask()), ]) class TestActivationCheckpoint(DistributedTest): world_size = 1 def test_ckpt_inputs1_outputs1(self, mask): module = torch.nn.Linear(HIDDEN_DIM, HIDDEN_DIM) inputs = torch.rand(HIDDEN_DIM) inputs.requires_grad = True _test_activation_checkpoint(module, inputs) def test_ckpt_inputs2_outputs1(self, mask): module = MaskedLinear(HIDDEN_DIM, HIDDEN_DIM) inputs = torch.rand(HIDDEN_DIM) inputs.requires_grad = True _test_activation_checkpoint(module, inputs, mask) def test_ckpt_inputs2_outputs2(self, mask): module = MaskedLinearSeq(HIDDEN_DIM, HIDDEN_DIM) inputs = torch.rand(HIDDEN_DIM) inputs.requires_grad = True _test_activation_checkpoint(module, inputs, mask) def test_ckpt_inputs2_outputs3(self, mask): module = MaskedLinearSeqDup(HIDDEN_DIM, HIDDEN_DIM) inputs = torch.rand(HIDDEN_DIM) inputs.requires_grad = True _test_activation_checkpoint(module, inputs, mask) def test_ckpt_arg_none(self, mask): module = DropMaskLinear(HIDDEN_DIM, HIDDEN_DIM) inputs = (torch.rand(HIDDEN_DIM), None) inputs[0].requires_grad = True _test_activation_checkpoint(module, *inputs) @pytest.mark.parametrize('non_tensor', [None, 2, True, (None, 2.5), (None, True, torch.randn(HIDDEN_DIM))]) class TestCheckpointNonTensor(DistributedTest): world_size = 1 def test_ckpt_non_tensor_input(self, non_tensor): module = LinearNonTensorInput(HIDDEN_DIM, HIDDEN_DIM) inputs = torch.rand(HIDDEN_DIM) inputs.requires_grad = True _test_activation_checkpoint(module, inputs, non_tensor) 
def test_ckpt_non_tensor_output(self, non_tensor): module = LinearNonTensorOutput(non_tensor) inputs = torch.rand(HIDDEN_DIM) inputs.requires_grad = True _test_activation_checkpoint(module, inputs) @pytest.mark.parametrize('non_tensor_output', [ None, (torch.randn(HIDDEN_DIM), 2.5), (None, torch.randn(HIDDEN_DIM), True), (None, True, torch.randn(HIDDEN_DIM)) ]) class TestCheckpointNonTensorOutputOrdering(DistributedTest): world_size = 1 def test_ckpt_non_tensor_output_ordering(self, non_tensor_output): module = LinearNonTensorOutput(non_tensor_output) inputs = torch.rand(HIDDEN_DIM) inputs.requires_grad = True # First return is a tensor ordering = [True] if type(non_tensor_output) in [list, tuple]: ordering += [torch.is_tensor(t) for t in non_tensor_output] else: ordering += [torch.is_tensor(non_tensor_output)] _test_activation_checkpoint_ordering(module, ordering, inputs)
7,460
27.918605
118
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/comm/test_coalesced_collectives.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team
"""
unit tests for coalesced collectives
"""

import torch
import deepspeed.comm as dist
from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced
from deepspeed.accelerator import get_accelerator

from unit.common import DistributedTest


class TestReduceScatterCoalesced(DistributedTest):
    world_size = 2

    def test_single_input(self):
        input = torch.full((6, ), dist.get_rank(), dtype=torch.half, device=get_accelerator().current_device_name())

        (output, ) = reduce_scatter_coalesced([input], dist.get_world_group())

        assert output.shape == (3, )
        assert torch.allclose(output, torch.full_like(output, 0.5))

    def test_two_inputs(self):
        tensor_kwargs = {"device": get_accelerator().current_device_name(), "dtype": torch.half}
        inputs = [
            dist.get_rank() * torch.arange(0, 6, **tensor_kwargs),
            dist.get_rank() * torch.arange(6, 9, **tensor_kwargs),
        ]

        output1, output2 = reduce_scatter_coalesced(inputs, dist.get_world_group())

        if dist.get_rank() == 0:
            assert output1.shape == (3, )
            assert torch.allclose(output1, torch.arange(0, 3, **tensor_kwargs) / 2)
            assert output2.shape == (2, )
            assert torch.allclose(output2, torch.arange(6, 8, **tensor_kwargs) / 2)
        elif dist.get_rank() == 1:
            assert output1.shape == (3, )
            assert torch.allclose(output1, torch.arange(3, 6, **tensor_kwargs) / 2)
            assert output2.shape == (1, )
            assert torch.allclose(output2, torch.arange(8, 9, **tensor_kwargs) / 2)


class TestReduceScatterCoalescedTensorSmallerThanWorldSize(DistributedTest):
    world_size = 2

    def test(self):
        input = torch.zeros((1, ), dtype=torch.half, device=get_accelerator().current_device_name())

        (output, ) = reduce_scatter_coalesced([input], dist.get_world_group())

        if dist.get_rank() == 0:
            assert output.shape == (1, )
            assert torch.allclose(output, torch.zeros_like(output))
        elif dist.get_rank() == 1:
            assert output.shape == (0, )
2,235
35.064516
116
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/sparse_tensor/test_averaging_sparse_gradients.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
import deepspeed
from unit.common import DistributedTest
from unit.util import skip_on_arch


class Model(torch.nn.Module):

    def __init__(self):
        super().__init__()
        self.emb = torch.nn.EmbeddingBag(10, 3, mode="sum", sparse=True)
        self.linear = torch.nn.Linear(3, 1)

    def forward(self, x, offsets):
        return self.linear(self.emb(x, offsets))


class Adam(torch.optim.Optimizer):

    def __init__(self, dense_params, sparse_params):
        super().__init__(dense_params + sparse_params, defaults={})
        self.adam = torch.optim.Adam(dense_params)
        self.adam_sparse = torch.optim.SparseAdam(sparse_params)

    @torch.no_grad()
    def step(self, closure=None):
        loss_1 = self.adam.step(closure)
        loss_2 = self.adam_sparse.step(closure)

        if loss_1 is not None and loss_2 is not None:
            return loss_1 + loss_2
        return loss_1 or loss_2


def get_model_optimizer():
    torch.manual_seed(0)
    model = Model()
    optimizer = Adam(list(model.linear.parameters()), list(model.emb.parameters()))
    return model, optimizer


def get_data(device):
    x = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long, device=device)
    offsets = torch.tensor([0, 4], dtype=torch.long, device=device)
    y = torch.tensor([[1.0], [0.0]], device=device)
    return x, offsets, y


class TestSparseAdam(DistributedTest):
    world_size = 2

    def test(self):
        skip_on_arch(min_arch=7)

        config_dict = {"train_batch_size": 2, "steps_per_print": 1, "sparse_gradients": True}
        model, optimizer = get_model_optimizer()
        loss = torch.nn.BCEWithLogitsLoss()
        engine, _, _, _ = deepspeed.initialize(model=model, optimizer=optimizer, config=config_dict)

        x, offsets, y = get_data(engine.device)

        engine.gradient_average = True
        res = engine(x, offsets)
        engine.backward(loss(res, y))

        averaged_grads = {}
        for k, v in engine.named_parameters():
            grad = v.grad.to_dense() if v.grad.is_sparse else v.grad
            averaged_grads[k] = grad
            v.grad = None

        engine.gradient_average = False
        res = engine(x, offsets)
        engine.backward(loss(res, y))

        for k, v in engine.named_parameters():
            grad = v.grad.to_dense() if v.grad.is_sparse else v.grad
            assert torch.allclose(grad, averaged_grads[k] * engine.world_size)
2,543
29.285714
100
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/sparse_tensor/test_csr.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
import random
from deepspeed.runtime.sparse_tensor import SparseTensor


def test_csr_addition_self():
    row_count = 10
    random.seed(1234)

    x = torch.ones(1, 5)
    for i in range(row_count - 1):
        if random.random() > 0.75:
            x = torch.cat([x, torch.ones(1, 5)])
        else:
            x = torch.cat([x, torch.zeros(1, 5)])
    dense_x = x.clone()
    cx = SparseTensor(x)

    assert torch.all(dense_x == cx.to_dense())

    cx.add(cx)
    assert torch.all(dense_x + dense_x == cx.to_dense())


def test_csr_addition_different():
    row_count = 10
    random.seed(1234)

    x = torch.ones(1, 5)
    for i in range(row_count - 1):
        if random.random() > 0.75:
            x = torch.cat([x, torch.ones(1, 5)])
        else:
            x = torch.cat([x, torch.zeros(1, 5)])
    dense_x = x.clone()
    cx = SparseTensor(x)

    y = torch.ones(1, 5)
    for i in range(row_count - 1):
        if random.random() > 0.75:
            y = torch.cat([y, torch.ones(1, 5)])
        else:
            y = torch.cat([y, torch.zeros(1, 5)])
    dense_y = y.clone()
    cy = SparseTensor(y)

    dense_sum = dense_x + dense_y
    cx.add(cy)

    assert torch.all(dense_sum == cx.to_dense())
1,326
22.696429
56
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/sparse_tensor/test_sparse_grads.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
import deepspeed
from unit.common import DistributedTest
import deepspeed.utils.groups as groups


class Model(torch.nn.Module):

    def __init__(self):
        super().__init__()
        self.emb = torch.nn.EmbeddingBag(10, 3, mode="sum", sparse=True)
        self.linear = torch.nn.Linear(3, 1)

    def forward(self, x, offsets):
        return self.linear(self.emb(x, offsets))


class Adam(torch.optim.Optimizer):

    def __init__(self, dense_params, sparse_params):
        super().__init__(dense_params + sparse_params, defaults={})
        self.adam = torch.optim.Adam(dense_params)
        self.adam_sparse = torch.optim.SparseAdam(sparse_params)

    @torch.no_grad()
    def step(self, closure=None):
        loss_1 = self.adam.step(closure)
        loss_2 = self.adam_sparse.step(closure)

        if loss_1 is not None and loss_2 is not None:
            return loss_1 + loss_2
        return loss_1 or loss_2


class TestSparseAdam(DistributedTest):
    world_size = 2

    def test(self):
        config_dict = {"train_batch_size": 2, "steps_per_print": 1, "sparse_gradients": True}
        model = Model()
        optimizer = Adam(list(model.linear.parameters()), list(model.emb.parameters()))
        engine, _, _, _ = deepspeed.initialize(model=model, optimizer=optimizer, config=config_dict)
        loss = torch.nn.BCEWithLogitsLoss()
        x = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long, device=engine.device)
        offsets = torch.tensor([0, 4], dtype=torch.long, device=engine.device)
        y = torch.tensor([[1.0], [0.0]], device=engine.device)
        res = engine(x, offsets)
        engine.backward(loss(res, y))
        engine.step()

        results = [engine.all_gather_scalar(i, groups._get_data_parallel_group()) for i in model.emb.parameters()]
        for res in results:
            assert torch.allclose(res[0], res[1])
1,988
31.606557
114
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/half_precision/test_bf16.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
import deepspeed
import pytest
from deepspeed.ops.adam import FusedAdam
from unit.common import DistributedTest
from deepspeed.ops.op_builder import CPUAdamBuilder
from unit.simple_model import SimpleModel, SimpleOptimizer, random_dataloader
from unit.util import bf16_required_version_check
from deepspeed import comm as dist


class TestAdamBF16ZeroOneCycleCompatibility(DistributedTest):
    world_size = 1

    def test(self, zero_stage=2, use_cpu_offload=False):
        if not bf16_required_version_check():
            pytest.skip(
                " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
            )

        if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
            pytest.skip("cpu-adam is not compatible")

        config_dict = {
            "train_micro_batch_size_per_gpu": 1,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 0.00015
                }
            },
            "scheduler": {
                "type": "OneCycle",
                "params": {
                    "cycle_first_step_size": 16000,
                    "cycle_first_stair_count": 8000,
                    "decay_step_size": 16000,
                    "cycle_min_lr": 1e-06,
                    "cycle_max_lr": 3e-05,
                    "decay_lr_rate": 1e-07,
                    "cycle_min_mom": 0.85,
                    "cycle_max_mom": 0.99,
                    "decay_mom_rate": 0.0
                }
            },
            "fp16": {
                "enabled": False
            },
            "bf16": {
                "enabled": True
            },
            "zero_optimization": {
                "stage": zero_stage,
                "cpu_offload": use_cpu_offload
            }
        }
        hidden_dim = 10

        model = SimpleModel(hidden_dim)
        model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
        data_loader = random_dataloader(model=model,
                                        total_samples=50,
                                        hidden_dim=hidden_dim,
                                        device=model.device,
                                        dtype=torch.bfloat16)
        for n, batch in enumerate(data_loader):
            loss = model(batch[0], batch[1])
            model.backward(loss)
            model.step()


class TestZeroAllowUntestedOptimizer(DistributedTest):
    world_size = 1

    def test(self, zero_stage=2, use_cpu_offload=False):
        if not bf16_required_version_check():
            pytest.skip(
                " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
            )

        if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
            pytest.skip("cpu-adam is not compatible")

        config_dict = {
            "train_micro_batch_size_per_gpu": 4,
            "steps_per_print": 1,
            "fp16": {
                "enabled": False,
            },
            "bf16": {
                "enabled": True
            },
            "zero_optimization": {
                "stage": zero_stage,
                "cpu_offload": use_cpu_offload
            },
            "zero_allow_untested_optimizer": False
        }
        hidden_dim = 10

        model = SimpleModel(hidden_dim)
        optimizer = SimpleOptimizer(model.parameters())
        with pytest.raises(AssertionError):
            model, optim, _, _ = deepspeed.initialize(config=config_dict,
                                                      model=model,
                                                      optimizer=optimizer,
                                                      model_parameters=model.parameters())


class TestZeroEmptyPartition(DistributedTest):
    world_size = 3

    def test(self, zero_stage=2, use_cpu_offload=False):
        if not bf16_required_version_check():
            pytest.skip(
                " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
            )

        if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
            pytest.skip("cpu-adam is not compatible")
        if zero_stage == 3:
            pytest.skip("skip for now")

        config_dict = {
            "train_micro_batch_size_per_gpu": 1,
            "gradient_accumulation_steps": 1,
            "fp16": {
                "enabled": False
            },
            "bf16": {
                "enabled": True
            },
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 0.00015
                }
            },
            "zero_optimization": {
                "stage": zero_stage,
                "cpu_offload": use_cpu_offload,
                "reduce_bucket_size": 100,
                "allgather_bucket_size": 100
            }
        }
        hidden_dim = 1

        model = SimpleModel(hidden_dim)

        # Ensure model has 2 parameters, to cause empty partition with DP=3
        assert len(list(model.parameters())) == 2
        model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())

        # Now make sure things work..
        data_loader = random_dataloader(model=model,
                                        total_samples=1,
                                        hidden_dim=hidden_dim,
                                        device=model.device,
                                        dtype=torch.bfloat16)
        for n, batch in enumerate(data_loader):
            loss = model(batch[0], batch[1])
            model.backward(loss)
            model.step()


@pytest.mark.parametrize("optimizer_constructor", [torch.optim.Adam, FusedAdam])
class TestZeroSupportedClientOptimizer(DistributedTest):
    world_size = 1

    def test(self, optimizer_constructor, zero_stage=2):
        if not bf16_required_version_check():
            pytest.skip(
                " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
            )

        config_dict = {
            "train_micro_batch_size_per_gpu": 2,
            "steps_per_print": 1,
            "fp16": {
                "enabled": False
            },
            "bf16": {
                "enabled": True
            },
            "zero_optimization": {
                "stage": zero_stage
            }
        }
        hidden_dim = 10

        model = SimpleModel(hidden_dim)
        client_optimizer = optimizer_constructor(params=model.parameters())
        model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=client_optimizer)


class TestZero2ReduceScatterOff(DistributedTest):
    world_size = 2

    def test(self):
        if not bf16_required_version_check():
            pytest.skip(
                " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
            )

        config_dict = {
            "train_micro_batch_size_per_gpu": 2,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 0.00015
                }
            },
            "gradient_clipping": 1.0,
            "zero_optimization": {
                "stage": 2,
                "contiguous_gradients": True,
                "allgather_bucket_size": 2000000000,
                "reduce_bucket_size": 200000000,
                "overlap_comm": False,
                "reduce_scatter": False
            },
            "fp16": {
                "enabled": False
            },
            "bf16": {
                "enabled": True
            }
        }
        hidden_dim = 10

        model = SimpleModel(hidden_dim)
        model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
        data_loader = random_dataloader(model=model,
                                        total_samples=50,
                                        hidden_dim=hidden_dim,
                                        device=model.device,
                                        dtype=torch.bfloat16)
        for n, batch in enumerate(data_loader):
            loss = model(batch[0], batch[1])
            model.backward(loss)
            model.step()


class TestZeroEmptyGrad(DistributedTest):
    world_size = 1

    def test(self, stage=2):
        if not bf16_required_version_check():
            pytest.skip(
                " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
            )

        config_dict = {
            "train_micro_batch_size_per_gpu": 1,
            "steps_per_print": 1,
            "fp16": {
                "enabled": False
            },
            "bf16": {
                "enabled": True
            },
            "zero_optimization": {
                "stage": stage
            }
        }
        hidden_dim = 10

        model = SimpleModel(hidden_dim)
        optimizer = torch.optim.Adam(model.parameters())
        model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer)
        data_loader = random_dataloader(model=model,
                                        total_samples=50,
                                        hidden_dim=hidden_dim,
                                        device=model.device,
                                        dtype=torch.bfloat16)
        for n, batch in enumerate(data_loader):
            loss = model(batch[0], batch[1])
            model.backward(loss)
            model.step()


@pytest.mark.parametrize("comp_type", [torch.float16, torch.bfloat16, torch.float], ids=["fp16", "bfp16", "fp32"])
@pytest.mark.parametrize("comm_type", [torch.float16, torch.bfloat16, None], ids=["fp16", "bfp16", "default"])
class TestZeroDtypeCocktail(DistributedTest):
    world_size = 2

    def test(self, comp_type, comm_type):
        if comp_type == torch.bfloat16 or comm_type == torch.bfloat16:
            if not bf16_required_version_check():
                pytest.skip(
                    " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
                )

        type_str = {torch.float16: "fp16", torch.bfloat16: "bfp16"}

        config_dict = {
            "train_micro_batch_size_per_gpu": 2,
            "steps_per_print": 1,
            "fp16": {
                "enabled": comp_type == torch.float16
            },
            "bf16": {
                "enabled": comp_type == torch.bfloat16
            },
            "zero_optimization": {
                "stage": 2
            },
        }
        if comm_type is not None:
            config_dict["communication_data_type"] = type_str[comm_type]
        else:
            comm_type = comp_type
        hidden_dim = 10

        model = SimpleModel(hidden_dim)
        optimizer = torch.optim.Adam(model.parameters())
        model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer)
        data_loader = random_dataloader(model=model,
                                        total_samples=2,
                                        hidden_dim=hidden_dim,
                                        device=model.device,
                                        dtype=comp_type)

        def custom_reduce(tensor, dst, op=dist.ReduceOp.SUM, group=None, async_op=False):
            assert tensor.dtype == comm_type
            return orig_torch_reduce(tensor, dst, op, group, async_op)

        orig_torch_reduce = dist.reduce
        dist.reduce = custom_reduce
        for n, batch in enumerate(data_loader):
            loss = model(batch[0], batch[1])
            model.backward(loss)
            model.step()
        dist.reduce = orig_torch_reduce
12,355
35.023324
141
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/half_precision/test_fp8.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
import deepspeed
import pytest
from unit.common import DistributedTest
from unit.util import skip_on_arch

try:
    import transformer_engine.pytorch as transformer_engine
    from transformer_engine.common import recipe
except ImportError:
    pytest.skip("Transformer Engine package is missing, skipping tests", allow_module_level=True)


@pytest.mark.parametrize("base_datatype", ["fp16", "bf16", "fp32"])
class TestFp8ComposabilityAcrossZero(DistributedTest):
    world_size = 1

    def test(self, base_datatype):
        skip_on_arch(min_arch=9)

        def run_zero(stage, model_dtype):
            num_batches = 128
            batch_size = 16
            hidden_dim = 768
            # Have to set seed before model
            torch.random.manual_seed(42)
            enable_fp16 = model_dtype == torch.float16
            enable_bf16 = model_dtype == torch.bfloat16
            # TransformerEngine Model
            model = transformer_engine.Linear(hidden_dim, hidden_dim, bias=True, params_dtype=model_dtype)
            # Create FP8 recipe. Note: All input args are optional.
            fp8_recipe = recipe.DelayedScaling(fp8_format=recipe.Format.HYBRID,
                                               amax_history_len=16,
                                               amax_compute_algo="max")
            config = {
                "train_batch_size": batch_size,
                "gradient_accumulation_steps": 1,
                "optimizer": {
                    "type": "Adam",
                    "params": {
                        "lr": 0.00001
                    }
                },
                "zero_optimization": {
                    "stage": stage
                },
                "fp16": {
                    "enabled": enable_fp16,
                    "loss_scale": 0.1
                },
                "bf16": {
                    "enabled": enable_bf16
                }
            }
            # Init DeepSpeed
            model, optimizer, _, _ = deepspeed.initialize(args=None,
                                                          model=model,
                                                          model_parameters=model.parameters(),
                                                          config=config)
            batches = torch.randn(num_batches, batch_size, hidden_dim, device=model.device, dtype=model_dtype)

            for batch in batches:
                # Enables autocasting for the forward pass
                with transformer_engine.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
                    out = model(batch)
                loss = out.mean()
                model.backward(loss)
                model.step()
            return loss

        if base_datatype == "fp16":
            model_dtype = torch.float16
        elif base_datatype == "bf16":
            model_dtype = torch.bfloat16
        else:
            model_dtype = torch.float32

        # config
        zero_stage = [0, 1, 2, 3]
        losses = []
        for stage in zero_stage:
            loss = run_zero(stage, model_dtype)
            losses.append(loss)
        all_equal = all(torch.allclose(loss, losses[0], 1e-07, 1e-05) for loss in losses)
        assert (all_equal)
3,349
35.413043
110
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/half_precision/test_dynamic_loss_scale.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
import deepspeed
import numpy as np
from unit.common import DistributedTest
from unit.simple_model import SimpleModel


def run_model_step(model, gradient_list):
    for value in gradient_list:
        for p in model.parameters():
            p.grad = torch.empty_like(p, dtype=p.dtype)
            p.grad.fill_(value)
        model.step()


class TestFused(DistributedTest):
    world_size = 1

    def test_no_overflow(self):
        config_dict = {
            "train_batch_size": 1,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 0.00015
                }
            },
            "fp16": {
                "enabled": True,
                "loss_scale": 0,
                "initial_scale_power": 8,
                "loss_scale_window": 2
            }
        }
        hidden_dim = 1
        model = SimpleModel(hidden_dim)
        model, optim, _, _ = deepspeed.initialize(config=config_dict,
                                                  model=model,
                                                  model_parameters=model.parameters())
        expected_loss_scale = 2**8
        expected_scale_window = 2
        # Ensure the dynamic loss scaler is correctly configured.
        assert optim.dynamic_loss_scale == True
        assert optim.cur_scale == expected_loss_scale
        assert optim.scale_window == expected_scale_window

        for i, value in enumerate(np.random.uniform(-0.1, 0.1, 10)):
            run_model_step(model, [value])
            assert optim.cur_scale == expected_loss_scale
            assert optim.cur_iter == (i + 1)
            if optim.cur_iter % expected_scale_window == 0:
                expected_loss_scale *= 2

    def test_all_overflow(self):
        config_dict = {
            "train_batch_size": 1,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 0.00015
                }
            },
            "fp16": {
                "enabled": True,
                "loss_scale": 0,
                "initial_scale_power": 4,
                "loss_scale_window": 2
            }
        }
        hidden_dim = 1
        model = SimpleModel(hidden_dim)
        model, optim, _, _ = deepspeed.initialize(config=config_dict,
                                                  model=model,
                                                  model_parameters=model.parameters())
        expected_loss_scale = 2**4
        # Ensure the dynamic loss scaler is correctly configured.
        assert optim.dynamic_loss_scale == True
        assert optim.cur_scale == expected_loss_scale

        overflow_gradients = [float('inf'), float('-inf')] + [float('nan')] * 6
        for i, value in enumerate(overflow_gradients):
            run_model_step(model, [value])
            expected_loss_scale = max(expected_loss_scale / 2, 1)
            assert optim.cur_scale == expected_loss_scale
            assert optim.cur_iter == (i + 1)

    def test_some_overflow(self):
        config_dict = {
            "train_batch_size": 1,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 0.00015
                }
            },
            "fp16": {
                "enabled": True,
                "loss_scale": 0,
                "initial_scale_power": 8,
                "loss_scale_window": 2
            }
        }
        hidden_dim = 1
        model = SimpleModel(hidden_dim)
        model, optim, _, _ = deepspeed.initialize(config=config_dict,
                                                  model=model,
                                                  model_parameters=model.parameters())
        expected_loss_scale = 2**8
        expected_scale_window = 2
        expected_iteration = 0
        # Ensure the dynamic loss scaler is correctly configured.
        assert optim.dynamic_loss_scale == True
        assert optim.cur_scale == expected_loss_scale
        assert optim.scale_window == expected_scale_window

        # Run model with overflows to decrease scale
        overflow_gradients = [float('inf'), float('nan')]
        expected_iteration += len(overflow_gradients)
        run_model_step(model, overflow_gradients)
        expected_loss_scale /= (2**len(overflow_gradients))
        assert optim.cur_scale == expected_loss_scale
        assert optim.cur_iter == expected_iteration

        # Run model scale_window + 1 times to increase scale once
        normal_gradients = np.random.uniform(-0.1, 0.1, expected_scale_window + 1)
        expected_iteration += len(normal_gradients)
        run_model_step(model, normal_gradients)
        expected_loss_scale *= 2
        assert optim.cur_scale == expected_loss_scale
        assert optim.cur_iter == expected_iteration

        # Run model with overflows to decrease scale
        overflow_gradients = [float('inf')]
        expected_iteration += len(overflow_gradients)
        run_model_step(model, overflow_gradients)
        expected_loss_scale /= (2**len(overflow_gradients))
        assert optim.cur_scale == expected_loss_scale
        assert optim.cur_iter == expected_iteration


class TestUnfused(DistributedTest):
    world_size = 1

    def test_no_overflow(self):
        config_dict = {
            "train_batch_size": 1,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Lamb",
                "params": {
                    "lr": 0.00015
                }
            },
            "fp16": {
                "enabled": True,
                "loss_scale": 0,
                "initial_scale_power": 8,
                "loss_scale_window": 2
            }
        }
        hidden_dim = 1
        model = SimpleModel(hidden_dim)
        model, optim, _, _ = deepspeed.initialize(config=config_dict,
                                                  model=model,
                                                  model_parameters=model.parameters())
        expected_loss_scale = 2**8
        expected_scale_window = 2
        # Ensure the dynamic loss scaler is correctly configured.
        assert optim.dynamic_loss_scale == True
        assert optim.cur_scale == expected_loss_scale
        assert optim.scale_window == expected_scale_window

        for i, value in enumerate(np.random.uniform(-0.1, 0.1, 10)):
            run_model_step(model, [value])
            assert optim.cur_scale == expected_loss_scale
            assert optim.cur_iter == (i + 1)
            if optim.cur_iter % expected_scale_window == 0:
                expected_loss_scale *= 2

    def test_all_overflow(self):
        config_dict = {
            "train_batch_size": 1,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Lamb",
                "params": {
                    "lr": 0.00015
                }
            },
            "fp16": {
                "enabled": True,
                "loss_scale": 0,
                "initial_scale_power": 4,
                "loss_scale_window": 2,
                "min_loss_scale": 0.25
            }
        }
        hidden_dim = 1
        model = SimpleModel(hidden_dim)
        model, optim, _, _ = deepspeed.initialize(config=config_dict,
                                                  model=model,
                                                  model_parameters=model.parameters())
        expected_loss_scale = 2**4
        expected_min_loss_scale = 0.25
        # Ensure the dynamic loss scaler is correctly configured.
        assert optim.dynamic_loss_scale == True
        assert optim.cur_scale == expected_loss_scale
        assert optim.min_loss_scale == expected_min_loss_scale

        overflow_gradients = [float('inf'), float('-inf')] + [float('nan')] * 6
        for i, value in enumerate(overflow_gradients):
            run_model_step(model, [value])
            expected_loss_scale = max(expected_loss_scale / 2, expected_min_loss_scale)
            assert optim.cur_scale == expected_loss_scale
            assert optim.cur_iter == (i + 1)

    def test_some_overflow(self):
        config_dict = {
            "train_batch_size": 1,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Lamb",
                "params": {
                    "lr": 0.00015
                }
            },
            "fp16": {
                "enabled": True,
                "loss_scale": 0,
                "initial_scale_power": 8,
                "loss_scale_window": 2
            }
        }
        hidden_dim = 1
        model = SimpleModel(hidden_dim)
        model, optim, _, _ = deepspeed.initialize(config=config_dict,
                                                  model=model,
                                                  model_parameters=model.parameters())
        expected_loss_scale = 2**8
        expected_scale_window = 2
        expected_iteration = 0
        # Ensure the dynamic loss scaler is correctly configured.
        assert optim.dynamic_loss_scale == True
        assert optim.cur_scale == expected_loss_scale
        assert optim.scale_window == expected_scale_window

        # Run model with overflows to decrease scale
        overflow_gradients = [float('inf'), float('nan')]
        expected_iteration += len(overflow_gradients)
        run_model_step(model, overflow_gradients)
        expected_loss_scale /= (2**len(overflow_gradients))
        assert optim.cur_scale == expected_loss_scale
        assert optim.cur_iter == expected_iteration

        # Run model scale_window + 1 times to increase scale once
        normal_gradients = np.random.uniform(-0.1, 0.1, expected_scale_window + 1)
        expected_iteration += len(normal_gradients)
        run_model_step(model, normal_gradients)
        expected_loss_scale *= 2
        assert optim.cur_scale == expected_loss_scale
        assert optim.cur_iter == expected_iteration

        # Run model with overflows to decrease scale
        overflow_gradients = [float('inf')]
        expected_iteration += len(overflow_gradients)
        run_model_step(model, overflow_gradients)
        expected_loss_scale /= (2**len(overflow_gradients))
        assert optim.cur_scale == expected_loss_scale
        assert optim.cur_iter == expected_iteration
9,932
35.653137
119
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/half_precision/test_fp16.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch import deepspeed.comm as dist import deepspeed import pytest from deepspeed.ops.adam import FusedAdam from unit.common import DistributedTest from unit.simple_model import SimpleModel, SimpleOptimizer, random_dataloader, SimpleMoEModel, sequence_dataloader from unit.util import required_torch_version from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import CPUAdamBuilder try: from apex import amp # noqa: F401 _amp_available = True except ImportError: _amp_available = False amp_available = pytest.mark.skipif(not _amp_available, reason="apex/amp is not installed") class TestLambFP32GradClip(DistributedTest): world_size = 2 def test(self): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Lamb", "params": { "lr": 0.00015 } }, "gradient_clipping": 1.0 } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device, dtype=torch.float) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() class TestLambFP16(DistributedTest): world_size = 2 def test__basic(self): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Lamb", "params": { "lr": 0.00015 } }, "gradient_clipping": 1.0, "fp16": { "enabled": True } } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() def test_empty_grad(self): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Lamb", "params": { "lr": 0.00015 } }, "gradient_clipping": 1.0, "fp16": { "enabled": True } } hidden_dim = 10 model = SimpleModel(hidden_dim, empty_grad=True) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() class TestAdamFP32EmptyGrad(DistributedTest): world_size = 2 def test(self): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 } }, "gradient_clipping": 1.0, "fp16": { "enabled": False } } hidden_dim = 10 model = SimpleModel(hidden_dim, empty_grad=True) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device, dtype=torch.float) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() class TestAdamwFP16Basic(DistributedTest): world_size = 1 def test(self): config_dict = {"train_batch_size": 1, "steps_per_print": 1, "fp16": {"enabled": True}} hidden_dim = 10 model = SimpleModel(hidden_dim) optimizer = torch.optim.AdamW(params=model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, 
optimizer=optimizer) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() class TestFP16OptimizerForMoE(DistributedTest): world_size = 2 def test_unfused_gradnorm(self, monkeypatch): if not required_torch_version(): pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly") config_dict = {"train_batch_size": 2, "steps_per_print": 1, "fp16": {"enabled": True}} hidden_dim = 10 def mock_unscale_and_clip_grads(total_norm, apply_scale=True): torch_norm_tensor = get_accelerator().FloatTensor([total_norm]) all_gather_results = [torch.zeros_like(torch_norm_tensor) for _ in range(dist.get_world_size())] dist.all_gather(all_gather_results, torch_norm_tensor) assert len(set([x.item() for x in all_gather_results])) == 1 return 1.0 # initialize MoE model = SimpleMoEModel(hidden_dim, ep_size=2) optimizer = torch.optim.AdamW(params=model.parameters()) engine, optimizer, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer, dist_init_required=False) monkeypatch.setattr(optimizer, 'unscale_and_clip_grads', mock_unscale_and_clip_grads) data_loader = sequence_dataloader(model=engine, total_samples=50, hidden_dim=hidden_dim, device=engine.device) for n, batch in enumerate(data_loader): loss = engine(batch[0], batch[1]) engine.backward(loss) engine.step() def test_fused_gradnorm(self, monkeypatch): if not required_torch_version(): pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly") config_dict = {"train_batch_size": 2, "steps_per_print": 1, "fp16": {"enabled": True}} hidden_dim = 10 def mock_unscale_and_clip_grads(grads_groups_flat, total_norm, apply_scale=True): torch_norm_tensor = get_accelerator().FloatTensor([total_norm]) all_gather_results = [torch.zeros_like(torch_norm_tensor) for _ in range(dist.get_world_size())] dist.all_gather(all_gather_results, torch_norm_tensor) assert len(set([x.item() for x in all_gather_results])) == 1 return 1.0 # initialize MoE model = SimpleMoEModel(hidden_dim, ep_size=2) # optimizer = torch.optim.AdamW(params=model.parameters()) optimizer = FusedAdam(params=model.parameters()) engine, optimizer, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer, dist_init_required=False) monkeypatch.setattr(optimizer, 'unscale_and_clip_grads', mock_unscale_and_clip_grads) data_loader = sequence_dataloader(model=engine, total_samples=50, hidden_dim=hidden_dim, device=engine.device) for n, batch in enumerate(data_loader): loss = engine(batch[0], batch[1]) engine.backward(loss) engine.step() @pytest.mark.parametrize("fused_lamb_legacy", [(False), (True)]) def test_lamb_gradnorm(self, monkeypatch, fused_lamb_legacy: bool): if not required_torch_version(): pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly") config_dict = { "train_batch_size": 2, "steps_per_print": 1, "fp16": { "enabled": True }, "optimizer": { "type": "Lamb", "params": { "lr": 0.00015 } } } hidden_dim = 10 def mock_unscale_and_clip_grads(total_norm, apply_scale=True): torch_norm_tensor = get_accelerator().FloatTensor([total_norm]) all_gather_results = [torch.zeros_like(torch_norm_tensor) for _ in range(dist.get_world_size())] dist.all_gather(all_gather_results, torch_norm_tensor) assert len(set([x.item() for x in all_gather_results])) == 1 return 1.0 # initialize MoE model = SimpleMoEModel(hidden_dim, ep_size=2) engine, optimizer, _, _ = 
deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters(), dist_init_required=False) monkeypatch.setattr(optimizer, 'unscale_and_clip_grads', mock_unscale_and_clip_grads) optimizer.fused_lamb_legacy = fused_lamb_legacy data_loader = sequence_dataloader(model=engine, total_samples=50, hidden_dim=hidden_dim, device=engine.device) for n, batch in enumerate(data_loader): loss = engine(batch[0], batch[1]) engine.backward(loss) engine.step() class TestAdamwFP16EmptyGrad(DistributedTest): world_size = 1 def test(self): config_dict = {"train_batch_size": 1, "steps_per_print": 1, "fp16": {"enabled": True}} hidden_dim = 10 model = SimpleModel(hidden_dim) optimizer = torch.optim.AdamW(params=model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() @pytest.mark.parametrize("zero_stage", [1, 2, 3]) @pytest.mark.parametrize("use_cpu_offload", [True, False]) class TestAdamFP16ZeroOneCycleCompatibility(DistributedTest): world_size = 1 def test(self, zero_stage, use_cpu_offload): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") config_dict = { "train_batch_size": 1, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 } }, "scheduler": { "type": "OneCycle", "params": { "cycle_first_step_size": 16000, "cycle_first_stair_count": 8000, "decay_step_size": 16000, "cycle_min_lr": 1e-06, "cycle_max_lr": 3e-05, "decay_lr_rate": 1e-07, "cycle_min_mom": 0.85, "cycle_max_mom": 0.99, "decay_mom_rate": 0.0 } }, "fp16": { "enabled": True }, "zero_optimization": { "stage": zero_stage, "cpu_offload": use_cpu_offload } } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=10, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() @pytest.mark.parametrize("zero_stage", [1, 2, 3]) @pytest.mark.parametrize("use_cpu_offload", [True, False]) class TestZeroStaticScale(DistributedTest): world_size = 1 def test(self, zero_stage, use_cpu_offload, hidden_dim=4): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") config_dict = { "train_batch_size": 4, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 } }, "fp16": { "enabled": True, "loss_scale": 138. }, "zero_optimization": { "stage": zero_stage, "cpu_offload": use_cpu_offload } } model = SimpleModel(hidden_dim) model, optim, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) # Ensure the static scaler is configured. assert optim.dynamic_loss_scale == False assert optim.loss_scaler.loss_scale == 138. # Now make sure things work.. 
data_loader = random_dataloader(model=model, total_samples=10, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() @pytest.mark.parametrize("zero_stage", [1, 2, 3]) @pytest.mark.parametrize("use_cpu_offload", [True, False]) class TestZeroAllowUntestedOptimizer(DistributedTest): world_size = 1 def test(self, zero_stage, use_cpu_offload): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") config_dict = { "train_batch_size": 4, "steps_per_print": 1, "fp16": { "enabled": True, }, "zero_optimization": { "stage": zero_stage, "cpu_offload": use_cpu_offload }, "zero_allow_untested_optimizer": False, "zero_force_ds_cpu_optimizer": False } hidden_dim = 10 model = SimpleModel(hidden_dim) optimizer = SimpleOptimizer(model.parameters()) with pytest.raises(AssertionError): model, optim, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer, model_parameters=model.parameters()) @pytest.mark.parametrize("zero_stage", [1, 2, 3]) @pytest.mark.parametrize("use_cpu_offload", [True, False]) class TestZeroEmptyPartition(DistributedTest): world_size = 3 def test(self, zero_stage, use_cpu_offload): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") if zero_stage == 3: pytest.skip("skip for now") config_dict = { "train_micro_batch_size_per_gpu": 1, "gradient_accumulation_steps": 1, "fp16": { "enabled": True, "initial_scale_power": 8 }, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 } }, "zero_optimization": { "stage": zero_stage, "cpu_offload": use_cpu_offload, "reduce_bucket_size": 100, "allgather_bucket_size": 100 } } hidden_dim = 1 model = SimpleModel(hidden_dim) # Ensure model has 2 parameters, to cause empty partition with DP=3 assert len(list(model.parameters())) == 2 model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) # Now make sure things work.. 
data_loader = random_dataloader(model=model, total_samples=1, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() @amp_available class TestAmp(DistributedTest): world_size = 2 def test_adam_basic(self): config_dict = {"train_batch_size": 2, "steps_per_print": 1, "amp": {"enabled": True}} hidden_dim = 10 model = SimpleModel(hidden_dim) optimizer = torch.optim.Adam(params=model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() def test_lamb_basic(self): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Lamb", "params": { "lr": 0.00015 } }, "gradient_clipping": 1.0, "amp": { "enabled": True, } } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() def test_adam_O2(self): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 } }, "gradient_clipping": 1.0, "amp": { "enabled": True, "opt_level": "O2" } } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() def test_adam_O2_empty_grad(self): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 } }, "gradient_clipping": 1.0, "amp": { "enabled": True, "opt_level": "O2" } } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() @pytest.mark.parametrize("zero_stage", [1, 2, 3]) @pytest.mark.parametrize("optimizer_constructor", [FusedAdam, torch.optim.Adam]) class TestZeroSupportedClientOptimizer(DistributedTest): world_size = 1 def test(self, zero_stage, optimizer_constructor): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "fp16": { "enabled": True }, "zero_optimization": { "stage": zero_stage } } hidden_dim = 10 model = SimpleModel(hidden_dim) client_optimizer = optimizer_constructor(params=model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=client_optimizer) class TestZero2ReduceScatterOff(DistributedTest): world_size = 2 def test(self): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 } }, "gradient_clipping": 1.0, "zero_optimization": { "stage": 2, "contiguous_gradients": True, "allgather_bucket_size": 2000000000, "reduce_bucket_size": 200000000, "overlap_comm": 
False, "reduce_scatter": False }, "fp16": { "enabled": True } } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() @pytest.mark.parametrize("adam_type", ["Adam", "AdamW"]) @pytest.mark.parametrize("torch_impl", [True, False]) class TestFP16AdamTypes(DistributedTest): world_size = 1 def test(self, adam_type, torch_impl): config_dict = { "train_batch_size": 1, "steps_per_print": 1, "fp16": { "enabled": True, "initial_scale_power": 10 }, "optimizer": { "type": adam_type, "torch_adam": torch_impl, "params": { "lr": 0.00015 } } } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=10, hidden_dim=hidden_dim, device=model.device) for _, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() class TestZero3LazyScatter(DistributedTest): world_size = 1 def test(self): config_dict = { "train_batch_size": 1, "steps_per_print": 1, "fp16": { "enabled": True, "initial_scale_power": 10 }, "optimizer": { "type": "AdamW", "params": { "lr": 0.00015 } }, "zero_optimization": { "stage": 3 } } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=10, hidden_dim=hidden_dim, device=model.device) for _, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() @pytest.mark.parametrize('stage', [1, 2, 3]) class TestZeroEmptyGrad(DistributedTest): world_size = 1 def test(self, stage): config_dict = { "train_batch_size": 1, "steps_per_print": 1, "fp16": { "enabled": True }, "zero_optimization": { "stage": stage } } hidden_dim = 10 model = SimpleModel(hidden_dim) optimizer = torch.optim.Adam(model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step()
25,459
35.371429
119
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/half_precision/onebit/test_onebit.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch import torch.nn as nn import deepspeed.comm as dist import deepspeed import pytest import os import numpy as np from deepspeed.runtime.pipe.topology import PipeDataParallelTopology from deepspeed.ops.op_builder import OpBuilder from deepspeed.runtime.pipe.module import PipelineModule from unit.common import DistributedTest from unit.simple_model import SimpleModel, random_dataloader from unit.alexnet_model import AlexNetPipe, train_cifar from unit.util import required_minimum_torch_version from deepspeed.accelerator import get_accelerator PipeTopo = PipeDataParallelTopology if not required_minimum_torch_version(major_version=1, minor_version=8): pytest.skip( "NCCL-based 1-bit compression requires torch 1.8 or higher", allow_module_level=True, ) rocm_version = OpBuilder.installed_rocm_version() if rocm_version[0] > 4: pytest.skip("NCCL-based 1-bit compression is not yet supported w. ROCm 5 until cupy supports ROCm 5", allow_module_level=True) @pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=["fp32", "fp16"]) class TestOneBitAdamBasic(DistributedTest): world_size = 2 def test(self, dtype): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "OneBitAdam", "params": { "lr": 0.00015, "weight_decay": 0.01, "freeze_step": 2, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), }, }, "gradient_clipping": 1.0, "fp16": { "enabled": (dtype == torch.float16), "loss_scale": 0, "initial_scale_power": 16, }, } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader( model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device, dtype=dtype, ) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() class TestOneBitAdamExpAvgMask(DistributedTest): world_size = 2 def test(self): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "OneBitAdam", "params": { "lr": 0.00015, "weight_decay": 0.01, "freeze_step": 2, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), }, }, "gradient_clipping": 1.0, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16 }, } hidden_dim = 10 model = SimpleModel(hidden_dim) param_optimizer = list(model.named_parameters()) mask1 = torch.zeros_like(param_optimizer[0][1].data) for col in range(mask1.size()[1]): mask1[0][col] += 1 mask1 = torch.flatten(mask1) optimizer_grouped_parameters = [ { "params": [param_optimizer[0][1]], "weight_decay": 0.01, "exp_avg_mask": mask1, }, { "params": [param_optimizer[1][1]], "weight_decay": 0.01 }, ] model, optimizer, _, _ = deepspeed.initialize( config=config_dict, model=model, model_parameters=optimizer_grouped_parameters, ) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() # Test whether the momentum mask works for v in optimizer.state.values(): if v["exp_avg"].size() == mask1.size(): assert torch.allclose( v["exp_avg"], v["exp_avg"].mul_(mask1.to(device=v["exp_avg"].device)), atol=1e-07, ), f"Momentum mask is not working properly" class TestOneBitAdamCheckpointing(DistributedTest): world_size = 2 def test(self, 
tmpdir): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "OneBitAdam", "params": { "lr": 0.00015, "weight_decay": 0.01, "freeze_step": 2, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), }, }, "gradient_clipping": 1.0, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16 }, } hidden_dim = 10 model = SimpleModel(hidden_dim) param_optimizer = list(model.named_parameters()) mask1 = torch.zeros_like(param_optimizer[0][1].data) mask2 = torch.zeros_like(param_optimizer[0][1].data) for col in range(mask1.size()[1]): mask1[0][col] += 1 mask2[1][col] += 1 mask1 = torch.flatten(mask1) mask2 = torch.flatten(mask2) optimizer_grouped_parameters_1 = [ { "params": [param_optimizer[0][1]], "weight_decay": 0.01, "exp_avg_mask": mask1, }, { "params": [param_optimizer[1][1]], "weight_decay": 0.01 }, ] optimizer_grouped_parameters_2 = [ { "params": [param_optimizer[0][1]], "weight_decay": 0.01, "exp_avg_mask": mask2, }, { "params": [param_optimizer[1][1]], "weight_decay": 0.01 }, ] optimizer_grouped_parameters_3 = [ { "params": [param_optimizer[0][1]], "weight_decay": 0.01 }, { "params": [param_optimizer[1][1]], "weight_decay": 0.01 }, ] model_1, optimizer_1, _, _ = deepspeed.initialize( config=config_dict, model=model, model_parameters=optimizer_grouped_parameters_1, ) data_loader = random_dataloader( model=model_1, total_samples=10, hidden_dim=hidden_dim, device=model_1.device, ) for n, batch in enumerate(data_loader): loss = model_1(batch[0], batch[1]) model_1.backward(loss) model_1.step() # Test whether momentum mask still exist after saving checkpoint assert optimizer_1.optimizer.adam_freeze_key is True mask1 = mask1.to(device=optimizer_1.param_groups[0]["exp_avg_mask"].device) assert torch.allclose(optimizer_1.param_groups[0]["exp_avg_mask"], mask1, atol=1e-07), f"Incorrect momentum mask" save_folder = os.path.join(tmpdir, "saved_checkpoint") model_1.save_checkpoint(save_folder, tag=None) assert torch.allclose(optimizer_1.param_groups[0]["exp_avg_mask"], mask1, atol=1e-07), f"Momentum mask should not change after saving checkpoint" model_2, optimizer_2, _, _ = deepspeed.initialize( config=config_dict, model=model, model_parameters=optimizer_grouped_parameters_2, ) # Test whether momentum mask stays the same after loading checkpoint mask2 = mask2.to(device=optimizer_2.param_groups[0]["exp_avg_mask"].device) assert torch.allclose(optimizer_2.param_groups[0]["exp_avg_mask"], mask2, atol=1e-07), f"Incorrect momentum mask" model_2.load_checkpoint( save_folder, tag=None, load_optimizer_states=True, load_lr_scheduler_states=True, ) assert torch.allclose(optimizer_2.param_groups[0]["exp_avg_mask"], mask2, atol=1e-07), f"Momentum mask should not change after loading checkpoint" # Test whether worker&server error is reset for v in optimizer_2.state.values(): assert "worker_error" not in v, f"Incorrect worker error" assert "server_error" not in v, f"Incorrect server error" assert optimizer_2.optimizer.adam_freeze_key is True model_3, optimizer_3, _, _ = deepspeed.initialize( config=config_dict, model=model, model_parameters=optimizer_grouped_parameters_3, ) optimizer_3.optimizer.freeze_step = 20 data_loader = random_dataloader( model=model_3, total_samples=50, hidden_dim=hidden_dim, device=model_3.device, ) for n, batch in enumerate(data_loader): loss = model_3(batch[0], batch[1]) model_3.backward(loss) model_3.step() assert optimizer_3.optimizer.adam_freeze_key is True # Test whether momentum mask stays the same after 
loading checkpoint assert ("exp_avg_mask" not in optimizer_3.param_groups[0]), f"Incorrect momentum mask" model_3.load_checkpoint( save_folder, tag=None, load_optimizer_states=True, load_lr_scheduler_states=True, ) assert ("exp_avg_mask" not in optimizer_3.param_groups[0]), f"Momentum mask should not change after loading checkpoint" # Test whether worker&server error is reset for v in optimizer_3.state.values(): assert "worker_error" not in v, f"Incorrect worker error" assert "server_error" not in v, f"Incorrect server error" assert optimizer_3.optimizer.adam_freeze_key is False def test_overflow(self, tmpdir): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "OneBitAdam", "params": { "lr": 0.00015, "weight_decay": 0.01, "freeze_step": 2, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), }, }, "gradient_clipping": 1.0, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16 }, } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=100, hidden_dim=hidden_dim, device=model.device) save_folder = os.path.join(tmpdir, "saved_checkpoint") for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) if dist.get_rank() == 0 and n >= 10: loss = loss * 1000000.0 model.backward(loss) dist.barrier() model.step() dist.barrier() model.save_checkpoint(save_folder, tag=None) @pytest.mark.parametrize( "topo_config", [ { "num_pp": 2, "num_dp": 2 }, ], ) class TestOneBitAdamFP16Pipeline(DistributedTest): world_size = 4 def test(self, topo_config): config_dict = { "train_batch_size": 4, "grandient_accumulation_steps": 1, "steps_per_print": 20, "optimizer": { "type": "OneBitAdam", "params": { "lr": 0.00001, "betas": [0.9, 0.999], "eps": 1e-8, "weight_decay": 3e-7, "freeze_step": 200, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), }, }, "gradient_clipping": 1.0, "zero_optimization": { "stage": 0 }, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16 }, "pipeline": { "seed_layers": True, "activation_checkpoint_interval": 1 }, } topo = PipeTopo(**topo_config) steps = 100 # TODO: Add correctness tests/asserts comparing with baseline? 
test_net = AlexNetPipe() test_model = PipelineModule(layers=test_net.to_layers(), topology=topo, loss_fn=nn.CrossEntropyLoss()) test_losses = train_cifar(test_model, config=config_dict, num_steps=steps, fp16=config_dict['fp16']['enabled']) @pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=["fp32", "fp16"]) class TestZeroOneAdamBasic(DistributedTest): world_size = 2 def test(self, dtype): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "ZeroOneAdam", "params": { "lr": 0.00015, "weight_decay": 0.01, "var_freeze_step": 4, "var_update_scaler": 1, "local_step_scaler": 1, "local_step_clipper": 2, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), }, }, "gradient_clipping": 1.0, "fp16": { "enabled": (dtype == torch.float16), "loss_scale": 0, "initial_scale_power": 16, }, } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader( model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device, dtype=dtype, ) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() class TestZeroOneAdamExpAvgMask(DistributedTest): world_size = 2 def test(self): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "ZeroOneAdam", "params": { "lr": 0.00015, "weight_decay": 0.01, "var_freeze_step": 4, "var_update_scaler": 1, "local_step_scaler": 1, "local_step_clipper": 2, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), }, }, "gradient_clipping": 1.0, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16 }, } hidden_dim = 10 model = SimpleModel(hidden_dim) param_optimizer = list(model.named_parameters()) mask1 = torch.zeros_like(param_optimizer[0][1].data) for col in range(mask1.size()[1]): mask1[0][col] += 1 mask1 = torch.flatten(mask1) optimizer_grouped_parameters = [ { "params": [param_optimizer[0][1]], "weight_decay": 0.01, "exp_avg_mask": mask1, }, { "params": [param_optimizer[1][1]], "weight_decay": 0.01 }, ] model, optimizer, _, _ = deepspeed.initialize( config=config_dict, model=model, model_parameters=optimizer_grouped_parameters, ) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() # Test whether the momentum mask works for v in optimizer.state.values(): if v["exp_avg"].size() == mask1.size(): assert torch.allclose( v["exp_avg"], v["exp_avg"].mul_(mask1.to(device=v["exp_avg"].device)), atol=1e-07, ), f"Momentum mask is not working properly" class TestZeroOneAdamCheckpointing(DistributedTest): world_size = 2 def test(self, tmpdir): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "ZeroOneAdam", "params": { "lr": 0.00015, "weight_decay": 0.01, "var_freeze_step": 4, "var_update_scaler": 1, "local_step_scaler": 1, "local_step_clipper": 2, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), }, }, "gradient_clipping": 1.0, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16 }, } hidden_dim = 10 model = SimpleModel(hidden_dim) param_optimizer = list(model.named_parameters()) mask1 = torch.zeros_like(param_optimizer[0][1].data) mask2 = torch.zeros_like(param_optimizer[0][1].data) for col in 
range(mask1.size()[1]): mask1[0][col] += 1 mask2[1][col] += 1 mask1 = torch.flatten(mask1) mask2 = torch.flatten(mask2) optimizer_grouped_parameters_1 = [ { "params": [param_optimizer[0][1]], "weight_decay": 0.01, "exp_avg_mask": mask1, }, { "params": [param_optimizer[1][1]], "weight_decay": 0.01 }, ] optimizer_grouped_parameters_2 = [ { "params": [param_optimizer[0][1]], "weight_decay": 0.01, "exp_avg_mask": mask2, }, { "params": [param_optimizer[1][1]], "weight_decay": 0.01 }, ] optimizer_grouped_parameters_3 = [ { "params": [param_optimizer[0][1]], "weight_decay": 0.01 }, { "params": [param_optimizer[1][1]], "weight_decay": 0.01 }, ] model_1, optimizer_1, _, _ = deepspeed.initialize( config=config_dict, model=model, model_parameters=optimizer_grouped_parameters_1, ) data_loader = random_dataloader( model=model_1, total_samples=10, hidden_dim=hidden_dim, device=model_1.device, ) for n, batch in enumerate(data_loader): loss = model_1(batch[0], batch[1]) model_1.backward(loss) model_1.step() # Test whether momentum mask still exist after saving checkpoint mask1 = mask1.to(device=optimizer_1.param_groups[0]["exp_avg_mask"].device) assert torch.allclose(optimizer_1.param_groups[0]["exp_avg_mask"], mask1, atol=1e-07), f"Incorrect momentum mask" save_folder = os.path.join(tmpdir, "saved_checkpoint") model_1.save_checkpoint(save_folder, tag=None) assert torch.allclose(optimizer_1.param_groups[0]["exp_avg_mask"], mask1, atol=1e-07), f"Momentum mask should not change after saving checkpoint" model_2, optimizer_2, _, _ = deepspeed.initialize( config=config_dict, model=model, model_parameters=optimizer_grouped_parameters_2, ) # Test whether momentum mask stays the same after loading checkpoint mask2 = mask2.to(device=optimizer_2.param_groups[0]["exp_avg_mask"].device) assert torch.allclose(optimizer_2.param_groups[0]["exp_avg_mask"], mask2, atol=1e-07), f"Incorrect momentum mask" model_2.load_checkpoint( save_folder, tag=None, load_optimizer_states=True, load_lr_scheduler_states=True, ) assert torch.allclose(optimizer_2.param_groups[0]["exp_avg_mask"], mask2, atol=1e-07), f"Momentum mask should not change after loading checkpoint" # Test whether worker&server error is reset for v in optimizer_2.state.values(): assert "worker_error" not in v, f"Incorrect worker error" assert "server_error" not in v, f"Incorrect server error" model_3, optimizer_3, _, _ = deepspeed.initialize( config=config_dict, model=model, model_parameters=optimizer_grouped_parameters_3, ) optimizer_3.optimizer.freeze_step = 20 data_loader = random_dataloader( model=model_3, total_samples=50, hidden_dim=hidden_dim, device=model_3.device, ) for n, batch in enumerate(data_loader): loss = model_3(batch[0], batch[1]) model_3.backward(loss) model_3.step() # Test whether momentum mask stays the same after loading checkpoint assert ("exp_avg_mask" not in optimizer_3.param_groups[0]), f"Incorrect momentum mask" model_3.load_checkpoint( save_folder, tag=None, load_optimizer_states=True, load_lr_scheduler_states=True, ) assert ("exp_avg_mask" not in optimizer_3.param_groups[0]), f"Momentum mask should not change after loading checkpoint" # Test whether worker&server error is reset for v in optimizer_3.state.values(): assert "worker_error" not in v, f"Incorrect worker error" assert "server_error" not in v, f"Incorrect server error" def test_overflow(self, tmpdir): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "ZeroOneAdam", "params": { "lr": 0.00015, "weight_decay": 0.01, "var_freeze_step": 4, 
"var_update_scaler": 1, "local_step_scaler": 1, "local_step_clipper": 2, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), }, }, "gradient_clipping": 1.0, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16 }, } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=100, hidden_dim=hidden_dim, device=model.device) save_folder = os.path.join(tmpdir, "saved_checkpoint") for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) if dist.get_rank() == 0 and n >= 10: loss = loss * 1000000.0 model.backward(loss) dist.barrier() model.step() dist.barrier() model.save_checkpoint(save_folder, tag=None) @pytest.mark.parametrize( "topo_config", [ { "num_pp": 2, "num_dp": 2 }, ], ) class TestZeroOneAdamFP16Pipeline(DistributedTest): world_size = 4 def test(self, topo_config): config_dict = { "train_batch_size": 4, "grandient_accumulation_steps": 1, "steps_per_print": 20, "optimizer": { "type": "ZeroOneAdam", "params": { "lr": 0.00001, "betas": [0.9, 0.999], "eps": 1e-8, "weight_decay": 3e-7, "var_freeze_step": 4, "var_update_scaler": 1, "local_step_scaler": 1, "local_step_clipper": 2, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), }, }, "gradient_clipping": 1.0, "zero_optimization": { "stage": 0 }, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16 }, "pipeline": { "seed_layers": True, "activation_checkpoint_interval": 1 }, } topo = PipeTopo(**topo_config) steps = 100 # TODO: Add correctness tests/asserts comparing with baseline? test_net = AlexNetPipe() test_model = PipelineModule(layers=test_net.to_layers(), topology=topo, loss_fn=nn.CrossEntropyLoss()) test_losses = train_cifar(test_model, config=config_dict, num_steps=steps, fp16=config_dict['fp16']['enabled']) @pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=["fp32", "fp16"]) class TestOneBitLambBasic(DistributedTest): world_size = 2 def test(self, dtype): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "OneBitLamb", "params": { "lr": 0.00015, "weight_decay": 0.01, "max_coeff": 0.3, "min_coeff": 0.01, "freeze_step": 2, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), "coeff_beta": 0.9, "factor_max": 1.0, "factor_min": 0.5, "factor_threshold": 0.1, }, }, "gradient_clipping": 1.0, "fp16": { "enabled": (dtype == torch.float16), "loss_scale": 0, "initial_scale_power": 16, }, } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader( model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device, dtype=dtype, ) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() class TestOneBitLampExpAvgMask(DistributedTest): world_size = 2 def test(self): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "OneBitLamb", "params": { "lr": 0.00015, "weight_decay": 0.01, "max_coeff": 0.3, "min_coeff": 0.01, "freeze_step": 2, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), "coeff_beta": 0.9, "factor_max": 1.0, "factor_min": 0.5, "factor_threshold": 0.1, }, }, "gradient_clipping": 1.0, "fp16": { "enabled": True, 
"loss_scale": 0, "initial_scale_power": 16 }, } hidden_dim = 10 model = SimpleModel(hidden_dim) param_optimizer = list(model.named_parameters()) mask1 = torch.zeros_like(param_optimizer[0][1].data) for col in range(mask1.size()[1]): mask1[0][col] += 1 optimizer_grouped_parameters = [ { "params": [param_optimizer[0][1]], "weight_decay": 0.01, "exp_avg_mask": mask1, }, { "params": [param_optimizer[1][1]], "weight_decay": 0.01 }, ] model, optimizer, _, _ = deepspeed.initialize( config=config_dict, model=model, model_parameters=optimizer_grouped_parameters, ) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() # Test whether the momentum mask works for v in optimizer.state.values(): if v["exp_avg"].size() == mask1.size(): assert torch.allclose( v["exp_avg"], v["exp_avg"].mul_(mask1.to(device=v["exp_avg"].device)), atol=1e-07, ), f"Momentum mask is not working properly" class TestOneBitLambCheckpointing(DistributedTest): world_size = 2 def test(self, tmpdir): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "OneBitLamb", "params": { "lr": 0.00015, "weight_decay": 0.01, "max_coeff": 0.3, "min_coeff": 0.01, "freeze_step": 2, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), "coeff_beta": 0.9, "factor_max": 1.0, "factor_min": 0.5, "factor_threshold": 0.1, }, }, "gradient_clipping": 1.0, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16 }, } hidden_dim = 10 model = SimpleModel(hidden_dim) param_optimizer = list(model.named_parameters()) mask1 = torch.zeros_like(param_optimizer[0][1].data) mask2 = torch.zeros_like(param_optimizer[0][1].data) for col in range(mask1.size()[1]): mask1[0][col] += 1 mask2[1][col] += 1 optimizer_grouped_parameters_1 = [ { "params": [param_optimizer[0][1]], "weight_decay": 0.01, "exp_avg_mask": mask1, }, { "params": [param_optimizer[1][1]], "weight_decay": 0.01 }, ] optimizer_grouped_parameters_2 = [ { "params": [param_optimizer[0][1]], "weight_decay": 0.01, "exp_avg_mask": mask2, }, { "params": [param_optimizer[1][1]], "weight_decay": 0.01 }, ] optimizer_grouped_parameters_3 = [ { "params": [param_optimizer[0][1]], "weight_decay": 0.01 }, { "params": [param_optimizer[1][1]], "weight_decay": 0.01 }, ] model_1, optimizer_1, _, _ = deepspeed.initialize( config=config_dict, model=model, model_parameters=optimizer_grouped_parameters_1, ) data_loader = random_dataloader( model=model_1, total_samples=10, hidden_dim=hidden_dim, device=model_1.device, ) for n, batch in enumerate(data_loader): loss = model_1(batch[0], batch[1]) model_1.backward(loss) model_1.step() # Test whether momentum mask still exist after saving checkpoint assert optimizer_1.optimizer.lamb_freeze_key is True mask1 = mask1.to(device=optimizer_1.param_groups[0]["exp_avg_mask"].device) assert torch.allclose(optimizer_1.param_groups[0]["exp_avg_mask"], mask1, atol=1e-07), f"Incorrect momentum mask" scaling_coeff_1 = [] for v in optimizer_1.state.values(): assert "scaling_coeff" in v, f"Incorrect scaling_coeff" scaling_coeff_1.append(v["scaling_coeff"]) save_folder = os.path.join(tmpdir, "saved_checkpoint") model_1.save_checkpoint(save_folder, tag=None) assert torch.allclose(optimizer_1.param_groups[0]["exp_avg_mask"], mask1, atol=1e-07), f"Momentum mask should not change after saving checkpoint" model_2, optimizer_2, _, _ = deepspeed.initialize( 
config=config_dict, model=model, model_parameters=optimizer_grouped_parameters_2, ) # Test whether momentum mask stays the same after loading checkpoint mask2 = mask2.to(device=optimizer_2.param_groups[0]["exp_avg_mask"].device) assert torch.allclose(optimizer_2.param_groups[0]["exp_avg_mask"], mask2, atol=1e-07), f"Incorrect momentum mask" model_2.load_checkpoint( save_folder, tag=None, load_optimizer_states=True, load_lr_scheduler_states=True, ) assert torch.allclose(optimizer_2.param_groups[0]["exp_avg_mask"], mask2, atol=1e-07), f"Momentum mask should not change after loading checkpoint" # Test whether worker&server error is reset assert len(optimizer_2.optimizer.worker_errors) == 0, f"Incorrect worker error" assert len(optimizer_2.optimizer.server_errors) == 0, f"Incorrect server error" # Test whether scaling_coeffs is loaded correctly scaling_coeff_2 = [] for v in optimizer_2.state.values(): assert "scaling_coeff" in v, f"Incorrect scaling_coeff" scaling_coeff_2.append(v["scaling_coeff"]) assert list(sorted(scaling_coeff_2)) == list(sorted(scaling_coeff_1)), f"Incorrect scaling_coeffs" assert optimizer_2.optimizer.lamb_freeze_key is True model_3, optimizer_3, _, _ = deepspeed.initialize( config=config_dict, model=model, model_parameters=optimizer_grouped_parameters_3, ) optimizer_3.optimizer.freeze_step = 20 data_loader = random_dataloader( model=model_3, total_samples=50, hidden_dim=hidden_dim, device=model_3.device, ) for n, batch in enumerate(data_loader): loss = model_3(batch[0], batch[1]) model_3.backward(loss) model_3.step() assert optimizer_3.optimizer.lamb_freeze_key is True # Test whether momentum mask stays the same after loading checkpoint assert ("exp_avg_mask" not in optimizer_3.param_groups[0]), f"Incorrect momentum mask" model_3.load_checkpoint( save_folder, tag=None, load_optimizer_states=True, load_lr_scheduler_states=True, ) assert ("exp_avg_mask" not in optimizer_3.param_groups[0]), f"Momentum mask should not change after loading checkpoint" # Test whether worker&server error is reset assert len(optimizer_3.optimizer.worker_errors) == 0, f"Incorrect worker error" assert len(optimizer_3.optimizer.server_errors) == 0, f"Incorrect server error" # Test whether scaling_coeffs, lamb_coeff_freeze, last_factor are reset for v in optimizer_3.state.values(): assert v["lamb_coeff_freeze"] == 0.0, f"Incorrect lamb_coeff_freeze" assert v["last_factor"] == 1.0, f"Incorrect last_factor" assert "scaling_coeff" not in v, f"Incorrect scaling_coeff" assert optimizer_3.optimizer.lamb_freeze_key is False def test_overflow(self, tmpdir): config_dict = { "train_batch_size": 2, "steps_per_print": 1, "optimizer": { "type": "OneBitLamb", "params": { "lr": 0.00015, "weight_decay": 0.01, "max_coeff": 0.3, "min_coeff": 0.01, "freeze_step": 2, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), "coeff_beta": 0.9, "factor_max": 1.0, "factor_min": 0.5, "factor_threshold": 0.1, }, }, "gradient_clipping": 1.0, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16 }, } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=100, hidden_dim=hidden_dim, device=model.device) save_folder = os.path.join(tmpdir, "saved_checkpoint") for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) if dist.get_rank() == 0 and n >= 10: loss = loss * 1000000.0 model.backward(loss) 
dist.barrier() model.step() dist.barrier() model.save_checkpoint(save_folder, tag=None) @pytest.mark.parametrize( "topo_config", [ { "num_pp": 2, "num_dp": 2 }, ], ) class TestOneBitLambFP16Pipeline(DistributedTest): world_size = 4 def test(self, topo_config): config_dict = { "train_batch_size": 4, "grandient_accumulation_steps": 1, "steps_per_print": 20, "optimizer": { "type": "OneBitLamb", "params": { "lr": 0.00001, "betas": [0.9, 0.999], "eps": 1e-8, "weight_decay": 3e-7, "freeze_step": 200, "cuda_aware": False, "comm_backend_name": get_accelerator().communication_backend_name(), }, }, "gradient_clipping": 1.0, "zero_optimization": { "stage": 0 }, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16 }, "pipeline": { "seed_layers": True, "activation_checkpoint_interval": 1 }, } topo = PipeTopo(**topo_config) steps = 100 # TODO: Add correctness tests/asserts comparing with baseline? test_net = AlexNetPipe() test_model = PipelineModule(layers=test_net.to_layers(), topology=topo, loss_fn=nn.CrossEntropyLoss()) test_losses = train_cifar(test_model, config=config_dict, num_steps=steps, fp16=config_dict['fp16']['enabled']) @pytest.mark.sequential class TestCompressedAllReduceBasic(DistributedTest): world_size = 2 def test(self, tmpdir): from deepspeed.runtime.comm.nccl import NcclBackend size = dist.get_world_size() rank = dist.get_rank() backend = NcclBackend() local_rank = dist.get_rank() device = torch.device(get_accelerator().device_name(), dist.get_rank()) # A simulated compression function using deepspeed.comm def torch_sim(a): a_sign = a.sign().add_(1).bool().float().add_(-0.5).mul_(2.0) scale = a.norm() / np.sqrt(a.numel()) a_compressed = scale * a_sign a_sign = None worker_error = a - a_compressed dist.all_reduce(a_compressed) a_compressed.mul_(1 / dist.get_world_size()) a_server_sign = (a_compressed.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)) a_list = torch.chunk(a_compressed, chunks=dist.get_world_size()) server_scale = [chunk_a.norm() / np.sqrt(chunk_a.numel()) for chunk_a in a_list] a_sign_list = torch.chunk(a_server_sign, dist.get_world_size()) a_server_compressed = torch.cat([server_scale[i] * a_sign_list[i] for i in range(dist.get_world_size())]) rank = dist.get_rank() server_error = a_list[rank] - server_scale[rank] * a_sign_list[rank] get_accelerator().synchronize() dist.barrier() return a_server_compressed, worker_error, server_error tensor_size = 300 * 2**20 server_size = int(tensor_size / size) if tensor_size % (8 * size) != 0: right_tensor_size = tensor_size + (8 * size - (tensor_size % (8 * size))) else: right_tensor_size = tensor_size right_server_size = right_tensor_size // size # Adding bias to the initialization of the gradient we are communicating # In order to get rid of the case where some elements in the gradient are too small a = (torch.rand(tensor_size, device=device) - 0.5) + 0.01 * rank worker_error = torch.zeros(right_tensor_size, device=device) server_error = torch.zeros(right_server_size, device=device) a_torch, worker_error_torch, server_error_torch = torch_sim(a) get_accelerator().empty_cache() a_after = backend.compressed_allreduce(a, worker_error, server_error, local_rank) threshold = 1e-6 magnitude_threshold = 1e-6 diff_mask = (a_after - a_torch) > threshold diff_server_mask = torch.chunk(diff_mask, size)[rank] mpi_server = torch.chunk(a_after, size)[rank] + server_error torch_server = torch.chunk(a_torch, size)[rank] + server_error_torch # If the number in the compensated_server_m is too small (e.g 1e-8), then calling sign() 
might be problematic # The test would skip those numbers that are too small in compensated_server_m check_mag_mask = mpi_server[diff_server_mask] > magnitude_threshold if torch.sum(check_mag_mask) != 0: print("Fails at {} of positions".format(torch.sum(check_mag_mask))) assert torch.sum(diff_server_mask) == 0 or torch.sum(check_mag_mask) == 0
44,111
35.913808
119
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/zero/test_zero_tensor_fragment.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import pytest
import deepspeed.comm as dist
import torch

from unit.common import DistributedTest
from unit.simple_model import random_dataloader
from unit.util import bf16_required_version_check

import deepspeed
from deepspeed.utils import safe_get_full_fp32_param, safe_get_full_grad, safe_get_full_optimizer_state
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.ops.aio import AsyncIOBuilder


def validate_full_tensors(model):
    for _, lp in model.named_parameters():
        hp = safe_get_full_fp32_param(lp)
        exp_avg = safe_get_full_optimizer_state(lp, 'exp_avg')
        exp_avg_sq = safe_get_full_optimizer_state(lp, 'exp_avg_sq')
        hp_grad = safe_get_full_grad(lp)
        param_list = [hp, hp_grad, exp_avg, exp_avg_sq]
        if lp.requires_grad:
            assert all([p is not None for p in param_list])
        else:
            assert all([p is None for p in param_list])


class MyModel(torch.nn.Module):

    def __init__(self, hidden_dim, frozen_weights):
        super(MyModel, self).__init__()
        self.act = torch.nn.ReLU()
        self.cel = torch.nn.CrossEntropyLoss()
        self.linears = torch.nn.ModuleList(
            [torch.nn.Linear(hidden_dim, 1), torch.nn.Linear(1, 1), torch.nn.Linear(1, hidden_dim)])
        if frozen_weights:
            self.linears[0].weight.requires_grad = False
            self.linears[0].bias.requires_grad = False

    def forward(self, x, y):
        for l in self.linears:
            x = l(x)
            x = self.act(x)
        loss = self.cel(x, y)
        val = (x, loss)
        return val


def run_fragmented_model(model, config_dict, hidden_dim, dtype):
    model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
    data_loader = random_dataloader(model=model,
                                    total_samples=10,
                                    hidden_dim=hidden_dim,
                                    device=model.device,
                                    dtype=dtype)
    dist.barrier()
    for n, batch in enumerate(data_loader):
        loss = model(batch[0], batch[1])
        loss = loss[1]
        model.backward(loss)
        validate_full_tensors(model)
        model.step()

    # Needed in ZeRO 3. Not doing so can give memory leak
    model.destroy()


@pytest.mark.parametrize('frozen_weights', [True, False])
class TestTensorFragment(DistributedTest):
    # Need multiple gpus to test possible hanging
    world_size = 2
    reuse_dist_env = True

    @pytest.mark.parametrize('zero_stage', [1, 2, 3])
    @pytest.mark.parametrize('offload_device', [OffloadDeviceEnum.none, OffloadDeviceEnum.cpu, OffloadDeviceEnum.nvme])
    def test_zero_fragments(self, tmpdir, zero_stage, offload_device, frozen_weights):
        if offload_device == OffloadDeviceEnum.nvme:
            if zero_stage != 3:
                pytest.skip(f"Nvme offload not supported for zero stage {zero_stage}")
            if not deepspeed.ops.__compatible_ops__[AsyncIOBuilder.NAME]:
                pytest.skip('Skip tests since async-io is not compatible')

        config_dict = {
            "train_micro_batch_size_per_gpu": 1,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 1e-6
                }
            },
            "fp16": {
                "enabled": True,
                "initial_scale_power": 2
            },
            "zero_optimization": {
                "stage": zero_stage,
            }
        }

        if offload_device == OffloadDeviceEnum.cpu:
            config_dict["zero_optimization"]["offload_optimizer"] = {"device": offload_device}
        elif offload_device == OffloadDeviceEnum.nvme:
            config_dict["zero_optimization"]["offload_optimizer"] = {
                "device": offload_device,
                "nvme_path": str(tmpdir)
            }

        hidden_dim = 128
        if zero_stage == 3:
            with deepspeed.zero.Init(config_dict_or_path=config_dict):
                model = MyModel(hidden_dim, frozen_weights)
        else:
            model = MyModel(hidden_dim, frozen_weights)

        run_fragmented_model(model, config_dict, hidden_dim, torch.float16)

    def test_bf16_fragments(self, frozen_weights):
        if frozen_weights:
            pytest.skip("TODO: Frozen weights not currently supported by BF16 Optimizer")

        if not bf16_required_version_check(accelerator_check=False):
            pytest.skip(
                " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
            )

        config_dict = {
            "train_micro_batch_size_per_gpu": 1,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 1e-6
                }
            },
            "bf16": {
                "enabled": True
            },
            "zero_optimization": {
                "stage": 0,
            }
        }

        hidden_dim = 128
        model = MyModel(hidden_dim, frozen_weights)
        run_fragmented_model(model, config_dict, hidden_dim, torch.bfloat16)
5,412
34.379085
137
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/zero/test_zero_context_ancestry.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
import deepspeed
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from deepspeed.accelerator import get_accelerator

from utils import setup_serial_env
from unit.common import DistributedTest

config = {
    "train_batch_size": 1,
    "steps_per_print": 1,
    "optimizer": {
        "type": "Adam",
        "params": {
            "lr": 0.00015
        }
    },
    "fp16": {
        "enabled": True,
        "loss_scale": 138.
    },
    "zero_optimization": {
        "stage": 3,
        "stage3_param_persistence_threshold": 1,
    }
}


# test that sub-classes get params that aren't prematurely partitioned and thus requiring gathering
# fixed by https://github.com/microsoft/DeepSpeed/pull/1202
class GrandPa(torch.nn.Module):

    def __init__(self, *args):
        super().__init__(*args)
        self.param_grandpa = torch.nn.Parameter(torch.ones(5))
        self.param_grandpa.data = (self.param_grandpa.data + 1).data  # test param is not yet partitioned


class Pa(GrandPa):

    def __init__(self, *args):
        super().__init__(*args)
        self.param_pa = torch.nn.Parameter(torch.ones(5))
        self.param_pa.data = (self.param_pa.data + 1).data  # test param is not yet partitioned
        self.param_grandpa.data = (self.param_grandpa.data + 1).data  # test param is not yet partitioned


class Son(Pa):

    def __init__(self):
        super().__init__()
        self.param = torch.nn.Parameter(torch.ones(5))
        self.param.data = (self.param.data + 1).data  # test param is not yet partitioned
        self.param_pa.data = (self.param_pa.data + 1).data  # test param is not yet partitioned
        self.param_grandpa.data = (self.param_grandpa.data + 1).data  # test param is not yet partitioned


class TestSerialParamInit(DistributedTest):
    world_size = 1
    init_distributed = False
    set_dist_env = False

    def test_subclass_param_init(self):
        setup_serial_env()

        with deepspeed.zero.Init(config=config):
            model = Son().cpu()

        # test that all params have been partitioned
        assert model.param_grandpa.ds_status == ZeroParamStatus.NOT_AVAILABLE
        assert model.param_pa.ds_status == ZeroParamStatus.NOT_AVAILABLE
        assert model.param.ds_status == ZeroParamStatus.NOT_AVAILABLE

        # test that the weights manipulation during each __init__ worked in all w/o needing gathering
        ones = torch.ones(5).half().to(get_accelerator().device_name())
        with deepspeed.zero.GatheredParameters(list(model.parameters(recurse=False))):
            assert torch.equal(model.param, ones + 1)
            assert torch.equal(model.param_pa, ones + 2)
            assert torch.equal(model.param_grandpa, ones + 3)


class TestDSInitWZinit(DistributedTest):
    world_size = 2

    def test(self):
        ds_config = {
            "train_batch_size": 2,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 0.00015
                }
            }
        }

        class Model(torch.nn.Module):

            def __init__(self):
                super(Model, self).__init__()
                self.linear = torch.nn.Linear(4, 4)

            def magic(self):
                return 42

        with deepspeed.zero.Init():
            model = Model()
            engine, *_ = deepspeed.initialize(model=model, config=ds_config, model_parameters=model.parameters())
        assert engine.magic() == 42
3,616
30.72807
113
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/zero/test_zeropp.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import pytest
import deepspeed.comm as dist
from torch.nn import Module

from unit.common import DistributedTest
from unit.simple_model import random_dataloader

import deepspeed
from deepspeed.runtime.zero.config import DeepSpeedZeroConfig

import torch.nn as nn


class NNModel(nn.Module):

    def __init__(self, h_dim=1024, n_layers=2):
        super(NNModel, self).__init__()
        self.layers = nn.ModuleList([nn.Linear(h_dim, h_dim) for i in range(n_layers)])
        self.cross_entropy_loss = nn.CrossEntropyLoss()

    def forward(self, x, y):
        for layer in self.layers:
            x = layer(x)
        return self.cross_entropy_loss(x, y)


def test_zero_hpz_partition_size_config():
    config = DeepSpeedZeroConfig(**{"zero_hpz_partition_size": 4})
    assert config.zero_hpz_partition_size == 4


def _assert_no_secondary_tensor_group(model: Module) -> None:
    for _, param in model.named_parameters():
        assert param.ds_secondary_tensor is None
        assert param.ds_zero_param_process_group is None


def _assert_secondary_tensor_size(model: Module) -> None:
    for _, param in model.named_parameters():
        assert param.ds_secondary_tensor is not None
        assert param.ds_secondary_tensor.size()[0] % param.ds_tensor.size()[0] == 0


#Large sweep along hidden dim, num_layers, and zpg of different sizes
#Assert when zpg=1 that secondary group and tensors are invalid
@pytest.mark.sequential
@pytest.mark.parametrize("h_dim", [1024])
@pytest.mark.parametrize("n_layers", [4, 9])
@pytest.mark.parametrize("zpg", [1, 2, 4])
class TestZeroPPConfigSweep(DistributedTest):
    world_size = 4

    def test(self, h_dim: int, n_layers: int, zpg: int) -> None:
        config_dict = {
            "train_micro_batch_size_per_gpu": 1,
            "zero_optimization": {
                "stage": 3,
                "stage3_max_reuse_distance": 0,
                "zero_hpz_partition_size": zpg,
                "zero_quantized_weights": True,
                "zero_quantized_gradients": True,
                "contiguous_gradients": True,
                "overlap_comm": True,
            },
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 1.
                }
            },
            "fp16": {
                "enabled": True,
                "loss_scale": 1.,
            }
        }

        model = NNModel(h_dim, n_layers)
        model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
        data_loader = random_dataloader(model=model, total_samples=20, hidden_dim=h_dim, device=model.device)
        dist.barrier()
        if zpg == 1:
            _assert_no_secondary_tensor_group(model)

        for n, batch in enumerate(data_loader):
            if n == 0 and zpg != 1:
                _assert_secondary_tensor_size(model)
            loss = model(batch[0], batch[1])
            model.backward(loss)
            model.step()
3,091
31.547368
115
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/zero/test_ignore_unused_parameters.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import pytest
from unit.common import DistributedTest
from unit.simple_model import UnusedParametersModel, random_dataloader
from deepspeed.ops.op_builder import CPUAdamBuilder
import deepspeed


@pytest.mark.parametrize('ignore_unused_parameters', [False, True])
class TestStage2IgnoreUnusedParameters(DistributedTest):
    world_size = 1

    def test(self, ignore_unused_parameters):
        use_cpu_offload = True

        if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
            pytest.skip("cpu-adam is not compatible")

        config_dict = {
            "train_micro_batch_size_per_gpu": 2,
            "gradient_accumulation_steps": 2,
            "steps_per_print": 1,
            "zero_optimization": {
                "stage": 2,
                "cpu_offload": use_cpu_offload,
                "ignore_unused_parameters": ignore_unused_parameters
            },
            "optimizer": {
                "type": "Adam",
                "params": {
                    "lr": 1e-3
                }
            },
            "fp16": {
                "enabled": True,
                "initial_scale_power": 8
            }
        }
        hidden_dim = 4

        model = UnusedParametersModel(hidden_dim=hidden_dim)
        model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())

        data_loader = random_dataloader(model=model, total_samples=10, hidden_dim=hidden_dim, device=model.device)

        def _loop():
            for n, batch in enumerate(data_loader):
                loss = model(batch[0], batch[1])
                model.backward(loss)
                model.step()

        if ignore_unused_parameters:
            _loop()
        else:
            with pytest.raises(AssertionError) as e:
                _loop()
            assert e.value.args and 'ignore_unused_parameters' in e.value.args[0]
2,017
31.031746
115
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/zero/test_zero_dynamic_class.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch

from unit.common import DistributedTest

import deepspeed


class TestNewClassDeclaredNestingInit(DistributedTest):
    world_size = 1

    def test_new_class_declared_nesting_init(self):
        ds_config = dict(train_batch_size=1, zero_optimization=dict(stage=3))

        with deepspeed.zero.Init(config_dict_or_path=ds_config):

            class MyModel(torch.nn.Module):

                def __init__(self):
                    super().__init__()
                    self.fc = torch.nn.Linear(4, 4)

            with deepspeed.zero.Init(config_dict_or_path=ds_config):
                model = MyModel()

        # ensure that zero3 processed the parameter
        assert hasattr(model.fc.weight, "ds_id")
        deepspeed_engine, *_ = deepspeed.initialize(model=model, config_params=ds_config)


class TestNewClassDeclaredInsideNestingInit(DistributedTest):
    world_size = 1

    def test_new_class_declared_inside_nesting_init(self):
        ds_config = dict(train_batch_size=1, zero_optimization=dict(stage=3))

        with deepspeed.zero.Init(config_dict_or_path=ds_config):

            class MyModel(torch.nn.Module):

                def __init__(self):
                    super().__init__()
                    self.fc = torch.nn.Linear(1, 1)

            model = MyModel()

        # ensure that zero3 processed the parameter
        assert hasattr(model.fc.weight, "ds_id")
        deepspeed_engine, *_ = deepspeed.initialize(model=model, config_params=ds_config)
1,594
28.537037
89
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/zero/utils.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os
from unit.common import get_master_port


def setup_serial_env():
    # Setup for a serial run
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = get_master_port()
    os.environ['LOCAL_RANK'] = '0'
    os.environ['RANK'] = '0'
    os.environ['WORLD_SIZE'] = '1'
394
22.235294
49
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/zero/test_zero.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import math from collections import namedtuple from typing import Dict, List, NamedTuple, Set, Tuple import pytest import deepspeed.comm as dist import torch from torch import Tensor from torch.nn import Linear, Module from torch.nn.modules.container import ModuleList from torch.nn.modules.loss import L1Loss from torch.nn.parameter import Parameter from unit.common import DistributedTest from unit.simple_model import SimpleModel, random_dataloader import deepspeed from deepspeed.runtime.engine import DeepSpeedEngine from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint from deepspeed.runtime.zero.utils import ZeRORuntimeException from deepspeed.accelerator import get_accelerator def run_unbalanced_gradients(model, data_loader): def drop_some_gradients(model, iter): odd_iteration = iter % 2 for i, p in enumerate(model.parameters()): p.requires_grad = (i % 2) == odd_iteration def enable_grads(model): for p in model.parameters(): p.requires_grad = True for i, batch in enumerate(data_loader): drop_some_gradients(model, i + 1) loss = model(batch[0], batch[1]) model.backward(loss) model.step() enable_grads(model) def dump_state_dict(model): if dist.get_rank() == 0: print("state_dict:") for name, param in model.named_parameters(): print(f"{name} {param.data}") @pytest.mark.parametrize("zero_stage", [1, 2, 3]) class TestZeroUnbalancedGradients(DistributedTest): world_size = 1 def test(self, zero_stage): config_dict = { "train_micro_batch_size_per_gpu": 2, "gradient_accumulation_steps": 2, "steps_per_print": 1, "zero_optimization": { "stage": zero_stage }, "optimizer": { "type": "Adam", "params": { "lr": 1e-3 } }, "fp16": { "enabled": True, "initial_scale_power": 8 }, } hidden_dim = 4 model = SimpleModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) run_unbalanced_gradients(model, data_loader) # testing the fix https://github.com/microsoft/DeepSpeed/pull/1227 class TestZero3RepeatForwardLoop(DistributedTest): world_size = 1 def test(self, zero_stage=3): # force all params to be partitioned by forcing threshold=0 config_dict = { "train_micro_batch_size_per_gpu": 2, "gradient_accumulation_steps": 2, "steps_per_print": 1, "zero_optimization": { "stage": zero_stage, "stage3_param_persistence_threshold": 0, }, "optimizer": { "type": "Adam", "params": { "lr": 1e-3 } }, "fp16": { "enabled": True, "initial_scale_power": 8 }, } hidden_dim = 4 class AlbertLikeModel(torch.nn.Module): def __init__(self, hidden_dim): super().__init__() self.linear = torch.nn.Linear(hidden_dim, hidden_dim) self.cross_entropy_loss = torch.nn.CrossEntropyLoss() def forward(self, x, y): # run the same layer multiple times in a loop - to test a stack of forwards, followed by a stack of backwards hidden = x for i in range(3): hidden = hidden + self.linear(hidden) return self.cross_entropy_loss(hidden, y) model = AlbertLikeModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) for i, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) 
model.step() # testing the fix https://github.com/microsoft/DeepSpeed/pull/1227 # also reproduces the https://github.com/microsoft/DeepSpeed/pull/1372 @pytest.mark.parametrize("zero_stage", [2, 3]) @pytest.mark.parametrize("freeze_params", [True, False]) class TestZeroToFP32(DistributedTest): world_size = 2 def test_1_param_group(self, tmpdir, zero_stage, freeze_params): # XXX: ideally refactor with the 2_param_group test as 75% is the same # force all params to be partitioned by forcing threshold=0 config_dict = { "train_micro_batch_size_per_gpu": 2, "gradient_accumulation_steps": 2, "steps_per_print": 1, "zero_optimization": { "stage": zero_stage, "stage3_param_persistence_threshold": 0, }, "optimizer": { "type": "Adam", "params": { "lr": 1e-3 } }, "fp16": { "enabled": True, "initial_scale_power": 8 }, } class MyModel(torch.nn.Module): def __init__(self, hidden_dim, n_layers, freeze_params): super().__init__() # to reproduce https://github.com/microsoft/DeepSpeed/pull/1372 it is important that # the number of total elements is uneven: # (1) 4 layers of 3*(3+1)=12 elements each, 48 in total self.ll = torch.nn.ModuleList(torch.nn.Linear(hidden_dim, hidden_dim) for i in range(n_layers)) # (2) the following adds 4+1=5 elements self.classifier = torch.nn.Linear(4, 1) # total 48+5=53 (uneven as desired) elements self.cross_entropy_loss = torch.nn.CrossEntropyLoss() if freeze_params: self.ll[0].weight.requires_grad = False self.ll[0].bias.requires_grad = False def forward(self, x, y): hidden = x for l in self.ll: hidden = l(hidden) return self.cross_entropy_loss(hidden, y) hidden_dim = 3 # do not change world_size = dist.get_world_size() # we want at least 2x layers as there are gpus to trigger round_robin_fp16_groups reshuffle in zero2 n_layers = world_size * 2 model = MyModel(hidden_dim=hidden_dim, n_layers=n_layers, freeze_params=freeze_params) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) # Flush zero stage 3 cache model.empty_partition_cache() data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) for i, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() model.empty_partition_cache() model.save_checkpoint(tmpdir) # make sure all sides saved it dist.barrier() orig_state_dict = {} for name, param in model.module.named_parameters(): if zero_stage == 3: with deepspeed.zero.GatheredParameters(param, modifier_rank=None): orig_state_dict[name] = param.detach().cpu() else: orig_state_dict[name] = param.detach().cpu() if zero_stage == 3: with deepspeed.zero.GatheredParameters(model.parameters(), modifier_rank=None): fp32_model = load_state_dict_from_zero_checkpoint(model.module, tmpdir) fp32_state_dict = fp32_model.state_dict() else: fp32_model = load_state_dict_from_zero_checkpoint(model.module, tmpdir) fp32_state_dict = fp32_model.state_dict() # dump_state_dict(fp32_model) if dist.get_rank() == 0: for name in orig_state_dict.keys(): # float() workaround for torch<1.6 assert torch.allclose(orig_state_dict[name].float(), fp32_state_dict[name].float()) def test_2_param_groups(self, tmpdir, zero_stage, freeze_params): # TODO: # - need to test with multiple param groups # force all params to be partitioned by forcing threshold=0 config_dict = { "train_micro_batch_size_per_gpu": 2, "gradient_accumulation_steps": 2, "steps_per_print": 1, "zero_allow_untested_optimizer": 1, "zero_optimization": { "stage": zero_stage, 
"stage3_param_persistence_threshold": 0, }, "optimizer": { "type": "Adam", "params": { "lr": 1e-3 } }, "fp16": { "enabled": True, "initial_scale_power": 8 }, } class MyModel(torch.nn.Module): def __init__(self, hidden_dim, n_layers, freeze_params): super().__init__() self.ll = torch.nn.ModuleList(torch.nn.Linear(hidden_dim, hidden_dim) for i in range(n_layers)) self.cross_entropy_loss = torch.nn.CrossEntropyLoss() if freeze_params: self.ll[0].weight.requires_grad = False self.ll[0].bias.requires_grad = False def forward(self, x, y): hidden = x for l in self.ll: hidden = l(hidden) return self.cross_entropy_loss(hidden, y) hidden_dim = 3 world_size = dist.get_world_size() n_layers = world_size * 2 model = MyModel(hidden_dim=hidden_dim, n_layers=n_layers, freeze_params=freeze_params) optim_groups = [ { "params": [l.weight for l in model.ll], "weight_decay": 0.01, }, { "params": [l.bias for l in model.ll], "weight_decay": 0.0 }, ] optim = torch.optim.SGD(optim_groups, lr=0.1) model, _, _, _ = deepspeed.initialize( model=model, model_parameters=model.parameters(), optimizer=optim, config=config_dict, ) model.empty_partition_cache() data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) for i, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() model.empty_partition_cache() model.save_checkpoint(tmpdir) # make sure all sides saved it dist.barrier() # dump_state_dict(model) orig_state_dict = {} for name, param in model.module.named_parameters(): if zero_stage == 3: with deepspeed.zero.GatheredParameters(param, modifier_rank=None): orig_state_dict[name] = param.detach().cpu() else: orig_state_dict[name] = param.detach().cpu() if zero_stage == 3: with deepspeed.zero.GatheredParameters(model.parameters(), modifier_rank=None): fp32_model = load_state_dict_from_zero_checkpoint(model.module, tmpdir) fp32_state_dict = fp32_model.state_dict() else: fp32_model = load_state_dict_from_zero_checkpoint(model.module, tmpdir) fp32_state_dict = fp32_model.state_dict() # dump_state_dict(fp32_model) if dist.get_rank() == 0: for name in orig_state_dict.keys(): # float() workaround for torch<1.6 assert torch.allclose(orig_state_dict[name].float(), fp32_state_dict[name].float()) @pytest.mark.parametrize("allgather_bucket_size", [1000, 1001]) class TestIncorectAllgatherBucketSize(DistributedTest): world_size = 1 def test(self, allgather_bucket_size, zero_stage=2): config_dict = { "train_micro_batch_size_per_gpu": 2, "gradient_accumulation_steps": 2, "steps_per_print": 1, "zero_optimization": { "stage": zero_stage, "allgather_bucket_size": allgather_bucket_size, }, "optimizer": { "type": "Adam", "params": { "lr": 1e-3 } }, "fp16": { "enabled": True, "initial_scale_power": 8 }, } hidden_dim = 4 model = SimpleModel(hidden_dim=hidden_dim) if allgather_bucket_size % 2 == 0: model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) else: with pytest.raises(AssertionError) as assertinfo: model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) assert ("allgather_bucket_size must be a multiple of nccl_start_alignment_factor" in str(assertinfo)) class TestPartitionNcclAlignment(DistributedTest): world_size = 4 def test(self, zero_stage=2): config_dict = { "train_micro_batch_size_per_gpu": 2, "gradient_accumulation_steps": 2, "steps_per_print": 1, "zero_optimization": { "stage": zero_stage }, "optimizer": { "type": "Adam", 
"params": { "lr": 1e-3 } }, "fp16": { "enabled": True, "initial_scale_power": 8 }, } hidden_dim = 4 model = SimpleModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) # get nccl all-gather send buffers alignment factor nccl_start_alignment_factor = model.optimizer.nccl_start_alignment_factor parallel_partitioned_bit16_groups = (model.optimizer.parallel_partitioned_bit16_groups if zero_stage == 2 else model.optimizer.parallel_partitioned_fp16_groups) for data_parallel_partitions in parallel_partitioned_bit16_groups: for partition_id, partitioned_data in enumerate(data_parallel_partitions): # verify that data partition start locations are 4-byte aligned assert (partitioned_data.data_ptr() % (2 * nccl_start_alignment_factor) == 0) def _ds_initialize_for_param_partitioning_testing(model: Module, cfg: dict) -> DeepSpeedEngine: ds_engine, _, _, _ = deepspeed.initialize(config=cfg, model=model, model_parameters=model.parameters()) return ds_engine def _assert_partition_status(model: Module, valid_statuses: Set[ZeroParamStatus]) -> None: for _, param in model.named_parameters(): assert param.ds_status in valid_statuses, param.ds_summary() def _assert_fully_available(model: Module) -> None: for _, param in model.named_parameters(): assert param.ds_status == ZeroParamStatus.AVAILABLE class EltwiseMultiplicationModule(Module): def __init__(self, weight: Parameter) -> None: super().__init__() self.weight = weight def forward(self, x: Tensor) -> Tensor: _assert_fully_available(self) result = self.weight * x return result class EltwiseMultiplicationTestNetwork_Dict(Module): """used for testing purposes""" def __init__( self, weight1: Parameter, weight2: Parameter, weight3: Parameter, ) -> None: super().__init__() self.__layer1 = EltwiseMultiplicationModule(weight1) self.__layer2 = EltwiseMultiplicationModule(weight2) self.__layer3 = EltwiseMultiplicationModule(weight3) self.loss = L1Loss(reduction="none") def forward(self, x: Tensor, y: Tensor, use_module_trace: bool, param_prefetching: bool) -> Dict[str, Tensor]: _assert_partition_status( self, { ZeroParamStatus.NOT_AVAILABLE, ZeroParamStatus.INFLIGHT, ZeroParamStatus.AVAILABLE, } if use_module_trace else {ZeroParamStatus.NOT_AVAILABLE}, ) pre_layer_expected_states = { ZeroParamStatus.INFLIGHT if param_prefetching else ZeroParamStatus.NOT_AVAILABLE, ZeroParamStatus.AVAILABLE, } post_layer_expected_states = { ZeroParamStatus.AVAILABLE if param_prefetching else ZeroParamStatus.NOT_AVAILABLE, } _assert_partition_status(self.__layer1, pre_layer_expected_states) hidden1 = self.__layer1(x) _assert_partition_status(self.__layer1, post_layer_expected_states) _assert_partition_status(self.__layer2, pre_layer_expected_states) hidden2 = self.__layer2(hidden1) _assert_partition_status(self.__layer2, post_layer_expected_states) _assert_partition_status(self.__layer3, pre_layer_expected_states) y_hat = self.__layer3(hidden2) _assert_partition_status(self.__layer3, post_layer_expected_states) loss = self.loss(y_hat, y) _assert_partition_status( self, { ZeroParamStatus.NOT_AVAILABLE, ZeroParamStatus.INFLIGHT, ZeroParamStatus.AVAILABLE, } if use_module_trace else {ZeroParamStatus.NOT_AVAILABLE}, ) return { "hidden1": hidden1, "hidden2": hidden2, "y_hat": y_hat, "loss": loss, } @staticmethod def to_dict(outputs: Dict[str, Tensor]) -> Dict[str, Tensor]: return outputs class EltwiseMultiplicationNamedTuple(NamedTuple): hidden1: Tensor hidden2: Tensor y_hat: Tensor loss: Tensor class 
EltwiseMultiplicationTestNetwork_NamedTuple(EltwiseMultiplicationTestNetwork_Dict): def forward(self, *args, **kwargs) -> EltwiseMultiplicationNamedTuple: outputs_dicts = super().forward(*args, **kwargs) return EltwiseMultiplicationNamedTuple( hidden1=outputs_dicts["hidden1"], hidden2=outputs_dicts["hidden2"], y_hat=outputs_dicts["y_hat"], loss=outputs_dicts["loss"], ) @staticmethod def to_dict(outputs: EltwiseMultiplicationNamedTuple) -> Dict[str, Tensor]: return { "hidden1": outputs.hidden1, "hidden2": outputs.hidden2, "y_hat": outputs.y_hat, "loss": outputs.loss, } EltwiseMultiplication_namedtuple = namedtuple("EltwiseMultiplication_namedtuple", ["hidden1", "hidden2", "y_hat", "loss"]) class EltwiseMultiplicationTestNetwork_namedtuple(EltwiseMultiplicationTestNetwork_Dict): def forward(self, *args, **kwargs) -> EltwiseMultiplication_namedtuple: outputs_dicts = super().forward(*args, **kwargs) return EltwiseMultiplication_namedtuple( hidden1=outputs_dicts["hidden1"], hidden2=outputs_dicts["hidden2"], y_hat=outputs_dicts["y_hat"], loss=outputs_dicts["loss"], ) @staticmethod def to_dict(outputs: EltwiseMultiplicationNamedTuple) -> Dict[str, Tensor]: return { "hidden1": outputs.hidden1, "hidden2": outputs.hidden2, "y_hat": outputs.y_hat, "loss": outputs.loss, } class EltwiseMultiplicationTestNetwork_Tuple(EltwiseMultiplicationTestNetwork_Dict): def forward(self, *args, **kwargs) -> Tuple[Tensor, Tensor, Tensor, Tensor]: outputs_dicts = super().forward(*args, **kwargs) return ( outputs_dicts["hidden1"], outputs_dicts["hidden2"], outputs_dicts["y_hat"], outputs_dicts["loss"], ) @staticmethod def to_dict(outputs: Tuple[Tensor, Tensor, Tensor, Tensor]) -> Dict[str, Tensor]: return { "hidden1": outputs[0], "hidden2": outputs[1], "y_hat": outputs[2], "loss": outputs[3], } class EltwiseMultiplicationTestNetwork_List(EltwiseMultiplicationTestNetwork_Dict): def forward(self, *args, **kwargs) -> List[Tensor]: outputs_dicts = super().forward(*args, **kwargs) return [ outputs_dicts["hidden1"], outputs_dicts["hidden2"], outputs_dicts["y_hat"], outputs_dicts["loss"], ] @staticmethod def to_dict(outputs: List[Tensor]) -> Dict[str, Tensor]: return { "hidden1": outputs[0], "hidden2": outputs[1], "y_hat": outputs[2], "loss": outputs[3], } class TestZero3ParamPartitioningBase(DistributedTest): world_size = 2 @pytest.mark.parametrize("param_persistence_threshold", [0, 10]) def test_param_persistence_threshold(self, param_persistence_threshold): self._test(param_persistence_threshold=param_persistence_threshold) @pytest.mark.parametrize("fp16_enabled", [True, False]) def test_fp16_enabled(self, fp16_enabled): self._test(fp16_enabled=fp16_enabled) @pytest.mark.parametrize("contiguous_gradients", [True, False]) def test_contiguous_gradients(self, contiguous_gradients): self._test(contiguous_gradients=contiguous_gradients) @pytest.mark.parametrize("offload_optimizer", [True, False]) def test_offload_optimizer(self, offload_optimizer): self._test(offload_optimizer=offload_optimizer) @pytest.mark.parametrize("zero_grad", [True, False]) def test_zero_grad(self, zero_grad): self._test(zero_grad=zero_grad) @pytest.mark.parametrize("prefetching", [True, False]) def test_prefetching(self, prefetching): self._test(prefetching=prefetching) @pytest.mark.parametrize("reduce_scatter", [True, False]) def test_reduce_scatter(self, reduce_scatter): self._test(reduce_scatter=reduce_scatter) @pytest.mark.parametrize("model_class", [ EltwiseMultiplicationTestNetwork_Dict, EltwiseMultiplicationTestNetwork_NamedTuple, 
EltwiseMultiplicationTestNetwork_namedtuple, EltwiseMultiplicationTestNetwork_Tuple, EltwiseMultiplicationTestNetwork_List ]) def test_model_class(self, model_class): self._test(model_class=model_class) def _test( self, param_persistence_threshold: int = 0, fp16_enabled: bool = False, contiguous_gradients: bool = False, offload_optimizer: bool = False, zero_grad: bool = False, prefetching: bool = False, reduce_scatter: bool = False, model_class: EltwiseMultiplicationTestNetwork_Dict = EltwiseMultiplicationTestNetwork_Dict, ) -> None: if offload_optimizer and not contiguous_gradients: return m = 3 n = 5 weights = [Parameter(torch.zeros((m, n), dtype=torch.float32)) for _ in range(3)] model = model_class(*weights) prefetch_bucket_size = sum([p.numel() for p in model.parameters(recurse=True)]) cfg = { "train_micro_batch_size_per_gpu": 1, "zero_optimization": { "stage": 3, "stage3_max_reuse_distance": 0, "stage3_param_persistence_threshold": param_persistence_threshold, "contiguous_gradients": contiguous_gradients, "stage3_prefetch_bucket_size": prefetch_bucket_size if prefetching else 0, "reduce_scatter": reduce_scatter, }, "optimizer": { "type": "Adam", "params": { "lr": 1.0 } }, "fp16": { "enabled": fp16_enabled, "loss_scale": 1.0, }, } if offload_optimizer: cfg["zero_optimization"]["offload_optimizer"] = { "device": "cpu", "pin_memory": True, } ds_engine = _ds_initialize_for_param_partitioning_testing(model, cfg) for i, weight in enumerate(weights): weight.ds_tensor.data = torch.full_like(weight.ds_tensor.data, (i + 1) * (1 + dist.get_rank())) def create_tensor(vals, dtype: torch.dtype = None) -> Tensor: return torch.as_tensor( vals, dtype=dtype or (torch.float16 if fp16_enabled else torch.float32), device=ds_engine.device, ) expected_hidden1 = create_tensor([ [1, 1, 1, 1, 1], [1, 1, 1, 2, 2], [2, 2, 2, 2, 2], ]) expected_hidden2 = create_tensor([ [2, 2, 2, 2, 2], [2, 2, 2, 8, 8], [8, 8, 8, 8, 8], ]) expected_yhat = create_tensor([[6, 6, 6, 6, 6], [6, 6, 6, 48, 48], [48, 48, 48, 48, 48]]) expected_loss = create_tensor([ [5, 5, 5, 5, 5], [5, 5, 5, 47, 47], [47, 47, 47, 47, 47], ]) for train_iter in range(3): activations = ds_engine( x=torch.ones( (m, n), dtype=torch.float16 if fp16_enabled else torch.float32, device=ds_engine.device, ), y=torch.ones( (m, n), dtype=torch.float16 if fp16_enabled else torch.float32, device=ds_engine.device, ), use_module_trace=train_iter > 0, param_prefetching=prefetching and train_iter > 0, ) # for ease in testing convert outputs to dict. activations = model_class.to_dict(activations) assert torch.allclose(activations["hidden1"], expected_hidden1) assert torch.allclose(activations["hidden2"], expected_hidden2) assert torch.allclose(activations["y_hat"], expected_yhat) assert torch.allclose(activations["loss"], expected_loss) ds_engine.backward(activations["loss"].sum()) # check the gradients grad_partitions = ds_engine.optimizer.get_fp32_grad_partitions() assert set(grad_partitions.keys()) == {0 }, f"should have one parameter group but got {len(grad_partitions)}" assert set(grad_partitions[0].keys()) == {0, 1, 2} dloss_wrt_layer1 = grad_partitions[0][0] dloss_wrt_layer2 = grad_partitions[0][1] dloss_wrt_layer3 = grad_partitions[0][2] assert dloss_wrt_layer1.dtype == torch.float assert dloss_wrt_layer2.dtype == torch.float assert dloss_wrt_layer3.dtype == torch.float # layer1 = [..., 1, 2, ...] # layer2 = [..., 2, 4, ...] # layer3 = [..., 3, 6, ...] 
# dloss_wrt_layer3 = hidden2 # dloss_wrt_layer2 = layer3 * hidden1 # dloss_wrt_layer1 = layer3 * layer2 * x grad_multiplier = 1 if zero_grad else (train_iter + 1) if dist.get_rank() == 0: assert torch.allclose( dloss_wrt_layer3.to(get_accelerator().device_name()), grad_multiplier * create_tensor([2] * 8, torch.float), ) assert torch.allclose( dloss_wrt_layer2.to(get_accelerator().device_name()), grad_multiplier * create_tensor([3 * 1] * 8, torch.float), ) assert torch.allclose( dloss_wrt_layer1.to(get_accelerator().device_name()), grad_multiplier * create_tensor([3 * 2 * 1] * 8, torch.float), ) elif dist.get_rank() == 1: # parameters dont split evenly across ranks so rank 1 has a zero-padded # partition assert torch.allclose( dloss_wrt_layer3.to(get_accelerator().device_name()), grad_multiplier * create_tensor(([8] * 7) + [0], torch.float), ) assert torch.allclose( dloss_wrt_layer2.to(get_accelerator().device_name()), grad_multiplier * create_tensor(([6 * 2] * 7) + [0], torch.float), ) assert torch.allclose( dloss_wrt_layer1.to(get_accelerator().device_name()), grad_multiplier * create_tensor(([6 * 4 * 1] * 7) + [0], torch.float), ) else: raise RuntimeError("test has world size of two") if zero_grad: ds_engine.optimizer.zero_grad() # TODO. add testing for this - for now we just call it to make sure it # doesn't throw ds_engine.optimizer.step() # taking an optimizer step invalidates all parameters, make sure everything # has been partitioned afterwards _assert_partition_status(ds_engine, {ZeroParamStatus.NOT_AVAILABLE}) assert not math.isclose(ds_engine.optimizer._global_grad_norm, 0.0) @pytest.mark.parametrize("init_context_manager", [True, False]) @pytest.mark.parametrize("reduce_scatter", [True, False]) class TestZero3ParamPartitioningLargeParam(DistributedTest): world_size = 4 def test(self, init_context_manager: bool, reduce_scatter: bool, param_sz: int = 8100) -> None: class LargeParamModel(Module): def __init__(self): super().__init__() self.param = Parameter(torch.zeros((param_sz, ), dtype=torch.float32)) # only do weight initialization on root rank to # make sure we are broadcasting correctly from rank 0 if dist.get_rank() == 0: partition_sz = math.ceil(self.param.numel() / dist.get_world_size()) offset = 0 for rank in range(dist.get_world_size()): with torch.no_grad(): self.param[offset:offset + partition_sz].fill_(rank) offset += partition_sz def forward(self, x: Tensor) -> Tensor: return x * self.param ds_config = { "train_micro_batch_size_per_gpu": 1, "zero_optimization": { "stage": 3, "stage3_max_reuse_distance": 0, "contiguous_gradients": True, "overlap_comm": True, "reduce_scatter": reduce_scatter, }, "optimizer": { "type": "Adam", "params": { "lr": 1.0 } }, "fp16": { "enabled": True, "loss_scale": 1.0, }, } with deepspeed.zero.Init(mem_efficient_linear=False, enabled=init_context_manager): model = LargeParamModel() ds_engine = _ds_initialize_for_param_partitioning_testing(model, ds_config) for train_iter in range(3): # test multiple iterations to cover prefetching activation: Tensor = ds_engine(torch.ones(param_sz, dtype=torch.float16, device=ds_engine.device)) partition_sz = math.ceil(param_sz / self.world_size) for rank_idx, start_idx in enumerate(range(0, param_sz, partition_sz)): activation_from_partition = activation[start_idx:start_idx + partition_sz] assert torch.allclose( activation_from_partition, torch.full_like(activation_from_partition, rank_idx), ) ds_engine.backward(activation.sum()) ds_engine.allreduce_gradients() avgd_gradients = 
ds_engine.optimizer.averaged_gradients assert set(avgd_gradients.keys()) == {0}, "should only have one parameter group" (weight_gradient, ) = avgd_gradients[0] expected_weight_gradient = (train_iter + 1) * torch.full_like(weight_gradient, 1) assert torch.allclose(weight_gradient, expected_weight_gradient) @pytest.mark.parametrize("init_context_manager", [True, False]) class TestZero3ParamPartitioningManyParams(DistributedTest): world_size = 2 def test(self, init_context_manager: bool, param_sz: int = 100, n_layers: int = 100) -> None: class ManyParamModel(Module): def __init__(self) -> None: super().__init__() self.modulelist = ModuleList( EltwiseMultiplicationModule(weight=Parameter(torch.empty((param_sz, ), dtype=torch.float32))) for _ in range(n_layers)) for layer_num, module in enumerate(self.modulelist): with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0): param: Parameter = module.weight partition_sz = math.ceil(param.numel() / dist.get_world_size()) offset = 0 for rank in range(dist.get_world_size()): with torch.no_grad(): param[offset:offset + partition_sz].fill_(2 * layer_num * rank) offset += partition_sz def forward(self, x: Tensor) -> Tensor: activations = [] for module in self.modulelist: x = module(x) activations.append(x) return activations ds_cfg = { "train_micro_batch_size_per_gpu": 1, "zero_optimization": { "stage": 3, "stage3_max_reuse_distance": 0, "contiguous_gradients": True, "overlap_comm": True, }, "optimizer": { "type": "Adam", "params": { "lr": 1.0 } }, "fp16": { "enabled": True, "loss_scale": 1.0, }, } with deepspeed.zero.Init(config=ds_cfg, mem_efficient_linear=False, enabled=init_context_manager): model = ManyParamModel() ds_engine = _ds_initialize_for_param_partitioning_testing(model, ds_cfg) for _ in range(3): # test multiple iterations to cover prefetching activations: List[Tensor] = ds_engine( torch.ones((param_sz, ), dtype=torch.float16, device=ds_engine.device)) assert len(activations) == n_layers partition_sz = math.ceil(param_sz / self.world_size) expected_activations = torch.empty(param_sz, dtype=torch.float16, device=ds_engine.device) for start_idx in range(0, param_sz, partition_sz): expected_activations[start_idx:start_idx + partition_sz] = dist.get_rank() for layer_num, activation in enumerate(activations): expected_activations *= 2 * layer_num assert torch.allclose(activation, expected_activations) # TODO. 
finish writing this test ds_engine.backward(activations[-1].sum()) avgd_gradients = ds_engine.optimizer.averaged_gradients assert set(avgd_gradients.keys()) == {0}, "should only have one parameter group" weight_gradients: List[Tensor] = avgd_gradients[0] for layer_num, activation in enumerate(weight_gradients): pass class TestZero3InitForParentWeightInitialization(DistributedTest): world_size = 4 def test(self): class ModelWhereParentInitializesChildWeights(Module): def __init__(self) -> None: super().__init__() self.linear = Linear(12, 1) self.apply(self.__init_weights) def __init_weights(self, module): if isinstance(module, Linear): with torch.no_grad(): module.weight.fill_(1 + dist.get_rank()) ds_cfg = { "train_micro_batch_size_per_gpu": 1, "zero_optimization": { "stage": 3, "stage3_max_reuse_distance": 0, "contiguous_gradients": True, "overlap_comm": True, }, "optimizer": { "type": "Adam", "params": { "lr": 1.0 } }, "fp16": { "enabled": True, "loss_scale": 1.0, }, } with deepspeed.zero.Init(config=ds_cfg, mem_efficient_linear=False, enabled=True): model = ModelWhereParentInitializesChildWeights() assert model.linear.weight.ds_tensor.numel() == math.ceil(12 / self.world_size) assert torch.allclose( model.linear.weight.ds_tensor, torch.full_like(model.linear.weight.ds_tensor, 1), ) @pytest.mark.skip("not working") @pytest.mark.parametrize("param_persistence_threshold", [0, 10]) @pytest.mark.parametrize("contiguous_gradients", [True, False]) @pytest.mark.parametrize("offload_optimizer", [True, False]) @pytest.mark.parametrize("zero_grad", [True, False]) @pytest.mark.parametrize("prefetching", [True, False]) @pytest.mark.parametrize("reduce_scatter", [True, False]) @pytest.mark.parametrize( "model_class", [ EltwiseMultiplicationTestNetwork_Dict, EltwiseMultiplicationTestNetwork_NamedTuple, EltwiseMultiplicationTestNetwork_namedtuple, EltwiseMultiplicationTestNetwork_Tuple, EltwiseMultiplicationTestNetwork_List, ], ) class TestZero3ParamPartitioningBaseBF16(DistributedTest): world_size = 2 def test( self, param_persistence_threshold: int, contiguous_gradients: bool, offload_optimizer: bool, zero_grad: bool, prefetching: bool, reduce_scatter: bool, model_class: EltwiseMultiplicationTestNetwork_Dict, ) -> None: if offload_optimizer and not contiguous_gradients: return m = 3 n = 5 weights = [Parameter(torch.zeros((m, n), dtype=torch.float32)) for _ in range(3)] model = model_class(*weights) prefetch_bucket_size = sum([p.numel() for p in model.parameters(recurse=True)]) cfg = { "train_micro_batch_size_per_gpu": 1, "zero_optimization": { "stage": 3, "stage3_max_reuse_distance": 0, "stage3_param_persistence_threshold": param_persistence_threshold, "contiguous_gradients": contiguous_gradients, "stage3_prefetch_bucket_size": prefetch_bucket_size if prefetching else 0, "reduce_scatter": reduce_scatter, }, "optimizer": { "type": "Adam", "params": { "lr": 1.0 } }, "bf16": { "enabled": True, "loss_scale": 1.0, }, } if offload_optimizer: cfg["zero_optimization"]["offload_optimizer"] = { "device": "cpu", "pin_memory": True, } ds_engine = _ds_initialize_for_param_partitioning_testing(model, cfg) for i, weight in enumerate(weights): weight.ds_tensor.data = torch.full_like(weight.ds_tensor.data, (i + 1) * (1 + dist.get_rank())) def create_tensor(vals): return torch.as_tensor(vals, dtype=torch.bfloat16, device=ds_engine.device) expected_hidden1 = create_tensor([ [1, 1, 1, 1, 1], [1, 1, 1, 2, 2], [2, 2, 2, 2, 2], ]) expected_hidden2 = create_tensor([ [2, 2, 2, 2, 2], [2, 2, 2, 8, 8], [8, 8, 8, 8, 8], ]) 
expected_yhat = create_tensor([[6, 6, 6, 6, 6], [6, 6, 6, 48, 48], [48, 48, 48, 48, 48]]) expected_loss = create_tensor([ [5, 5, 5, 5, 5], [5, 5, 5, 47, 47], [47, 47, 47, 47, 47], ]) for train_iter in range(3): _assert_partition_status(ds_engine, {ZeroParamStatus.NOT_AVAILABLE}) activations = ds_engine( x=torch.ones((m, n), dtype=torch.bfloat16, device=ds_engine.device), y=torch.ones((m, n), dtype=torch.bfloat16, device=ds_engine.device), use_module_trace=train_iter > 0, param_prefetching=prefetching and train_iter > 0, ) # for ease in testing convert outputs to dict. activations = model_class.to_dict(activations) assert torch.allclose(activations["hidden1"], expected_hidden1) assert torch.allclose(activations["hidden2"], expected_hidden2) assert torch.allclose(activations["y_hat"], expected_yhat) assert torch.allclose(activations["loss"], expected_loss) ds_engine.backward(activations["loss"].sum()) _assert_partition_status(ds_engine, {ZeroParamStatus.NOT_AVAILABLE}) # check the gradients grad_partitions = ds_engine.optimizer.get_fp32_grad_partitions() assert set(grad_partitions.keys()) == {0 }, f"should have one parameter group but got {len(grad_partitions)}" assert set(grad_partitions[0].keys()) == {0, 1, 2} dloss_wrt_layer1 = grad_partitions[0][0] dloss_wrt_layer2 = grad_partitions[0][1] dloss_wrt_layer3 = grad_partitions[0][2] # layer1 = [..., 1, 2, ...] # layer2 = [..., 2, 4, ...] # layer3 = [..., 3, 6, ...] # dloss_wrt_layer3 = hidden2 # dloss_wrt_layer2 = layer3 * hidden1 # dloss_wrt_layer1 = layer3 * layer2 * x expected_grad_dtype = torch.float32 if offload_optimizer else torch.bfloat16 grad_multiplier = 1 if zero_grad else (train_iter + 1) if dist.get_rank() == 0: assert torch.allclose( dloss_wrt_layer3.to(get_accelerator().device_name()), grad_multiplier * create_tensor([2] * 8).to(expected_grad_dtype), ) assert torch.allclose( dloss_wrt_layer2.to(get_accelerator().device_name()), grad_multiplier * create_tensor([3 * 1] * 8).to(expected_grad_dtype), ) assert torch.allclose( dloss_wrt_layer1.to(get_accelerator().device_name()), grad_multiplier * create_tensor([3 * 2 * 1] * 8).to(expected_grad_dtype), ) elif dist.get_rank() == 1: # parameters dont split evenly across ranks so rank 1 has a zero-padded # partition assert torch.allclose( dloss_wrt_layer3.to(get_accelerator().device_name()), grad_multiplier * create_tensor(([8] * 7) + [0]).to(expected_grad_dtype), ) assert torch.allclose( dloss_wrt_layer2.to(get_accelerator().device_name()), grad_multiplier * create_tensor(([6 * 2] * 7) + [0]).to(expected_grad_dtype), ) assert torch.allclose( dloss_wrt_layer1.to(get_accelerator().device_name()), grad_multiplier * create_tensor(([6 * 4 * 1] * 7) + [0]).to(expected_grad_dtype), ) else: raise RuntimeError("test has world size of two") if zero_grad: ds_engine.optimizer.zero_grad() # TODO. 
add testing for this - for now we just call it to make sure it # doesn't throw ds_engine.optimizer.step() _assert_partition_status(ds_engine, {ZeroParamStatus.NOT_AVAILABLE}) class TestZeroOffloadStage1(DistributedTest): world_size = 2 def test(self): config_dict = { "train_batch_size": 4, "gradient_accumulation_steps": 2, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 1e-4 } }, "fp16": { "enabled": True }, "zero_optimization": { "stage": 1, "offload_optimizer": { "device": "cpu" } }, } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) dist.barrier() for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() @pytest.mark.parametrize("return_type", [tuple, list, dict]) class TestZero3DictFwd(DistributedTest): world_size = 1 def test(self, return_type): config_dict = { "train_batch_size": 4, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 1e-4 } }, "fp16": { "enabled": True }, "zero_optimization": { "stage": 3 }, } hidden_dim = 10 class MyModel(torch.nn.Module): def __init__(self, hidden_dim): super(MyModel, self).__init__() self.l1 = torch.nn.Linear(hidden_dim, hidden_dim) self.cel = torch.nn.CrossEntropyLoss() def forward(self, x, y): x = self.l1(x) loss = self.cel(x, y) if return_type == dict: val = {"a": x, "loss": loss, "b": 1, "c": None} elif return_type == list: val = [x, loss] elif return_type == tuple: val = (x, loss) else: raise NotImplementedError return val with deepspeed.zero.Init(): model = MyModel(hidden_dim) model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) dist.barrier() for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) if return_type == dict: loss = loss["loss"] else: loss = loss[1] model.backward(loss) model.step() @pytest.mark.parametrize("zero_stage", [1, 2, 3]) class TestZeroAdamOptimizerStepCount(DistributedTest): world_size = 1 def test(self, zero_stage): # force all params to be partitioned by forcing threshold=0 config_dict = { "train_micro_batch_size_per_gpu": 2, "gradient_accumulation_steps": 2, "steps_per_print": 1, "zero_optimization": { "stage": zero_stage, "stage3_param_persistence_threshold": 0, "sub_group_size": 4, }, "optimizer": { "type": "Adam", "params": { "lr": 1e-3 } }, "fp16": { "enabled": True, "initial_scale_power": 8 }, } hidden_dim = 4 model = SimpleModel(hidden_dim=hidden_dim, nlayers=12) model, optimizer, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) for i, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() step_counts = [] if zero_stage == 3: for sub_group_id, _ in enumerate(optimizer.fp16_groups): fp32_param = optimizer.fp32_partitioned_groups_flat[sub_group_id] state = optimizer.optimizer.state[fp32_param] step_counts.append(state["step"]) assert all(step == step_counts[0] for step in step_counts) elif zero_stage == 1 or zero_stage == 2: for param_group in optimizer.optimizer.param_groups: for param in param_group["params"]: state = 
optimizer.optimizer.state[param] step_counts.append(state["step"]) assert all(step == step_counts[0] for step in step_counts) class TestZeroFrozenWeights(DistributedTest): world_size = 1 def test(self): config_dict = { "train_batch_size": 4, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 1e-4 } }, "fp16": { "enabled": True }, "zero_optimization": { "stage": 3 }, } hidden_dim = 10 class MyModel(torch.nn.Module): def __init__(self, hidden_dim): super(MyModel, self).__init__() self.l1 = torch.nn.Linear(hidden_dim, hidden_dim) self.l2 = torch.nn.Linear(hidden_dim, hidden_dim) self.act = torch.nn.ReLU() self.cel = torch.nn.CrossEntropyLoss() # freeze one fc self.l2.weight.requires_grad = False self.l2.bias.requires_grad = False def forward(self, x, y): x = self.l1(x) x = self.act(x) x = self.l2(x) loss = self.cel(x, y) val = (x, loss) return val with deepspeed.zero.Init(config_dict_or_path=config_dict): model = MyModel(hidden_dim) model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) dist.barrier() for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) loss = loss[1] model.backward(loss) model.step() @pytest.mark.parametrize("force_ds_optim", [True, False]) class TestZeroOffloadOptim(DistributedTest): world_size = 1 def test(self, force_ds_optim): config_dict = { "train_batch_size": 4, "gradient_accumulation_steps": 2, "steps_per_print": 1, "fp16": { "enabled": True }, "zero_optimization": { "stage": 1, "offload_optimizer": { "device": "cpu" } }, "zero_force_ds_cpu_optimizer": force_ds_optim, } hidden_dim = 10 model = SimpleModel(hidden_dim) optimizer = torch.optim.Adam(model.parameters()) if force_ds_optim: with pytest.raises(ZeRORuntimeException): model, _, _, _ = deepspeed.initialize(model=model, optimizer=optimizer, config=config_dict) else: model, _, _, _ = deepspeed.initialize(model=model, optimizer=optimizer, config=config_dict) @pytest.mark.parametrize("training", [True, False]) class TestZeroPartitionCache(DistributedTest): world_size = 1 def test_training_partition_cache(self, training): hidden_dim = 10 config_dict = { "train_batch_size": 2, "fp16": { "enabled": True, "initial_scale_power": 8 }, "zero_optimization": { "stage": 3, "stage3_param_persistence_threshold": hidden_dim, }, } if training: config_dict["optimizer"] = {"type": "Adam"} with deepspeed.zero.Init(config_dict_or_path=config_dict): model = SimpleModel(hidden_dim, empty_grad=False) model, _, _, _ = deepspeed.initialize(model=model, config=config_dict) dtype = torch.half data_loader = random_dataloader( model=model, total_samples=6, hidden_dim=hidden_dim, device=model.device, dtype=dtype, ) for _, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) if training: model.backward(loss) model.step() persist_param_size = sum([p.numel() for p in model.parameters() if p.ds_persist]) assert persist_param_size >= sum([p.numel() for p in model.parameters()]) model.empty_partition_cache() assert sum([p.numel() for p in model.parameters()]) == 0
54,564
36.119048
125
py
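The expected-gradient comments in the ZeRO-3 training test above (dloss_wrt_layer3 = hidden2, dloss_wrt_layer2 = layer3 * hidden1, dloss_wrt_layer1 = layer3 * layer2 * x) follow from the chain rule for a stack of elementwise scalings. Below is a minimal, standalone sketch verifying that relationship with plain torch autograd; the values and shapes are made up and it is independent of DeepSpeed and of the elementwise-multiplication network the test uses.

# Standalone chain-rule check of the gradient relationships asserted above.
# Illustrative only: plain torch autograd with hypothetical values.
import torch

x = torch.full((4,), 2.0)
w1 = torch.full((4,), 1.0, requires_grad=True)
w2 = torch.full((4,), 2.0, requires_grad=True)
w3 = torch.full((4,), 3.0, requires_grad=True)

hidden1 = w1 * x
hidden2 = w2 * hidden1
y_hat = w3 * hidden2
y_hat.sum().backward()

# dloss_wrt_layer3 = hidden2
assert torch.allclose(w3.grad, hidden2.detach())
# dloss_wrt_layer2 = layer3 * hidden1
assert torch.allclose(w2.grad, (w3 * hidden1).detach())
# dloss_wrt_layer1 = layer3 * layer2 * x
assert torch.allclose(w1.grad, (w3 * w2 * x).detach())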
DeepSpeed
DeepSpeed-master/tests/unit/runtime/zero/test_zero_context_return.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from types import SimpleNamespace import torch import pytest import deepspeed from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus from utils import setup_serial_env from unit.common import DistributedTest class DanglingBias(torch.nn.Linear): def forward(self, *inputs): out = super().forward(*inputs) # return the bias to trigger a dangling external param return out, self.bias class DataClass: """Just wraps data in an object. """ def __init__(self, out=None, bias=None): self.out = out self.bias = bias class DanglingBiasClass(DanglingBias): def forward(self, *inputs): out, bias = super().forward(*inputs) return DataClass(out=out, bias=bias) class DanglingAttention(torch.nn.Linear): def __init__(self, dim=16, return_obj=False): super().__init__(dim, dim) self.dim = dim self.return_obj = return_obj if return_obj: self.d_linear = DanglingBiasClass(dim, dim) else: self.d_linear = DanglingBias(dim, dim) def forward(self, input): out = super().forward(input) if self.return_obj: out_obj = self.d_linear(out) assert out_obj.bias.ds_status == ZeroParamStatus.AVAILABLE # forward the external param return out_obj.out, out_obj.bias else: out, bias = self.d_linear(out) assert hasattr(bias, 'ds_status') or hasattr(bias, 'ds_param_alias') z3_bias = bias if hasattr(bias, 'ds_status') else bias.ds_param_alias assert z3_bias.ds_status == ZeroParamStatus.AVAILABLE return out, bias class ModelContainer(torch.nn.Module): def __init__(self, dim=16, return_obj=False): super().__init__() self.dim = dim self.linear1 = torch.nn.Linear(dim, dim) self.dangler = DanglingAttention(dim, return_obj=return_obj) def forward(self, input): act1 = self.linear1(input) # bias is actually dangler.d_linear1.bias act2, bias = self.dangler(act1) return (act2 + bias).sum() class DanglingExt(torch.nn.Module): def __init__(self, dim=16): super().__init__() self.dim = dim self.container = ModelContainer(dim) def forward(self, input): out = self.container(input) # Make sure it's at the right level of the stack assert len(self._external_params) == 0 assert len(self.container._external_params) == 1 assert len(self.container.dangler._external_params) == 0 return out class ModelContainerVariableOutputType(ModelContainer): def __init__(self, dim=16, output_type=dict): super().__init__() self.output_type = output_type self.dim = dim self.linear1 = torch.nn.Linear(dim, dim) def forward(self, input): act1 = self.linear1(input) if self.output_type is dict: return {'loss': act1.sum()} if self.output_type is torch.tensor: return act1.sum() config = { "train_batch_size": 1, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 } }, "fp16": { "enabled": True, "loss_scale": 138. 
}, "zero_optimization": { "stage": 3, "stage3_param_persistence_threshold": 1, } } class TestReturnParam(DistributedTest): world_size = 1 def test_ext_param_return(self): setup_serial_env() net = DanglingExt() args = SimpleNamespace(local_rank=0) engine, _, _, _ = deepspeed.initialize(args=args, model=net, model_parameters=net.parameters(), config=config) for _ in range(5): input = torch.rand(net.dim).to(engine.device).half() loss = engine(input) engine.backward(loss) engine.step() @pytest.mark.skip('WIP') def test_ext_param_returnobj(self): setup_serial_env() print() net = ModelContainer(return_obj=True) args = SimpleNamespace(local_rank=0) engine, _, _, _ = deepspeed.initialize(args=args, model=net, model_parameters=net.parameters(), config=config) for _ in range(5): input = torch.rand(net.dim).to(engine.device).half() loss = engine(input) assert len(net._external_params) == 1 assert len(net.dangler._external_params) == 0 engine.backward(loss) engine.step() @pytest.mark.parametrize('output_type', [torch.tensor, dict, None]) def test_stage_3_output_type(self, output_type): setup_serial_env() print() net = ModelContainerVariableOutputType(output_type=output_type) args = SimpleNamespace(local_rank=0) engine, _, _, _ = deepspeed.initialize(args=args, model=net, model_parameters=net.parameters(), config=config) for _ in range(1): input = torch.rand(net.dim).to(engine.device).half() loss = engine(input) if loss is not None: if isinstance(loss, dict): loss = loss['loss'] engine.backward(loss) engine.step()
5,373
27.892473
118
py
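The dangling-bias pattern in test_zero_context_return.py relies on ZeRO-3 tracking "external" parameters that a module returns or consumes without owning. DeepSpeed also exposes an explicit registration hook for that case; the sketch below shows the idea with a made-up module (the Consumer class and its usage are assumptions for illustration, deepspeed.zero.register_external_parameter is the documented entry point).

# Hedged sketch: explicitly registering a parameter that is used outside its owner.
import torch
import deepspeed


class Consumer(torch.nn.Module):
    """Uses a parameter owned by another module during forward (illustrative)."""

    def __init__(self, owner: torch.nn.Linear):
        super().__init__()
        self.owner = owner
        # Tell ZeRO-3 that owner.weight is accessed from this module's forward,
        # so it is gathered before forward and re-partitioned afterwards.
        deepspeed.zero.register_external_parameter(self, owner.weight)

    def forward(self, x):
        return torch.nn.functional.linear(x, self.owner.weight)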
DeepSpeed
DeepSpeed-master/tests/unit/runtime/zero/test_zero_tiled.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import copy import torch from deepspeed.runtime.zero.tiling import TiledLinear, TiledLinearReturnBias import pytest @pytest.mark.parametrize('in_splits,out_splits', [(1, 1), (2, 2), (5, 5), (32, 32)]) def test_tiled_init(in_splits, out_splits): in_f = 32 out_f = 40 base = torch.nn.Linear(in_f, out_f, bias=True) l = TiledLinear(in_f, out_f, bias=True, init_linear=copy.deepcopy(base), out_splits=out_splits, in_splits=in_splits) for out_id in range(out_splits): for in_id in range(in_splits): local_l = l.linears[out_id][in_id] assert isinstance(local_l, torch.nn.Linear) rstart = l.out_parts[out_id] rstop = l.out_parts[out_id + 1] cstart = l.in_parts[in_id] cstop = l.in_parts[in_id + 1] local_out = rstop - rstart local_in = cstop - cstart assert local_l.weight.size()[1] == local_in, f'local[{out_id}][{in_id}].size {local_l.weight.size()}' assert local_l.weight.size()[0] == local_out test = base.weight[rstart:rstop, cstart:cstop] assert local_l.weight.size() == test.size() assert torch.equal(local_l.weight.data, test.data) if in_id == in_splits - 1: assert local_l.bias is not None assert local_l.bias.size()[0] == local_out else: assert local_l.bias is None @pytest.mark.parametrize('in_splits,out_splits', [(0, 0), (33, 33)]) def test_tiled_baddim(in_splits, out_splits): dim = 32 with pytest.raises(RuntimeError): l = TiledLinear(dim, dim, out_splits=out_splits, in_splits=in_splits) @pytest.mark.skip(reason="seeing nondeterministic failures, skipping for now") @pytest.mark.parametrize('bias', [False, True]) @pytest.mark.parametrize('in_splits,out_splits', [(1, 1), (2, 2)]) @pytest.mark.parametrize('in_f,out_f', [(32, 32), (23, 29), (29, 23)]) def test_tiled_forward(in_splits, out_splits, bias, in_f, out_f): base = torch.nn.Linear(in_f, out_f, bias=bias) test = TiledLinear(in_f, out_f, bias=bias, init_linear=copy.deepcopy(base), out_splits=out_splits, in_splits=in_splits) inp = torch.rand(in_f) base_out = base(copy.deepcopy(inp)) test_out = test(copy.deepcopy(inp)) assert torch.allclose(base_out, test_out, rtol=1e-4) @pytest.mark.skip(reason="seeing nondeterministic failures, skipping for now") @pytest.mark.parametrize('bias', [False, True]) @pytest.mark.parametrize('in_splits,out_splits', [(1, 1), (2, 2)]) @pytest.mark.parametrize('in_f,out_f', [(32, 32), (23, 29), (29, 23)]) def test_tiled_backward(in_splits, out_splits, bias, in_f, out_f): base = torch.nn.Linear(in_f, out_f, bias=bias) test = TiledLinear(in_f, out_f, bias=bias, init_linear=copy.deepcopy(base), out_splits=out_splits, in_splits=in_splits) inp = torch.rand(in_f) base_out = base(copy.deepcopy(inp)) test_out = test(copy.deepcopy(inp)) assert torch.allclose(base_out, test_out, rtol=1e-4) base_out.sum().backward() test_out.sum().backward() # compare grads for row in range(out_splits): rstart = test.out_parts[row] rstop = test.out_parts[row + 1] for col in range(in_splits): cstart = test.in_parts[col] cstop = test.in_parts[col + 1] local = test.linears[row][col] base_grad = base.weight.grad[rstart:rstop, cstart:cstop] assert torch.allclose(base_grad, local.weight.grad, rtol=1e-4) if local.bias is not None: base_grad = base.bias.grad[rstart:rstop] assert torch.allclose(base_grad, local.bias.grad, rtol=1e-4) class LinearWrapper(torch.nn.Linear): """Returns its own bias to simulate Megatron-LM's behavior. Megatron-LM optionally delays the bias addition to fuse with a proceeding kernel. 
""" def forward(self, input): out = super().forward(input) return out, self.bias @pytest.mark.skip(reason="seeing nondeterministic failures, skipping for now") @pytest.mark.parametrize('bias', [False, True]) @pytest.mark.parametrize('in_splits,out_splits', [(1, 1), (2, 2)]) @pytest.mark.parametrize('in_f,out_f', [(32, 32), (23, 29), (29, 23)]) def test_tiled_returnbias_backward(in_splits, out_splits, bias, in_f, out_f): base = LinearWrapper(in_f, out_f, bias=bias) test = TiledLinearReturnBias(in_f, out_f, bias=bias, linear_cls=LinearWrapper, init_linear=copy.deepcopy(base), out_splits=out_splits, in_splits=in_splits) inp = torch.rand(in_f) base_out_t, base_out_b = base(copy.deepcopy(inp)) test_out_t, test_out_b = test(copy.deepcopy(inp)) assert torch.allclose(base_out_t, test_out_t, rtol=1e-4) if base_out_b is None: assert test_out_b is None base_out_b = torch.zeros_like(base_out_t) test_out_b = torch.zeros_like(test_out_t) else: assert test_out_b is not None assert torch.allclose(base_out_b, test_out_b, rtol=1e-4) (base_out_t + base_out_b).sum().backward() (test_out_t + test_out_b).sum().backward() # compare grads for row in range(out_splits): rstart = test.out_parts[row] rstop = test.out_parts[row + 1] for col in range(in_splits): cstart = test.in_parts[col] cstop = test.in_parts[col + 1] local = test.linears[row][col] base_grad = base.weight.grad[rstart:rstop, cstart:cstop] assert torch.allclose(base_grad, local.weight.grad, rtol=1e-4) if local.bias is not None: base_grad = base.bias.grad[rstart:rstop] assert torch.allclose(base_grad, local.bias.grad, rtol=1e-4)
6,364
34.758427
113
py
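The tiling tests above compare each sub-linear's weight against base.weight[rstart:rstop, cstart:cstop], where the boundaries come from splitting the input and output dimensions into contiguous ranges. The sketch below reproduces that slicing arithmetic with the partition_uniform helper imported elsewhere in this test suite; whether TiledLinear computes its out_parts/in_parts exactly this way is an assumption, and the shapes are arbitrary.

# Sketch of the tile-boundary arithmetic the assertions above rely on.
import torch
from deepspeed.runtime.utils import partition_uniform

in_f, out_f = 32, 40
in_splits, out_splits = 2, 5

base = torch.nn.Linear(in_f, out_f, bias=True)
in_parts = partition_uniform(in_f, in_splits)    # e.g. [0, 16, 32]
out_parts = partition_uniform(out_f, out_splits)  # e.g. [0, 8, 16, 24, 32, 40]

for r in range(out_splits):
    for c in range(in_splits):
        # Each tile is the slice a TiledLinear sub-linear is initialized from.
        tile = base.weight[out_parts[r]:out_parts[r + 1], in_parts[c]:in_parts[c + 1]]
        assert tile.shape == (out_parts[r + 1] - out_parts[r], in_parts[c + 1] - in_parts[c])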
DeepSpeed
DeepSpeed-master/tests/unit/runtime/zero/test_zero_context.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from types import SimpleNamespace import torch import deepspeed from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus, partitioned_param_data_shape import deepspeed.comm as dist from unit.common import DistributedTest from unit.simple_model import SimpleModel from utils import setup_serial_env # Test that no sub-class or super-class is missed class ConvX(torch.nn.Conv1d): def __init__(self, *args): super().__init__(*args) # This would not be partitioned before bugfix 5ca8167 self.param_in = torch.nn.Parameter(torch.FloatTensor(5).uniform_()) def forward(self, x): return x class ConvNet(torch.nn.Module): def __init__(self): super().__init__() self.conv1 = ConvX(1, 3, 4) self.param = torch.nn.Parameter(torch.FloatTensor(5).uniform_()) def forward(self, x): return x config = { "train_batch_size": 1, "steps_per_print": 1, "optimizer": { "type": "Adam", "params": { "lr": 0.00015 } }, "fp16": { "enabled": True, "loss_scale": 138. }, "zero_optimization": { "stage": 3, "stage3_param_persistence_threshold": 1, } } class TestZeroGatheredParametersFree(DistributedTest): world_size = 1 def test(self): config_dict = {"train_batch_size": 1, "zero_optimization": {"stage": 3}} hidden_dim = 10 class MyModel(torch.nn.Module): def __init__(self, hidden_dim): super(MyModel, self).__init__() self.l1 = torch.nn.Linear(hidden_dim, hidden_dim) with deepspeed.zero.Init(config_dict_or_path=config_dict): model = MyModel(hidden_dim) with deepspeed.zero.GatheredParameters(list(model.parameters())): assert model.l1.weight.numel() != 0, "GatheredParameters should give a non-0-sized tensor" # on exit from `GatheredParameters` the gathered params should be freed and not leak memory assert model.l1.weight.numel() == 0, "outside of GatheredParameters the param should go back to be 0-sized" class TestSerialContext(DistributedTest): world_size = 1 init_distributed = False set_dist_env = False def test_subclass_param(self): setup_serial_env() with deepspeed.zero.Init(config=config): model = ConvNet() assert model.param.ds_status == ZeroParamStatus.NOT_AVAILABLE assert model.conv1.param_in.ds_status == ZeroParamStatus.NOT_AVAILABLE def test_scattered_init_dist(self): setup_serial_env() assert not dist.is_initialized() with deepspeed.zero.Init(): assert dist.is_initialized() def test_scatter_halftype(self): setup_serial_env() with deepspeed.zero.Init(): l = torch.nn.Linear(10, 10) assert l.weight.ds_tensor.dtype == torch.float16 y = torch.LongTensor([3, 3]) assert y.dtype == torch.long def test_throughput_calculation(self): setup_serial_env() train_micro_batch_size_per_gpu = 7 gradient_accumulation_steps = 6 config_dict = { "train_micro_batch_size_per_gpu": train_micro_batch_size_per_gpu, "gradient_accumulation_steps": gradient_accumulation_steps, "optimizer": { "type": "Adam", "params": { "lr": 0.001, } }, "zero_optimization": { "stage": 0 }, } args = SimpleNamespace(local_rank=0) net = SimpleModel(hidden_dim=4) engine, _, _, _ = deepspeed.initialize(args=args, config=config_dict, model=net, model_parameters=net.parameters()) assert engine.tput_timer.batch_size == train_micro_batch_size_per_gpu * gradient_accumulation_steps assert not engine.tput_timer.initialized assert not engine.tput_timer.started assert engine.tput_timer.start_step == 2 assert engine.tput_timer.start_time == 0 assert engine.tput_timer.micro_step_count == 0 assert engine.tput_timer.global_step_count == 0 assert engine.tput_timer.total_elapsed_time 
== 0 # calling stop() while uninitialized - has no effect engine.tput_timer.stop() assert not engine.tput_timer.initialized assert not engine.tput_timer.started assert engine.tput_timer.start_time == 0 assert engine.tput_timer.micro_step_count == 0 assert engine.tput_timer.global_step_count == 0 assert engine.tput_timer.total_elapsed_time == 0 # any call to start() (from dataloader or not) initializes the timer engine.tput_timer.start() assert engine.tput_timer.initialized assert engine.tput_timer.started assert engine.tput_timer.start_time == 0 assert engine.tput_timer.micro_step_count == 0 assert engine.tput_timer.global_step_count == 0 assert engine.tput_timer.total_elapsed_time == 0 # calling stop() after initialized - increments the local micro step counter engine.tput_timer.stop() assert engine.tput_timer.initialized assert not engine.tput_timer.started assert engine.tput_timer.start_time == 0 assert engine.tput_timer.micro_step_count == 1 assert engine.tput_timer.global_step_count == 0 assert engine.tput_timer.total_elapsed_time == 0 # calling start()/stop() to increment the step counter until start_step while engine.tput_timer.micro_step_count < (gradient_accumulation_steps * engine.tput_timer.start_step): engine.tput_timer.start() global_step = (engine.tput_timer.micro_step_count + 1) % gradient_accumulation_steps == 0 engine.tput_timer.stop(global_step=global_step) assert engine.tput_timer.global_step_count == engine.tput_timer.start_step assert engine.tput_timer.total_elapsed_time == 0 # calling start()/stop() accumulates duration during gradient accumulation while engine.tput_timer.global_step_count == engine.tput_timer.start_step: engine.tput_timer.start() current_duration = engine.tput_timer.step_elapsed_time total_duration = engine.tput_timer.total_elapsed_time global_step = (engine.tput_timer.micro_step_count + 1) % gradient_accumulation_steps == 0 engine.tput_timer.stop(global_step=global_step) duration = engine.tput_timer.end_time - engine.tput_timer.start_time # step elapsed time is reset after gradient accumulation steps assert engine.tput_timer.step_elapsed_time == ( 0 if engine.tput_timer.global_step_count != engine.tput_timer.start_step else current_duration + duration) assert engine.tput_timer.total_elapsed_time == total_duration + duration def test_ext_param_getattr(self): setup_serial_env() class ExtLinear(torch.nn.Module): def __init__(self, dim=16): super().__init__() self.dim = dim self.linear1 = torch.nn.Linear(dim, dim) self.linear2 = torch.nn.Linear(dim, dim) def forward(self, input): A = self.linear1(input) B = self.linear2(A) # external use of self.linear1.weight C = torch.nn.functional.linear(B, self.linear1.weight) return C.sum() net = ExtLinear() args = SimpleNamespace(local_rank=0) engine, optim, _, _ = deepspeed.initialize(args=args, model=net, model_parameters=net.parameters(), config=config) with deepspeed.zero.GatheredParameters(net.linear1.weight): assert net.linear1.weight.numel() == net.dim**2 input = torch.rand(net.dim).to(engine.device).half() loss = engine(input) engine.backward(loss) engine.step() class TestScatterGather(DistributedTest): world_size = 2 def test(self): with deepspeed.zero.Init(): l = torch.nn.Linear(6, 3) assert l.weight.ds_status == ZeroParamStatus.NOT_AVAILABLE assert l.weight.shape == torch.Size(partitioned_param_data_shape) # Ensure there is no impact outside the context l2 = torch.nn.Linear(6, 3) assert not hasattr(l2.weight, 'ds_status') assert l2.weight.numel() == l2.in_features * l2.out_features with 
deepspeed.zero.GatheredParameters(l.weight): assert l.weight.ds_status == ZeroParamStatus.AVAILABLE assert l.weight.numel() == l.in_features * l.out_features class TestGatherUpdate(DistributedTest): world_size = 2 def test(self): with deepspeed.zero.Init(): l = torch.nn.Linear(4, 2) assert l.weight.ds_status == ZeroParamStatus.NOT_AVAILABLE # Gather and make a change with deepspeed.zero.GatheredParameters(l.weight, modifier_rank=1): assert l.weight.ds_status == ZeroParamStatus.AVAILABLE if dist.get_rank() == 1: with torch.no_grad(): l.weight.zero_() # should now be scattered again # Now gather again and ensure the change is global with deepspeed.zero.GatheredParameters(l.weight): # all ranks compare assert torch.equal(l.weight, torch.zeros_like(l.weight))
9,928
35.237226
115
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/zero/test_zero_nesting_init.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch

from unit.common import DistributedTest

import deepspeed


class TestNestingInit(DistributedTest):
    world_size = 1

    def test_nesting_init(self):
        ds_config = dict(train_batch_size=1, zero_optimization=dict(stage=3))

        with deepspeed.zero.Init(config_dict_or_path=ds_config):
            with deepspeed.zero.Init(config_dict_or_path=ds_config):
                model = torch.nn.Linear(4, 4)

        # ensure that zero3 processed the parameter
        assert hasattr(model.weight, "ds_id")

        deepspeed_engine, *_ = deepspeed.initialize(model=model, config_params=ds_config)


class TestShutdownInNestingInit(DistributedTest):
    world_size = 1

    def test_shutdown_in_nesting_init(self):
        ds_config = dict(train_batch_size=1, zero_optimization=dict(stage=3))

        with deepspeed.zero.Init(config_dict_or_path=ds_config):
            with deepspeed.zero.Init(config_dict_or_path=ds_config):
                model1 = torch.nn.Linear(4, 4)
            assert hasattr(model1.weight, "ds_id")
            deepspeed_engine1, *_ = deepspeed.initialize(model=model1, config_params=ds_config)

            with deepspeed.zero.Init(config_dict_or_path=ds_config):
                model2 = torch.nn.Linear(4, 4)

        # ensure that zero3 processed the parameter
        assert hasattr(model2.weight, "ds_id")

        deepspeed_engine2, *_ = deepspeed.initialize(model=model2, config_params=ds_config)
1,545
31.893617
95
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/zero/test_zero_config.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from deepspeed.runtime.zero.config import DeepSpeedZeroConfig, DeepSpeedZeroOffloadParamConfig, DeepSpeedZeroOffloadOptimizerConfig


def test_zero_config_deprecatedfields():
    config = DeepSpeedZeroConfig(**{"cpu_offload_param": True})
    assert isinstance(config.offload_param, DeepSpeedZeroOffloadParamConfig)

    config = DeepSpeedZeroConfig(**{"cpu_offload": True})
    assert isinstance(config.offload_optimizer, DeepSpeedZeroOffloadOptimizerConfig)

    config = DeepSpeedZeroConfig(**{"stage3_gather_fp16_weights_on_model_save": True})
    assert config.gather_16bit_weights_on_model_save == True


def test_zero_config_aliasfields():
    config = DeepSpeedZeroConfig(**{"stage3_prefetch_bucket_size": 12345})
    assert config.prefetch_bucket_size == 12345

    config = DeepSpeedZeroConfig(**{"stage3_param_persistence_threshold": 12345})
    assert config.param_persistence_threshold == 12345

    config = DeepSpeedZeroConfig(**{"stage3_max_reuse_distance": 12345})
    assert config.max_reuse_distance == 12345

    config = DeepSpeedZeroConfig(**{"stage3_gather_16bit_weights_on_model_save": True})
    assert config.gather_16bit_weights_on_model_save == True


def test_zero_config_overlapcomm():
    for stage in [0, 1, 2]:
        config = DeepSpeedZeroConfig(**{"stage": stage})
        assert config.overlap_comm == False

    config = DeepSpeedZeroConfig(**{"stage": 3})
    assert config.overlap_comm == True


def test_zero_config_offload_configs():
    config = DeepSpeedZeroConfig()
    assert config.offload_param == None
    assert config.offload_optimizer == None

    config = DeepSpeedZeroConfig(**{"offload_param": None, "offload_optimizer": None})
    assert config.offload_param == None
    assert config.offload_optimizer == None

    config = DeepSpeedZeroConfig(**{"offload_param": {}, "offload_optimizer": {}})
    assert isinstance(config.offload_param, DeepSpeedZeroOffloadParamConfig)
    assert isinstance(config.offload_optimizer, DeepSpeedZeroOffloadOptimizerConfig)


def test_zero_offload_optimizer_config_pipeline():
    config = DeepSpeedZeroOffloadOptimizerConfig()
    assert config.pipeline == False

    config = DeepSpeedZeroOffloadOptimizerConfig(**{"pipeline_read": True, "pipeline_write": False})
    assert config.pipeline == True

    config = DeepSpeedZeroOffloadOptimizerConfig(**{"pipeline_read": False, "pipeline_write": True})
    assert config.pipeline == True

    config = DeepSpeedZeroOffloadOptimizerConfig(**{"pipeline_read": True, "pipeline_write": True})
    assert config.pipeline == True
2,659
37.550725
131
py
DeepSpeed
DeepSpeed-master/tests/unit/runtime/utils/test_partition.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import pytest import torch import deepspeed.comm as dist from deepspeed.runtime.utils import partition_uniform from deepspeed.runtime.utils import partition_balanced from deepspeed.runtime.utils import prefix_sum_inc from deepspeed.runtime.utils import PartitionedTensor from deepspeed.accelerator import get_accelerator from unit.common import DistributedTest class TestPartitionedTensor(DistributedTest): world_size = 4 def test(self): world = dist.get_world_size() rank = dist.get_rank() group = dist.new_group(ranks=list(range(world))) rows = world * 4 cols = 3 full = torch.rand(rows, cols).to(get_accelerator().device_name()) dist.broadcast(full, src=0, group=group) part = PartitionedTensor(full, group=group) assert len(part.local_size()) == 1 assert part.local_size()[0] * world == full.numel() reconstructed = part.full() assert torch.equal(full, reconstructed) class TestPartitionedTensorMeta(DistributedTest): world_size = 4 def test(self): world = dist.get_world_size() rank = dist.get_rank() group = dist.new_group(ranks=list(range(world))) rows = world * 7 cols = 3 full = torch.rand(rows, cols).to(get_accelerator().device_name()) dist.broadcast(full, src=0, group=group) part = PartitionedTensor(full, group=group) my_meta = PartitionedTensor.from_meta(part.to_meta(), part.local_data, group) assert torch.equal(full, my_meta.full()) def assert_valid_partition(weights, parts, P): N = len(weights) assert len(parts) == P + 1 assert parts[0] == 0 assert parts[P] == N for idx in range(P): assert parts[idx] <= parts[idx + 1] def get_partition_weights(weights, parts): """ Return the amount of weight in each partition. """ costs = [0] * (len(parts) - 1) P = len(parts) - 1 for p in range(P): start = parts[p] stop = parts[p + 1] costs[p] = sum(weights[start:stop]) return costs def test_prefix_sum(): x = [3, 4, 5] psum = prefix_sum_inc(x) assert psum == [3, 7, 12] def test_valid_partition(): N = 10 P = 1 weights = [1] * N parts = partition_balanced(weights, P) assert_valid_partition(weights, parts, P) def test_short_partition_uniform(): N = 2 P = 4 weights = [1] * N parts = partition_uniform(len(weights), P) assert_valid_partition(weights, parts, P) def test_short_partition(): N = 2 P = 4 weights = [1] * N parts = partition_balanced(weights, P) assert_valid_partition(weights, parts, P) def test_easy_balance_uniform(): weights = [1] * 8 P = 4 parts = partition_uniform(len(weights), P) assert_valid_partition(weights, parts, P) costs = get_partition_weights(weights, parts) assert all(c == 2 for c in costs) def test_easy_balance_balanced(): weights = [1] * 8 P = 4 parts = partition_balanced(weights, P) assert_valid_partition(weights, parts, P) costs = get_partition_weights(weights, parts) assert all(c == 2 for c in costs), costs def test_int_balanced(): weights = [0, 1, 2, 3, 3, 3] P = 4 parts = partition_balanced(weights, P) assert parts == [0, 3, 4, 5, 6] assert_valid_partition(weights, parts, P) costs = get_partition_weights(weights, parts) assert all(c == 3 for c in costs) def test_float_balanced(): weights = [0., 1.1, 1.9, 3., 3., 3.] P = 4 parts = partition_balanced(weights, P) assert_valid_partition(weights, parts, P) assert parts == [0, 3, 4, 5, 6] @pytest.mark.skip(reason="Variance-minimizing partitioning returns different result.") def test_float_lastheavy(): weights = [0., 1.1, 1.9, 3., 30.] 
P = 2 parts = partition_balanced(weights, P) assert_valid_partition(weights, parts, P) assert parts == [0, 4, 5] def test_float_midheavy(): weights = [0., 1.1, 30, 3.] P = 3 parts = partition_balanced(weights, P) assert_valid_partition(weights, parts, P) assert parts == [0, 2, 3, 4] def test_balance_bert(): # Parameters per layer for a transformer model with 24 transformers and hidden dim 1024 weights = [ 52559872, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 0, 52559872 ] P = 8 parts = partition_balanced(weights, P) assert_valid_partition(weights, parts, P)
4,766
25.932203
117
py
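partition_balanced in the tests above splits a list of weights into P contiguous ranges so that the heaviest range is as light as possible (for [0, 1, 2, 3, 3, 3] and P=4 the parts are [0, 3, 4, 5, 6] with 3 units per partition). The following is a rough, self-contained sketch of one standard way to compute that bottleneck cost with prefix sums and a binary search; it is not DeepSpeed's implementation, just an illustration of the idea the assertions rely on.

# Illustrative balanced-partition bottleneck search (plain Python, no DeepSpeed).
def prefix_sums(weights):
    out, total = [0], 0
    for w in weights:
        total += w
        out.append(total)
    return out


def can_partition(prefix, parts, bound):
    """Greedily check whether the weights fit in <= parts contiguous ranges of cost <= bound."""
    count, start = 0, 0
    n = len(prefix) - 1
    while start < n:
        stop = start
        while stop < n and prefix[stop + 1] - prefix[start] <= bound:
            stop += 1
        if stop == start:  # a single item exceeds the bound
            return False
        count += 1
        start = stop
    return count <= parts


def balanced_bottleneck(weights, parts):
    prefix = prefix_sums(weights)
    lo, hi = max(weights), prefix[-1]
    while lo < hi:
        mid = (lo + hi) // 2
        if can_partition(prefix, parts, mid):
            hi = mid
        else:
            lo = mid + 1
    return lo


# For the weights used in test_int_balanced, the heaviest partition carries 3 units.
assert balanced_bottleneck([0, 1, 2, 3, 3, 3], 4) == 3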
DeepSpeed
DeepSpeed-master/tests/unit/runtime/pipe/test_topology.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import pytest import torch import deepspeed.comm as dist from deepspeed.runtime.pipe.topology import PipelineParallelGrid as Grid from deepspeed.runtime.pipe.topology import ProcessTopology as Topo from deepspeed.runtime.pipe.topology import _prime_factors from deepspeed.accelerator import get_accelerator from unit.common import DistributedTest def test_topology_2d(): topo = Topo(axes=['row', 'col'], dims=[2, 2]) assert topo.world_size() == 4 assert topo.get_rank(row=0, col=0) == 0 assert topo.get_rank(row=0, col=1) == 1 assert topo.get_rank(row=1, col=0) == 2 assert topo.get_rank(row=1, col=1) == 3 assert topo.get_axis_list(axis='row', idx=0) == [0, 1] assert topo.get_axis_list(axis='row', idx=1) == [2, 3] assert topo.get_axis_list(axis='col', idx=0) == [0, 2] assert topo.get_axis_list(axis='col', idx=1) == [1, 3] def test_topology_dims(): topo = Topo(axes=['a', 'b', 'c'], dims=[2, 3, 4]) assert topo.world_size() == 24 assert topo.get_dim('a') == 2 assert topo.get_dim('b') == 3 assert topo.get_dim('c') == 4 def test_topology_match(): topo = Topo(axes=['pipe', 'data', 'model'], dims=[2, 2, 2]) print(topo.filter_match(pipe=0, data=1)) assert topo.filter_match(pipe=0, data=1) == [2, 3] print([topo.get_coord(r) for r in topo.filter_match(pipe=0, data=1)]) def test_topology_rank_repr(): topo = Topo(axes=['a', 'b'], dims=[2, 2]) assert topo.get_rank_repr(rank=0) == 'a_00-b_00' assert topo.get_rank_repr(rank=1) == 'a_00-b_01' assert topo.get_rank_repr(rank=2) == 'a_01-b_00' assert topo.get_rank_repr(rank=3) == 'a_01-b_01' assert topo.get_rank_repr(rank=3, inner_sep='+') == 'a+01-b+01' assert topo.get_rank_repr(rank=3, inner_sep='🤗', outer_sep='_JEFF_') == 'a🤗01_JEFF_b🤗01' topo = Topo(axes=['pipe', 'data'], dims=[2, 2]) assert topo.get_rank_repr(rank=0) == '' assert topo.get_rank_repr(rank=1) == '' assert topo.get_rank_repr(rank=2) == '' assert topo.get_rank_repr(rank=3) == '' assert topo.get_rank_repr(rank=0, omit_axes=['pipe']) == 'data_00' assert topo.get_rank_repr(rank=1, omit_axes=['pipe']) == 'data_01' assert topo.get_rank_repr(rank=2, omit_axes=['pipe']) == 'data_00' assert topo.get_rank_repr(rank=3, omit_axes=['pipe']) == 'data_01' assert topo.get_rank_repr(rank=0, omit_axes=[]) == 'pipe_00-data_00' assert topo.get_rank_repr(rank=1, omit_axes=[]) == 'pipe_00-data_01' assert topo.get_rank_repr(rank=2, omit_axes=[]) == 'pipe_01-data_00' assert topo.get_rank_repr(rank=3, omit_axes=[]) == 'pipe_01-data_01' topo = Topo(axes=['pipe', 'data', 'model'], dims=[2, 2, 2]) assert topo.get_rank_repr(rank=0) == 'model_00' assert topo.get_rank_repr(rank=1) == 'model_01' assert topo.get_rank_repr(rank=2) == 'model_00' assert topo.get_rank_repr(rank=3) == 'model_01' assert topo.get_rank_repr(rank=4) == 'model_00' assert topo.get_rank_repr(rank=5) == 'model_01' assert topo.get_rank_repr(rank=6) == 'model_00' assert topo.get_rank_repr(rank=7) == 'model_01' def test_topology_3d(): topo = Topo(axes=['a', 'b', 'c'], dims=[2, 2, 2]) assert topo.get_rank(a=0, b=0, c=0) == 0 assert topo.get_rank(a=0, b=0, c=1) == 1 assert topo.get_rank(a=0, b=1, c=0) == 2 assert topo.get_rank(a=0, b=1, c=1) == 3 assert topo.get_rank(a=1, b=0, c=0) == 4 assert topo.get_rank(a=1, b=0, c=1) == 5 assert topo.get_rank(a=1, b=1, c=0) == 6 assert topo.get_rank(a=1, b=1, c=1) == 7 assert topo.get_axis_list('a', 0) == [0, 1, 2, 3] assert topo.get_axis_list('a', 1) == [4, 5, 6, 7] assert topo.get_axis_list('b', 0) == [0, 1, 4, 5] assert 
topo.get_axis_list('b', 1) == [2, 3, 6, 7] assert topo.get_axis_list('c', 0) == [0, 2, 4, 6] assert topo.get_axis_list('c', 1) == [1, 3, 5, 7] assert topo.get_coord(0) == topo.ProcessCoord(0, 0, 0) assert topo.get_coord(1) == topo.ProcessCoord(0, 0, 1) assert topo.get_coord(2) == topo.ProcessCoord(0, 1, 0) assert topo.get_coord(3) == topo.ProcessCoord(0, 1, 1) assert topo.get_coord(4) == topo.ProcessCoord(1, 0, 0) assert topo.get_coord(5) == topo.ProcessCoord(1, 0, 1) assert topo.get_coord(6) == topo.ProcessCoord(1, 1, 0) assert topo.get_coord(7) == topo.ProcessCoord(1, 1, 1) assert topo.filter_match(a=0) == [0, 1, 2, 3] assert topo.filter_match(b=1, c=1) == [3, 7] assert topo.filter_match(a=1, b=1, c=1) == [7] # Easy access method assert topo.get_coord(0).a == 0 def test_topology_comm_list(): topo = Topo(axes=['pipe', 'data', 'model'], dims=[2, 2, 2]) assert topo.get_rank(pipe=0, data=0, model=0) == 0 assert topo.get_rank(pipe=0, data=0, model=1) == 1 assert topo.get_rank(pipe=0, data=1, model=0) == 2 assert topo.get_rank(pipe=0, data=1, model=1) == 3 assert topo.get_rank(pipe=1, data=0, model=0) == 4 assert topo.get_rank(pipe=1, data=0, model=1) == 5 assert topo.get_rank(pipe=1, data=1, model=0) == 6 assert topo.get_rank(pipe=1, data=1, model=1) == 7 pipe_list = [ [0, 4], # data=0, model=0 [1, 5], # data=0, model=1 [2, 6], # data=1, model=0 [3, 7], # data=1, model=1 ] assert topo.get_axis_comm_lists('pipe') == pipe_list data_list = [ [0, 2], # pipe=0, model=0 [1, 3], # pipe=0, model=1 [4, 6], # pipe=1, model=0 [5, 7], # pipe=1, model=1 ] assert topo.get_axis_comm_lists('data') == data_list model_list = [ [0, 1], # pipe=0, data=0 [2, 3], # pipe=0, data=1 [4, 5], # pipe=1, data=0 [6, 7], # pipe=1, data=1 ] assert topo.get_axis_comm_lists('model') == model_list # Handle nonsense. We don't want to RuntimeError because it allows us to write more # generalized code for data/model/pipe parallelism assert topo.get_axis_comm_lists('jeff') == [] class TestDistributedTopology(DistributedTest): world_size = 4 def test_grid_pipe_data(self): topo = Topo(axes=['pipe', 'data'], dims=[2, 2]) grid = Grid(topology=topo) assert grid._is_grid_valid() rank = dist.get_rank() assert grid.is_first_stage == (grid.get_stage_id() == 0) assert grid.is_last_stage == (grid.get_stage_id() == grid.get_pipe_parallel_world_size() - 1) # Test collectives along the pipeline parallel process groups rank_tensor = torch.LongTensor(data=[rank]).to(get_accelerator().device_name()) dist.all_reduce(rank_tensor, group=grid.get_pipe_parallel_group()) pipe_group = grid.pp_group assert torch.all(rank_tensor == sum(pipe_group)) # Test collectives along the data parallel process groups rank_tensor = torch.LongTensor(data=[rank]).to(get_accelerator().device_name()) dist.all_reduce(rank_tensor, group=grid.get_data_parallel_group()) data_group = grid.dp_group assert torch.all(rank_tensor == sum(data_group)) def test_stage_to_global(self): topo = Topo(axes=['pipe', 'data'], dims=[2, 2]) grid = Grid(topology=topo) assert grid._is_grid_valid() assert grid.stage_to_global(stage_id=0, data=0) == 0 assert grid.stage_to_global(stage_id=0, data=1) == 1 assert grid.stage_to_global(stage_id=1, data=0) == 2 assert grid.stage_to_global(stage_id=1, data=1) == 3 me = topo.get_coord(rank=dist.get_rank()) if me.data == 0: assert grid.stage_to_global(stage_id=0) == 0 assert grid.stage_to_global(stage_id=1) == 2 else: assert grid.stage_to_global(stage_id=0) == 1 assert grid.stage_to_global(stage_id=1) == 3 def test_primes(): """ Test prime factorizations. 
""" def _product(ps): p = 1 for num in ps: p *= num return p with pytest.raises(ValueError): _prime_factors(0) for x in range(1, 30): primes = _prime_factors(x) assert _product(primes) == x for p in primes: assert _prime_factors(p) == [p]
8,248
35.339207
101
py
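The ProcessTopology assertions above follow a row-major mapping between coordinates and ranks, with the last axis varying fastest. A tiny standalone sketch of that arithmetic, matching the expected values in test_topology_3d (this mirrors the convention the tests assert, not the class internals):

# Row-major coordinate/rank arithmetic implied by the topology tests.
from itertools import product


def coord_to_rank(coord, dims):
    """Row-major rank: the last axis varies fastest."""
    rank = 0
    for c, d in zip(coord, dims):
        rank = rank * d + c
    return rank


dims = (2, 2, 2)  # axes a, b, c
assert coord_to_rank((0, 0, 0), dims) == 0
assert coord_to_rank((0, 1, 1), dims) == 3
assert coord_to_rank((1, 1, 1), dims) == 7

# Enumerating coordinates in order reproduces ranks 0..7.
assert [coord_to_rank(c, dims) for c in product(*(range(d) for d in dims))] == list(range(8))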
DeepSpeed
DeepSpeed-master/tests/unit/runtime/pipe/test_pipe_schedule.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import pytest import deepspeed.runtime.pipe.schedule as schedule def _count_type(cmds, classtype): return len(list(filter(lambda c: type(c) == classtype, cmds))) def test_pipe_inference_schedule_singlestage(): sched = schedule.InferenceSchedule(micro_batches=4, stages=1, stage_id=0) assert sched.num_micro_batches == 4 full = list(iter(sched)) for idx, cmds in enumerate(full): assert len(cmds) == 2 assert type(cmds[0]) == schedule.LoadMicroBatch assert type(cmds[1]) == schedule.ForwardPass assert cmds[0].buffer_id == cmds[1].buffer_id assert len(full) == sched.num_micro_batches def test_pipe_train_schedule_singlestage(): sched = schedule.TrainSchedule(micro_batches=4, stages=1, stage_id=0) assert sched.num_micro_batches == 4 full = list(iter(sched)) for idx, cmds in enumerate(full): if (idx % 2) != 0: assert (len(cmds) == 1) or (len(cmds) == 4) assert type(cmds[0]) == schedule.BackwardPass else: assert len(cmds) == 2 assert type(cmds[0]) == schedule.LoadMicroBatch assert type(cmds[1]) == schedule.ForwardPass assert cmds[0].buffer_id == cmds[1].buffer_id assert len(full) == sched.num_micro_batches * 2 @pytest.mark.parametrize('micro_batches', [1, 3, 8, 10]) def test_pipe_inference_schedule_firststage(micro_batches, stages=3): sched = schedule.InferenceSchedule(micro_batches=micro_batches, stages=stages, stage_id=0) assert sched.num_micro_batches == micro_batches full = list(iter(sched)) for idx, cmds in enumerate(full): # Ensure we don't send an activation the first step if idx == 0: assert len(cmds) == 2 assert type(cmds[0]) == schedule.LoadMicroBatch assert type(cmds[1]) == schedule.ForwardPass assert cmds[0].buffer_id == cmds[1].buffer_id continue # the last active step is only a send if idx == sched.num_micro_batches: assert len(cmds) == 1 assert type(cmds[0]) == schedule.SendActivation continue # no work later on if idx > sched.num_micro_batches: assert len(cmds) == 0 continue # Normally we need to load/forward/send assert len(cmds) == 3 assert _count_type(cmds, schedule.LoadMicroBatch) == 1 assert _count_type(cmds, schedule.ForwardPass) == 1 assert _count_type(cmds, schedule.SendActivation) == 1 assert len(full) == micro_batches + stages - 1 @pytest.mark.parametrize('micro_batches', [1, 3, 8, 10]) def test_pipe_inference_schedule_midstage(micro_batches, stages=3): sched = schedule.InferenceSchedule(micro_batches=micro_batches, stages=stages, stage_id=1) full = list(iter(sched)) for idx, cmds in enumerate(full): if idx < sched.stage: assert len(cmds) == 0 continue if idx == sched.stage + sched.num_micro_batches: assert len(cmds) == 1 assert type(cmds[0]) == schedule.SendActivation continue if idx > sched.stage + sched.num_micro_batches: assert len(cmds) == 0 continue assert _count_type(cmds, schedule.LoadMicroBatch) == 0 assert _count_type(cmds, schedule.ForwardPass) == 1 assert _count_type(cmds, schedule.RecvActivation) == 1 if idx > sched.stage: assert _count_type(cmds, schedule.SendActivation) == 1 assert len(full) == micro_batches + stages - 1 @pytest.mark.parametrize('micro_batches', [1, 3, 8, 10]) def test_pipe_inference_schedule_laststage(micro_batches, stages=3): sched = schedule.InferenceSchedule(micro_batches=micro_batches, stages=stages, stage_id=2) full = list(iter(sched)) for idx, cmds in enumerate(full): if idx < sched.stage or idx > sched.stage + sched.num_micro_batches: assert len(cmds) == 0 continue assert _count_type(cmds, schedule.LoadMicroBatch) == 1 assert _count_type(cmds, 
schedule.ForwardPass) == 1 assert _count_type(cmds, schedule.RecvActivation) == 1 assert _count_type(cmds, schedule.SendActivation) == 0 assert len(full) == micro_batches + stages - 1 def test_pipe_schedule_firststage(): sched = schedule.TrainSchedule(micro_batches=8, stages=3, stage_id=0) for cmds in sched: assert all(instr.__class__ != schedule.SendGrad for instr in cmds) assert all(instr.__class__ != schedule.RecvActivation for instr in cmds) for instr in cmds: if isinstance(instr, schedule.BufferOpInstruction): assert 0 <= instr.buffer_id < sched.num_pipe_buffers() def test_pipe_schedule_laststage(): sched = schedule.TrainSchedule(stages=3, micro_batches=4, stage_id=2) assert len(list(iter(sched))) == 2 * (sched.micro_batches + sched.stages - 1) for cmds in sched: assert all(instr.__class__ != schedule.SendActivation for instr in cmds) assert all(instr.__class__ != schedule.RecvGrad for instr in cmds) def test_pipe_stagequery(): sched = schedule.TrainSchedule(stages=3, micro_batches=4, stage_id=0) assert sched.is_first_stage assert not sched.is_last_stage sched = schedule.TrainSchedule(stages=3, micro_batches=4, stage_id=1) assert not sched.is_first_stage assert not sched.is_last_stage sched = schedule.TrainSchedule(stages=3, micro_batches=4, stage_id=2) assert not sched.is_first_stage assert sched.is_last_stage
5,683
38.472222
94
py
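The schedule tests above all assert the same pipeline length: an inference schedule runs micro_batches + stages - 1 steps, and a train schedule runs twice that (a forward wave plus a backward wave). A quick standalone check of that arithmetic against the parametrizations used above:

# Step-count arithmetic asserted by the pipeline schedule tests.
def inference_steps(micro_batches, stages):
    return micro_batches + stages - 1


def train_steps(micro_batches, stages):
    return 2 * (micro_batches + stages - 1)


assert inference_steps(micro_batches=4, stages=1) == 4    # single-stage inference schedule
assert inference_steps(micro_batches=8, stages=3) == 10   # multi-stage inference schedules
assert train_steps(micro_batches=4, stages=1) == 8        # single-stage train schedule
assert train_steps(micro_batches=4, stages=3) == 12       # matches test_pipe_schedule_laststage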
DeepSpeed
DeepSpeed-master/tests/unit/runtime/pipe/test_pipe.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import copy import torch.nn as nn import pytest import deepspeed.comm as dist from deepspeed.runtime.pipe.topology import PipeDataParallelTopology from deepspeed.runtime.pipe.module import PipelineModule from unit.alexnet_model import AlexNetPipe, train_cifar from unit.common import DistributedTest from unit.util import skip_on_arch PipeTopo = PipeDataParallelTopology def rel_diff(A, B): return abs(A - B) / abs(A) @pytest.mark.parametrize('topo_config', [ { "num_pp": 1, "num_dp": 4 }, { "num_pp": 2, "num_dp": 2 }, { "num_pp": 4, "num_dp": 1 }, ]) class TestPipeCifar10(DistributedTest): world_size = 4 def test(self, topo_config): skip_on_arch(min_arch=7) config_dict = { "train_batch_size": 4, "grandient_accumulation_steps": 1, "steps_per_print": 20, "optimizer": { "type": "Adam", "params": { "lr": 0.001, "betas": [0.9, 0.999], "eps": 1e-8, "weight_decay": 3e-7 } }, "zero_optimization": { "stage": 0 }, "fp16": { "enabled": False }, "pipeline": { "seed_layers": True, "activation_checkpoint_interval": 1 } } topo = PipeTopo(**topo_config) steps = 100 # must be >=100 # Allocate model for consistent initial weights. init_net = AlexNetPipe() base_net = copy.deepcopy(init_net) base_model = PipelineModule(layers=base_net.to_layers(), num_stages=1, loss_fn=nn.CrossEntropyLoss()) # Train with just data parallelism base_losses = train_cifar(base_model, config=config_dict, num_steps=steps, fp16=config_dict['fp16']['enabled']) test_net = copy.deepcopy(init_net) test_model = PipelineModule(layers=test_net.to_layers(), topology=topo, loss_fn=nn.CrossEntropyLoss()) test_losses = train_cifar(test_model, config=config_dict, num_steps=steps, fp16=config_dict['fp16']['enabled']) abs_diffs = [l0 - l1 for l0, l1 in zip(base_losses, test_losses)] rel_diffs = [rel_diff(l0, l1) for l0, l1 in zip(base_losses, test_losses)] if dist.get_rank() == 0: print(f'abs min={min(abs_diffs)} max={max(abs_diffs)} avg={sum(abs_diffs)/len(abs_diffs)}') print(f'rel min={min(rel_diffs)} max={max(rel_diffs)} avg={sum(rel_diffs)/len(rel_diffs)}') print(f'first: base={base_losses[0]} test={test_losses[0]} abs={abs_diffs[0]} rel={rel_diffs[0]}') for lastX in [1, 10, 100]: base_avg = sum(base_losses[-lastX:]) / lastX test_avg = sum(test_losses[-lastX:]) / lastX print( f'last-{lastX}: base={base_avg} test={test_avg} abs={base_avg - test_avg} rel={rel_diff(base_avg, test_avg)}' ) lastX = 100 base = base_losses[-lastX:] base_avg = sum(base) / len(base) test = test_losses[-lastX:] test_avg = sum(test) / len(test) assert rel_diff(base_avg, test_avg) < 0.05 # Originally 0.03, but seeing instability with AMD results
3,446
31.518868
129
py
DeepSpeed
DeepSpeed-master/tests/unit/moe/test_moe_tp.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch import deepspeed import pytest from unit.common import DistributedTest from unit.util import required_torch_version from deepspeed.moe.layer import MoE class MPU(): def __init__(self, tp_world_size): self.rank = deepspeed.comm.get_rank() self.world_size = deepspeed.comm.get_world_size() self.tp_world_size = tp_world_size for i in range(0, self.world_size, tp_world_size): ranks = range(i, i + tp_world_size) group = deepspeed.comm.new_group(ranks) if self.rank in ranks: self.tp_group = group for i in range(0, tp_world_size): ranks = range(i, self.world_size, tp_world_size) group = deepspeed.comm.new_group(ranks) if self.rank in ranks: self.dp_group = group def get_model_parallel_rank(self): return self.rank % self.tp_world_size def get_model_parallel_world_size(self): return self.tp_world_size def get_data_parallel_rank(self): return self.rank // self.tp_world_size def get_data_parallel_world_size(self): return self.world_size // self.tp_world_size def get_data_parallel_group(self): return self.dp_group def get_model_parallel_group(self): return self.tp_group @pytest.mark.parametrize("ep_size, tp_size", [(1, 2), (1, 4), (2, 2)]) @pytest.mark.parametrize("enable_expert_tp", [True, False]) @pytest.mark.parametrize("use_residual", [True, False]) class TestMOETensorParallel(DistributedTest): world_size = 4 def test(self, ep_size, tp_size, enable_expert_tp, use_residual): # TODO: replace this with a true parallel mlp in the future # and run convergence tests if not required_torch_version(): pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly") config_dict = {"train_batch_size": 8, "steps_per_print": 1, "fp16": {"enabled": True}} hidden_dim = 16 tensor_parallel_expert = torch.nn.Sequential(torch.nn.Linear(hidden_dim, 4 * hidden_dim // tp_size), torch.nn.ReLU(), torch.nn.Linear(4 * hidden_dim // tp_size, hidden_dim)) # set num experts to world size world_size = deepspeed.comm.get_world_size() model = MoE( hidden_size=hidden_dim, expert=tensor_parallel_expert, num_experts=world_size, ep_size=ep_size, use_residual=use_residual, enable_expert_tensor_parallelism=enable_expert_tp, ) optimizer = torch.optim.AdamW(params=model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer, dist_init_required=False, mpu=MPU(tp_size)) assert model.num_local_experts == world_size // ep_size if enable_expert_tp: assert deepspeed.utils.groups._get_expert_model_parallel_world_size() == tp_size else: assert deepspeed.utils.groups._get_expert_model_parallel_world_size() == 1
3,434
35.935484
108
py
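The MPU helper in test_moe_tp.py carves the world into tensor-parallel groups of consecutive ranks and data-parallel groups strided by tp_world_size. The standalone enumeration below reproduces that bookkeeping (no communicators are created) for one of the configurations the test runs:

# Pure-Python enumeration of the TP/DP rank groups the MPU class above builds.
def mpu_groups(world_size, tp):
    """Return (tensor-parallel groups, data-parallel groups) as lists of rank lists."""
    tp_groups = [list(range(i, i + tp)) for i in range(0, world_size, tp)]
    dp_groups = [list(range(i, world_size, tp)) for i in range(tp)]
    return tp_groups, dp_groups


# world_size=4, tp_size=2 (one of the parametrizations above)
assert mpu_groups(4, 2) == ([[0, 1], [2, 3]], [[0, 2], [1, 3]])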
DeepSpeed
DeepSpeed-master/tests/unit/moe/test_moe.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch import deepspeed import pytest from unit.common import DistributedTest from unit.simple_model import SimplePRMoEModel, SimpleMoEModel, sequence_dataloader from deepspeed.moe.utils import split_params_into_different_moe_groups_for_optimizer, is_moe_param from unit.util import required_torch_version @pytest.mark.parametrize("ep_size", [2, 4]) @pytest.mark.parametrize("zero_stage", [0, 1, 2]) @pytest.mark.parametrize("use_residual", [True, False]) class TestMoE(DistributedTest): world_size = 4 def test(self, ep_size, zero_stage, use_residual): if not required_torch_version(): pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly") config_dict = { "train_micro_batch_size_per_gpu": 1, "steps_per_print": 1, "fp16": { "enabled": True }, "zero_optimization": { "stage": zero_stage } } hidden_dim = 16 # E+D -- ep_size = 2 # E only -- ep_size = 4 model = SimpleMoEModel(hidden_dim, ep_size=ep_size, use_residual=use_residual) param_group = {'params': [p for p in model.parameters()], 'name': 'random-unique-name'} params = split_params_into_different_moe_groups_for_optimizer(param_group) optimizer = torch.optim.AdamW(params=params) model, optimizer, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer, dist_init_required=False) #dist_init_required=False -- parameterize to True/False? data_loader = sequence_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) def strict_average_tensor(tensor): process_group = optimizer.dp_process_group curr_size = 0 pg_offsets = [] for i, param, param_id in optimizer.params_in_ipg_bucket: process_group = optimizer.dp_process_group if optimizer.ipg_bucket_has_moe_params: process_group = optimizer.expert_dp_process_group[param.group_name] if is_moe_param( param) else optimizer.dp_process_group partition_ids = optimizer.param_to_partition_ids[i][param_id] # Get all partition ids + their offsets partition_offsets = [] for partition_id in partition_ids: offset = optimizer.grad_start_offset[i][partition_id][param_id] partition_offsets.append(offset) partition_offsets.sort() # Calculate rank and offsets for grad slices for idx, offset in enumerate(partition_offsets): # Calculate numel for grad slice depending on partition location if idx == len(partition_offsets) - 1: # Last partition_id uses its own offset numel = param.numel() - offset else: # Set numel to next partition's offset numel = partition_offsets[idx + 1] - offset pg_offsets.append((curr_size, process_group)) curr_size += numel def strict_narrow(dim, start, length): lo, hi = 0, len(pg_offsets) - 1 while lo < hi: mi = lo + (hi - lo) // 2 if pg_offsets[mi][0] >= start: hi = mi else: lo = mi + 1 curr_slice, reduce_process_group = lo, pg_offsets[lo][1] while curr_slice < len(pg_offsets) and start + length > pg_offsets[curr_slice][0]: assert reduce_process_group == pg_offsets[curr_slice][ 1], "reduce process_group does not match the parameter's process_group" curr_slice += 1 return orig_narrow(dim, start, length) # real call orig_narrow, tensor.narrow = tensor.narrow, strict_narrow type(optimizer).average_tensor(optimizer, tensor) # real call tensor.narrow = orig_narrow if "average_tensor" in dir(optimizer): optimizer.average_tensor = strict_average_tensor for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step() @pytest.mark.parametrize("ep_size, use_residual", [(2, True), (2, False)]) 
class TestPRMoE(DistributedTest): world_size = 4 def test(self, ep_size, use_residual): if not required_torch_version(): pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly") config_dict = {"train_batch_size": 8, "steps_per_print": 1, "fp16": {"enabled": True}} hidden_dim = 16 # E+D -- ep_size = 2 # E only -- ep_size = 4 model = SimplePRMoEModel(hidden_dim, ep_size=ep_size, use_residual=use_residual) optimizer = torch.optim.AdamW(params=model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer, dist_init_required=False) data_loader = sequence_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) model.step()
5,900
43.368421
116
py
DeepSpeed
DeepSpeed-master/tests/unit/launcher/test_multinode_runner.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from copy import deepcopy
from deepspeed.launcher import multinode_runner as mnrunner
from deepspeed.launcher.runner import encode_world_info, parse_args
import os
import pytest


@pytest.fixture
def runner_info():
    hosts = {'worker-0': 4, 'worker-1': 4}
    world_info = encode_world_info(hosts)
    env = deepcopy(os.environ)
    args = parse_args(['test_launcher.py'])
    return env, hosts, world_info, args


def test_pdsh_runner(runner_info):
    env, resource_pool, world_info, args = runner_info
    runner = mnrunner.PDSHRunner(args, world_info)
    cmd, kill_cmd = runner.get_cmd(env, resource_pool)
    assert cmd[0] == 'pdsh'
    assert env['PDSH_RCMD_TYPE'] == 'ssh'


def test_openmpi_runner(runner_info):
    env, resource_pool, world_info, args = runner_info
    runner = mnrunner.OpenMPIRunner(args, world_info, resource_pool)
    cmd = runner.get_cmd(env, resource_pool)
    assert cmd[0] == 'mpirun'


def test_mpich_runner(runner_info):
    env, resource_pool, world_info, args = runner_info
    runner = mnrunner.MPICHRunner(args, world_info, resource_pool)
    cmd = runner.get_cmd(env, resource_pool)
    assert cmd[0] == 'mpirun'


def test_slurm_runner(runner_info):
    env, resource_pool, world_info, args = runner_info
    runner = mnrunner.SlurmRunner(args, world_info, resource_pool)
    cmd = runner.get_cmd(env, resource_pool)
    assert cmd[0] == 'srun'


def test_mvapich_runner(runner_info):
    env, resource_pool, world_info, args = runner_info
    runner = mnrunner.MVAPICHRunner(args, world_info, resource_pool)
    cmd = runner.get_cmd(env, resource_pool)
    assert cmd[0] == 'mpirun'
1,725
29.821429
68
py
DeepSpeed
DeepSpeed-master/tests/unit/launcher/test_ds_arguments.py
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import argparse import pytest import deepspeed from deepspeed.utils.numa import parse_range_list def basic_parser(): parser = argparse.ArgumentParser() parser.add_argument('--num_epochs', type=int) return parser def test_no_ds_arguments_no_ds_parser(): parser = basic_parser() args = parser.parse_args(['--num_epochs', '2']) assert args assert hasattr(args, 'num_epochs') assert args.num_epochs == 2 assert not hasattr(args, 'deepspeed') assert not hasattr(args, 'deepspeed_config') def test_no_ds_arguments(): parser = basic_parser() parser = deepspeed.add_config_arguments(parser) args = parser.parse_args(['--num_epochs', '2']) assert args assert hasattr(args, 'num_epochs') assert args.num_epochs == 2 assert hasattr(args, 'deepspeed') assert args.deepspeed == False assert hasattr(args, 'deepspeed_config') assert args.deepspeed_config == None def test_no_ds_enable_argument(): parser = basic_parser() parser = deepspeed.add_config_arguments(parser) args = parser.parse_args(['--num_epochs', '2', '--deepspeed_config', 'foo.json']) assert args assert hasattr(args, 'num_epochs') assert args.num_epochs == 2 assert hasattr(args, 'deepspeed') assert args.deepspeed == False assert hasattr(args, 'deepspeed_config') assert type(args.deepspeed_config) == str assert args.deepspeed_config == 'foo.json' def test_no_ds_config_argument(): parser = basic_parser() parser = deepspeed.add_config_arguments(parser) args = parser.parse_args(['--num_epochs', '2', '--deepspeed']) assert args assert hasattr(args, 'num_epochs') assert args.num_epochs == 2 assert hasattr(args, 'deepspeed') assert type(args.deepspeed) == bool assert args.deepspeed == True assert hasattr(args, 'deepspeed_config') assert args.deepspeed_config == None def test_no_ds_parser(): parser = basic_parser() with pytest.raises(SystemExit): args = parser.parse_args(['--num_epochs', '2', '--deepspeed']) def test_core_deepscale_arguments(): parser = basic_parser() parser = deepspeed.add_config_arguments(parser) args = parser.parse_args(['--num_epochs', '2', '--deepspeed', '--deepspeed_config', 'foo.json']) assert args assert hasattr(args, 'num_epochs') assert args.num_epochs == 2 assert hasattr(args, 'deepspeed') assert type(args.deepspeed) == bool assert args.deepspeed == True assert hasattr(args, 'deepspeed_config') assert type(args.deepspeed_config) == str assert args.deepspeed_config == 'foo.json' def test_core_binding_arguments(): core_list = parse_range_list("0,2-4,6,8-9") assert core_list == [0, 2, 3, 4, 6, 8, 9] try: # negative case for range overlapping core_list = parse_range_list("0,2-6,5-9") except ValueError as e: pass else: # invalid core list must fail assert False try: # negative case for reverse order -- case 1 core_list = parse_range_list("8,2-6") except ValueError as e: pass else: # invalid core list must fail assert False try: # negative case for reverse order -- case 2 core_list = parse_range_list("1,6-2") except ValueError as e: pass else: # invalid core list must fail assert False
3,515
25.238806
100
py
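test_core_binding_arguments above exercises parse_range_list: comma-separated cores and inclusive ranges are accepted, while reversed ranges and overlapping or out-of-order ranges raise ValueError. The sketch below is a simplified standalone parser with the same accept/reject behaviour; it is not DeepSpeed's implementation, only an illustration of the rules the test checks.

# Simplified core-range parser mirroring the behaviour tested above.
def parse_ranges(spec):
    """Parse '0,2-4,6' into [0, 2, 3, 4, 6]; reject reversed or overlapping ranges."""
    cores = []
    for part in spec.split(","):
        if "-" in part:
            lo, hi = (int(x) for x in part.split("-"))
            if hi < lo:
                raise ValueError(f"reversed range: {part}")
            rng = list(range(lo, hi + 1))
        else:
            rng = [int(part)]
        if cores and rng[0] <= cores[-1]:
            raise ValueError(f"out-of-order or overlapping range: {part}")
        cores.extend(rng)
    return cores


assert parse_ranges("0,2-4,6,8-9") == [0, 2, 3, 4, 6, 8, 9]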
DeepSpeed
DeepSpeed-master/tests/unit/launcher/test_run.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import pytest

from deepspeed.launcher import runner as dsrun


def test_parser_mutual_exclusive():
    '''Ensure dsrun.parse_resource_filter() raises a ValueError when include_str
    and exclude_str are both provided.
    '''
    with pytest.raises(ValueError):
        dsrun.parse_resource_filter({}, include_str='A', exclude_str='B')


def test_parser_local():
    ''' Test cases with only one node. '''
    # First try no include/exclude
    hosts = {'worker-0': [0, 1, 2, 3]}
    ret = dsrun.parse_resource_filter(hosts)
    assert (ret == hosts)

    # exclude slots
    ret = dsrun.parse_resource_filter(hosts, exclude_str='worker-0:1')
    assert (ret == {'worker-0': [0, 2, 3]})
    ret = dsrun.parse_resource_filter(hosts, exclude_str='worker-0:1,2')
    assert (ret == {'worker-0': [0, 3]})

    # only use one slot
    ret = dsrun.parse_resource_filter(hosts, include_str='worker-0:1')
    assert (ret == {'worker-0': [1]})

    # including slots multiple times shouldn't break things
    ret = dsrun.parse_resource_filter(hosts, include_str='worker-0:1,1')
    assert (ret == {'worker-0': [1]})
    ret = dsrun.parse_resource_filter(hosts, include_str='worker-0:1@worker-0:0,1')
    assert (ret == {'worker-0': [0, 1]})

    # including just 'worker-0' without : should still use all GPUs
    ret = dsrun.parse_resource_filter(hosts, include_str='worker-0')
    assert (ret == hosts)

    # excluding just 'worker-0' without : should eliminate everything
    ret = dsrun.parse_resource_filter(hosts, exclude_str='worker-0')
    assert (ret == {})

    # exclude all slots manually
    ret = dsrun.parse_resource_filter(hosts, exclude_str='worker-0:0,1,2,3')
    assert (ret == {})


def test_parser_multinode():
    # First try no include/exclude
    hosts = {'worker-0': [0, 1, 2, 3], 'worker-1': [0, 1, 2, 3]}
    ret = dsrun.parse_resource_filter(hosts)
    assert (ret == hosts)

    # include a node
    ret = dsrun.parse_resource_filter(hosts, include_str='worker-1:0,3')
    assert (ret == {'worker-1': [0, 3]})

    # exclude a node
    ret = dsrun.parse_resource_filter(hosts, exclude_str='worker-1')
    assert (ret == {'worker-0': [0, 1, 2, 3]})

    # exclude part of each node
    ret = dsrun.parse_resource_filter(hosts, exclude_str='worker-0:0,1@worker-1:3')
    assert (ret == {'worker-0': [2, 3], 'worker-1': [0, 1, 2]})


def test_parser_errors():
    '''Ensure we catch errors. '''
    hosts = {'worker-0': [0, 1, 2, 3], 'worker-1': [0, 1, 2, 3]}

    # host does not exist
    with pytest.raises(ValueError):
        dsrun.parse_resource_filter(hosts, include_str='jeff')
    with pytest.raises(ValueError):
        dsrun.parse_resource_filter(hosts, exclude_str='jeff')

    # slot does not exist
    with pytest.raises(ValueError):
        dsrun.parse_resource_filter(hosts, include_str='worker-1:4')
    with pytest.raises(ValueError):
        dsrun.parse_resource_filter(hosts, exclude_str='worker-1:4')

    # formatting
    with pytest.raises(ValueError):
        dsrun.parse_resource_filter(hosts, exclude_str='worker-1@worker-0:1@5')


def test_num_plus_parser():
    ''' Ensure we catch errors relating to num_nodes/num_gpus + -i/-e being mutually exclusive'''

    # inclusion
    with pytest.raises(ValueError):
        dsrun.main(args="--num_nodes 1 -i localhost foo.py".split())
    with pytest.raises(ValueError):
        dsrun.main(args="--num_nodes 1 --num_gpus 1 -i localhost foo.py".split())
    with pytest.raises(ValueError):
        dsrun.main(args="--num_gpus 1 -i localhost foo.py".split())

    # exclusion
    with pytest.raises(ValueError):
        dsrun.main(args="--num_nodes 1 -e localhost foo.py".split())
    with pytest.raises(ValueError):
        dsrun.main(args="--num_nodes 1 --num_gpus 1 -e localhost foo.py".split())
    with pytest.raises(ValueError):
        dsrun.main(args="--num_gpus 1 -e localhost foo.py".split())


def test_hostfile_good():
    # good hostfile w. empty lines and comment
    hostfile = """
worker-1 slots=2
worker-2 slots=2

localhost slots=1
123.23.12.10 slots=2

#worker-1 slots=3
# this is a comment
"""
    r = dsrun._parse_hostfile(hostfile.splitlines())
    assert "worker-1" in r
    assert "worker-2" in r
    assert "localhost" in r
    assert "123.23.12.10" in r
    assert r["worker-1"] == 2
    assert r["worker-2"] == 2
    assert r["localhost"] == 1
    assert r["123.23.12.10"] == 2
    assert len(r) == 4


def test_hostfiles_bad():
    # duplicate host
    hostfile = """
worker-1 slots=2
worker-2 slots=1
worker-1 slots=1
"""
    with pytest.raises(ValueError):
        dsrun._parse_hostfile(hostfile.splitlines())

    # incorrect whitespace
    hostfile = """
this is bad slots=1
"""
    with pytest.raises(ValueError):
        dsrun._parse_hostfile(hostfile.splitlines())

    # no whitespace
    hostfile = """
missingslots
"""
    with pytest.raises(ValueError):
        dsrun._parse_hostfile(hostfile.splitlines())

    # empty
    hostfile = """
"""
    with pytest.raises(ValueError):
        dsrun._parse_hostfile(hostfile.splitlines())

    # mix of good/bad
    hostfile = """
worker-1 slots=2
this is bad slots=1
worker-2 slots=4
missingslots
"""
    with pytest.raises(ValueError):
        dsrun._parse_hostfile(hostfile.splitlines())
5,445
29.088398
97
py
DeepSpeed
DeepSpeed-master/tests/unit/model_parallelism/test_configurable_parallel_pp.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os
import torch
import deepspeed
import pytest
import random
import numpy as np
import deepspeed.comm as dist

from unit.common import DistributedTest, DistributedFixture
from unit.megatron_model import get_megatron_version
from unit.megatron_model import MockGPT2ModelPipe as GPT2ModelPipe
from deepspeed.utils import RepeatingLoader
from deepspeed.accelerator import get_accelerator
from unit.util import required_minimum_torch_version, required_maximum_torch_version

pytestmark = pytest.mark.skipif(not required_minimum_torch_version(major_version=1, minor_version=5),
                                reason='Megatron-LM package requires Pytorch version 1.5 or above')
pytestmark = pytest.mark.skipif(not required_maximum_torch_version(major_version=1, minor_version=13),
                                reason='Megatron-LM package requires Pytorch version 1.13 or below')


def get_deepspeed_model(model):
    ds_config_dict = {
        "train_micro_batch_size_per_gpu": 1,
        "optimizer": {
            "type": "Lamb",
            "params": {
                "lr": 0.00015
            }
        },
    }

    model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=ds_config_dict)
    return model.to(get_accelerator().device_name())


def get_topology(mp, pp, world_size):
    assert world_size % (pp * mp) == 0
    dp = world_size // (pp * mp)

    from deepspeed.runtime.pipe.topology import PipeModelDataParallelTopology
    topo = PipeModelDataParallelTopology(num_pp=pp, num_mp=mp, num_dp=dp)

    return topo


class ConfigurablePP(DistributedTest):

    @pytest.fixture(autouse=True)
    def reset_random(self, seed=1234):
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        get_accelerator().manual_seed_all(seed)

    @pytest.fixture
    def inputs(self, bs=1, seq_len=1, hidden_size=128):
        hidden_states = torch.randn(bs, seq_len, hidden_size)
        attention_mask = torch.randint(low=0, high=2, size=(bs, seq_len), dtype=torch.bool)
        return (hidden_states, attention_mask)


class TestConfigurablePP(ConfigurablePP):
    mp_size = 2
    pp_size = 2
    world_size = 4  # mp_size * pp_size

    @pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
    def test_pp_basic(self, inputs, tmpdir):
        # basic test case, mp_size=2, pp_size=2, verify ckpt saving/loading.
        args_defaults = {
            'num_layers': 8,
            'hidden_size': 128,
            'num_attention_heads': 8,
            'max_position_embeddings': 128,
        }

        mp_size = self.mp_size
        pp_size = self.pp_size
        world_size = self.world_size

        topo = get_topology(mp_size, pp_size, world_size)
        gpt2_pipe_model = GPT2ModelPipe(num_layers=8,
                                        num_stages=pp_size,
                                        mp_size=mp_size,
                                        args_others=args_defaults,
                                        topo=topo)
        model = get_deepspeed_model(gpt2_pipe_model)

        tag = 'pp_basic'
        state_dict = {}
        state_dict['checkpoint_version'] = get_megatron_version()
        model.save_checkpoint(tmpdir, tag=tag, client_state=state_dict)

        if model.is_first_stage() or model.is_last_stage():
            loader = RepeatingLoader([(inputs[0], 0)])
            data_iter = iter(loader)
        else:
            data_iter = None

        baseline = model.eval_batch(data_iter=data_iter, compute_loss=False, reduce_output=None)

        dist.barrier()
        model.load_checkpoint(tmpdir, tag=tag, load_optimizer_states=False, load_lr_scheduler_states=False)
        dist.barrier()

        test = model.eval_batch(data_iter=data_iter, compute_loss=False, reduce_output=None)

        if test is not None:
            assert len(baseline) == len(test)
            # Compare outputs of each microbatch
            for mb in range(len(baseline)):
                for b, t in zip(baseline[mb], test[mb]):
                    if b.is_floating_point():  # don't compare masks
                        assert torch.allclose(
                            b, t,
                            atol=1e-07), f"Baseline output {baseline} is not equal to save-then-load output {test}"


# Fixture for defining the checkpoint path since all tests in
# TestConfigurableResizePP will use the same tmpdir
@pytest.fixture
def checkpoint_tag(mp_size, pp_size, mp_resize, pp_resize):
    return f"{mp_size}-{pp_size}-{mp_resize}-{pp_resize}"


# Base class for creating / saving model output for baseline models. This is
# not meant to be used directly as a fixture to any classes
class _baseline(DistributedFixture):
    world_size = None

    def run(self, inputs, class_tmpdir, checkpoint_tag, mp_size, pp_size):
        assert int(os.environ["WORLD_SIZE"]) == (pp_size *
                                                 mp_size), "world size does not match provided pp_size and mp_size"
        args_defaults = {
            'num_layers': 8,
            'hidden_size': 128,
            'num_attention_heads': 8,
            'max_position_embeddings': 128,
        }

        topo = get_topology(mp_size, pp_size, mp_size * pp_size)
        gpt2_pipe_model = GPT2ModelPipe(num_layers=8,
                                        num_stages=pp_size,
                                        mp_size=mp_size,
                                        args_others=args_defaults,
                                        topo=topo)
        model = get_deepspeed_model(gpt2_pipe_model)

        with torch.no_grad():
            inputs = [x.to(get_accelerator().device_name()) for x in inputs]
            if model.is_first_stage() or model.is_last_stage():
                loader = RepeatingLoader([(inputs[0], 0)])
                data_iter = iter(loader)
            else:
                data_iter = None

            baseline = model.eval_batch(data_iter=data_iter, compute_loss=False, reduce_output=None)

            if baseline is not None:
                # baseline should be [[hidden, True]]]
                assert len(baseline) == 1
                assert len(baseline[0]) == 1
                assert torch.is_tensor(baseline[0][0])
                save_path = os.path.join(class_tmpdir, f"output-{checkpoint_tag}.pt")
                torch.save(baseline[0][0].cpu(), save_path)

            state_dict = {}
            state_dict['checkpoint_version'] = get_megatron_version()
            model.save_checkpoint(class_tmpdir, tag=checkpoint_tag, client_state=state_dict)


# This may look odd, but there is a limitation with DistributedFixture that
# doesn't allow us to reuse a fixture with different worldsizes. This could be
# implemented in conftest.py::pytest_fixture_setup and common.py::DistributedFixture
class baseline_ws1(_baseline):
    world_size = 1


class baseline_ws2(_baseline):
    world_size = 2


class baseline_ws4(_baseline):
    world_size = 4


class TestConfigurableResizePP(ConfigurablePP):

    def _test(self, inputs, class_tmpdir, checkpoint_tag, mp_size, pp_size, mp_resize, pp_resize):
        args_defaults = {
            'num_layers': 8,
            'hidden_size': 128,
            'num_attention_heads': 8,
            'max_position_embeddings': 128,
        }

        topo = get_topology(mp_resize, pp_resize, mp_resize * pp_resize)
        gpt2_pipe_model = GPT2ModelPipe(num_layers=8,
                                        num_stages=pp_resize,
                                        mp_size=mp_resize,
                                        args_others=args_defaults,
                                        topo=topo)
        model = get_deepspeed_model(gpt2_pipe_model)

        with torch.no_grad():
            model.load_checkpoint(class_tmpdir,
                                  tag=checkpoint_tag,
                                  load_optimizer_states=False,
                                  load_lr_scheduler_states=False)
            inputs = [x.to(get_accelerator().device_name()) for x in inputs]
            if model.is_first_stage() or model.is_last_stage():
                loader = RepeatingLoader([(inputs[0], 0)])
                data_iter = iter(loader)
            else:
                data_iter = None

            test = model.eval_batch(data_iter=data_iter, compute_loss=False, reduce_output=None)

            if test is not None:
                # test should be [[hidden, True]]]
                assert len(test) == 1
                assert len(test[0]) == 1
                assert torch.is_tensor(test[0][0])
                test = test[0][0].cpu()
                load_path = os.path.join(class_tmpdir, f"output-{checkpoint_tag}.pt")
                baseline = torch.load(load_path)
                assert torch.allclose(
                    baseline, test,
                    atol=1e-03), f"Baseline output {baseline} is not equal to save-then-load output {test}"

    # These tests are divided by baseline model worldsize and test model worldsize
    @pytest.mark.world_size(1)
    @pytest.mark.parametrize("mp_size, pp_size, mp_resize, pp_resize", [(1, 2, 1, 1)])
    @pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
    def test_world_size_2to1(self, inputs, class_tmpdir, checkpoint_tag, baseline_ws2, mp_size, pp_size, mp_resize,
                             pp_resize):
        self._test(inputs, class_tmpdir, checkpoint_tag, mp_size, pp_size, mp_resize, pp_resize)

    @pytest.mark.world_size(1)
    @pytest.mark.parametrize("mp_size, pp_size, mp_resize, pp_resize", [(2, 2, 1, 1)])
    @pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
    def test_world_size_4to1(self, inputs, class_tmpdir, checkpoint_tag, baseline_ws4, mp_size, pp_size, mp_resize,
                             pp_resize):
        self._test(inputs, class_tmpdir, checkpoint_tag, mp_size, pp_size, mp_resize, pp_resize)

    @pytest.mark.world_size(2)
    @pytest.mark.parametrize("mp_size, pp_size, mp_resize, pp_resize", [(2, 2, 2, 1)])
    @pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
    def test_world_size_4to2(self, inputs, class_tmpdir, checkpoint_tag, baseline_ws4, mp_size, pp_size, mp_resize,
                             pp_resize):
        self._test(inputs, class_tmpdir, checkpoint_tag, mp_size, pp_size, mp_resize, pp_resize)

    @pytest.mark.world_size(4)
    @pytest.mark.parametrize("mp_size, pp_size, mp_resize, pp_resize", [(1, 1, 2, 2)])
    @pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
    def test_world_size_1to4(self, inputs, class_tmpdir, checkpoint_tag, baseline_ws1, mp_size, pp_size, mp_resize,
                             pp_resize):
        self._test(inputs, class_tmpdir, checkpoint_tag, mp_size, pp_size, mp_resize, pp_resize)

    @pytest.mark.world_size(4)
    @pytest.mark.parametrize("mp_size, pp_size, mp_resize, pp_resize", [(1, 2, 1, 4), (2, 1, 2, 2)])
    @pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
    def test_world_size_2to4(self, inputs, class_tmpdir, checkpoint_tag, baseline_ws2, mp_size, pp_size, mp_resize,
                             pp_resize):
        self._test(inputs, class_tmpdir, checkpoint_tag, mp_size, pp_size, mp_resize, pp_resize)
11,505
41.614815
115
py
DeepSpeed
DeepSpeed-master/tests/unit/model_parallelism/test_configurable_parallel_mp.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os
import torch
import deepspeed
import pytest
import random
import numpy as np
import deepspeed.comm as dist

from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest, DistributedFixture
from unit.megatron_model import get_gpt2_model, get_megatron_version
from unit.util import required_minimum_torch_version, required_maximum_torch_version

pytestmark = pytest.mark.skipif(not required_minimum_torch_version(major_version=1, minor_version=5),
                                reason='Megatron-LM package requires Pytorch version 1.5 or above')
pytestmark = pytest.mark.skipif(not required_maximum_torch_version(major_version=1, minor_version=13),
                                reason='Megatron-LM package requires Pytorch version 1.13 or below')


# TODO: integrated testing of TP and ZeRO 1/2/3
def get_deepspeed_model(model):
    ds_config_dict = {
        "train_micro_batch_size_per_gpu": 1,
        "optimizer": {
            "type": "Lamb",
            "params": {
                "lr": 0.00015
            }
        },
    }

    from megatron import mpu
    model, _, _, _ = deepspeed.initialize(model=model,
                                          mpu=mpu,
                                          model_parameters=model.parameters(),
                                          config=ds_config_dict)
    return model


class ConfigurableMP(DistributedTest):

    @pytest.fixture(autouse=True)
    def reset_random(self, seed=1234):
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        get_accelerator().manual_seed_all(seed)

    @pytest.fixture
    def inputs(self, bs=1, seq_len=20):
        input_ids = torch.randint(low=0, high=1000, size=(bs, seq_len))
        position_ids = torch.randint(low=0, high=2, size=(bs, seq_len))
        attention_mask = torch.randint(low=0, high=2, size=(bs, seq_len), dtype=torch.bool)
        return [input_ids, position_ids, attention_mask]


class TestConfigurableMP(ConfigurableMP):

    @pytest.mark.world_size(1)
    @pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
    def test_gpt2_basic(self, tmpdir, inputs):
        args_defaults = {
            'num_layers': 2,
            'hidden_size': 128,
            'num_attention_heads': 8,
            'max_position_embeddings': 128,
        }

        model = get_gpt2_model(args_defaults)
        model = get_deepspeed_model(model)

        model.eval()
        device_name = get_accelerator().device_name()
        baseline = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))

        tag = 'mp_1'
        state_dict = {}
        state_dict['checkpoint_version'] = get_megatron_version()
        model.save_checkpoint(tmpdir, tag=tag, client_state=state_dict)
        dist.barrier()
        model.load_checkpoint(tmpdir, tag=tag, load_optimizer_states=False, load_lr_scheduler_states=False)

        test = model(inputs[0], inputs[1], inputs[2])
        assert torch.allclose(baseline, test,
                              atol=1e-07), f"Baseline output {baseline} is not equal to save-then-load output {test}"

    @pytest.mark.world_size(2)
    @pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
    def test_gpt2_mp2_no_resize(self, tmpdir, inputs):
        args_defaults = {
            'num_layers': 2,
            'hidden_size': 128,
            'num_attention_heads': 8,
            'max_position_embeddings': 128,
        }

        model = get_gpt2_model(args_defaults, mp_size=2)
        model = get_deepspeed_model(model)

        model.eval()

        device_name = get_accelerator().device_name()
        baseline = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))

        tag = 'mp_2'
        state_dict = {}
        state_dict['checkpoint_version'] = get_megatron_version()
        model.save_checkpoint(tmpdir, tag=tag, client_state=state_dict)
        dist.barrier()
        model.load_checkpoint(tmpdir, tag=tag, load_optimizer_states=False, load_lr_scheduler_states=False)

        device_name = get_accelerator().device_name()
        test = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))
        assert torch.allclose(baseline, test, rtol=1.0,
                              atol=1e-07), f"Baseline output {baseline} is not equal to save-then-load output {test}"


# This fixture provides the baseline model with mp=2 to TestConfigurableMPResize
class baseline_mp2(DistributedFixture):
    world_size = 2

    def run(self, inputs, class_tmpdir):
        args_defaults = {
            'num_layers': 2,
            'hidden_size': 128,
            'num_attention_heads': 8,
            'max_position_embeddings': 128,
        }

        model = get_gpt2_model(args_defaults, mp_size=self.world_size)
        model = get_deepspeed_model(model)

        model.eval()
        with torch.no_grad():
            device_name = get_accelerator().device_name()
            baseline = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))
            if dist.get_rank() == 0:
                save_path = os.path.join(class_tmpdir, "output.pt")
                torch.save(baseline.cpu(), save_path)

            state_dict = {}
            state_dict['checkpoint_version'] = get_megatron_version()
            model.save_checkpoint(class_tmpdir, client_state=state_dict)


class TestConfigurableResizeMP(ConfigurableMP):
    world_size = [1, 4]

    @pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
    def test(self, baseline_mp2, inputs, class_tmpdir):
        args_defaults = {
            'num_layers': 2,
            'hidden_size': 128,
            'num_attention_heads': 8,
            'max_position_embeddings': 128,
        }

        world_size = os.environ["WORLD_SIZE"]
        model = get_gpt2_model(args_defaults, mp_size=world_size)
        model = get_deepspeed_model(model)

        model.eval()

        with torch.no_grad():
            model.load_checkpoint(class_tmpdir, load_optimizer_states=False, load_lr_scheduler_states=False)
            device_name = get_accelerator().device_name()
            test = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))
            if dist.get_rank() == 0:
                load_path = os.path.join(class_tmpdir, "output.pt")
                baseline = torch.load(load_path)
                test = test.cpu()
                assert torch.allclose(
                    baseline, test,
                    atol=1e-03), f"Baseline output {baseline} is not equal to save-then-load output {test}"
6,844
37.027778
117
py
DeepSpeed
DeepSpeed-master/tests/unit/comm/test_dist.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os
import torch
import deepspeed.comm as dist
import deepspeed

from unit.common import DistributedTest, DistributedFixture, get_master_port
from unit.simple_model import SimpleModel
from deepspeed.accelerator import get_accelerator

import pytest


class TestInit(DistributedTest):
    world_size = 3

    def test(self):
        assert dist.is_initialized()
        assert dist.get_world_size() == 3
        assert dist.get_rank() < 3


# Demonstration of pytest's parameterization and fixtures
@pytest.fixture(params=["hello"])
def greeting(request):
    return request.param


@pytest.mark.parametrize("number,color", [(1138, "purple")])
class TestDistArgs(DistributedTest):
    world_size = 2
    """ Classes that use DistributedTest class must define a test* method """

    @pytest.mark.parametrize("shape", ["icosahedron"])
    def test(self, number, color, shape, greeting):
        """Ensure that we can parse args to DistributedTest methods. """
        assert dist.get_world_size() == 2
        assert number == 1138
        assert color == "purple"
        assert shape == "icosahedron"
        assert greeting == "hello"


# Demonstration of distributed tests grouped in single class
@pytest.mark.parametrize("number", [1138])
class TestGroupedDistTest(DistributedTest):
    world_size = 2

    def test_one(self, number):
        assert dist.get_world_size() == 2
        assert number == 1138

    def test_two(self, number, color="purple"):
        assert dist.get_world_size() == 2
        assert number == 1138
        assert color == "purple"


# Demonstration of world_size override
class TestWorldSizeOverrideDistTest(DistributedTest):
    world_size = 2

    def test_world_size_2(self):
        assert dist.get_world_size() == 2

    @pytest.mark.world_size(1)
    def test_world_size_1(self):
        assert dist.get_world_size() == 1


# Demonstration of the DistributedFixture class
@pytest.fixture(params=[2, 4])
def val1(request):
    return request.param


@pytest.fixture(params=[16, 32])
def val2(request):
    return request.param


class distributed_fixture(DistributedFixture):
    world_size = 2

    def run(self, class_tmpdir, val1, val2):
        assert int(os.environ["WORLD_SIZE"]) == self.world_size
        local_rank = os.environ["LOCAL_RANK"]
        file_path = os.path.join(class_tmpdir, f"checkpoint-{local_rank}.pt")
        with open(file_path, "w") as f:
            f.write(f"{local_rank},{val1},{val2}")


class TestDistributedFixture(DistributedTest):
    world_size = 1

    def test(self, distributed_fixture, class_tmpdir, val1, val2):
        for rank in range(2):
            file_path = os.path.join(class_tmpdir, f"checkpoint-{rank}.pt")
            with open(file_path, "r") as f:
                chkpt = f.read()
            assert chkpt == f"{rank},{val1},{val2}"
        assert int(os.environ["WORLD_SIZE"]) == 1


class TestDistAllReduce(DistributedTest):
    device_count = get_accelerator().device_count()
    if device_count >= 4:
        world_size = [1, 2, 4]
    elif device_count >= 2:
        world_size = [1, 2]
    else:
        world_size = [1]

    def test(self):
        x = torch.ones(1, 3).to(get_accelerator().device_name()) * (dist.get_rank() + 1)
        sum_of_ranks = (dist.get_world_size() * (dist.get_world_size() + 1)) // 2
        result = torch.ones(1, 3).to(get_accelerator().device_name()) * sum_of_ranks
        dist.all_reduce(x)
        assert torch.all(x == result)


@pytest.mark.parametrize("dist_init_required", [True, False, None])
class TestDistInit(DistributedTest):
    init_distributed = False

    def test_already_init(self, dist_init_required):
        torch.distributed.init_process_group(get_accelerator().communication_backend_name())
        deepspeed.init_distributed(get_accelerator().communication_backend_name(),
                                   dist_init_required=dist_init_required)

    def test_no_init(self, dist_init_required):
        if dist_init_required or dist_init_required is None:
            deepspeed.init_distributed(get_accelerator().communication_backend_name(),
                                       dist_init_required=dist_init_required)
        else:
            # torch.dist is not done and for some reason the user says they don't want it done
            with pytest.raises(Exception):
                deepspeed.init_distributed(get_accelerator().communication_backend_name(),
                                           dist_init_required=dist_init_required)


class TestDistInitNoEnv(DistributedTest):
    world_size = 1
    init_distributed = False
    set_dist_env = False

    def test(self):
        torch.distributed.init_process_group(backend=get_accelerator().communication_backend_name(),
                                             init_method=f"tcp://127.0.0.1:{get_master_port()}",
                                             world_size=1,
                                             rank=0)
        assert torch.distributed.is_initialized()
        deepspeed.init_distributed(get_accelerator().communication_backend_name(), auto_mpi_discovery=True)


@pytest.mark.parametrize("dist_init_required", [True, False])
class TestDistInitWithModel(DistributedTest):
    init_distributed = False

    def test_already_init(self, dist_init_required):
        torch.distributed.init_process_group(get_accelerator().communication_backend_name())
        model = SimpleModel(4)
        config_dict = {"train_micro_batch_size_per_gpu": 1, "optimizer": {"type": "Adam", "params": {}}}
        engine, *_ = deepspeed.initialize(model=model,
                                          config=config_dict,
                                          model_parameters=model.parameters(),
                                          dist_init_required=dist_init_required)

    def test_no_init(self, dist_init_required):
        model = SimpleModel(4)
        config_dict = {"train_micro_batch_size_per_gpu": 1, "optimizer": {"type": "Adam", "params": {}}}
        if dist_init_required:
            engine, *_ = deepspeed.initialize(model=model,
                                              config=config_dict,
                                              model_parameters=model.parameters(),
                                              dist_init_required=dist_init_required)
        else:
            # torch.dist is not done and for some reason the user says they don't want it done
            with pytest.raises(Exception):
                engine, *_ = deepspeed.initialize(model=model,
                                                  config=config_dict,
                                                  model_parameters=model.parameters(),
                                                  dist_init_required=dist_init_required)
6,882
35.611702
107
py
DeepSpeed
DeepSpeed-master/tests/unit/monitor/test_monitor.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from deepspeed.monitor.tensorboard import TensorBoardMonitor
from deepspeed.monitor.wandb import WandbMonitor
from deepspeed.monitor.csv_monitor import csvMonitor
from deepspeed.monitor.config import DeepSpeedMonitorConfig

from unit.common import DistributedTest
from deepspeed.runtime.config import DeepSpeedConfig


class TestTensorBoard(DistributedTest):
    world_size = 2

    def test_tensorboard(self):
        config_dict = {
            "train_batch_size": 2,
            "tensorboard": {
                "enabled": True,
                "output_path": "test_output/ds_logs/",
                "job_name": "test"
            }
        }
        ds_config = DeepSpeedConfig(config_dict)
        tb_monitor = TensorBoardMonitor(ds_config.monitor_config.tensorboard)
        assert tb_monitor.enabled == True
        assert tb_monitor.output_path == "test_output/ds_logs/"
        assert tb_monitor.job_name == "test"

    def test_empty_tensorboard(self):
        config_dict = {"train_batch_size": 2, "tensorboard": {}}
        ds_config = DeepSpeedConfig(config_dict)
        tb_monitor = TensorBoardMonitor(ds_config.monitor_config.tensorboard)
        defaults = DeepSpeedMonitorConfig().tensorboard
        assert tb_monitor.enabled == defaults.enabled
        assert tb_monitor.output_path == defaults.output_path
        assert tb_monitor.job_name == defaults.job_name


class TestWandB(DistributedTest):
    world_size = 2

    def test_wandb(self):
        config_dict = {
            "train_batch_size": 2,
            "wandb": {
                "enabled": False,
                "group": "my_group",
                "team": "my_team",
                "project": "my_project"
            }
        }
        ds_config = DeepSpeedConfig(config_dict)
        wandb_monitor = WandbMonitor(ds_config.monitor_config.wandb)
        assert wandb_monitor.enabled == False
        assert wandb_monitor.group == "my_group"
        assert wandb_monitor.team == "my_team"
        assert wandb_monitor.project == "my_project"

    def test_empty_wandb(self):
        config_dict = {"train_batch_size": 2, "wandb": {}}
        ds_config = DeepSpeedConfig(config_dict)
        wandb_monitor = WandbMonitor(ds_config.monitor_config.wandb)
        defaults = DeepSpeedMonitorConfig().wandb
        assert wandb_monitor.enabled == defaults.enabled
        assert wandb_monitor.group == defaults.group
        assert wandb_monitor.team == defaults.team
        assert wandb_monitor.project == defaults.project


class TestCSVMonitor(DistributedTest):
    world_size = 2

    def test_csv_monitor(self):
        config_dict = {
            "train_batch_size": 2,
            "csv_monitor": {
                "enabled": True,
                "output_path": "test_output/ds_logs/",
                "job_name": "test"
            }
        }
        ds_config = DeepSpeedConfig(config_dict)
        csv_monitor = csvMonitor(ds_config.monitor_config.csv_monitor)
        assert csv_monitor.enabled == True
        assert csv_monitor.output_path == "test_output/ds_logs/"
        assert csv_monitor.job_name == "test"

    def test_empty_csv_monitor(self):
        config_dict = {"train_batch_size": 2, "csv_monitor": {}}
        ds_config = DeepSpeedConfig(config_dict)
        csv_monitor = csvMonitor(ds_config.monitor_config.csv_monitor)
        defaults = DeepSpeedMonitorConfig().csv_monitor
        assert csv_monitor.enabled == defaults.enabled
        assert csv_monitor.output_path == defaults.output_path
        assert csv_monitor.job_name == defaults.job_name
3,661
35.62
77
py
DeepSpeed
DeepSpeed-master/tests/unit/elasticity/test_elastic.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import pytest
import deepspeed
from unit.common import DistributedTest
from deepspeed.git_version_info import version as ds_version
import os
from unit.simple_model import SimpleModel


@pytest.fixture
def ds_config():
    config_dict = {
        "elasticity": {
            "enabled": True,
            "max_train_batch_size": 10000,
            "micro_batch_sizes": [8, 12, 16, 17],
            "min_gpus": 32,
            "max_gpus": 1500,
            "min_time": 20,
            "version": 0.1
        }
    }
    return config_dict


def test_basic_10k(ds_config):
    final_batch_size, valid_gpus = deepspeed.elasticity.compute_elastic_config(ds_config=ds_config,
                                                                               target_deepspeed_version=ds_version)

    for gpu_num in valid_gpus:
        assert final_batch_size % gpu_num == 0, f"Batch {final_batch_size} is not divisible by GPU count {gpu_num}"
        batch_per_gpu = final_batch_size // gpu_num
        # use one consistent flag name so the assert below cannot hit an unbound variable
        found_valid_mb = False

        for mb in ds_config['elasticity']['micro_batch_sizes']:
            if batch_per_gpu % mb == 0:
                found_valid_mb = True
                break
        assert found_valid_mb, "No valid mb found"

    assert len(valid_gpus) == 23
    assert final_batch_size == 9792


def test_old_version(ds_config):
    with pytest.raises(deepspeed.elasticity.config.ElasticityError):
        final_batch_size, valid_gpus = deepspeed.elasticity.compute_elastic_config(ds_config=ds_config,
                                                                                   target_deepspeed_version="0.2")


def test_disabled(ds_config):
    ds_config['elasticity']['enabled'] = False
    with pytest.raises(deepspeed.elasticity.config.ElasticityError):
        final_batch_size, valid_gpus = deepspeed.elasticity.compute_elastic_config(ds_config=ds_config,
                                                                                   target_deepspeed_version=ds_version)


def test_valid_world_size(ds_config):
    final_batch_size, valid_gpus, mbsize = deepspeed.elasticity.compute_elastic_config(
        ds_config=ds_config, target_deepspeed_version=ds_version, world_size=64)
    assert mbsize == 17


def test_invalid_world_size(ds_config):
    with pytest.raises(deepspeed.elasticity.config.ElasticityIncompatibleWorldSize):
        final_batch_size, valid_gpus, mbsize = deepspeed.elasticity.compute_elastic_config(
            ds_config=ds_config, target_deepspeed_version=ds_version, world_size=128)


def test_future_elastic_version(ds_config):
    ds_config['elasticity']['version'] = '0.3'
    with pytest.raises(deepspeed.elasticity.config.ElasticityError):
        deepspeed.elasticity.compute_elastic_config(ds_config=ds_config, target_deepspeed_version=ds_version)


def test_missing_max_batch(ds_config):
    del ds_config['elasticity']['max_train_batch_size']
    with pytest.raises(deepspeed.elasticity.config.ElasticityError):
        deepspeed.elasticity.compute_elastic_config(ds_config=ds_config, target_deepspeed_version=ds_version)


def test_missing_micro_batch(ds_config):
    del ds_config['elasticity']['micro_batch_sizes']
    with pytest.raises(deepspeed.elasticity.config.ElasticityError):
        deepspeed.elasticity.compute_elastic_config(ds_config=ds_config, target_deepspeed_version=ds_version)


def test_empty_config():
    ds_config = {"elasticity": {"enabled": True}}
    with pytest.raises(deepspeed.elasticity.config.ElasticityError):
        deepspeed.elasticity.compute_elastic_config(ds_config=ds_config, target_deepspeed_version=ds_version)


def test_model_parallel_v1_invalid(ds_config):
    ds_config["elasticity"]["model_parallel_size"] = 4
    ds_config["elasticity"]["num_gpus_per_node"] = 8
    ds_config["elasticity"]["version"] = 0.1
    with pytest.raises(deepspeed.elasticity.config.ElasticityError):
        deepspeed.elasticity.compute_elastic_config(ds_config=ds_config, target_deepspeed_version=ds_version)


def test_model_parallel_v2_invalid(ds_config):
    ds_config["elasticity"]["model_parallel_size"] = 16
    ds_config["elasticity"]["num_gpus_per_node"] = 8
    ds_config["elasticity"]["version"] = 0.2
    with pytest.raises(deepspeed.elasticity.config.ElasticityError):
        deepspeed.elasticity.compute_elastic_config(ds_config=ds_config,
                                                    target_deepspeed_version=ds_version,
                                                    world_size=16)


def test_model_parallel_v2_valid(ds_config):
    ds_config["elasticity"]["model_parallel_size"] = 4
    ds_config["elasticity"]["num_gpus_per_node"] = 8
    ds_config["elasticity"]["version"] = 0.2
    os.environ["WORLD_SIZE"] = str(16)
    deepspeed.elasticity.compute_elastic_config(ds_config=ds_config, target_deepspeed_version=ds_version)
    os.environ.pop("WORLD_SIZE")


@pytest.mark.parametrize('key, value', [('micro_batch_sizes', [1, 4, -1, 2, -10]), ('min_gpus', -1), ('max_gpus', -1),
                                        ('micro_batch_sizes', 5), ('micro_batch_sizes', ['a', None, 0.5]),
                                        ('micro_batch_sizes', [2, 0.5, 4])])
def test_invalid_config_values(key, value, ds_config):
    ds_config['elasticity'][key] = value
    with pytest.raises(deepspeed.elasticity.config.ElasticityError):
        deepspeed.elasticity.compute_elastic_config(ds_config=ds_config, target_deepspeed_version=ds_version)


def test_proper_mbsz(ds_config):
    ds_config["elasticity"]["max_train_batch_size"] = 32
    ds_config["elasticity"]["micro_batch_sizes"] = [1, 2, 3, 7]
    ds_config["elasticity"]["min_gpus"] = 1
    final_batch_size, valid_gpus, mbsize = deepspeed.elasticity.compute_elastic_config(
        ds_config=ds_config, target_deepspeed_version=ds_version, world_size=7)
    assert mbsize == 3


class TestNonElasticBatchParams(DistributedTest):
    world_size = 2

    def test(self):
        config_dict = {
            "train_batch_size": 2,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Lamb",
                "params": {
                    "lr": 0.00015
                }
            },
            "gradient_clipping": 1.0,
            "elasticity": {
                "enabled": True,
                "max_train_batch_size": 4,
                "micro_batch_sizes": [1, 2, 3, 4],
                "min_gpus": 1,
                "max_gpus": 4,
                "min_time": 20,
                "version": 0.1
            }
        }

        hidden_dim = 10
        model = SimpleModel(hidden_dim, empty_grad=False)
        with pytest.raises(deepspeed.elasticity.config.ElasticityError):
            model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())


class TestNonElasticBatchParamsWithOverride(DistributedTest):
    world_size = 2

    def test(self):
        config_dict = {
            "train_batch_size": 2,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Lamb",
                "params": {
                    "lr": 0.00015
                }
            },
            "gradient_clipping": 1.0,
            "elasticity": {
                "enabled": True,
                "max_train_batch_size": 4,
                "micro_batch_sizes": [1, 2, 3, 4],
                "min_gpus": 1,
                "max_gpus": 4,
                "min_time": 20,
                "version": 0.1,
                "ignore_non_elastic_batch_info": True
            }
        }

        hidden_dim = 10
        model = SimpleModel(hidden_dim, empty_grad=False)
        model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())


class TestElasticConfigChanged(DistributedTest):
    world_size = 2

    def test(self):
        config_dict = {
            "train_batch_size": 2,
            "steps_per_print": 1,
            "optimizer": {
                "type": "Lamb",
                "params": {
                    "lr": 0.00015
                }
            },
            "gradient_clipping": 1.0,
            "elasticity": {
                "enabled": True,
                "max_train_batch_size": 4,
                "micro_batch_sizes": [1, 2, 3, 4],
                "min_gpus": 1,
                "max_gpus": 4,
                "min_time": 20,
                "version": 0.1,
                "ignore_non_elastic_batch_info": True
            }
        }
        import json, os
        scheduler_elastic_config = config_dict.copy()
        scheduler_elastic_config["elasticity"]["max_train_batch_size"] = 27
        os.environ['DEEPSPEED_ELASTICITY_CONFIG'] = json.dumps(scheduler_elastic_config)

        hidden_dim = 10
        model = SimpleModel(hidden_dim, empty_grad=False)
        with pytest.raises(deepspeed.elasticity.config.ElasticityError):
            model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
9,109
36.336066
119
py
DeepSpeed
DeepSpeed-master/tests/unit/utils/test_get_optim_files.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os
import pytest
from deepspeed.utils.zero_to_fp32 import get_optim_files


@pytest.mark.parametrize('num_checkpoints', [1, 2, 12, 24])
def test_get_optim_files(tmpdir, num_checkpoints):
    saved_files = []
    for i in range(num_checkpoints):
        file_name = "zero_" + str(i) + "_optim_states.pt"
        path_name = os.path.join(tmpdir, file_name)
        saved_files.append(path_name)
        with open(path_name, "w") as f:
            f.write(file_name)
    loaded_files = get_optim_files(tmpdir)
    for lf, sf in zip(loaded_files, saved_files):
        assert lf == sf
684
28.782609
59
py
DeepSpeed
DeepSpeed-master/tests/unit/utils/test_groups.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from deepspeed.utils.groups import _get_expert_parallel_ranks


def test_get_expert_parallel_ranks():
    """
    Example - E + M + D parallel
    world_size = 16
    model_degree = 2
    expert_degree = 4  # number of experts in same group
    mp_group = [0, 1], [2,3], [4,5] ...
    data_parallel_group = [0,2,4,6,8,10,12,14], [1,3,5,7,9,11,13,15]
    expert_parallel_group = [0,2,4,6], [8,10,12,14], [1,3,5,7], [9,11,13,15]
    expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15]
    """
    expert_parallel_groups, expert_data_parallel_groups = _get_expert_parallel_ranks(world_size=16,
                                                                                     model_parallel_size_=2,
                                                                                     expert_parallel_size_=4)
    assert expert_parallel_groups == [
        [0, 2, 4, 6],
        [8, 10, 12, 14],
        [1, 3, 5, 7],
        [9, 11, 13, 15],
    ]
    assert expert_data_parallel_groups == [
        [0, 8],
        [2, 10],
        [4, 12],
        [6, 14],
        [1, 9],
        [3, 11],
        [5, 13],
        [7, 15],
    ]
1,290
32.102564
109
py
DeepSpeed
DeepSpeed-master/tests/unit/utils/test_init_on_device.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
import pytest
from unit.simple_model import SimpleModel
from deepspeed import OnDevice
from packaging import version as pkg_version
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest


@pytest.mark.parametrize('device', ['meta', get_accelerator().device_name(0)])
class TestOnDevice(DistributedTest):
    world_size = 1

    def test_on_device(self, device):
        if device == "meta" and pkg_version.parse(torch.__version__) < pkg_version.parse("1.10"):
            pytest.skip("meta tensors only became stable after torch 1.10")

        with OnDevice(dtype=torch.half, device=device):
            model = SimpleModel(4)

        for p in model.parameters():
            assert p.device == torch.device(device)
            assert p.dtype == torch.half
904
30.206897
97
py
DeepSpeed
DeepSpeed-master/tests/unit/inference/test_inference_config.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import pytest
import torch
import deepspeed
from unit.common import DistributedTest
from unit.simple_model import create_config_from_dict


@pytest.mark.inference
class TestInferenceConfig(DistributedTest):
    world_size = 1

    def test_overlap_kwargs(self):
        config = {"replace_with_kernel_inject": True}
        kwargs = {"replace_with_kernel_inject": True}

        engine = deepspeed.init_inference(torch.nn.Module(), config=config, **kwargs)

        assert engine._config.replace_with_kernel_inject

    def test_overlap_kwargs_conflict(self):
        config = {"replace_with_kernel_inject": True}
        kwargs = {"replace_with_kernel_inject": False}

        with pytest.raises(ValueError):
            engine = deepspeed.init_inference(torch.nn.Module(), config=config, **kwargs)

    def test_kwargs_and_config(self):
        config = {"replace_with_kernel_inject": True}
        kwargs = {"dtype": torch.float32}

        engine = deepspeed.init_inference(torch.nn.Module(), config=config, **kwargs)

        assert engine._config.replace_with_kernel_inject
        assert engine._config.dtype == kwargs["dtype"]

    def test_json_config(self, tmpdir):
        config = {"replace_with_kernel_inject": True}
        config_json = create_config_from_dict(tmpdir, config)

        engine = deepspeed.init_inference(torch.nn.Module(), config=config_json)

        assert engine._config.replace_with_kernel_inject
1,525
32.911111
89
py