# repo: DeepSpeed
# file: DeepSpeed-master/deepspeed/runtime/zero/partition_parameters.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
import os
import types
from typing import Callable, Iterable, List
from enum import Enum
import functools
import itertools
import logging
import torch
from torch import Tensor
from deepspeed import comm as dist
from torch.nn import Module
from torch.nn import Parameter
from .linear import zero3_linear_wrap
from deepspeed.utils import groups
import deepspeed
from ..utils import get_only_unique_item, see_memory_usage
from deepspeed.runtime.zero.config import DeepSpeedZeroConfig
from deepspeed.runtime.zero.utils import assert_ints_same_as_other_ranks
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.runtime.config_utils import get_config_default
from deepspeed.utils import instrument_w_nvtx, logger
from deepspeed.comm.comm import init_distributed
from deepspeed.utils.debug import (debug_param2name_id_shape, debug_param2name_id_shape_device, debug_module2name,
debug_param2name_id, debug_param2name_id_shape_status)
from deepspeed.accelerator import get_accelerator
from ..swap_tensor.partitioned_param_swapper import AsyncPartitionedParameterSwapper, PartitionedParamStatus
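# Module-level bookkeeping shared by the Init context below: param_count accumulates
# the number of parameter elements converted and is logged when the outermost context
# exits; zero_init_context tracks the nesting depth of Init contexts, so patching
# happens on the first __enter__ and unpatching on the last __exit__; top_level_context
# holds the outermost Init instance so that shutdown_init_context() and
# restore_init_context() can temporarily remove and later restore the wrappers.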
param_count = 0
partitioned_param_data_shape = [0]
zero_init_context = 0
top_level_context = None
class NoGatherHandle:
def __init__(self, param: Parameter) -> None:
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"expected param {param.ds_summary()} to be available")
param.data = param.ds_tensor.data.to(device=get_accelerator().current_device_name(),
non_blocking=True).view(param.ds_shape)
self.__param = param
def wait(self) -> None:
get_accelerator().current_stream().synchronize()
self.__param.ds_status = ZeroParamStatus.AVAILABLE
class NoGatherCoalescedHandle:
def __init__(self, params: List[Parameter]) -> None:
self.__params = params
self.__complete = False
for param in self.__params:
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"expected param {param.ds_summary()} to not be available")
param.data = param.ds_tensor.data.to(device=get_accelerator().current_device_name(),
non_blocking=True).view(param.ds_shape)
@instrument_w_nvtx
def wait(self) -> None:
if self.__complete:
return
get_accelerator().current_stream().synchronize()
for param in self.__params:
assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight"
param.ds_status = ZeroParamStatus.AVAILABLE
self.__complete = True
def _dist_allgather_fn(input_tensor: Tensor, output_tensor: Tensor, group=None):
return instrument_w_nvtx(dist.allgather_fn)(output_tensor, input_tensor, group=group, async_op=True)
def print_rank_0(message, debug=False, force=False):
rank = dist.get_rank()
if rank == 0 and (debug or force):
print(message)
# other variations
# - print for all ranks w/o interleaving
# printflock(f"[{rank}] {message}")
# - print to log file per rank
# log_rank_file(rank, message)
def debug_rank0(msg: str) -> None:
if dist.get_rank() == 0:
logger.debug(msg)
def is_zero_param(parameter):
if not torch.is_tensor(parameter):
return False
return hasattr(parameter, 'ds_id')
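# "zero param" here means a tensor that has already been converted by
# Init._convert_to_deepspeed_param below, which attaches ds_id along with the other
# ds_* attributes; plain parameters fail the hasattr check above.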
def _init_external_params(module):
if not hasattr(module, '_external_params'):
module._external_params = {}
def external_parameters(self):
return self._external_params.items()
def all_parameters(self):
return itertools.chain(self.named_parameters(self, recurse=False), external_parameters(self))
module.ds_external_parameters = types.MethodType(external_parameters, module)
module.all_parameters = types.MethodType(all_parameters, module)
def register_external_parameter(module, parameter):
"""Instruct DeepSpeed to coordinate ``parameter``'s collection and partitioning in
the forward and backward passes of ``module``.
This is used when a parameter is accessed outside of its owning module's
``forward()``. DeepSpeed must know to collect it from its partitioned
state and when to release the memory.
.. note::
This is only applicable to training with ZeRO stage 3.
Args:
module (``torch.nn.Module``): The module that requires ``parameter`` in its forward pass.
parameter (``torch.nn.Parameter``): The parameter to register.
Raises:
RuntimeError: If ``parameter`` is not of type ``torch.nn.Parameter``.
Examples
========
#. Register a weight that is used in another module's forward pass (line 6).
Parameter ``layer1.weight`` is used by ``layer2`` (line 11).
.. code-block:: python
:linenos:
:emphasize-lines: 6,11
class ModuleZ3(torch.nn.Module):
def __init__(self, *args):
super().__init__()
self.layer1 = SomeLayer()
self.layer2 = OtherLayer()
deepspeed.zero.register_external_parameter(self, self.layer1.weight)
def forward(self, input):
x = self.layer1(input)
# self.layer1.weight is required by self.layer2.forward
y = self.layer2(x, self.layer1.weight)
return y
"""
if not isinstance(parameter, torch.nn.Parameter):
raise RuntimeError('Parameter is not a torch.nn.Parameter')
if not hasattr(module, '_external_params'):
_init_external_params(module)
key = id(parameter)
module._external_params[key] = parameter
def unregister_external_parameter(module, parameter):
"""Reverses the effects of :meth:`register_external_parameter`.
Args:
module (``torch.nn.Module``): The module to affect.
parameter (``torch.nn.Parameter``): The parameter to unregister.
Raises:
RuntimeError: If ``parameter`` is not of type ``torch.nn.Parameter``.
RuntimeError: If ``parameter`` is not a registered external parameter of ``module``.
"""
if not isinstance(parameter, torch.nn.Parameter):
raise RuntimeError('Parameter is not a torch.nn.Parameter')
if not hasattr(module, '_external_params') or id(parameter) not in module._external_params:
raise RuntimeError('Parameter is not a registered external parameter of module.')
key = id(parameter)
del module._external_params[key]
class ZeroParamType(Enum):
# same as regular pytorch parameters
NORMAL = 1
# parameters are partitioned across data parallel processes
PARTITIONED = 2
# the parameter is held with a unique process rank
# and is not available on all other processes
REMOTE = 3
class ZeroParamStatus(Enum):
# parameters are fully present and ready for use on all processes
AVAILABLE = 1
# parameters are either partitioned or remote in some or all processes
NOT_AVAILABLE = 2
# parameters are being gathered.
INFLIGHT = 3
_orig_torch_empty = torch.empty
_orig_torch_zeros = torch.zeros
_orig_torch_ones = torch.ones
_orig_torch_full = torch.full
_orig_torch_arange = torch.arange
_orig_torch_eye = torch.eye
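# While an Init context is active, the wrappers below replace these constructors:
# tensors created with no explicit device are placed directly on the local accelerator
# (indexed by LOCAL_RANK), and floating-point results are cast to the context's target
# dtype. The originals saved above are restored when the context exits
# (see _remove_tensor_creation_wrappers).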
def zero_wrapper_for_fp_tensor_constructor(fn: Callable, target_fp_dtype: torch.dtype) -> Callable:
def wrapped_fn(*args, **kwargs) -> Tensor:
if kwargs.get("device", None) is None:
kwargs['device'] = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"]))
tensor: Tensor = fn(*args, **kwargs)
if tensor.is_floating_point():
tensor = tensor.to(target_fp_dtype)
return tensor
return wrapped_fn
def get_new_tensor_fn_for_dtype(dtype: torch.dtype) -> Callable:
def new_tensor(cls, *args) -> Tensor:
device = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"]))
tensor = _orig_torch_empty(0, device=device).new_empty(*args)
if tensor.is_floating_point():
tensor = tensor.to(dtype)
return tensor
return new_tensor
# https://stackoverflow.com/a/63851681/9201239
def get_all_subclasses(cls):
subclass_list = []
def recurse(cl):
for subclass in cl.__subclasses__():
subclass_list.append(subclass)
recurse(subclass)
recurse(cls)
return set(subclass_list)
@instrument_w_nvtx
def free_param(param: Parameter) -> None:
"""Free underlying storage of a parameter."""
assert not param.ds_active_sub_modules, param.ds_summary()
if get_accelerator().on_accelerator(param.data):
# need to make sure that we don't free the parameter while it is still
# being used for computation
param.data.record_stream(get_accelerator().current_stream())
# param.data doesn't store anything meaningful in partitioned state
param.data = torch.empty(0, dtype=param.dtype, device=param.device)
param.ds_status = ZeroParamStatus.NOT_AVAILABLE
reuse_buffers = False
temp_contiguous_tensor = None
empty_buffers = {}
# Inserts _post_init_method at the end of init method
# for all sub classes of torch.nn.Module
class InsertPostInitMethodToModuleSubClasses(object):
def __init__(self, enabled=True, mem_efficient_linear=True, ds_config=None, dtype=None):
self.mem_efficient_linear = mem_efficient_linear
self.enabled = enabled
self._set_dtype(ds_config, dtype)
assert self.dtype in [
torch.half, torch.bfloat16, torch.float
], f"Invalid data type {self.dtype}, allowed values are [torch.half, torch.bfloat16, torch.float]"
self.wrapped_cls = set()
def __enter__(self):
if not self.enabled:
return
global zero_init_context
if zero_init_context == 0:
self.patch_init_and_builtins()
global top_level_context
top_level_context = self
zero_init_context += 1
def __exit__(self, exc_type, exc_value, traceback):
if not self.enabled:
return
global zero_init_context
zero_init_context -= 1
# Exiting the top level context
if zero_init_context == 0:
self.unpatch_init_and_builtins()
global top_level_context
top_level_context = None
if dist.get_rank() == 0:
logger.info("finished initializing model with %.2fB parameters", param_count / 1e9)
# Now that we cleaned up the metaclass injection, raise the exception.
if exc_type is not None:
return False
# To be implemented by inheriting classes
def _post_init_method(self, module):
pass
def _set_dtype(self, ds_config, dtype):
if ds_config is not None and dtype is None:
if ds_config.bfloat16_enabled and ds_config.fp16_enabled:
raise RuntimeError("bfloat16 and fp16 cannot be enabled at once")
if ds_config.bfloat16_enabled:
self.dtype = torch.bfloat16
elif ds_config.fp16_enabled:
self.dtype = torch.half
else:
self.dtype = torch.float
else:
self.dtype = dtype or torch.half
def patch_init_and_builtins(self):
def apply_with_gather(orig_module_apply_fn: Callable) -> Callable:
"""many models make use of child modules like Linear or Embedding which
perform their own weight initialization in their __init__ methods,
but will then have more weight initialization in a parent module's __init__
method that modifies weights of child modules, which is typically done
using the Module.apply method.
since the Init context manager partitions child modules immediately after
they are initialized, without modifying apply we would entirely skip
any initialization done by parent modules.
to get around this issue, we wrap the function passed to Module.apply
so that the applied function is applied to child modules correctly.
"""
def get_wrapped_fn_to_apply(fn_to_apply: Callable) -> Callable:
if hasattr(fn_to_apply, "wrapped"):
return fn_to_apply
@functools.wraps(fn_to_apply)
def wrapped_fn_to_apply(module_to_apply_fn_to: Module) -> None:
"""gathers parameters before calling apply function. afterwards
parameters are broadcasted to ensure consistency across all ranks
then re-partitioned.
takes the following steps:
1. allgathers parameters for the current module being worked on
2. calls the original function
3. broadcasts root rank's parameters to the other ranks
4. re-partitions the parameters
"""
if not all(is_zero_param(p) for p in module_to_apply_fn_to.parameters(recurse=False)):
raise RuntimeError(f"not all parameters for {module_to_apply_fn_to.__class__.__name__}, "
f"were zero params, is it possible that the parameters were "
f"overwritten after they were initialized? "
f"params: {[p for p in module_to_apply_fn_to.parameters(recurse=False)]} ")
params_to_apply_fn_to: Iterable[Parameter] = list(
sorted(module_to_apply_fn_to.parameters(recurse=False), key=lambda p: p.ds_id))
for param in params_to_apply_fn_to:
param.all_gather()
fn_to_apply(module_to_apply_fn_to)
for param in params_to_apply_fn_to:
dist.broadcast(param.data, 0, group=param.ds_process_group)
for param in params_to_apply_fn_to:
param.partition(has_been_updated=True)
wrapped_fn_to_apply.wrapped = True
return wrapped_fn_to_apply
@functools.wraps(orig_module_apply_fn)
def wrapped_apply(module: Module, fn_to_apply: Callable) -> None:
orig_module_apply_fn(module, get_wrapped_fn_to_apply(fn_to_apply))
return wrapped_apply
def partition_after(f):
@functools.wraps(f)
def wrapper(module, *args, **kwargs):
# important logic: We want to run post_init only after child's __init__ is
# completed, and do nothing after __init__ of any of its parents and grandparents in
# the inheritance ancestry. This way the partitioning will need to happen only once
# when the whole object is ready to be partitioned and not before. This is because
# often the child module will need to tweak the weights - for example running a
# custom weights init function. So if a parent created the weights param, the child
# won't need to gather it in order to tweak it
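# For example, with `class Child(Parent)` where both define __init__, the wrapped
# Child.__init__ runs first and sets _ds_child_entered; the wrapped Parent.__init__
# invoked via super() sees the flag on the same object and skips _post_init_method,
# so partitioning runs exactly once, after Child.__init__ returns with the fully
# constructed module.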
print_rank_0(f'Before initializing {module.__class__.__name__}', force=False)
is_child_module = False
if not hasattr(module, "_ds_child_entered"):
# this is the outermost (most derived) class's __init__; parent __init__ calls made
# via super() see this flag on the same object and skip their own post_init
is_child_module = True
setattr(module, "_ds_child_entered", True)
f(module, *args, **kwargs)
if is_child_module:
# child's __init__ is done, now we can run a single post_init on the child object
delattr(module, "_ds_child_entered")
print_rank_0(f'Running post_init for {module.__class__.__name__}', force=False)
self._post_init_method(module)
print_rank_0(f'After initializing followed by post init for {module.__class__.__name__}', force=False)
return wrapper
def _enable_class(cls):
cls._old_init = cls.__init__
cls.__init__ = partition_after(cls.__init__)
def _init_subclass(cls, **kwargs):
cls._old_init = cls.__init__
cls.__init__ = partition_after(cls.__init__)
# Replace .__init__() for all existing subclasses of torch.nn.Module recursively
for subclass in get_all_subclasses(torch.nn.modules.module.Module):
_enable_class(subclass)
# holding onto some methods so we can put them back the way they were in __exit__
torch.nn.modules.module.Module._old_init_subclass = torch.nn.modules.module.Module.__init_subclass__
torch.nn.modules.module.Module._old_apply = torch.nn.modules.module.Module.apply
torch.Tensor.__old_new__ = torch.Tensor.__new__
# Replace .__init__() for future subclasses of torch.nn.Module
torch.nn.modules.module.Module.__init_subclass__ = classmethod(_init_subclass)
torch.nn.modules.module.Module.apply = apply_with_gather(torch.nn.modules.module.Module._old_apply)
self._add_tensor_creation_wrappers()
if self.mem_efficient_linear:
print_rank_0(
"nn.functional.linear has been overridden with a more memory efficient version. This will persist unless manually reset.",
force=False)
self.linear_bk = torch.nn.functional.linear
torch.nn.functional.linear = zero3_linear_wrap
self.patched = True
def unpatch_init_and_builtins(self):
if self.patched:
def _disable_class(cls):
cls.__init__ = cls._old_init
for subclass in get_all_subclasses(torch.nn.modules.module.Module):
_disable_class(subclass)
# putting methods back the way we found them
torch.nn.modules.module.Module.__init_subclass__ = torch.nn.modules.module.Module._old_init_subclass
torch.nn.modules.module.Module.apply = torch.nn.modules.module.Module._old_apply
self._remove_tensor_creation_wrappers()
self.patched = False
def _add_tensor_creation_wrappers(self):
torch.Tensor.__new__ = get_new_tensor_fn_for_dtype(self.dtype)
torch.empty = zero_wrapper_for_fp_tensor_constructor(_orig_torch_empty, self.dtype)
torch.zeros = zero_wrapper_for_fp_tensor_constructor(_orig_torch_zeros, self.dtype)
torch.ones = zero_wrapper_for_fp_tensor_constructor(_orig_torch_ones, self.dtype)
torch.full = zero_wrapper_for_fp_tensor_constructor(_orig_torch_full, self.dtype)
torch.arange = zero_wrapper_for_fp_tensor_constructor(_orig_torch_arange, self.dtype)
torch.eye = zero_wrapper_for_fp_tensor_constructor(_orig_torch_eye, self.dtype)
def _remove_tensor_creation_wrappers(self):
torch.Tensor.__new__ = torch.Tensor.__old_new__
torch.empty = _orig_torch_empty
torch.zeros = _orig_torch_zeros
torch.ones = _orig_torch_ones
torch.full = _orig_torch_full
torch.arange = _orig_torch_arange
torch.eye = _orig_torch_eye
def shutdown_init_context():
"""
Remove the Init wrappers (patched __init__ and tensor builtins) while keeping the
context alive, so that the DeepSpeed engine can be initialized inside an Init context.
"""
if top_level_context:
top_level_context.unpatch_init_and_builtins()
def restore_init_context():
"""
Restore the wrappers after the DeepSpeed engine has been initialized.
"""
if top_level_context:
top_level_context.patch_init_and_builtins()
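# A minimal sketch of how the two hooks above are intended to pair up (an illustrative
# assumption about the calling convention, not something defined in this file):
#
#     with deepspeed.zero.Init(config_dict_or_path=ds_config):
#         model = MyLargeModel()
#         shutdown_init_context()    # drop the patched builtins before engine setup
#         engine, *_ = deepspeed.initialize(model=model, config=ds_config)
#         restore_init_context()     # re-apply them for any later construction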
class AllGatherHandle:
def __init__(self, handle, param: Parameter, quantization=None) -> None:
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"expected param {param.ds_summary()} to be available")
self.__handle = handle
self.__param = param
self.__quantization = quantization
def wait(self) -> None:
instrument_w_nvtx(self.__handle.wait)()
if self.__quantization:
instrument_w_nvtx(self.__quantization.quant_handle.wait)()
self.__param.data = self.__quantization.backend.dequantize(
self.__quantization.quantized_param, self.__quantization.scale_buffer).to(self.__param.device)
self.__param.ds_status = ZeroParamStatus.AVAILABLE
class AllGatherCoalescedHandle:
def __init__(
self,
allgather_handle,
params: List[Parameter],
partitions: List[Tensor],
world_size: int,
use_secondary_tensor=False,
forward=False,
quantization=None,
) -> None:
self.allgather_handle = allgather_handle
self.params = params
self.partitions = partitions
self.world_size = world_size
self.use_secondary_tensor = use_secondary_tensor
self.forward = forward
self.complete = False
self.quantization = quantization
for param in self.params:
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"expected param {param.ds_summary()} to not be available")
@instrument_w_nvtx
def wait(self) -> None:
if self.complete:
return
instrument_w_nvtx(self.allgather_handle.wait)()
if self.quantization:
instrument_w_nvtx(self.quantization.quant_handle.wait)()
flat_tensor = self.quantization.backend.dequantize(
self.quantization.quantized_param, self.quantization.scale_buffer).to(self.params[0].device)
self.partitions: List[Parameter] = []
for i in range(self.quantization.world_size):
self.partitions.append(
flat_tensor.narrow(0, self.quantization.partition_sz * i, self.quantization.partition_sz))
# split the single tensor out into individual tensors
param_offset = 0
for param in self.params:
assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight"
partitions: List[Tensor] = []
ds_tensor_numel = param.ds_tensor.ds_numel
if self.use_secondary_tensor and not self.forward:
ds_tensor_numel *= param.ds_secondary_tensor_num_of_groups
for rank in range(self.world_size):
param_start = rank * ds_tensor_numel
if param_start < param.ds_numel:
part_to_copy = self.partitions[rank].narrow(0, param_offset,
min(param.ds_numel - param_start, ds_tensor_numel))
partitions.append(part_to_copy)
param.data = instrument_w_nvtx(torch.cat)(partitions).view(param.ds_shape)
param.ds_status = ZeroParamStatus.AVAILABLE
for part_to_copy in partitions:
part_to_copy.record_stream(get_accelerator().current_stream())
param_offset += ds_tensor_numel
self.complete = True
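# Layout assumed by the reassembly loop above: each rank's flat partition is the
# concatenation of per-parameter shards in ds_id order, each ds_tensor.ds_numel long,
# with padding on the last rank. E.g. for params A (ds_numel=5) and B (ds_numel=8)
# with world_size=2, each rank holds [A-shard(3) | B-shard(4)]; A is rebuilt from
# rank0[0:3] + rank1[0:2] and B from rank0[3:7] + rank1[3:7].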
class QuantizationInfo:
# a placeholder object to store all quant related vars used in handles
def __init__(self) -> None:
self.quantized_param = None
self.backend = None
self.quant_handle = None
self.scale_buffer = None
class CUDAQuantizer:
async_flag = True
target_group_size = 8000 # the optimal size is 4k, so we set the target to be below 8k
group_size_cache = dict()
def __init__(self):
self.quantizer_cuda_module = deepspeed.ops.op_builder.QuantizerBuilder().load()
def quantize(self, param, groups=None):
if groups is None:
try:
groups = self.group_size_cache[param.numel()]
except KeyError:
groups = math.ceil(param.numel() / self.target_group_size)
while groups < param.numel():
if param.numel() % (8 * groups) == 0:
break
groups += 1
while True:
if param.numel() % (8 * groups * 2) == 0 and param.numel(
) / groups > self.target_group_size: #hard limit of 16k group_size
groups *= 2
else:
break
assert (
param.numel() % (8 * groups) == 0
), f"Qantized weight requires the number of weights be a multiple of 8. Yet {param.numel()} cannot be divided by 8*{groups}"
assert (param.numel() / groups < 16000), f"{param.numel()} / {groups} is larger than 16k"
assert param.numel(
) > groups, f"Adaptive grouping algorithm cannot find a group size for input tensor of size {param.numel()}"
self.group_size_cache[param.numel()] = groups
return self.quantizer_cuda_module.quantize(param.to(get_accelerator().device_name()), groups, 8,
self.quantizer_cuda_module.Symmetric)
def dequantize(self, quantized_param, scale):
return self.quantizer_cuda_module.dequantize(quantized_param, scale, scale.numel(), 8,
self.quantizer_cuda_module.Symmetric)
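# Illustrative walk-through of the adaptive grouping in quantize() above, assuming a
# parameter with 1,000,000 elements: the initial guess is ceil(1e6 / 8000) = 125 groups;
# 1e6 % (8 * 125) == 0, so the first loop keeps 125; the doubling loop stops immediately
# because 1e6 / 125 == 8000 is not greater than target_group_size, leaving 125 groups
# of 8000 elements each, which satisfies all three asserts.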
def _no_gather_coalesced(params: Iterable[Parameter]) -> AllGatherCoalescedHandle:
for param in params:
if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
raise RuntimeError(param.ds_summary())
param.ds_status = ZeroParamStatus.INFLIGHT
params = sorted(params, key=lambda p: p.ds_id)
if len(params) == 1:
param, = params
return NoGatherHandle(param)
return NoGatherCoalescedHandle(params)
# Replaces all parameters in module with Scattered Parameters
class Init(InsertPostInitMethodToModuleSubClasses):
param_id = 0
param_persistence_threshold = get_config_default(DeepSpeedZeroConfig, "param_persistence_threshold")
model_persistence_threshold = get_config_default(DeepSpeedZeroConfig, "model_persistence_threshold")
num_persisted_parameters = 0
num_persisted_elements = 0
apply_param_persistence = False
def __init__(self,
module=None,
data_parallel_group=None,
mem_efficient_linear=True,
remote_device=None,
pin_memory=False,
config_dict_or_path=None,
config=None,
enabled=True,
dtype=None,
mpu=None,
zero_param_parallel_group=None,
zero_quantized_weights=False):
"""A context to enable massive model construction for training with
ZeRO-3. Models are automatically partitioned (or sharded) across the
system and converted to half precision.
Args:
module (``torch.nn.Module``, optional): If provided, partition the model as
if it was constructed in the context.
data_parallel_group (``deepspeed.comm`` process group, optional):
The group of processes to partition among. Defaults to all processes.
mem_efficient_linear (bool, optional): Replace
torch.nn.functional.linear with an implementation that allows
DeepSpeed to partition parameters. Defaults to ``True``.
remote_device (string, optional): The initial device to store model
weights e.g., ``cpu``, ``nvme``. Passing ``"cpu"`` will create the model in CPU
memory. The model may still be moved to GPU based on the
offload settings for training. Defaults to param offload device if a config is
defined, otherwise GPU.
pin_memory (bool, optional): Potentially increase performance by
using pinned memory for model weights. ``remote_device`` must be
``"cpu"``. Defaults to pin_memory value in config, otherwise ``False``.
config_dict_or_path (dict or ``json file``, optional): If provided, supplies the
configuration used, e.g., for swapping fp16 params to NVMe.
config (dict or ``json file``, optional): Deprecated, use config_dict_or_path instead.
enabled (bool, optional): If ``False``, this context has no
effect. Defaults to ``True``.
dtype (``dtype``, optional): Can be used to change the data type of the parameters.
Supported options are ``torch.half``, ``torch.bfloat16``, and ``torch.float``. Defaults to ``None``.
mpu (``object``, optional): A model parallelism unit object that implements get_{model,data}_parallel_{rank,group,world_size}.
zero_param_parallel_group(``object``, optional): Parallel (comm) group for dual partitioning of ZeRO params.
zero_quantized_weights (bool, optional): If ``True``, use quantized weights for the weight all-gather. Default is ``False``.
This context accelerates model initialization and enables models that
are too large to allocate in their entirety in CPU memory. It has the
following effects:
#. allocates tensors to either GPU or CPU memory or NVMe
#. converts floating point tensors to half precision
#. immediately partitions tensors among the group of data-parallel devices
#. (*optional*) replaces ``torch.nn.functional.linear`` with a more
memory-efficient implementation
These modifications allow for models that exceed the size of local CPU/GPU
memory/NVMe, but fit within the total capacity (*i.e.*, aggregate CPU
or GPU memory or NVMe) across all nodes. Consider initializing a model with one
trillion parameters, whose weights occupy two terabytes (TB) in half
precision. The initial CPU allocation in full precision requires 4TB of
memory *per process*, and so a system with 8 GPUs per node would need 32TB of
CPU memory due to data-parallel redundancies. Instead, by immediately
partitioning tensors we remove the redundancies. The result is that
regardless of the number of GPUs, we still only require the original 4TB. This
allows for a linear increase in model size with the aggregate system memory.
For example, if a node has 1TB of memory and 8 GPUs, we could fit a trillion
parameter model with 4 nodes and 32 GPUs.
Important: If the fp16 weights of the model cannot fit in the memory of a single GPU,
this feature must be used.
.. note::
Initializes ``deepspeed.comm`` if it has not already been done so.
See :meth:`deepspeed.init_distributed` for more information.
.. note::
Only applicable to training with ZeRO-3.
Examples
--------
#. Allocate a model and partition it among all processes:
.. code-block:: python
with deepspeed.zero.Init():
model = MyLargeModel()
#. Allocate a model in pinned CPU memory and partition it among a subgroup of processes:
.. code-block:: python
with deepspeed.zero.Init(data_parallel_group=mpu.get_data_parallel_group(),
remote_device="cpu",
pin_memory=True):
model = MyLargeModel()
#. Partition an already-allocated model in CPU memory:
.. code-block:: python
model = deepspeed.zero.Init(module=model)
"""
if config is not None:
config_dict_or_path = config
logger.warning(
f'zero.Init: the `config` argument is deprecated. Please use `config_dict_or_path` instead.')
_ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,
mpu) if config_dict_or_path is not None else None
if _ds_config is not None:
mem_efficient_linear = _ds_config.zero_config.memory_efficient_linear
super().__init__(enabled=enabled, mem_efficient_linear=mem_efficient_linear, ds_config=_ds_config, dtype=dtype)
if not dist.is_initialized():
init_distributed()
assert dist.is_initialized(), "Parameters cannot be scattered without initializing deepspeed.comm"
if data_parallel_group is None:
self.ds_process_group = dist.get_world_group()
else:
self.ds_process_group = data_parallel_group
self.rank = dist.get_rank(group=self.ds_process_group)
self.dp_world_size = dist.get_world_size(group=self.ds_process_group)
self.zero_param_process_group = zero_param_parallel_group
if _ds_config is not None and _ds_config.zero_config.zero_hpz_partition_size > 1 and self.zero_param_process_group is None:
groups._create_zero_param_parallel_group(_ds_config.zero_config.zero_hpz_partition_size)
self.zero_param_process_group = groups._get_zero_param_intra_parallel_group()
self.num_ranks_in_param_group = self.dp_world_size
self.rank_in_group = self.rank
self.num_param_groups = 1
if self.zero_param_process_group is not None:
self.num_ranks_in_param_group = groups._get_zero_param_intra_parallel_group_world_size()
self.num_param_groups = int(self.dp_world_size / self.num_ranks_in_param_group)
self.rank_in_group = groups._get_zero_param_intra_parallel_rank_in_mygroup()
print_rank_0(f"hpZeRO group size? {self.num_ranks_in_param_group}", force=True)
logger.debug(
"hpZeRO partition parameter my rank in world {} my rank in group {} ranks in my param partition group: {} "
.format(self.rank, self.rank_in_group, groups._get_zero_param_intra_parallel_group_ranks()))
# Local device is the device where the parameters are consumed, must be default device.
# It is the device where parameters are fully instantiated using allgather
self.local_device = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"]))
get_accelerator().set_device(self.local_device)
self.quantized_weights = zero_quantized_weights
if _ds_config is not None and _ds_config.zero_config.zero_quantized_weights and not self.quantized_weights:
self.quantized_weights = _ds_config.zero_config.zero_quantized_weights
self.module = module
if (self.quantized_weights):
self.quantizer_module = CUDAQuantizer()
print_rank_0(f'Using quantizer: {self.quantizer_module.__class__.__name__}', force=True)
if _ds_config is not None and _ds_config.zero_config.offload_param is not None:
remote_device = _ds_config.zero_config.offload_param.device
pin_memory = _ds_config.zero_config.offload_param.pin_memory
self._validate_remote_device(remote_device, _ds_config)
# Remote device is the device where parameter partitions are stored
# It can be the same as local_device, or it can be CPU or NVMe.
self.remote_device = self.local_device if remote_device in [None, OffloadDeviceEnum.none] else remote_device
self.pin_memory = pin_memory if (self.remote_device in [OffloadDeviceEnum.cpu, OffloadDeviceEnum.nvme
]) else False
# Enable fp16 param swapping to NVMe
if self.remote_device == OffloadDeviceEnum.nvme:
self.param_swapper = AsyncPartitionedParameterSwapper(_ds_config, self.dtype)
else:
self.param_swapper = None
# If we are provided an already-allocated module to prepare.
if module is not None:
assert isinstance(module, torch.nn.Module)
self._convert_to_zero_parameters(module.parameters(recurse=True))
self.use_all_gather_into_tensor = dist.has_all_gather_into_tensor()
if not self.use_all_gather_into_tensor:
logger.info(f"all_gather_into_tensor API is not available in torch {torch.__version__}")
def _update_persist_config(self, ds_config):
Init.apply_param_persistence = True
Init.param_persistence_threshold = ds_config.zero_config.param_persistence_threshold
Init.model_persistence_threshold = ds_config.zero_config.model_persistence_threshold // self.num_partitions
def _convert_to_zero_parameters(self, param_list):
for param in param_list:
if is_zero_param(param):
continue
self._convert_to_deepspeed_param(param)
param.partition()
def _validate_remote_device(self, remote_device, ds_config):
if ds_config is not None:
if remote_device in [None, OffloadDeviceEnum.cpu]:
if ds_config.zero_config.offload_param is not None:
offload_param_device = ds_config.zero_config.offload_param.device
assert offload_param_device != OffloadDeviceEnum.nvme, \
f"'device' in DeepSpeed Config cannot be {offload_param_device} if remote device is {remote_device}."
if remote_device == OffloadDeviceEnum.nvme:
assert ds_config.zero_config.offload_param is not None, \
f'"offload_param" must be defined in DeepSpeed Config if remote device is {OffloadDeviceEnum.nvme}.'
assert ds_config.zero_config.offload_param.nvme_path is not None, \
f'"nvme_path" in DeepSpeed Config cannot be None if remote device is {OffloadDeviceEnum.nvme}'
def _post_init_method(self, module):
#see_memory_usage(f"Before converting params in {module.__class__.__name__}", force=False)
print_rank_0(f'Converting Params in {module.__class__.__name__}', force=False)
see_memory_usage(f"Before converting and partitioning params in {module.__class__.__name__}", force=False)
global param_count
for name, param in module.named_parameters(recurse=False):
param_count += param.numel()
if not is_zero_param(param):
self._convert_to_deepspeed_param(param)
print_rank_0(
f"Partitioning param {debug_param2name_id_shape(param)} module={debug_module2name(module)}")
if get_accelerator().on_accelerator(param):
if dist.get_world_group() == self.get_dp_process_group():
dist.broadcast(param, 0, self.get_dp_process_group())
else:
dist.broadcast(param, dist.get_global_rank(self.get_dp_process_group(), 0),
self.get_dp_process_group())
else:
if dist.get_rank() == 0:
logger.warn(f"param `{name}` in {module.__class__.__name__} "
f"not on GPU so was not broadcasted from rank 0")
param.partition()
see_memory_usage(
f"Param count {param_count}. After converting and partitioning params in {module.__class__.__name__}",
force=False)
def _convert_to_deepspeed_param(self, param):
# Partitioned, Normal, Remote
param.ds_param_type = ZeroParamType.PARTITIONED
# Replicated vs Partitioned vs Inflight
param.ds_status = ZeroParamStatus.AVAILABLE
# Stores the shape of the original tensor
param.ds_shape = param.shape
# Stores the number of elements in the original parameter without padding
param.ds_numel = param.numel()
# Stores the partitioned copy of the tensor
param.ds_tensor = None
# Keeps track of how many active sub-modules need this param at any given point in time
param.ds_active_sub_modules = set()
# If this flag is true, then the parameters are replicated throughout training
# and only partitioned before the step
if Init.apply_param_persistence and param.ds_numel <= Init.param_persistence_threshold and Init.num_persisted_elements + param.ds_numel <= Init.model_persistence_threshold:
param.ds_persist = True
Init.num_persisted_parameters += 1
Init.num_persisted_elements += param.ds_numel
else:
param.ds_persist = False
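# Two independent caps gate persistence: param_persistence_threshold bounds the size of
# any single persistent param, while model_persistence_threshold (divided by the number
# of partitions in _update_persist_config) bounds the running total of persisted elements.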
param.is_external_param = False
# The group that the parameter is scattered across.
param.ds_process_group = self.ds_process_group
# Stores the secondary partitioned copy of the tensor
param.ds_secondary_tensor = None
#Process group for secondary partition all (group) gather
param.ds_zero_param_process_group = self.zero_param_process_group
param.ds_secondary_tensor_group_size = self.num_ranks_in_param_group
param.ds_secondary_tensor_num_of_groups = self.num_param_groups
# This is set to the Async Param swapper if remote device is nvme
# else this is set to None
param.nvme_swapper = self.param_swapper
# DeepSpeed Param ID
param.ds_id = Init.param_id
Init.param_id += 1
def all_gather(param_list=None, async_op=False, hierarchy=0):
cls = param
if param_list is None:
param_list = [cls]
return self._all_gather(param_list, async_op=async_op, hierarchy=hierarchy)
@instrument_w_nvtx
def all_gather_coalesced(params: Iterable[Parameter],
forward: bool,
safe_mode: bool = False) -> AllGatherCoalescedHandle:
# fetches from nvme if the partition is not available and in nvme
self._ensure_availability_of_partitioned_params(params)
quant = self.quantized_weights
if self.module is not None and self.module.training is False:
quant = False
if self.num_partitions == 1:
return _no_gather_coalesced(params)
for param in params:
if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
raise RuntimeError(param.ds_summary())
param.ds_status = ZeroParamStatus.INFLIGHT
#use appropriate all gather process group
ds_process_group = self.ds_process_group
rank_in_group = self.rank
world_size = self.dp_world_size
use_secondary_tensor = False
if self.zero_param_process_group and not forward:
ds_process_group = self.zero_param_process_group #intragroup
rank_in_group = self.rank_in_group
world_size = self.num_ranks_in_param_group
#pprint(dir(ds_process_group))
# ensure that each rank has params in the same order. the allgather
# is done by flattening the parameter list into a single tensor that
# can be allgathered in a single call - this means that if each rank
# gives a list of the same parameters in a different order we would
# silently get incorrect parameter values, and run into very
# difficult-to-debug correctness issues.
params = sorted(params, key=lambda p: p.ds_id)
if logger.isEnabledFor(logging.DEBUG):
debug_rank0(f"-allgather_coalesced: {[p.ds_id for p in params]}")
if safe_mode:
# ensure that same list (with same ordering) of parameters are
# being allgathered across all ranks, otherwise could mix
# data between tensors.
assert_ints_same_as_other_ranks([p.ds_id for p in params])
# ensure that tensors from each rank agree on the same ds_numel
# otherwise could mix data between tensors.
assert_ints_same_as_other_ranks([p.ds_tensor.ds_numel for p in params])
if len(params) == 1:
# have an opportunity to avoid some intermediate memory allocations
param, = params
buffer_size = math.ceil(param.ds_numel / world_size) * world_size
if not forward and param.ds_secondary_tensor is not None:
buffer_size = param.ds_secondary_tensor.shape[0] * world_size #make sure out is appropriately sized
param_buffer = torch.empty(
buffer_size,
dtype=param.dtype if not quant else torch.int8,
device=get_accelerator().current_device_name(),
requires_grad=False,
)
param_ds_tensor = param.ds_secondary_tensor if not forward and param.ds_secondary_tensor is not None else param.ds_tensor
if not quant:
handles = _dist_allgather_fn(
param_ds_tensor.to(get_accelerator().current_device_name()),
param_buffer,
ds_process_group,
)
param.data = param_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape).to(param.device)
return AllGatherHandle(handles, param)
else:
quantized_param, scales = self.quantizer_module.quantize(param_ds_tensor)
handle = _dist_allgather_fn(quantized_param.to(get_accelerator().current_device_name()),
param_buffer, ds_process_group)
quant_scale_buffer = torch.empty(
scales.numel() * world_size,
dtype=torch.float32,
device=get_accelerator().current_device_name(),
requires_grad=False,
)
quant_handle = _dist_allgather_fn(scales.to(get_accelerator().current_device_name()),
quant_scale_buffer, ds_process_group)
quant_info = QuantizationInfo()
quant_info.quantized_param = param_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape).to(
param.device)
quant_info.backend = self.quantizer_module
quant_info.quant_handle = quant_handle
quant_info.scale_buffer = quant_scale_buffer
return AllGatherHandle(handle, param, quantization=quant_info)
else:
partition_sz = sum(p.ds_tensor.ds_numel for p in params)
if params[0].ds_secondary_tensor is not None and not forward:
partition_sz = sum(p.ds_tensor.ds_numel * p.ds_secondary_tensor_num_of_groups for p in params)
flat_tensor = torch.empty(partition_sz * world_size,
dtype=get_only_unique_item(p.dtype
for p in params) if not quant else torch.int8,
device=get_accelerator().current_device_name(),
requires_grad=False)
if not quant:
partitions: List[Parameter] = []
for i in range(world_size):
partitions.append(flat_tensor.narrow(0, partition_sz * i, partition_sz))
if params[0].ds_secondary_tensor is not None and not forward:
use_secondary_tensor = True
instrument_w_nvtx(torch.cat)(
[p.ds_secondary_tensor.to(get_accelerator().current_device_name()) for p in params],
out=partitions[rank_in_group])
else:
instrument_w_nvtx(
torch.cat)([p.ds_tensor.to(get_accelerator().current_device_name()) for p in params],
out=partitions[rank_in_group])
handle = _dist_allgather_fn(partitions[rank_in_group], flat_tensor, ds_process_group)
#Fix get_partition_dp_group(params[0]))
return AllGatherCoalescedHandle(
allgather_handle=handle,
params=params,
partitions=partitions,
world_size=world_size,
use_secondary_tensor=use_secondary_tensor,
forward=forward,
)
else:
if params[0].ds_secondary_tensor is not None and not forward:
use_secondary_tensor = True
quantized_param, scales = self.quantizer_module.quantize(
instrument_w_nvtx(torch.cat)(
[p.ds_secondary_tensor.to(get_accelerator().current_device_name()) for p in params]))
else:
quantized_param, scales = self.quantizer_module.quantize(
instrument_w_nvtx(
torch.cat)([p.ds_tensor.to(get_accelerator().current_device_name()) for p in params]))
handle = _dist_allgather_fn(quantized_param, flat_tensor, ds_process_group)
quant_info = QuantizationInfo()
quant_scale_buffer = torch.empty(
scales.numel() * world_size,
dtype=torch.float32,
device=get_accelerator().current_device_name(),
requires_grad=False,
)
quant_handle = _dist_allgather_fn(scales, quant_scale_buffer, ds_process_group)
quant_info.quantized_param = flat_tensor
quant_info.backend = self.quantizer_module
quant_info.quant_handle = quant_handle
quant_info.scale_buffer = quant_scale_buffer
quant_info.partition_sz = partition_sz
quant_info.world_size = world_size
return AllGatherCoalescedHandle(
allgather_handle=handle,
params=params,
partitions=None,
world_size=world_size,
use_secondary_tensor=use_secondary_tensor,
forward=forward,
quantization=quant_info,
)
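# Summary of the paths above: a single param takes a fast path that all-gathers straight
# into one padded buffer; multiple params are flattened into a per-rank partition and
# gathered with a single collective. With quantization enabled, the int8 payload and the
# fp32 scales are gathered separately and dequantization is deferred to the returned
# handle's wait().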
def partition(param_list=None, backward=False, hierarchy=0, has_been_updated=False):
cls = param
print_rank_0(f"{'--'*hierarchy}----Partitioning param {debug_param2name_id_shape_device(cls)}",
force=False)
if param_list is None:
param_list = [cls]
self._partition(param_list, has_been_updated=has_been_updated)
def reduce_gradients_at_owner(param_list=None, hierarchy=0):
cls = param
if param_list is None:
param_list = [cls]
print_rank_0(
f"{'--'*hierarchy}----Reducing Gradients for param with ids {[param.ds_id for param in param_list]} to owner"
)
self._reduce_scatter_gradients(param_list)
def partition_gradients(param_list=None, partition_buffers=None, hierarchy=0, accumulate=False):
cls = param
print_rank_0(
f"{'--'*hierarchy}----Partitioning param gradient with id {debug_param2name_id_shape_device(cls)}")
if param_list is None:
param_list = [cls]
if isinstance(partition_buffers, torch.Tensor):
partition_buffers = [partition_buffers]
self._partition_gradients(param_list, partition_buffers=partition_buffers, accumulate=accumulate)
def aligned_size():
return self._aligned_size(param)
def padding_size():
return self._padding_size(param)
def partition_numel():
return self._partition_numel(param)
def item_override():
param.all_gather()
return param._orig_item()
def ds_summary(slf: torch.Tensor, use_debug_name: bool = False) -> dict:
return {
"id": debug_param2name_id(slf) if use_debug_name else slf.ds_id,
"status": slf.ds_status.name,
"numel": slf.numel(),
"ds_numel": slf.ds_numel,
"shape": tuple(slf.shape),
"ds_shape": tuple(slf.ds_shape),
"requires_grad": slf.requires_grad,
"grad_shape": tuple(slf.grad.shape) if slf.grad is not None else None,
"persist": slf.ds_persist,
"active_sub_modules": slf.ds_active_sub_modules,
"ds_tensor.shape": slf.ds_tensor.shape if slf.ds_tensor is not None else None
}
def convert_to_zero_parameters(param_list):
self._convert_to_zero_parameters(param_list)
def allgather_before(func: Callable) -> Callable:
def wrapped(*args, **kwargs):
param.all_gather()
return func(*args, **kwargs)
return wrapped
# Collectives for gathering and partitioning parameters
param.all_gather = all_gather
param.all_gather_coalesced = all_gather_coalesced
param.partition = partition
# Collective for averaging gradients
param.reduce_gradients_at_owner = reduce_gradients_at_owner
param.partition_gradients = partition_gradients
# Partitioning size utilities
param.aligned_size = aligned_size
param.padding_size = padding_size
param.partition_numel = partition_numel
param.ds_summary = types.MethodType(ds_summary, param)
param.item = allgather_before(param.item)
param.convert_to_zero_parameters = convert_to_zero_parameters
def _aligned_size(self, param):
return param.ds_numel + self._padding_size(param)
def _padding_size(self, param):
remainder = param.ds_numel % self.num_partitions
return (self.num_partitions - remainder) if remainder else 0
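# Example of the alignment math: with ds_numel=10 and num_partitions=4 the remainder is 2,
# so padding_size() = 2 and aligned_size() = 12, giving each rank a 3-element ds_tensor
# (the last rank's shard carries the 2 padding elements).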
def _partition_numel(self, param):
return param.ds_tensor.ds_numel
def _ensure_availability_of_partitioned_params(self, params):
swap_in_list = []
swap_in_flight = []
for param in params:
if param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE:
assert param.ds_tensor.final_location == OffloadDeviceEnum.nvme and param.ds_status == ZeroParamStatus.NOT_AVAILABLE
swap_in_list.append(param)
if param.ds_tensor.status == PartitionedParamStatus.INFLIGHT:
assert param.ds_tensor.final_location == OffloadDeviceEnum.nvme and param.ds_status == ZeroParamStatus.NOT_AVAILABLE
swap_in_flight.append(param)
if len(swap_in_list) > 0:
swap_in_list[0].nvme_swapper.swap_in(swap_in_list, async_op=False)
elif len(swap_in_flight) > 0:
swap_in_flight[0].nvme_swapper.synchronize_reads()
@instrument_w_nvtx
def _all_gather(self, param_list, async_op=False, hierarchy=None):
# fetches from nvme if the partition is not available and in nvme
self._ensure_availability_of_partitioned_params(param_list)
handles = []
all_gather_list = []
for param in param_list:
if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
if async_op:
handle = self._allgather_param(param, async_op=async_op, hierarchy=hierarchy)
param.ds_status = ZeroParamStatus.INFLIGHT # if async_op else ZeroParamStatus.AVAILABLE
handles.append(handle)
else:
all_gather_list.append(param)
if not async_op:
if len(param_list) == 1:
ret_value = self._allgather_params(all_gather_list, hierarchy=hierarchy)
else:
ret_value = self._allgather_params_coalesced(all_gather_list, hierarchy)
for param in all_gather_list:
param.ds_status = ZeroParamStatus.AVAILABLE
return ret_value
return handles
def _partition(self, param_list, force=False, has_been_updated=False):
for param in param_list:
print_rank_0(f"Before Partitioning Param {param.ds_id}", force=False)
if self.zero_param_process_group is not None:
self._partition_param_sec(param, has_been_updated=has_been_updated)
self._partition_param(param, has_been_updated=has_been_updated)
param.ds_status = ZeroParamStatus.NOT_AVAILABLE
# if param.ds_tensor is not None:
# assert id(param.data) == id(param.ds_tensor.data), \
# "After the parameters are initially partitioned, make sure we are not recreating the partition."
#print_rank_0(f"After Partitioning Param {param.ds_id} {param.ds_tensor.size()} {param.ds_tensor}",force=False)
@instrument_w_nvtx
def _partition_param(self, param, buffer=None, has_been_updated=False):
assert param.ds_status is not ZeroParamStatus.INFLIGHT, f" {param} Cannot partition a param in flight"
global reuse_buffers
print_rank_0(f"Param id {param.ds_id} status is {param.ds_status}", force=False)
if param.ds_status is ZeroParamStatus.AVAILABLE:
print_rank_0(f"Partitioning param id {param.ds_id} reuse buffers {reuse_buffers}", force=False)
# if reuse_buffers and False:
# numel = buffer.numel()
# buffer = param.data.view(-1)
# print_rank_0(
# "Returning buffer for param {param.ds_id} with numel {param.ds_numel} to empty buffers",
# force=False)
# if numel in empty_buffers:
# empty_buffers[numel].append(buffer)
# if deepspeed.comm.get_rank():
# print(f"Releasing {param.data.numel()}")
if param.ds_tensor is not None and not has_been_updated: ##param already partitioned
#print_rank_0(f"Param {param.ds_id} pri {param.ds_tensor.size()} loc? {param.ds_tensor.final_location}", force=True)
#param.data = param.ds_tensor.data
see_memory_usage(f'Before partitioning param {param.ds_id} {param.shape}', force=False)
# param.data does not store anything meaningful in partitioned state
free_param(param)
see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}', force=False)
if param.ds_tensor.final_location == OffloadDeviceEnum.nvme:
print_rank_0(f"Param {param.ds_id} partition released since it exists in nvme", force=False)
param.nvme_swapper.remove_partition_and_release_buffers([param])
print_rank_0(
f"after swap Param {param.ds_id} {param.ds_tensor.shape} partition released since it exists in nvme",
force=False)
return
tensor_size = self._aligned_size(param)
partition_size = tensor_size // self.num_partitions
if param.ds_tensor is None:
final_location = None
if self.remote_device == OffloadDeviceEnum.nvme and self.param_swapper.swappable_tensor(
numel=partition_size):
final_location = OffloadDeviceEnum.nvme
buffer = self.param_swapper.get_buffer(param, partition_size)
partitioned_tensor = torch.empty(0, dtype=param.dtype, device=buffer.device)
partitioned_tensor.data = buffer.data
print_rank_0(f"ID {param.ds_id} Initializing partition for the first time for nvme offload.")
else:
if param.ds_persist:
device = self.local_device
elif self.remote_device == OffloadDeviceEnum.nvme:
device = OffloadDeviceEnum.cpu
else:
device = self.remote_device
partitioned_tensor = torch.empty(partition_size, dtype=param.dtype, device=device)
if device == OffloadDeviceEnum.cpu and self.pin_memory:
partitioned_tensor = get_accelerator().pin_memory(partitioned_tensor)
partitioned_tensor.requires_grad = False
param.ds_tensor = partitioned_tensor
param.ds_tensor.ds_numel = partition_size
param.ds_tensor.status = PartitionedParamStatus.AVAILABLE
param.ds_tensor.final_location = final_location
start = partition_size * self.get_partition_rank()
end = start + partition_size
one_dim_param = param.contiguous().view(-1)
if start < param.ds_numel and end <= param.ds_numel:
src_tensor = one_dim_param.narrow(0, start, partition_size)
param.ds_tensor.copy_(src_tensor)
#partitioned_tensor = src_tensor.clone().detach().to(self.remote_device)
else:
# partitioned_tensor = torch.zeros(partition_size,
# dtype=param.dtype,
# device=self.remote_device )
if start < param.ds_numel:
elements_to_copy = param.ds_numel - start
param.ds_tensor.narrow(0, 0,
elements_to_copy).copy_(one_dim_param.narrow(0, start, elements_to_copy))
#print(f"Remote device {self.remote_device}")
#param.ds_tensor = partitioned_tensor
#param.data = param.ds_tensor.data
# param.data does not store anything meaningful in partitioned state
see_memory_usage(f'Before partitioning param {param.ds_id} {param.shape}', force=False)
free_param(param)
see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}', force=False)
if param.ds_tensor.final_location == OffloadDeviceEnum.nvme:
self.param_swapper.swap_out_and_release([param])
print_rank_0(f"ID {param.ds_id} Offloaded to nvme offload and buffers released.")
see_memory_usage(f"ID {param.ds_id} Offloaded to nvme offload and buffers released.", force=False)
print_rank_0(f"ID {param.ds_id} partitioned type {param.dtype} dev {param.device} shape {param.shape}")
@instrument_w_nvtx
def _partition_param_sec(self, param, buffer=None, has_been_updated=False):
assert param.ds_status is not ZeroParamStatus.INFLIGHT, f" {param} Cannot partition a param in flight"
global reuse_buffers
##support for NVME secondary param offload
#print_rank_0(f"SEC Param id {param.ds_id} status is {param.ds_status}", force=True)
if param.ds_status is ZeroParamStatus.AVAILABLE:
if param.ds_secondary_tensor is not None and not has_been_updated: ##param already partitioned
return
#check padding
tensor_size = self._aligned_size(param)
partition_size = tensor_size // self.dp_world_size
secondary_partition_size = int(tensor_size // self.num_ranks_in_param_group)
if param.ds_secondary_tensor is None:
final_location = None
secondary_partitioned_tensor = torch.empty(secondary_partition_size,
dtype=param.dtype,
device=self.remote_device)
if self.pin_memory:
secondary_partitioned_tensor = secondary_partitioned_tensor.pin_memory()
secondary_partitioned_tensor.requires_grad = False
param.ds_secondary_tensor = secondary_partitioned_tensor
param.ds_secondary_tensor.ds_numel = secondary_partition_size
param.ds_secondary_tensor.status = PartitionedParamStatus.AVAILABLE
param.ds_secondary_tensor.final_location = final_location
#use rank in group for secondary tensor
secondary_start = secondary_partition_size * self.rank_in_group
secondary_end = secondary_start + secondary_partition_size
one_dim_param = param.contiguous().view(-1)
start = partition_size * self.rank
end = start + partition_size
if start < param.ds_numel and end <= param.ds_numel:
if secondary_start < param.ds_numel and secondary_end <= param.ds_numel:
sec_src_tensor = one_dim_param.narrow(0, secondary_start, secondary_partition_size)
param.ds_secondary_tensor.copy_(sec_src_tensor)
else:
if start < param.ds_numel:
elements_to_copy = param.ds_numel - start
elements_to_copy_sec = elements_to_copy * param.ds_secondary_tensor_num_of_groups
param.ds_secondary_tensor.narrow(0, 0, elements_to_copy_sec).copy_(
one_dim_param.narrow(0, secondary_start, elements_to_copy_sec))
print_rank_0(f"{param.ds_id} partitioned type {param.dtype} dev {param.device} shape {param.shape}",
force=False)
def _param_status(self, param):
if param.ds_tensor is not None:
print_rank_0(
f"Param id {param.ds_id}, param status: {param.ds_status}, param numel {param.ds_numel}, partitioned numel {param.ds_tensor.numel()}, data numel {param.data.numel()}"
)
else:
print_rank_0(
f"Param id {param.ds_id}, param status: {param.ds_status}, param numel {param.ds_numel}, partitioned ds_tensor {param.ds_tensor}, data numel {param.data.numel()}"
)
def _allgather_param(self, param, async_op=False, hierarchy=0):
partition_size = param.ds_tensor.ds_numel
tensor_size = partition_size * self.num_partitions
aligned_param_size = self._aligned_size(param)
assert tensor_size == aligned_param_size, f'param id {param.ds_id} aligned size {aligned_param_size} does not match tensor size {tensor_size}'
print_rank_0(
f"{'--'* hierarchy}---- Before allocating allgather param {debug_param2name_id_shape_status(param)} partition size={partition_size}"
)
see_memory_usage(
f'Before allocate allgather param {debug_param2name_id_shape_status(param)} partition_size={partition_size} ',
force=False)
flat_tensor = torch.zeros(aligned_param_size, dtype=param.dtype, device=param.device).view(-1)
see_memory_usage(
f'After allocate allgather param {debug_param2name_id_shape_status(param)} {aligned_param_size} {partition_size} ',
force=False)
get_accelerator().synchronize()
print_rank_0(
f"{'--'* hierarchy}----allgather param with {debug_param2name_id_shape_status(param)} partition size={partition_size}"
)
# if not flat_tensor.numel() > 100000:
# replicated_tensor = flat_tensor.narrow(0,
# 0,
# param.ds_numel).view(param.ds_shape)
# param.data = replicated_tensor.data
# return None
if self.use_all_gather_into_tensor:
handle = dist.all_gather_into_tensor(flat_tensor,
param.ds_tensor.to(get_accelerator().device_name()),
group=self.get_partition_dp_group(param),
async_op=async_op)
else:
partitions = []
for i in range(self.num_partitions):
partitions.append(flat_tensor.narrow(0, partition_size * i, partition_size))
if i == dist.get_rank(group=self.get_partition_dp_group(param)):
partitions[i].data.copy_(param.ds_tensor.data, non_blocking=True)
handle = dist.all_gather(partitions,
partitions[self.get_partition_rank()],
group=self.get_partition_dp_group(param),
async_op=async_op)
replicated_tensor = flat_tensor.narrow(0, 0, param.ds_numel).view(param.ds_shape)
param.data = replicated_tensor.data
return handle
def _allgather_params_coalesced(self, param_list, hierarchy=0):
""" blocking call
avoid explicit memory copy in _allgather_params
"""
if len(param_list) == 0:
return
if self.num_partitions == 1:
handle = _no_gather_coalesced(param_list)
handle.wait()
return None
# collect local tensors and partition sizes
partition_sizes = []
local_tensors = []
for param in param_list:
partition_sizes.append(param.ds_tensor.ds_numel)
local_tensors.append(param.ds_tensor.to(get_accelerator().device_name()))
# allocate memory for allgather params
allgather_params = []
for psize in partition_sizes:
tensor_size = psize * self.num_partitions
flat_tensor = torch.empty(tensor_size, dtype=param_list[0].dtype, device=self.local_device).view(-1)
flat_tensor.requires_grad = False
allgather_params.append(flat_tensor)
# launch
launch_handles = []
for param_idx, param in enumerate(param_list):
input_tensor = local_tensors[param_idx].view(-1)
if self.use_all_gather_into_tensor:
# use the all_gather_into_tensor API (formerly _all_gather_base) when available
h = dist.all_gather_into_tensor(allgather_params[param_idx],
input_tensor,
group=self.get_partition_dp_group(param),
async_op=True)
else:
output_list = []
for i in range(self.num_partitions):
psize = partition_sizes[param_idx]
partition = allgather_params[param_idx].narrow(0, i * psize, psize)
output_list.append(partition)
if not get_accelerator().on_accelerator(partition):
logger.warning(
f'param {param_idx}, partition {i} is not on the accelerator, partition shape {partition.size()}')
# back to old all_gather function
h = dist.all_gather(output_list, input_tensor, group=self.get_partition_dp_group(param), async_op=True)
launch_handles.append(h)
# Wait ensures the operation is enqueued, but not necessarily complete.
launch_handles[-1].wait()
# assign to param.data (not copy)
for i, param in enumerate(param_list):
gathered_tensor = allgather_params[i]
param.data = gathered_tensor.narrow(0, 0, param.ds_numel).view(param.ds_shape).data
# guarantee the communication to be completed
get_accelerator().synchronize()
return None
def _allgather_params(self, param_list, hierarchy=0):
if len(param_list) == 0:
return
partition_size = sum([param.ds_tensor.ds_numel for param in param_list])
tensor_size = partition_size * self.num_partitions
flat_tensor = torch.empty(tensor_size, dtype=param_list[0].dtype, device=self.local_device)
flat_tensor.requires_grad = False
partitions = []
for i in range(self.num_partitions):
start = partition_size * i
partitions.append(flat_tensor.narrow(0, start, partition_size))
if i == self.get_partition_rank():
offset = 0
for param in param_list:
param_numel = param.ds_tensor.ds_numel
partitions[i].narrow(0, offset, param_numel).copy_(param.ds_tensor.data)
offset += param_numel
dist.all_gather(partitions,
partitions[self.get_partition_rank()],
group=self.get_partition_dp_group(param),
async_op=False)
param_offset = 0
for param in param_list:
param_partition_size = param.ds_tensor.ds_numel
param_size = param.ds_numel
replicated_tensor = torch.empty(param.ds_shape, dtype=param.dtype, device=self.local_device)
for i in range(self.num_partitions):
start = i * partition_size
param_start = i * param_partition_size
if param_start < param_size:
numel_to_copy = min(param_size - param_start, param_partition_size)
part_to_copy = partitions[i].narrow(0, param_offset, numel_to_copy)
replicated_tensor.view(-1).narrow(0, param_start, numel_to_copy).copy_(part_to_copy)
#param_offset += param.data.numel()
param_offset += param.ds_tensor.ds_numel
param.data = replicated_tensor.data
return None
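# Reduce-scatter each parameter's gradient so that every rank keeps only the summed
# slice it owns. Ranks whose slice straddles the end of the gradient reduce into a
# separate zero-padded buffer, and the valid elements are copied back afterwards.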
def _reduce_scatter_gradients(self, param_list):
#print_rank_0([param.grad for param in param_list])
#assert any([param.grad is None for param in param_list]), "None gradients cannot be reduce scattered"
handles_and_reduced_partitions = []
for param in param_list:
assert param.grad.numel(
) == param.ds_numel, f"{param.grad.numel()} != {param.ds_numel} Cannot reduce scatter gradients whose size is not same as the params"
handles_and_reduced_partitions.append(self._reduce_scatter_gradient(param))
for param, (handle, reduced_partition) in zip(param_list, handles_and_reduced_partitions):
if handle is not None:
handle.wait()
# some ranks may have partitions that are padded to go beyond the grad size.
# For these ranks the output of reduce scatter is a separate buffer and needs
# to be copied in
partition_size = param.ds_tensor.ds_numel
start = self.get_partition_rank() * partition_size
end = start + partition_size
#print_rank_0("REduce scatter was executed for param {param.ds_id}")
if start < param.ds_numel and end > param.ds_numel:
elements = param.ds_numel - start
param.grad.view(-1).narrow(0, start, elements).copy_(reduced_partition.narrow(0, 0, elements))
def _reduce_scatter_gradient(self, param):
partition_size = param.ds_tensor.ds_numel
#output = torch.empty(partition_size, dtype=param.dtype, device=param.device)
total_size = partition_size * self.num_partitions
input_list = []
for i in range(self.num_partitions):
start = i * partition_size
end = start + partition_size
#print("before reduce scatter gradients")
if start < param.ds_numel and end <= param.ds_numel:
input = param.grad.view(-1).narrow(0, start, partition_size)
else:
input = torch.zeros(partition_size, dtype=param.dtype, device=param.device)
if start < param.ds_numel:
elements = param.ds_numel - start
input.narrow(0, 0, elements).copy_(param.grad.view(-1).narrow(0, start, elements))
#print("after reduce scatter gradients")
input_list.append(input)
rank = dist.get_rank(group=self.get_partition_dp_group(param))
handle = dist.reduce_scatter(input_list[rank],
input_list,
group=self.get_partition_dp_group(param),
async_op=True)
return handle, input_list[rank]
def _partition_gradients(self, param_list, partition_buffers=None, accumulate=False):
if partition_buffers is None:
partition_buffers = [None] * len(param_list)
for param, partition_buffer in zip(param_list, partition_buffers):
self._partition_gradient(param, partition_buffer=partition_buffer, accumulate=accumulate)
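# Extract this rank's slice of param.grad into partition_buffer (allocating one if none
# is provided); with accumulate=True the slice is added to the existing buffer contents
# instead of overwriting them. param.grad is then re-pointed at the partitioned buffer.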
def _partition_gradient(self, param, partition_buffer=None, accumulate=False):
#import pdb;pdb.set_trace()
# param.grad=None
# param.grad.test()
print_rank_0(
f"Partitioning param {param.ds_id} gradient of size {param.grad.numel()} type {param.grad.dtype} part_size {param.ds_tensor.ds_numel}"
)
see_memory_usage("Before partitioning gradients", force=False)
partition_size = param.ds_tensor.ds_numel
if partition_buffer is None:
assert not accumulate, "No buffer to accumulate to"
partition_buffer = torch.zeros(partition_size, dtype=param.dtype, device=param.device)
else:
assert partition_buffer.numel(
) >= partition_size, f"The partition buffer size {partition_buffer.numel()} should be at least the size of param.ds_tensor {partition_size}"
rank = dist.get_rank(group=self.get_partition_dp_group(param))
start = partition_size * rank
end = start + partition_size
dest_tensor_full_buffer = partition_buffer.view(-1).narrow(0, 0, partition_size)
#print("before partition gradients")
if start < param.ds_numel:
elements = min(param.ds_numel - start, partition_size)
dest_tensor = dest_tensor_full_buffer.narrow(0, 0, elements)
src_tensor = param.grad.view(-1).narrow(0, start, elements)
# just copy the grad partition to the buffer
if not accumulate:
dest_tensor.copy_(src_tensor)
# if source and destination are on same device,
# add to the provided buffer
elif src_tensor.device == dest_tensor.device:
dest_tensor.add_(src_tensor)
# if source and destination are on different devices, copy the destination to the
# source device, accumulate there, and copy the result back. This runs faster when
# src is on GPU and dest is on CPU, since adding directly into CPU memory is very slow.
else:
acc_tensor = torch.empty(src_tensor.numel(), dtype=param.dtype, device=param.device)
acc_tensor.copy_(dest_tensor)
acc_tensor.add_(src_tensor)
dest_tensor.copy_(acc_tensor)
# partition_buffer.view(-1).narrow(
# 0,
# 0,
# elements).copy_(param.grad.view(-1).narrow(0,
# start,
# elements))
#print("after partition gradients")
param.grad.data = dest_tensor_full_buffer.data
see_memory_usage("After partitioning gradients", force=False)
def get_partition_dp_group(self, param):
return param.ds_process_group
def get_partition_rank(self):
"""subclass can overload to specify different relative rank in
parameter partition group"""
return self.rank
@property
def num_partitions(self):
return self.dp_world_size
def get_dp_process_group(self):
""" Return the communication group with all data-parallel ranks """
return self.ds_process_group
class GatheredParameters:
def __init__(self, params, modifier_rank=None, fwd_module=None, enabled=True):
"""A context that collects parameters that were partitioned via a
:class:`deepspeed.zero.Init` context. The parameters are partitioned
again upon exit.
Args:
params (``torch.nn.Parameter``): A single parameter, or an iterable (list, tuple, generator) of parameters to collect.
It's assumed that all parameters are zero params.
modifier_rank (int, optional): If specified, this rank's parameter will be
broadcasted on exit from the context. This argument is required if ``params`` are
modified, so that all processes have a consistent view of the data. Defaults
to ``None``.
fwd_module (``torch.nn.Module``, optional): If specified, ``params`` will be
registered as external parameters of ``fwd_module``. See :meth:`deepspeed.zero.register_external_parameter`.
enabled (bool, optional): If ``False``, this context is a no-op. Defaults to ``True``.
Important: Make sure to use ``modifier_rank`` that is not ``None`` (e.g., ``modifier_rank=0``)
if you need the GPU memory allocated by gather to be released upon exit from the context manager.
Important: if ``params`` isn't an iterable of parameters or a single parameter it'll be silently ignored!
Examples
========
#. Allocate a partitioned module, initialize its weight on rank 0, and update all
processes.
.. code-block:: python
with deepspeed.zero.Init():
linear = torch.nn.Linear(1000,1000)
with deepspeed.zero.GatheredParameters(linear.weight,
modifier_rank=0):
if deepspeed.comm.get_rank() == 0:
linear.weight.zero_()
#. Collect a partitioned weight to pass to another module during
training. The parameter will be registered as an external parameter
and made available during the backward pass.
.. code-block:: python
:emphasize-lines: 6
def forward(self, input):
x = self.layer1(input)
# self.layer1.weight is required by self.layer2.forward
with deepspeed.zero.GatheredParameters(self.layer1.weight,
fwd_module=self):
y = self.layer2(x, self.layer1.weight)
return y
#. Pretrained model loading
.. code-block:: python
with deepspeed.zero.Init():
model = MyModel()
state_dict = torch.load(model_path, map_location="cpu")
def load(module: nn.Module, prefix=""):
# because zero3 puts placeholders in model params, this context
# manager gathers (unpartitions) the params of the current layer, then loads from
# the state dict and then re-partitions them again
with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0):
if deepspeed.comm.get_rank() == 0:
module._load_from_state_dict(state_dict, prefix)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
load(model, prefix="")
If this approach is not used, then the full model will first be copied to each GPU. For models
bigger than the memory of a single GPU, this method is required.
"""
self.enabled = enabled
if not enabled:
return
if isinstance(params, Iterable) and not isinstance(params, torch.Tensor):
# deal with generators like model.parameters()
# must convert to list to be able to iterate more than once if we get a generator
params = list(params)
else:
# single param
params = [params]
# enable if at least one is zero-param, otherwise a noop
if not any(is_zero_param(p) for p in params):
self.enabled = False
return
self.params = [p for p in params if hasattr(p, "ds_id")]
self.params = sorted(
set(self.params), key=lambda x: x.ds_id
) # remove the duplicates to prevent racing condition, we must also make sure the order is the same on all ranks otherwise we'll get deadlocks
self.src_rank = None
if modifier_rank is not None:
if self.params[0].ds_process_group == dist.get_world_group():
self.src_rank = modifier_rank
else:
# A group was specified; convert DP rank to global rank
self.src_rank = dist.get_global_rank(self.params[0].ds_process_group, modifier_rank)
self.fwd_module = fwd_module
if self.fwd_module is not None:
# is a no-op if already registered
for p in self.params:
register_external_parameter(self.fwd_module, p)
def __enter__(self):
if not self.enabled:
return
self.params[0].all_gather(param_list=self.params)
def __exit__(self, *exc):
if not self.enabled:
return
if self.src_rank is None:
self.params[0].partition(param_list=self.params, has_been_updated=False)
return
handles = [dist.broadcast(p, self.src_rank, group=p.ds_process_group, async_op=True) for p in self.params]
for h in handles:
h.wait()
self.params[0].partition(param_list=self.params, has_been_updated=True)
# DeepSpeed-master/deepspeed/runtime/zero/offload_config.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from pydantic import Field, validator
from enum import Enum
from pathlib import Path
from deepspeed.runtime.config_utils import DeepSpeedConfigModel, pp_int
class OffloadDeviceEnum(str, Enum):
""" Enum for valid offload devices """
none = "none"
cpu = "cpu"
nvme = "nvme"
class DeepSpeedZeroOffloadParamConfig(DeepSpeedConfigModel):
""" Set options for parameter offload. Valid only with stage 3. """
device: OffloadDeviceEnum = "none"
"""
Device memory to offload model parameters. Supported options are `cpu` and
`nvme`.
"""
nvme_path: Path = None
""" Filesystem path for NVMe device for parameter offloading. """
buffer_count: int = Field(5, ge=0)
""" Number of buffers in buffer pool for parameter offloading to NVMe. """
buffer_size: int = Field(pp_int(1e8), ge=0)
""" Size of buffers in buffer pool for parameter offloading to NVMe. """
max_in_cpu: int = Field(pp_int(1e9), ge=0)
"""
Number of parameter elements to maintain in CPU memory when offloading to
NVMe is enabled.
"""
pin_memory: bool = False
"""
Offload to page-locked CPU memory. This could boost throughput at the cost
of extra memory overhead.
"""
class DeepSpeedZeroOffloadOptimizerConfig(DeepSpeedConfigModel):
""" Set options for optimizer offload. Valid with stage 1, 2, and 3. """
device: OffloadDeviceEnum = "none"
"""
Device memory to offload optimizer state. Supported options are `cpu` and
`nvme`. Optimizer computation is offloaded to CPU regardless of the device option.
"""
nvme_path: Path = None
""" Filesystem path for NVMe device for optimizer state offloading. """
buffer_count: int = Field(4, ge=0)
"""
Number of buffers in buffer pool for optimizer state offloading to NVMe.
This should be at least the number of states maintained per parameter by
the optimizer. For example, Adam optimizer has 4 states (parameter,
gradient, momentum, and variance).
"""
pin_memory: bool = False
"""
Offload to page-locked CPU memory. This could boost throughput at the cost
of extra memory overhead.
"""
pipeline_read: bool = False
"""
For tile-based optimizer step processing, overlap read of next tile with
computation of current tile. Used in ZeRO-Infinity.
"""
pipeline_write: bool = False
"""
For tile-based optimizer step processing, overlap write of previous tile
with computation of current tile.
"""
fast_init: bool = False
""" Enable fast optimizer initialization when offloading to NVMe. """
@validator("pipeline_read", "pipeline_write", always=True)
def set_pipeline(cls, field_value, values):
values["pipeline"] = field_value or values.get("pipeline", False)
return field_value
# DeepSpeed-master/deepspeed/runtime/zero/stage3.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import sys
import gc
import collections
from typing import Deque, Dict, Tuple
from deepspeed import comm as dist
from deepspeed.utils import groups
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from deepspeed.runtime import ZeROOptimizer
from deepspeed.utils import logger
from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler
from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced, all_to_all_quant_reduce
from deepspeed.runtime.utils import inf, get_global_norm, is_model_parallel_parameter
from deepspeed.runtime.zero.partition_parameters import *
from deepspeed.runtime.zero.config import ZeroStageEnum
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.runtime.zero.parameter_offload import DeepSpeedZeRoOffload
from deepspeed.ops.adam import DeepSpeedCPUAdam
from deepspeed.runtime.swap_tensor.partitioned_param_swapper import PartitionedParamStatus
from deepspeed.runtime.swap_tensor.partitioned_optimizer_swapper import PartitionedOptimizerSwapper
from deepspeed.runtime.swap_tensor.pipelined_optimizer_swapper import PipelinedOptimizerSwapper
from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, FP32_FLAT_GROUPS, PARTITION_COUNT, ZERO_STAGE
from deepspeed.accelerator import get_accelerator
# Toggle this to true to enable correctness test
# with gradient partitioning and without
pg_correctness_test = False
def print_rank_0(message, debug=False, force=False):
rank = dist.get_rank()
if rank == 0 and (debug or force):
logger.info(message)
# other variations
# - print for all ranks w/o interleaving
# printflock(f"[{rank}] {message}")
# - print to log file per rank
# log_rank_file(rank, message)
def input(msg):
return
def isclose(a, b, rtol=1e-09, atol=0.0):
return abs(a - b) <= max(rtol * max(abs(a), abs(b)), atol)
def lcm(x, y):
from math import gcd  # fractions.gcd was removed in Python 3.9
return x * y // gcd(x, y)
def move_to_cpu(tensor_list):
for tensor in tensor_list:
tensor.data = tensor.data.cpu()
INITIAL_MICRO_STEP_ID = -1
class DeepSpeedZeroOptimizer_Stage3(ZeROOptimizer):
"""
DeepSpeedZeroOptimizer designed to reduce the memory footprint
required for training large deep learning models.
For more details please see ZeRO: Memory Optimization Towards Training A Trillion Parameter Models
https://arxiv.org/abs/1910.02054
For usage examples, refer to TODO: DeepSpeed Tutorial
"""
def __init__(self,
module,
init_optimizer,
timers,
ds_config,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True,
contiguous_gradients=True,
reduce_bucket_size=500000000,
prefetch_bucket_size=50000000,
max_reuse_distance=1000000000,
max_live_parameters=1000000000,
param_persistence_threshold=100000,
model_persistence_threshold=sys.maxsize,
dp_process_group=None,
reduce_scatter=True,
overlap_comm=False,
offload_optimizer_config=None,
offload_param_config=None,
sub_group_size=1000000000000,
mpu=None,
clip_grad=0.0,
communication_data_type=torch.float16,
postscale_gradients=True,
gradient_predivide_factor=1.0,
gradient_accumulation_steps=1,
elastic_checkpoint=False,
aio_config=None,
all2all_process_group=None,
zero_hpz_partition_size=1,
zero_quantized_weights=False):
see_memory_usage("Stage 3 initialize beginning", force=True)
print_rank_0(f"initialized {__class__.__name__} with args: {locals()}", force=False)
if dist.get_rank() == 0:
logger.info(f"Reduce bucket size {reduce_bucket_size}")
logger.info(f"Prefetch bucket size {prefetch_bucket_size}")
# The fused optimizer does all the work. We need this layer for two reasons:
# 1. maintain the same user API as apex.fp16_utils
# 2. keep common stuff here in case we need to add a new fused optimizer later
# differences from apex.fp16_utils:
# - assume all model params in fp16
# - assume all params requires grad
# - flat by groups, not keeping state. TODO: remove state explicitly?
# - master grad and unflat master weight never exist. TODO: a way to save out unflat master?
if not get_accelerator().is_available():
raise SystemError("Cannot use fp16 without accelerator.")
self.optimizer = init_optimizer
# Use torch (un)flatten ops
self.flatten = _flatten_dense_tensors
self.unflatten = _unflatten_dense_tensors
self.dtype = self.optimizer.param_groups[0]['params'][0].dtype
self._global_grad_norm = 0.
self.custom_loss_scaler = False
self.external_loss_scale = None
self.optimizer_swapper = None
self.swap_optimizer = False
self.offload_optimizer = False
self.offload_optimizer_pin_memory = False
self.offload_optimizer_fast_init = False
self.offload_param = False
self.offload_param_pin_memory = False
self.params_in_nvme_and_cpu = False
self.max_params_in_cpu = 0
#num of ranks in a ZeRO param partitioning group
self.zero_hpz_partition_size = zero_hpz_partition_size
zpg = groups._get_zero_param_intra_parallel_group()
print_rank_0(f"ZeRO Stage 3 param partitioning group {self.zero_hpz_partition_size} {zpg}", force=False)
if self.zero_hpz_partition_size > 1 and zpg is None:
self._set_zero_group_parallelism()
zpg = groups._get_zero_param_intra_parallel_group()
self.parameter_offload = self.initialize_ds_offload(module=module,
timers=timers,
ds_config=ds_config,
overlap_comm=overlap_comm,
prefetch_bucket_size=prefetch_bucket_size,
max_reuse_distance=max_reuse_distance,
max_live_parameters=max_live_parameters,
param_persistence_threshold=param_persistence_threshold,
model_persistence_threshold=model_persistence_threshold,
offload_param_config=offload_param_config,
mpu=mpu,
zpg=zpg,
zero_quantized_weights=zero_quantized_weights)
self.persistent_parameters = self.parameter_offload.persistent_parameters
self._configure_offloading(offload_optimizer_config, offload_param_config)
self.module = module
self.elastic_checkpoint = elastic_checkpoint
self.inf_or_nan_tracker: Tensor = torch.zeros(1,
dtype=torch.bool,
device=get_accelerator().current_device_name(),
requires_grad=False)
self.deepspeed_adam_offload = (self.offload_optimizer and type(init_optimizer) == DeepSpeedCPUAdam)
self.device = get_accelerator().current_device_name() if not self.offload_optimizer else OffloadDeviceEnum.cpu
### streams used for overlapping computation with communication
self.reduce_and_partition_stream = get_accelerator().Stream() if overlap_comm else get_accelerator(
).default_stream()
############################################################################
self.n_caching_allocator_flushes = 0
#-------------Stage 3 Setup-------------------#
self.timers = timers
self.all2all_process_group = all2all_process_group
self.reduce_scatter = reduce_scatter
self.dp_process_group = dp_process_group
self.all2all_process_group = all2all_process_group
self.partition_count = dist.get_world_size(group=self.dp_process_group)
if mpu is None:
self.model_parallel_group = None
self.model_parallel_rank = 0
else:
self.model_parallel_group = mpu.get_model_parallel_group()
self.model_parallel_rank = mpu.get_model_parallel_rank()
self.overflow = False
self.clip_grad = clip_grad
self.communication_data_type = communication_data_type
self.gradient_predivide_factor = gradient_predivide_factor
self.postscale_gradients = postscale_gradients
self.gradient_accumulation_steps = gradient_accumulation_steps
self.micro_step_id = 0
self.reduce_bucket_size = int(reduce_bucket_size)
if self.all2all_process_group is not None:
assert self.all2all_process_group is not None and self.reduce_scatter == True, "when all_to_all_reduce is enabled, reduce_scatter should also be enabled for data type checks."
if self.reduce_scatter:
valid_reduce_scatter_dtypes = (torch.float16, torch.bfloat16, torch.float32)
assert self.communication_data_type in valid_reduce_scatter_dtypes, f"ZeRO-3 supports {valid_reduce_scatter_dtypes} communication_data_type with reduce scatter enabled. Got: '{self.communication_data_type}'"
assert self.gradient_predivide_factor == 1.0, "gradient_predivide_factor != 1.0 is not yet supported with ZeRO-3 with reduce scatter enabled"
assert self.postscale_gradients, "pre-scale gradients is not yet supported with ZeRO-3 with reduce scatter enabled"
# Holds the model parameters
# The param.data may not hold any meaningful data
# when the param's status is NOT_AVAILABLE or INFLIGHT
self.fp16_groups = []
# Hold partitioned parameters
self.fp16_partitioned_groups = []
# Holds a fused and flattened copy of the parameters
self.fp16_partitioned_groups_flat = []
self.fp16_partitioned_groups_flat_numel = []
#defragmented pinned memory
self.param_groups_fp16_flat_cpu_memory = []
#a single 32-bit partition of the parallel partitioned parameters
#that this process will update
self.fp32_partitioned_groups_flat = []
self.next_swappable_fp32_partitioned_groups = []
# number of elements per partition in each group
self.partition_size = []
self.all_reduce_print = False
self.prefetch_elements = int(prefetch_bucket_size)
self.contiguous_gradients = contiguous_gradients
# padding on each partition for alignment purposes
self.groups_padding = []
self.sub_group_size = sub_group_size
self.sub_group_to_group_id = {}
# Trainable parameters
self.trainable_param_groups = self._get_trainable_parameter_groups()
see_memory_usage("Before creating fp16 partitions", force=True)
self._create_fp16_partitions_with_defragmentation(self.trainable_param_groups)
num_fp16_subgroups = len(self.fp16_partitioned_groups_flat)
see_memory_usage(f"After creating fp16 partitions: {num_fp16_subgroups}", force=True)
# Optimizer tensor swapping
if self.swap_optimizer:
self._configure_tensor_swapping(offload_optimizer_config, aio_config)
self.is_gradient_accumulation_boundary: bool = True
self.param_reduce_events: Deque[get_accelerator().Event] = collections.deque()
# TODO. make this configurable via JSON
self.max_param_reduce_events: int = 2
self.param_dict = {}
# map between param_id and bool to specify if a param is in this partition
self.is_param_in_current_partition = {}
self.extra_large_param_to_reduce = None
self.grads_in_ipg_bucket = []
self.params_in_ipg_bucket = []
self.is_gradient_accumulation_boundary = True
self._release_ipg_buffers()
self.previous_reduced_grads = None
# simplified param id
self.param_id = {}
count = 0
for i, params_group in enumerate(self.fp16_groups):
for param in params_group:
unique_id = id(param)
self.param_id[unique_id] = count
self.param_dict[count] = param
count = count + 1
#Largest partitioned param
largest_partitioned_param_numel = max([
max([max(tensor.numel(), tensor.ds_numel) for tensor in fp16_partitioned_group])
for fp16_partitioned_group in self.fp16_partitioned_groups
])
print_rank_0(f'Largest partitioned param numel = {largest_partitioned_param_numel}', force=False)
self._setup_for_real_optimizer()
self.grad_position = {}
self.set_grad_positions()
if self.offload_optimizer:
self.norm_for_param_grads = {}
# stores if a partition has been reduced in this step
self.is_partition_reduced = {}
# stores if a grad in a partition has been computed or not
self.is_grad_computed = {}
# will store the averaged gradients required by this partition
self.averaged_gradients = {}
#creates backward hooks for gradient partitioning
#(each param is briefly all-gathered so the hook can be attached, then re-partitioned)
self.create_reduce_and_remove_grad_hooks()
#exit(0)
# we may have a way of fusing dynamic scale. Do not support for now
self.loss_scaler = CreateLossScaler(dtype=self.dtype,
static_loss_scale=static_loss_scale,
dynamic_scaling=dynamic_loss_scale,
dynamic_loss_args=dynamic_loss_args)
self.dynamic_loss_scale = self.loss_scaler.dynamic
self.debug_fp16_grads = [{} for _ in self.fp16_groups]
self._link_all_hp_params()
if dist.get_rank(group=self.dp_process_group) == 0:
see_memory_usage(f"After initializing ZeRO optimizer", force=True)
def destroy(self):
self.parameter_offload.destroy()
del self.__ipg_bucket_flat_buffer
def initialize_ds_offload(
self,
module,
timers,
ds_config,
overlap_comm,
prefetch_bucket_size,
max_reuse_distance,
max_live_parameters,
param_persistence_threshold,
model_persistence_threshold,
offload_param_config,
mpu,
zpg,
zero_quantized_weights,
):
return DeepSpeedZeRoOffload(module=module,
timers=timers,
ds_config=ds_config,
overlap_comm=overlap_comm,
prefetch_bucket_size=prefetch_bucket_size,
max_reuse_distance=max_reuse_distance,
max_live_parameters=max_live_parameters,
param_persistence_threshold=param_persistence_threshold,
model_persistence_threshold=model_persistence_threshold,
offload_param_config=offload_param_config,
mpu=mpu,
zero_param_parallel_group=zpg,
zero_quantized_weights=zero_quantized_weights)
def _get_trainable_parameter_groups(self):
param_groups = []
for param_group in self.optimizer.param_groups:
trainable_params = {"params": [p for p in param_group["params"] if p.requires_grad]}
param_groups.append(trainable_params)
return param_groups
def _set_zero_group_parallelism(self):
groups._create_zero_param_parallel_group(self.zero_hpz_partition_size)
def invalidate_secondary_tensor(self):
for fpg in self.fp16_groups:
for param in fpg:
if param.ds_secondary_tensor is not None:
param.ds_secondary_tensor = None
def _setup_for_real_optimizer(self):
see_memory_usage("Before creating fp32 partitions", force=True)
self._create_fp32_partitions()
see_memory_usage("After creating fp32 partitions", force=True)
dist.barrier()
# To support pipelined optimizer swapping
self._create_next_swappable_fp32_groups()
see_memory_usage("Before initializing optimizer states", force=True)
self.initialize_optimizer_states()
see_memory_usage("After initializing optimizer states", force=True)
dist.barrier()
if dist.get_rank() == 0:
logger.info(f"optimizer state initialized")
# IPG
if self.contiguous_gradients:
self.__ipg_bucket_flat_buffer: Tensor = torch.empty(self.reduce_bucket_size,
dtype=self.dtype,
device=get_accelerator().current_device_name())
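# Preallocate a single flat buffer that permanently holds this rank's gradient partition
# for every trainable parameter; each param then gets a fixed narrow() view into it via
# __param_id_to_grad_partition below.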
self.grad_partitions_flat_buffer = None
self.__param_id_to_grad_partition: Dict[int, Tensor] = {}
all_params = list(itertools.chain.from_iterable(self.fp16_groups))
self.grad_partitions_flat_buffer: Tensor = torch.zeros(sum(p.partition_numel() for p in all_params),
dtype=self.dtype,
device=self.device)
if self.offload_optimizer_pin_memory:
self.grad_partitions_flat_buffer = get_accelerator().pin_memory(self.grad_partitions_flat_buffer)
offset = 0
for param in all_params:
self.__param_id_to_grad_partition[param.ds_id] = self.grad_partitions_flat_buffer.narrow(
0, offset, param.partition_numel())
offset += param.partition_numel()
def _link_all_hp_params(self):
for p in self.module.parameters():
p._z3_optimizer = self
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
def get_lr(self):
"""Return the current learning rate."""
return self.optimizer.param_groups[0]["lr"]
# TODO. factor out to a utility outside of stage3
@staticmethod
def defragment(tensors: List[Tensor]) -> Tensor:
"""move provided tensors into a contiguous flat buffer, with some additional
measures taken to reduce memory fragmentation"""
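# Staging through a temporary CPU buffer lets us free the original fragmented device
# allocations and empty the caching allocator before re-allocating one contiguous
# device buffer and restoring the tensors as views into it.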
assert len(set(t.dtype for t in tensors)) == 1
assert len(set(t.device for t in tensors)) == 1
cpu_buffer = torch.empty(sum(p.numel() for p in tensors),
dtype=get_only_unique_item(t.dtype for t in tensors),
device="cpu")
tensor_infos: List[Tuple[Tensor, int, int]] = []
orig_device = get_only_unique_item(t.device for t in tensors)
offset = 0
for tensor in tensors:
tensor_numel = tensor.numel()
# move the tensor from device memory to host memory
cpu_buffer.narrow(0, offset, tensor_numel).copy_(tensor)
tensor.data = torch.empty(0, dtype=tensor.dtype, device=tensor.device)
# record some data so we can restore the device tensor later
tensor_infos.append((tensor, offset, tensor_numel))
offset += tensor_numel
gc.collect()
get_accelerator().empty_cache()
# copy tensors (now flattened and contiguous) back to GPU
device_buffer = cpu_buffer.to(orig_device)
# restore device tensors
for tensor, offset, tensor_numel in tensor_infos:
tensor.data = device_buffer.narrow(0, offset, tensor_numel)
return device_buffer
def _get_param_coordinator(self, training):
return self.parameter_offload.get_param_coordinator(training)
def _configure_offloading(self, offload_optimizer_config, offload_param_config):
###################### offload optimizer setup ##################################
if offload_optimizer_config is not None and offload_optimizer_config.device != OffloadDeviceEnum.none:
self.offload_optimizer = True
self.offload_optimizer_pin_memory = offload_optimizer_config.pin_memory
self.swap_optimizer = offload_optimizer_config.device == OffloadDeviceEnum.nvme
self.offload_optimizer_fast_init = offload_optimizer_config.fast_init
###################### offload param setup ##################################
if offload_param_config is not None and offload_param_config.device != OffloadDeviceEnum.none:
self.offload_param = True
self.offload_param_pin_memory = offload_param_config.pin_memory
self.params_in_nvme_and_cpu = offload_param_config.device == OffloadDeviceEnum.nvme
self.max_params_in_cpu = offload_param_config.max_in_cpu
print_rank_0(
f"FP16 params swapping is {self.params_in_nvme_and_cpu}, Max params in CPU is {self.max_params_in_cpu}",
force=False)
def _configure_tensor_swapping(self, offload_optimizer_config, aio_config):
nvme_swap_folder = os.path.join(offload_optimizer_config.nvme_path, 'zero_stage_3')
os.makedirs(nvme_swap_folder, exist_ok=True)
if dist.get_rank() == 0:
logger.info(f'Tensor Swapping: Adding optimizer tensors')
swapper_type = PipelinedOptimizerSwapper if offload_optimizer_config.pipeline else PartitionedOptimizerSwapper
self.optimizer_swapper = swapper_type(swap_config=offload_optimizer_config,
aio_config=aio_config,
base_folder=nvme_swap_folder,
optimizer=self.optimizer,
largest_numel=max(self.fp16_partitioned_groups_flat_numel),
device=self.device,
dtype=torch.float32,
timers=self.timers)
@property
def elements_in_ipg_bucket(self):
return sum(p.ds_numel for p in self.params_in_ipg_bucket)
def _move_to_flat_buffer(self, param_list, flat_buffer, avoid_copy=False):
'''If flat_buffer is None then the parameters in param_list are not copied into it,
because they exceed the max_params_in_cpu limit. Some of these parameters may already
be in CPU in unflattened buffers, or they may be on GPU or NVMe. Parameters on NVMe
are marked NOT_AVAILABLE and will be moved to CPU when they are needed during training.'''
if flat_buffer is None:
# this dst buffer is on NVMe, so skip this
return
start = 0
for param in param_list:
src = param.ds_tensor
dest = flat_buffer.narrow(0, start, src.ds_numel)
start = start + src.ds_numel
'''if the parameter was initialized in nvme then bring it to the destination buffer directly'''
if src.status == PartitionedParamStatus.NOT_AVAILABLE:
print_rank_0(
f"Swapping in {param.ds_id} with partition size {param.partition_numel()} permanently to CPU")
param.nvme_swapper.swap_into_buffer(param, dest)
src.data = dest.data
src.status = PartitionedParamStatus.AVAILABLE
else:
assert src.status == PartitionedParamStatus.AVAILABLE, "Partitioned Param must be available here"
if not avoid_copy:
dest.data.copy_(src.data)
src.data = dest.data
# Final location must be gpu/cpu in this case
param.ds_tensor.final_location = 'not-nvme'
def _create_param_groups_fp16_flat_cpu_memory(self):
aggregate_params_count = 0
for j, param_group in enumerate(self.trainable_param_groups):
params_in_group = sum([p.partition_numel() for p in param_group['params']])
flat_buffer_size = params_in_group
if self.params_in_nvme_and_cpu and \
aggregate_params_count + params_in_group > self.max_params_in_cpu:
flat_buffer_size = max(0, self.max_params_in_cpu - aggregate_params_count)
aggregate_params_count += params_in_group
if flat_buffer_size > 0:
print_rank_0(f"group {j} flat buffer size {flat_buffer_size}", force=False)
self.param_groups_fp16_flat_cpu_memory.append(get_accelerator().pin_memory(
torch.empty(int(flat_buffer_size), dtype=self.dtype)))
else:
print_rank_0(f"No flat buffer size. Param group size was {params_in_group}", force=False)
self.param_groups_fp16_flat_cpu_memory.append(torch.empty(1, dtype=self.dtype))
def _create_fp16_partitions_with_defragmentation(self, fp16_param_groups):
dist.barrier()
param_groups: List[List[Parameter]] = tuple(
self._create_fp16_sub_groups(param_group["params"]) for param_group in fp16_param_groups)
# bookkeeping related to param groups
for param_group_idx, param_group in enumerate(param_groups):
for sub_group in param_group:
sub_group_idx = len(self.fp16_groups)
# record sub group and partitions
self.fp16_groups.append(sub_group)
self.fp16_partitioned_groups.append([param.ds_tensor for param in sub_group])
# record sub group -> group mapping
self.sub_group_to_group_id[sub_group_idx] = param_group_idx
# record total elements of parameter partitions in sub group
self.fp16_partitioned_groups_flat_numel.append(sum(p.partition_numel() for p in sub_group))
# record padding required to align group to world size (only applies to last rank)
rank_requires_padding = dist.get_rank(
self.dp_process_group) == dist.get_world_size(self.dp_process_group) - 1
self.groups_padding.append([p.padding_size() if rank_requires_padding else 0 for p in sub_group])
# move parameters to flattened buffer
if not self.offload_param: # partitioned params remain in GPU during training
# move parameter partitions into a single contiguous flat buffer
parameter_partitions: List[Tensor] = []
for sub_group in self.fp16_groups:
for param in sub_group:
parameter_partitions.append(param.ds_tensor)
device_buffer = __class__.defragment(parameter_partitions)
# setup flat buffers per subgroup, these are each just sections of the
# contiguous flat buffer for all parameters that we created earlier
offset = 0
for sub_group in self.fp16_groups:
sub_group_numel = sum(param.partition_numel() for param in sub_group)
self.fp16_partitioned_groups_flat.append(device_buffer.narrow(0, offset, sub_group_numel))
offset += sub_group_numel
else: # partitioned params offloaded to CPU when not in use
# create a flat CPU memory allocation for each param group
self._create_param_groups_fp16_flat_cpu_memory()
for param_group_idx, param_group in enumerate(param_groups):
flat_offset = 0
for i, sub_group in enumerate(param_group):
total_elements = sum(p.partition_numel() for p in sub_group)
print_rank_0(f"Params in nvme and cpu {self.params_in_nvme_and_cpu}")
#Flat buffer may not be available for parameters that reside in NVME
if not self.params_in_nvme_and_cpu or flat_offset + total_elements <= self.param_groups_fp16_flat_cpu_memory[
param_group_idx].numel():
fp16_partitioned_group_flat = self.param_groups_fp16_flat_cpu_memory[param_group_idx].narrow(
0, flat_offset, total_elements)
print_rank_0(
f"Creating a flat buffer for subgroup {i} requiring {total_elements} elements, and cumulative CPU elements {flat_offset + total_elements}",
force=False)
elif self.params_in_nvme_and_cpu:
fp16_partitioned_group_flat = None
print_rank_0(f"No flat buffer for sub group {i} of {total_elements} elements", force=False)
else:
assert False, "Either params are in nvme, or they are in CPU memory. This code path should not be triggered. Please see you max_params_in_cpu and params_in_nvme configs"
self.fp16_partitioned_groups_flat.append(fp16_partitioned_group_flat)
flat_offset += total_elements
self._move_to_flat_buffer(sub_group,
fp16_partitioned_group_flat,
avoid_copy=not self.offload_param)
# if necessary, create a pinned memory buffer to be used for swapping out
# params to NVME after optimizer step
should_create_fp16_flat_reuse_buffer = any(flattened_partition_group is None
for flattened_partition_group in self.fp16_partitioned_groups_flat)
if should_create_fp16_flat_reuse_buffer:
max_partition_numel, largest_partition_numel = 0, None
for sub_group in self.fp16_groups:
total_elements = sum(t.partition_numel() for t in sub_group)
if total_elements > max_partition_numel:
largest_partition_numel = [t.ds_numel for t in sub_group]
max_partition_numel = total_elements
assert len(largest_partition_numel) > 0, f'Unexpected that largest partition is empty'
self.fp16_groups[0][0].nvme_swapper.reserve_partitioned_swap_space(largest_partition_numel)
def _swap_in_sub_group_to_flat_buffer(self, flat_buffer, sub_group_id):
offset = 0
elements_in_sub_group = sum([t.ds_numel for t in self.fp16_partitioned_groups[sub_group_id]])
assert (flat_buffer.numel() == elements_in_sub_group)
for param, partitioned_param in zip(self.fp16_groups[sub_group_id],
self.fp16_partitioned_groups[sub_group_id]):
dest = flat_buffer.narrow(0, offset, partitioned_param.ds_numel)
if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE:
print_rank_0(
f"Swapping in {param.ds_id} with elements {param.ds_numel} and partition {param.partition_numel()}"
)
param.nvme_swapper.swap_in([param], async_op=False)
dest.data.copy_(partitioned_param.data)
param.nvme_swapper.remove_partition_and_release_buffers([param])
print_rank_0(f"Swapping in {param.ds_id} done")
else:
dest.data.copy_(partitioned_param.data)
offset += partitioned_param.ds_numel
def _create_next_swappable_fp32_groups(self):
reverse_order_indices = [i for i in range(len(self.fp32_partitioned_groups_flat))]
reverse_order_indices.reverse()
next_group = None
for i in reverse_order_indices:
self.next_swappable_fp32_partitioned_groups.append(next_group)
if self._swappable_optimizer_subgroup(i):
next_group = self.fp32_partitioned_groups_flat[i]
self.next_swappable_fp32_partitioned_groups.reverse()
def _get_sub_group_partitions(self, sub_group_id):
sub_group_partitions = []
for param, partitioned_param in zip(self.fp16_groups[sub_group_id],
self.fp16_partitioned_groups[sub_group_id]):
if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE:
swap_path = param.nvme_swapper.get_path(param, True)
sub_group_partitions.append((partitioned_param, param.partition_numel(), swap_path))
else:
sub_group_partitions.append((partitioned_param, partitioned_param.ds_numel, None))
return sub_group_partitions
def _create_fp32_partitions(self):
cpu_memory_usage = 0
cpu_memory_sub_groups = 0
nvme_memory_usage = 0
num_swappable_partitions = 0
num_swap_from_nvme_partitions = 0
num_swap_from_cpu_partitions = 0
swap_from_nvme_memory_usage = 0
swap_from_cpu_memory_usage = 0
GIGA_BYTES = (1024**3)
swappable_fp32_tensors = []
swappable_fp16_src_tensors = []
nvme_fp16_partitions_info = []
nvme_fp16_num_elems = []
nvme_fp32_dest_tensors = []
fp32_element_size = torch.tensor([], dtype=torch.float32).element_size()
for i, tensor in enumerate(self.fp16_partitioned_groups_flat):
num_elements = self.fp16_partitioned_groups_flat_numel[i]
# a partition of the fp32 master weights that will be updated by this process
if self._swappable_optimizer_subgroup(i):
self.fp32_partitioned_groups_flat.append(torch.Tensor())
nvme_memory_usage += (fp32_element_size * num_elements)
num_swappable_partitions += 1
if self.params_in_nvme_and_cpu and tensor is None:
num_swap_from_nvme_partitions += 1
swap_from_nvme_memory_usage += (fp32_element_size * num_elements)
if self.offload_optimizer_fast_init:
sub_group_partitions = self._get_sub_group_partitions(i)
nvme_fp16_partitions_info.append(sub_group_partitions)
nvme_fp16_num_elems.append(num_elements)
nvme_fp32_dest_tensors.append(self.fp32_partitioned_groups_flat[i])
else:
unpinned_fp32_buffer = torch.empty(num_elements, device=self.device, dtype=torch.float)
self._swap_in_sub_group_to_flat_buffer(unpinned_fp32_buffer, i)
self.optimizer_swapper.initialize_parameters(parameters=[self.fp32_partitioned_groups_flat[i]],
src_tensors=[unpinned_fp32_buffer])
else:
num_swap_from_cpu_partitions += 1
swap_from_cpu_memory_usage += (fp32_element_size * num_elements)
swappable_fp32_tensors.append(self.fp32_partitioned_groups_flat[i])
swappable_fp16_src_tensors.append(self.fp16_partitioned_groups_flat[i])
else:
cpu_memory_usage += (fp32_element_size * num_elements)
cpu_memory_sub_groups += 1
if self.params_in_nvme_and_cpu and tensor is None:
unpinned_fp32_buffer = torch.empty(num_elements, device=self.device, dtype=torch.float)
self._swap_in_sub_group_to_flat_buffer(unpinned_fp32_buffer, i)
self.fp32_partitioned_groups_flat.append(unpinned_fp32_buffer)
else:
self.fp32_partitioned_groups_flat.append(self.fp16_partitioned_groups_flat[i].to(
self.device).clone().float().detach())
self.fp32_partitioned_groups_flat[i].requires_grad = True # keep this in case internal optimizer uses it
if len(swappable_fp32_tensors) > 0:
self.optimizer_swapper.initialize_parameters(parameters=swappable_fp32_tensors,
src_tensors=swappable_fp16_src_tensors)
if len(nvme_fp32_dest_tensors) > 0:
fp16_pinned_buffers = self.fp16_groups[0][0].nvme_swapper.reserve_available_buffers()
assert len(fp16_pinned_buffers) > 0
self.optimizer_swapper.initialize_from_swapped_fp16_params(fp16_partitions_info=nvme_fp16_partitions_info,
fp16_num_elems=nvme_fp16_num_elems,
fp16_pinned_buffers=fp16_pinned_buffers,
fp32_parameters=nvme_fp32_dest_tensors)
self.fp16_groups[0][0].nvme_swapper.release_reserved_buffers()
nvme_gigabytes = nvme_memory_usage / GIGA_BYTES
print_rank_0(f'Swappable FP32 Partitions: count={num_swappable_partitions} size={nvme_gigabytes:5.2f} GB',
force=False)
if self.params_in_nvme_and_cpu:
print_rank_0(
f'Swap from NVMe Partitions: count = {num_swap_from_nvme_partitions}, size = {swap_from_nvme_memory_usage/GIGA_BYTES:5.2f}GB',
force=False)
print_rank_0(
f'Swap from CPU Partitions: count = {num_swap_from_cpu_partitions}, size = {swap_from_cpu_memory_usage/GIGA_BYTES:5.2f}GB',
force=False)
cpu_memory_gigabytes = cpu_memory_usage / GIGA_BYTES
print_rank_0(f'In-Memory FP32 Partitions: count={cpu_memory_sub_groups} size={cpu_memory_gigabytes:5.2f} GB',
force=False)
# Clear for on-the-fly population before the optimizer step
for param_group in self.optimizer.param_groups:
param_group['params'] = []
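# Split a parameter group into sub-groups whose partitioned element counts total roughly
# sub_group_size; fp16/fp32 flat buffers, optimizer state, and NVMe swapping are then
# managed per sub-group.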
def _create_fp16_sub_groups(self, params_group):
params_group_numel = sum([param.partition_numel() for param in params_group])
sub_group_size = self.sub_group_size
if sub_group_size is None or sub_group_size >= params_group_numel:
return [params_group]
sub_groups = []
sub_group = []
local_sub_group_size = 0
for param in params_group:
sub_group.append(param)
local_sub_group_size += param.partition_numel()
if local_sub_group_size >= sub_group_size or id(param) == id(params_group[-1]):
sub_groups.append(sub_group)
sub_group = []
local_sub_group_size = 0
return sub_groups
def _release_ipg_buffers(self):
if self.contiguous_gradients:
self.ipg_buffer = None
def _optimizer_step(self, sub_group_id):
param_group_id = self.sub_group_to_group_id[sub_group_id]
fp32_param = self.fp32_partitioned_groups_flat[sub_group_id]
self.optimizer.param_groups[param_group_id]['params'] = [fp32_param]
self.optimizer.step()
self.optimizer.param_groups[param_group_id]['params'] = []
def _swappable_optimizer_subgroup(self, sub_group_id):
if not self.swap_optimizer:
return False
return self.optimizer_swapper.swappable_tensor(None,
numel=self.fp16_partitioned_groups_flat_numel[sub_group_id])
def _partitioned_params_swap_out(self, i):
offset = 0
fp32_param = self.fp32_partitioned_groups_flat[i]
assert fp32_param is not None, \
f'fp32 parameters of sub_group {i} is None'
swap_fp16_params = []
swap_fp32_params = []
for param, partitioned_param in zip(self.fp16_groups[i], self.fp16_partitioned_groups[i]):
src = fp32_param.narrow(0, offset, partitioned_param.ds_numel)
if partitioned_param.status == PartitionedParamStatus.AVAILABLE:
partitioned_param.data.copy_(src.data)
else:
swap_fp32_params.append(src)
swap_fp16_params.append(param)
offset += partitioned_param.ds_numel
if len(swap_fp16_params):
swap_fp16_params[0].nvme_swapper.swap_out_partitioned_params(dst_fp16_params=swap_fp16_params,
src_fp32_params=swap_fp32_params)
def initialize_optimizer_states(self):
num_subgroups = len(self.fp16_groups)
largest_numel = max([sum([p.ds_numel for p in psg]) for psg in self.fp16_partitioned_groups])
gradient_dtype = self.fp32_partitioned_groups_flat[0].dtype
gradient_buffer = torch.zeros(int(largest_numel), dtype=gradient_dtype, device=self.device)
timer_names = set()
# State initialization for the Adagrad optimizer occurs at construction as opposed to other optimizers
# which do lazy initialization of the state at the first call to step.
is_adagrad = isinstance(self.optimizer, torch.optim.Adagrad)
if self.swap_optimizer:
self.optimizer_swapper.init_timers()
INIT_OPTIMIZER_TIMER = 'init_optimizer_state'
timer_names.add(INIT_OPTIMIZER_TIMER)
self.start_timers([INIT_OPTIMIZER_TIMER])
for i, group in enumerate(self.fp16_groups):
swappable_optimizer_subgroup = self._swappable_optimizer_subgroup(i)
swappable_param_subgroup = self.fp16_partitioned_groups_flat[i] is None
num_elements = int(self.fp16_partitioned_groups_flat_numel[i])
see_memory_usage(
f'[Begin] Initialize optimizer states {i} / {num_subgroups} subgroups, num_elems: {num_elements}, swappable opt/param:{swappable_optimizer_subgroup}/{swappable_param_subgroup}',
force=False)
if swappable_optimizer_subgroup:
self._optimizer_states_and_gradient_swap_in(i, timer_names)
if self.offload_optimizer and not swappable_optimizer_subgroup:
subgroup_gradient_buffer = torch.zeros(num_elements, dtype=gradient_dtype, device=self.device)
if self.offload_optimizer_pin_memory:
subgroup_gradient_buffer = get_accelerator().pin_memory(subgroup_gradient_buffer)
self.fp32_partitioned_groups_flat[i].grad = subgroup_gradient_buffer
else:
self.fp32_partitioned_groups_flat[i].grad = gradient_buffer.narrow(0, 0, num_elements)
# Initialize the optimizer states with the flattened fp32 partition.
if not is_adagrad:
self._optimizer_step(i)
if swappable_param_subgroup:
self._partitioned_params_swap_out(i)
if swappable_optimizer_subgroup:
self._optimizer_states_and_gradient_swap_out(i, timer_names)
see_memory_usage(
f'[End] Initialize optimizer states {i} / {num_subgroups} subgroups, num_elems: {num_elements}, swappable opt/param:{swappable_optimizer_subgroup}/{swappable_param_subgroup}',
force=False)
# Initialize the optimizer states with the flattened fp32 partition.
if is_adagrad:
self.optimizer = torch.optim.Adagrad(self.fp32_partitioned_groups_flat, **self.optimizer.defaults)
self.stop_timers([INIT_OPTIMIZER_TIMER])
self.log_timers(timer_names)
if self.swap_optimizer:
self.optimizer_swapper.log_timers()
if not self.offload_optimizer:
for group in self.fp32_partitioned_groups_flat:
group.grad = None
# Reset steps
return
#########################################################################
#########################ZeRO Partition Gradients########################
#########################################################################
def get_first_param_index(self, group_id, param_group, partition_id):
for index, param in enumerate(param_group):
param_id = self.get_param_id(param)
if partition_id in self.param_to_partition_ids[group_id][param_id]:
return index
return None
def initialize_gradient_partitioning_data_structures(self):
total_partitions = dist.get_world_size(group=self.dp_process_group)
for i, param_group in enumerate(self.fp16_groups):
self.param_to_partition_ids[i] = {}
self.is_partition_reduced[i] = {}
self.total_grads_in_partition[i] = {}
self.remaining_grads_in_partition[i] = {}
self.is_grad_computed[i] = {}
self.grad_partition_insertion_offset[i] = {}
self.grad_start_offset[i] = {}
self.first_param_index_in_partition[i] = {}
for partition_id in range(total_partitions):
self.is_grad_computed[i][partition_id] = {}
self.grad_partition_insertion_offset[i][partition_id] = {}
self.grad_start_offset[i][partition_id] = {}
self.initialize_gradient_partition(i, param_group, partition_id)
self.is_partition_reduced[i][partition_id] = False
self.first_param_index_in_partition[i][partition_id] = self.get_first_param_index(
i, param_group, partition_id)
@instrument_w_nvtx
def independent_gradient_partition_epilogue(self):
self.report_ipg_memory_usage(f"In ipg_epilogue before reduce_ipg_grads", 0)
self.__reduce_and_partition_ipg_grads()
self.report_ipg_memory_usage(f"In ipg_epilogue after reduce_ipg_grads", 0)
self.reduce_and_partition_stream.synchronize()
#in case of cpu offload, averaged gradients are already in fp32_partitioned_groups_flat.grad
#TODO: use a similar code path for both cpu_offload and non-cpu offload
if not self.offload_optimizer:
for i, sub_group in enumerate(self.fp16_groups):
self.averaged_gradients[i] = [
self.__param_id_to_grad_partition[param.ds_id]
if param.requires_grad else torch.zeros_like(param.ds_tensor) for param in sub_group
]
# self.averaged_gradients[i] = self.get_flat_partition(
# self.fp16_groups[i],
# 0,
# self.fp32_partitioned_groups_flat[i].numel(),
# return_tensor_list=True)
# this method gets called after every backward. need to increment
# here because if it gets incremented in backward() the micro step
# id will be off by one when we do the reduce and partition at the
# start of this method.
# TODO. make this less error prone
self.micro_step_id += 1
def overlapping_partition_gradients_reduce_epilogue(self):
self.independent_gradient_partition_epilogue()
def create_reduce_and_remove_grad_hooks(self):
print_rank_0(f'[Begin] Create gradient reduction hooks')
self.grad_accs = []
for i, param_group in enumerate(self.fp16_groups):
for param in param_group:
if param.requires_grad:
#print_rank_0(f" Before all gather {param.device}, {param.shape}")
print_rank_0(f"Before all gather {param.device}, {param.shape}", force=False)
# The hook must be created on the un-partitioned parameter
param.all_gather()
#print(f"After all gather {param.device}, {param.shape}")
def wrapper(param, i):
param_tmp = param.expand_as(param)
grad_acc = param_tmp.grad_fn.next_functions[0][0]
@instrument_w_nvtx
def reduce_partition_and_remove_grads(*notneeded):
self.reduce_ready_partitions_and_remove_grads(param, i)
grad_acc.register_hook(reduce_partition_and_remove_grads)
self.grad_accs.append(grad_acc)
#print(f"param grad fn {param.expand_as(param).grad_fn}")
wrapper(param, i)
# Partition the parameter after creating the hook
param.partition()
print_rank_0(f'[End] Create gradient reduction hooks')
def get_param_id(self, param):
unique_id = id(param)
return self.param_id[unique_id]
def report_ipg_memory_usage(self, tag, param_elems):
elem_count = self.elements_in_ipg_bucket + param_elems
percent_of_bucket_size = (100.0 * elem_count) // self.reduce_bucket_size
see_memory_usage(
f"{tag}: elems in_bucket {self.elements_in_ipg_bucket} param {param_elems} max_percent {percent_of_bucket_size}",
force=False)
###############Independent Partition Gradient ########################
def reduce_independent_p_g_buckets_and_remove_grads(self, param, i):
#print_rank_0(f"Inside reduce ipg buckets. {debug_param2name_id_shape(param)}, ipg elements {self.elements_in_ipg_bucket}, reduce bucket size {self.reduce_bucket_size}", force=True)
# Because the ipg bucket is initialized with a random place holder tensor, we must
# explicitly check that the bucket has any real data in it (self.elements_in_ipg_bucket >
# 0). Otherwise if the incoming param.ds_numel is large, this branch may get triggered on
# garbage data and `self.average_tensor()` will crash because its params_to_reduce will be
# empty, while reduction_list will have that garbage data.
if self.elements_in_ipg_bucket + param.ds_numel > self.reduce_bucket_size and self.elements_in_ipg_bucket > 0:
self.report_ipg_memory_usage("In ipg_remove_grads before reduce_ipg_grads", param.ds_numel)
self.__reduce_and_partition_ipg_grads()
self.__add_grad_to_ipg_bucket(param)
@instrument_w_nvtx
@torch.no_grad()
def __add_grad_to_ipg_bucket(self, param: Parameter) -> None:
self.reduce_and_partition_stream.wait_stream(get_accelerator().default_stream())
if self.contiguous_gradients and self.elements_in_ipg_bucket + param.grad.numel() < self.reduce_bucket_size:
# move the gradient to a contiguous buffer
with get_accelerator().stream(self.reduce_and_partition_stream):
# move the parameter's gradient to the contiguous flat buffer
new_grad_tensor = self.__ipg_bucket_flat_buffer.narrow(0, self.elements_in_ipg_bucket,
param.grad.numel()).view_as(param.grad)
new_grad_tensor.copy_(param.grad, non_blocking=True)
param.grad.record_stream(get_accelerator().current_stream())
param.grad.data = new_grad_tensor
self.params_in_ipg_bucket.append(param)
@instrument_w_nvtx
@torch.no_grad()
def __reduce_and_partition_ipg_grads(self, safe_mode: bool = False) -> None:
if not self.params_in_ipg_bucket:
return
for param in self.params_in_ipg_bucket:
if param.grad.numel() != param.ds_numel:
raise RuntimeError(f"{param.grad.numel()} != {param.ds_numel} Cannot reduce scatter "
f"gradients whose size is not same as the params")
assert len(set(p.ds_id for p in self.params_in_ipg_bucket)) == len(self.params_in_ipg_bucket)
while self.param_reduce_events and self.param_reduce_events[0].query():
self.param_reduce_events.popleft()
if len(self.param_reduce_events) > self.max_param_reduce_events:
self.param_reduce_events.popleft().synchronize()
with get_accelerator().stream(self.reduce_and_partition_stream):
if safe_mode:
assert_ints_same_as_other_ranks([p.ds_id for p in self.params_in_ipg_bucket])
if self.contiguous_gradients and self.elements_in_ipg_bucket <= self.reduce_bucket_size and not self.reduce_scatter:
grad_bucket = self.__ipg_bucket_flat_buffer.narrow(0, 0, self.elements_in_ipg_bucket)
grad_partitions = self.__avg_scatter_contiguous_grads(grad_bucket)
else:
self.params_in_ipg_bucket.sort(key=lambda p: p.ds_id)
grad_partitions = self.__avg_scatter_grads(self.params_in_ipg_bucket)
self.partition_grads(self.params_in_ipg_bucket, grad_partitions)
self.params_in_ipg_bucket.clear()
event = get_accelerator().Event()
event.record()
self.param_reduce_events.append(event)
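# Average gradients that were packed into the contiguous IPG buffer: the whole buffer is
# pre-divided by the world size and all-reduced once, then each parameter's chunk that
# belongs to this rank is sliced out, zero-padding the final chunk where needed.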
@instrument_w_nvtx
def __avg_scatter_contiguous_grads(self, buffer_to_reduce: Tensor) -> List[Tensor]:
dtype = buffer_to_reduce.dtype
if self.communication_data_type == self.dtype:
buffer_to_reduce = buffer_to_reduce.to(self.communication_data_type)
if self.postscale_gradients and self.gradient_predivide_factor != 1.0:
buffer_to_reduce = buffer_to_reduce.div_(self.gradient_predivide_factor)
world_sz = dist.get_world_size(self.dp_process_group)
rank = dist.get_rank(self.dp_process_group)
buffer_to_reduce.div_(world_sz)
dist.all_reduce(buffer_to_reduce, group=self.dp_process_group)
if self.postscale_gradients and self.gradient_predivide_factor != world_sz:
buffer_to_reduce = buffer_to_reduce.mul(self.gradient_predivide_factor)
if self.communication_data_type != self.dtype:
buffer_to_reduce = buffer_to_reduce.to(self.dtype)
grad_partitions = []
grad_offset_in_buffer = 0
for param in self.params_in_ipg_bucket:
grad = param.grad
chunk_sz = math.ceil(grad.numel() / world_sz)
start_offset = grad_offset_in_buffer + min(rank * chunk_sz, grad.numel())
end_offset = grad_offset_in_buffer + min(rank * chunk_sz + chunk_sz, grad.numel())
partition = buffer_to_reduce[start_offset:end_offset]
if param.partition_numel() != partition.numel():
padded_partition = torch.zeros(param.partition_numel(), device=grad.device, dtype=grad.dtype)
if partition.numel() > 0:
padded_partition[:partition.numel()] = partition
grad_partitions.append(padded_partition)
else:
grad_partitions.append(partition)
grad_offset_in_buffer += grad.numel()
return grad_partitions
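    # A worked example of the chunking above (illustrative only; the sizes are
    # hypothetical): with world_sz=4 and a gradient of 10 elements, chunk_sz is
    # ceil(10/4)=3, so rank 0 takes elements [0:3), rank 1 [3:6), rank 2 [6:9)
    # and rank 3 only [9:10). If rank 3's param.partition_numel() is 3, its
    # 1-element slice is copied into a zero-padded buffer of length 3.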
@instrument_w_nvtx
def __avg_scatter_grads(self, params_to_reduce: List[Parameter]) -> List[Tensor]:
"""average gradients and scatter partitions across ranks"""
full_grads_for_rank = [p.grad for p in params_to_reduce]
if self.communication_data_type != self.dtype:
full_grads_for_rank = [g.to(self.communication_data_type) for g in full_grads_for_rank]
if self.postscale_gradients and self.gradient_predivide_factor != 1.0:
full_grads_for_rank = [g.div(self.gradient_predivide_factor) for g in full_grads_for_rank]
local_world_size = get_accelerator().device_count()
global_world_size = dist.get_world_size()
num_nodes = global_world_size // local_world_size
if self.all2all_process_group is not None and num_nodes > 1:
grad_partitions_for_rank = all_to_all_quant_reduce(full_grads_for_rank, self.all2all_process_group)
else:
grad_partitions_for_rank = reduce_scatter_coalesced(full_grads_for_rank, self.dp_process_group)
if self.postscale_gradients and self.gradient_predivide_factor != 1.0 and self.gradient_predivide_factor != dist.get_world_size(
self.dp_process_group):
grad_partitions_for_rank = [g.mul(self.gradient_predivide_factor) for g in grad_partitions_for_rank]
if self.communication_data_type != self.dtype:
grad_partitions_for_rank = [g.to(self.dtype) for g in grad_partitions_for_rank]
return grad_partitions_for_rank
def set_grad_positions(self):
for i, group in enumerate(self.fp16_groups):
current_offset = 0
for param in group:
param_id = self.get_param_id(param)
num_elements = param.partition_numel()
self.grad_position[param_id] = [int(i), int(current_offset), int(num_elements)]
#print(f"param id {param_id} i:{i}, ds_tensor {num_elements} numel {param.numel()}")
current_offset += num_elements
see_memory_usage(f"After Set Grad positions", force=False)
def _constant_buffered_norm2(self, input, buffer_size=250000000):
norm = None
for part in input.view(-1).split(buffer_size):
if norm is None:
norm = part.data.double().norm(2)**2.0
else:
norm += part.data.double().norm(2)**2.0
return norm**0.5
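    # A minimal sketch of the buffered-norm idea above (assumes a hypothetical
    # tensor `t`; not part of the optimizer): summing squared chunk norms and
    # taking a square root matches the full L2 norm while bounding the size of
    # the temporary fp64 copy to `buffer_size` elements:
    #
    #   t = torch.randn(1_000_000)
    #   buffered = sum(c.double().norm(2) ** 2 for c in t.view(-1).split(250_000)) ** 0.5
    #   torch.allclose(buffered, t.double().norm(2))  # True up to fp64 rounding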
def set_norm_for_param_grad_in_gpu(self, param):
param_id = self.get_param_id(param)
#self.norm_for_param_grads[param_id] = param.grad.data.double().norm(2)
#Using a more memory efficient version
self.norm_for_param_grads[param_id] = self._constant_buffered_norm2(param.grad)
def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param, fp32_grad_tensor):
with get_accelerator().stream(self.copy_grad_stream):
param_id = self.get_param_id(param)
src_tensor = param.grad.view(-1).float()
#print(f"src_tensor {src_tensor.size()} and fp32 grad {fp32_grad_tensor.size()}")
fp32_grad_tensor.copy_(src_tensor, non_blocking=True)
param.grad = None
def complete_grad_norm_calculation_for_cpu_offload(self, params):
total_norm = 0.0
norm_type = 2.0
for p in params:
if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
param_id = self.get_param_id(p)
if param_id in self.norm_for_param_grads.keys():
param_norm = self.norm_for_param_grads[param_id]
total_norm += param_norm.item()**2
# Sum across all model parallel GPUs.
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group)
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM)
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
@instrument_w_nvtx
def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None:
offload_fp32_gradients = {}
offload_fp32_offsets = {}
buffers = []
for param, grad_partition in zip(params_to_release, grad_partitions):
contains_real_data = param.partition_numel() * dist.get_rank(self.dp_process_group) < param.ds_numel
if not contains_real_data:
# this grad partition is empty - don't need to do anything
param.grad = None
continue
# move or accumulate gradient partition to target buffer
grad_buffer = self.__param_id_to_grad_partition[param.ds_id].narrow(0, 0, grad_partition.numel())
buffers.append(grad_buffer)
if self.micro_step_id == 0: # don't accumulate
grad_buffer.copy_(grad_partition, non_blocking=True)
# ensure grad buffer is a CUDA buffer to speed up the next few
# operations and so it can be used asynchronously
grad_buffer = grad_buffer.to(grad_partition.device, non_blocking=True)
elif get_accelerator().on_accelerator(grad_buffer):
grad_buffer.add_(grad_partition)
else:
# if dst is CPU, copy first to src device, do the addition
# there, then move back to dst. adding directly to cpu is very slow
cuda_grad_buffer = grad_buffer.to(grad_partition.device, non_blocking=True)
cuda_grad_buffer.add_(grad_partition)
grad_buffer.copy_(cuda_grad_buffer, non_blocking=True)
# ensure grad buffer is a CUDA buffer to speed up the next few
# operations and so it can be used asynchronously
grad_buffer = cuda_grad_buffer
# offload the gradient partition if applicable
if self.offload_optimizer:
i, dest_offset, _ = self.grad_position[self.get_param_id(param)]
if self.is_gradient_accumulation_boundary:
self.norm_for_param_grads[self.get_param_id(param)] = self._constant_buffered_norm2(grad_buffer)
if self._swappable_optimizer_subgroup(i):
if not i in offload_fp32_gradients.keys():
offload_fp32_gradients[i] = []
offload_fp32_offsets[i] = []
offload_fp32_gradients[i].append(grad_buffer.float())
offload_fp32_offsets[i].append(dest_offset)
else:
fp32_grad_tensor = self.fp32_partitioned_groups_flat[i].grad.narrow(
0, dest_offset, grad_buffer.numel())
fp32_grad_tensor.copy_(grad_buffer)
# free the gradient
param.grad.record_stream(get_accelerator().current_stream())
param.grad = None
if self.offload_optimizer and self.swap_optimizer:
for i in offload_fp32_gradients.keys():
self.optimizer_swapper.swap_out_gradients(parameter=self.fp32_partitioned_groups_flat[i],
gradient_offsets=offload_fp32_offsets[i],
gradient_tensors=offload_fp32_gradients[i])
return buffers
def reduce_ready_partitions_and_remove_grads(self, param, i):
#print_rank_0(f"Backward {debug_param2name_id_shape(param)}", force=True)
self.reduce_independent_p_g_buckets_and_remove_grads(param, i)
def zero_reduced_gradients(self, partition_id, i):
def are_all_related_partitions_reduced(params_id):
for partition_id in self.param_to_partition_ids[i][params_id]:
if not self.is_partition_reduced[i][partition_id]:
return False
return True
for params_id in self.is_grad_computed[i][partition_id]:
if are_all_related_partitions_reduced(params_id):
self.param_dict[params_id].grad = None
def flatten_and_print(self, message, tensors, start=0, n=5):
flatten_tensor = self.flatten(tensors)
def print_func():
logger.info(flatten_tensor.contiguous().view(-1).narrow(0, start, n))
self.sequential_execution(print_func, message)
def get_grads_to_reduce(self, i, partition_id):
def get_reducible_portion(key):
grad = self.param_dict[key].grad
total_elements = grad.numel()
start = self.grad_start_offset[i][partition_id][key]
num_elements = min(total_elements - start,
self.partition_size[i] - self.grad_partition_insertion_offset[i][partition_id][key])
if not pg_correctness_test:
if num_elements == total_elements:
return grad
else:
return grad.contiguous().view(-1).narrow(0, int(start), int(num_elements))
else:
if num_elements == total_elements:
return grad.clone()
else:
return grad.clone().contiguous().view(-1).narrow(0, int(start), int(num_elements))
grads_to_reduce = []
for key in self.is_grad_computed[i][partition_id]:
grad = get_reducible_portion(key)
grads_to_reduce.append(grad)
return grads_to_reduce
def sequential_execution(self, function, message, group=None):
if group is None:
group = self.dp_process_group
if dist.get_rank(group=group) == 0:
logger.info(message)
for id in range(dist.get_world_size(group=group)):
if id == dist.get_rank(group=group):
function()
dist.barrier(group=group)
def set_none_gradients_to_zero(self, i, partition_id):
for param_id in self.is_grad_computed[i][partition_id]:
param = self.param_dict[param_id]
if param.grad is None:
                param.grad = torch.zeros_like(param)
######################Reduction Related Methods##############################
def allreduce_bucket(self, bucket, rank=None, log=None):
rank = None
tensor = self.flatten(bucket)
tensor_to_allreduce = tensor
if pg_correctness_test:
communication_data_type = torch.float32
else:
communication_data_type = self.communication_data_type
if communication_data_type != tensor.dtype:
tensor_to_allreduce = tensor.to(communication_data_type)
tensor_to_allreduce.div_(dist.get_world_size(group=self.dp_process_group))
if rank is None:
# "All Reducing"
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
else:
global_rank = dist.get_global_rank(self.dp_process_group, rank)
dist.reduce(tensor_to_allreduce, global_rank, group=self.dp_process_group)
if communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce:
if rank is None or rank == dist.get_rank(group=self.dp_process_group):
tensor.copy_(tensor_to_allreduce)
return tensor
# if rank is specified do a reduction instead of an allreduce
def allreduce_and_copy(self, small_bucket, rank=None, log=None):
with get_accelerator().stream(self.reduction_stream):
allreduced = self.allreduce_bucket(small_bucket, rank=rank, log=log)
if rank is None or rank == dist.get_rank(group=self.dp_process_group):
for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):
buf.copy_(synced)
def allreduce_no_retain(self, bucket, numel_per_bucket=500000000, rank=None, log=None):
small_bucket = []
numel = 0
for tensor in bucket:
small_bucket.append(tensor)
numel = numel + tensor.numel()
if numel > numel_per_bucket:
self.allreduce_and_copy(small_bucket, rank=rank, log=None)
small_bucket = []
if len(small_bucket) > 0:
self.allreduce_and_copy(small_bucket, rank=rank, log=log)
#############################################################################
#############################################################################
#############################################################################
# views the tensor as multiple partitions and returns
# those partitions
def get_data_parallel_partitions(self, tensor):
partitions = []
dp = dist.get_world_size(group=self.dp_process_group)
dp_id = dist.get_rank(group=self.dp_process_group)
total_num_elements = tensor.numel()
base_size = total_num_elements // dp
remaining = total_num_elements % dp
start = 0
for id in range(dp):
partition_size = base_size
if id < remaining:
partition_size = partition_size + 1
partitions.append(tensor.narrow(0, start, partition_size))
start = start + partition_size
return partitions
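    # Worked example (hypothetical sizes): a tensor of 10 elements partitioned
    # across dp=4 ranks gives base_size=2 and remaining=2, so ranks 0 and 1 each
    # receive 3 elements and ranks 2 and 3 receive 2, i.e. views starting at
    # offsets 0, 3, 6 and 8. The views share storage with the input tensor.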
def get_partition_info(self, tensor_list, partition_size, partition_id):
params_in_partition = []
params_not_in_partition = []
start_index = partition_size * partition_id
end_index = partition_size * (partition_id + 1)
current_index = 0
first_offset = 0
for tensor in tensor_list:
tensor_size = tensor.numel()
if (current_index >= start_index and current_index < end_index):
params_in_partition.append(tensor)
elif start_index > current_index and start_index < (current_index + tensor_size):
params_in_partition.append(tensor)
assert (first_offset == 0
                        ), "This can happen at most once since this must be the first tensor in the partition"
first_offset = start_index - current_index
else:
params_not_in_partition.append(tensor)
current_index = current_index + tensor_size
return params_in_partition, params_not_in_partition, first_offset
@instrument_w_nvtx
def zero_grad(self, set_to_none=False):
"""
Zero FP16 parameter grads.
"""
self.micro_step_id = 0
# FP32 grad should never exist.
# For speed, set model fp16 grad to None by default
for group in self.fp16_groups:
for p in group:
if set_to_none:
if p.grad is not None and get_accelerator().on_accelerator(p.grad):
p.grad.record_stream(get_accelerator().current_stream())
p.grad = None
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def _model_parallel_all_reduce(self, tensor, op):
""" Perform all reduce within model parallel group, if any.
"""
if self.model_parallel_group is None:
pass
else:
dist.all_reduce(tensor=tensor, op=op, group=self.model_parallel_group)
@instrument_w_nvtx
def get_grad_norm_direct(self, gradients, params, norm_type=2):
"""Clips gradient norm of an iterable of parameters.
        This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ with
        added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(g.data.abs().max() for g in gradients)
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=self.dp_process_group)
# Take max across all GPUs.
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.MAX)
total_norm = total_norm_cuda[0].item()
else:
# if dist.get_rank() == 0:
# logger.info(f"Total Norm beginning {total_norm}")
grad_norms = []
for g, p in zip(gradients, params):
if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
grad_norms.append(g.to(get_accelerator().device_name(), non_blocking=True).double().norm(2))
# Sum across all model parallel GPUs.
if len(grad_norms) == 0:
# FIX https://github.com/microsoft/DeepSpeed/issues/3564
total_norm_cuda = torch.tensor(0,
dtype=gradients[0].dtype).to(get_accelerator().device_name()).double()
else:
total_norm_cuda = torch.sum(torch.pow(torch.stack(grad_norms), 2))
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group)
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM)
total_norm = total_norm_cuda.item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
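    # Worked example of the 2-norm reduction above (hypothetical values): if the
    # local sums of squared gradient norms are 9.0 on rank 0 and 16.0 on rank 1,
    # the all-reduce yields 25.0 and total_norm = 25.0 ** (1/2) = 5.0, exactly the
    # norm of the concatenated gradient vector.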
# creates a flat fused tensor from the tensor list starting at the first_offset
# in the first tensor of the list. If there are not enough elements in the tensor
# list then the flat tensor will be padded with zeros
def get_flat_partition(self, tensor_list, first_offset, partition_size, return_tensor_list=False):
flat_tensor_list = []
current_size = 0
for i, tensor in enumerate(tensor_list):
if tensor.grad is None:
tensor.grad = torch.zeros_like(tensor)
tensor = tensor.grad
num_elements = tensor.numel()
tensor_offset = 0
# we need to offset to get to the right element
if i == 0 and first_offset > 0:
tensor_offset = first_offset
num_elements = num_elements - tensor_offset
# we dont need all elements of the tensor
if num_elements > (partition_size - current_size):
num_elements = partition_size - current_size
# we need a narrow view of the tensor based on the tensor offset and number of elements that
# we need from this tensor
if tensor_offset > 0 or num_elements < tensor.numel():
flat_tensor_list.append(tensor.contiguous().view(-1).narrow(0, int(tensor_offset), int(num_elements)))
else:
flat_tensor_list.append(tensor)
current_size = current_size + num_elements
# this means its the last partition and does not align with the dp boundary. We need to pad before flattening
if current_size < partition_size:
flat_tensor_list.append(
torch.zeros(int(partition_size - current_size),
dtype=tensor_list[0].dtype,
device=tensor_list[0].device))
if return_tensor_list:
return flat_tensor_list
return self.flatten(flat_tensor_list)
def free_grad_in_param_list(self, param_list):
for p in param_list:
p.grad = None
def reset_cpu_buffers(self):
self.norm_for_param_grads = {}
def log_timers(self, timer_names):
if self.timers is None:
return
self.timers.log(names=list(timer_names))
def start_timers(self, timer_names):
if self.timers is None:
return
for name in timer_names:
self.timers(name).start()
def stop_timers(self, timer_names):
if self.timers is None:
return
for name in timer_names:
self.timers(name).stop()
def _pre_step(self):
self.micro_step_id = 0
print_rank_0(f"Inside Step function")
see_memory_usage(f"In step before checking overflow", force=False)
print_rank_0("Finished Tracing at Beginning of Step")
self._get_param_coordinator(training=True).hierarchy = 0
print_rank_0("Finished Tracing at Beginning of Step")
@instrument_w_nvtx
def _get_norm_groups(self):
norm_groups = []
for i, group in enumerate(self.fp16_groups):
if self.offload_optimizer:
norm_groups.append(self.complete_grad_norm_calculation_for_cpu_offload(self.fp16_groups[i]))
else:
norm_groups.append(self.get_grad_norm_direct(self.averaged_gradients[i], self.fp16_groups[i]))
return norm_groups
@instrument_w_nvtx
def _prepare_fp32_grad_for_sub_group(self, sub_group_id):
partition_id = dist.get_rank(group=self.dp_process_group)
single_grad_partition = self.flatten(self.averaged_gradients[sub_group_id]).to(
self.fp32_partitioned_groups_flat[sub_group_id].dtype)
        assert single_grad_partition.numel() == self.fp32_partitioned_groups_flat[sub_group_id].numel(), \
            "averaged gradients have different number of elements than the partition size {} {} {} {}".format(
                single_grad_partition.numel(), self.fp32_partitioned_groups_flat[sub_group_id].numel(), sub_group_id, partition_id)
self.fp32_partitioned_groups_flat[sub_group_id].grad = single_grad_partition
# release all the gradient since we have already created a necessary copy in dp_grad_partition
self.zero_grad(set_to_none=True)
for grad in filter(lambda g: get_accelerator().on_accelerator(g), self.averaged_gradients[sub_group_id]):
grad.record_stream(get_accelerator().current_stream())
self.averaged_gradients[sub_group_id] = None
@instrument_w_nvtx
def _prepare_sub_group(self, sub_group_id, timer_names=set()):
see_memory_usage(f'Before prepare optimizer sub group {sub_group_id}', force=False)
if self._swappable_optimizer_subgroup(sub_group_id):
self._optimizer_states_and_gradient_swap_in(sub_group_id, timer_names)
elif not self.offload_optimizer:
self._prepare_fp32_grad_for_sub_group(sub_group_id)
see_memory_usage(f'After prepare optimizer sub group {sub_group_id}', force=False)
def _optimizer_states_and_gradient_swap_in(self, sub_group_id, timer_names=set()):
param_length = self.fp16_partitioned_groups_flat_numel[sub_group_id]
fp32_param_id = id(self.fp32_partitioned_groups_flat[sub_group_id])
assert self._swappable_optimizer_subgroup(sub_group_id), \
f'Parameter {fp32_param_id} of numel={param_length} is not swappable'
OPTIMIZER_SWAP_IN_STATE = 'optimizer_swap_in_state'
see_memory_usage(f'pre-step Before swapping in optimizer tensors {sub_group_id}', force=False)
self.start_timers([OPTIMIZER_SWAP_IN_STATE])
self.optimizer_swapper.swap_in_optimizer_state(
parameter=self.fp32_partitioned_groups_flat[sub_group_id],
async_parameter=self.next_swappable_fp32_partitioned_groups[sub_group_id])
self.stop_timers([OPTIMIZER_SWAP_IN_STATE])
timer_names.add(OPTIMIZER_SWAP_IN_STATE)
see_memory_usage(f'pre-step After swapping in optimizer tensors {sub_group_id}', force=False)
@instrument_w_nvtx
def _release_sub_group(self, sub_group_id, timer_names=set()):
see_memory_usage(f'Before release optimizer sub group {sub_group_id}', force=False)
# get rid of the fp32 gradients. Not needed anymore
if not self.offload_optimizer:
self.fp32_partitioned_groups_flat[sub_group_id].grad = None
if self._swappable_optimizer_subgroup(sub_group_id):
self._optimizer_states_and_gradient_swap_out(sub_group_id, timer_names)
see_memory_usage(f'After release optimizer sub group {sub_group_id}', force=False)
# create a flat tensor aligned at the alignment boundary
@instrument_w_nvtx
def flatten_dense_tensors_aligned(self, tensor_list, alignment):
num_elements = 0
for tens in tensor_list:
num_elements = num_elements + tens.numel()
remaining = num_elements % alignment
if remaining:
elements_to_add = alignment - remaining
pad_tensor = torch.zeros(elements_to_add, device=tensor_list[0].device, dtype=tensor_list[0].dtype)
padded_tensor_list = tensor_list + [pad_tensor]
num_elements = num_elements + elements_to_add
else:
padded_tensor_list = tensor_list
return self.flatten(padded_tensor_list)
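    # Worked example (hypothetical sizes): flattening tensors of 5 and 6 elements
    # with alignment=8 gives num_elements=11 and remaining=11 % 8 = 3, so a zero
    # pad of 5 elements is appended and the returned flat tensor has 16 elements.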
def _optimizer_states_and_gradient_swap_out(self, sub_group_id, timer_names=set()):
param_length = self.fp16_partitioned_groups_flat_numel[sub_group_id]
fp32_param_id = id(self.fp32_partitioned_groups_flat[sub_group_id])
assert self._swappable_optimizer_subgroup(sub_group_id), \
f'Parameter {fp32_param_id} of numel={param_length} is not swappable'
OPTIMIZER_SWAP_OUT_STATE = 'optimizer_swap_out_state'
see_memory_usage(f'post-step Before swapping out optimizer tensors {sub_group_id}', force=False)
self.start_timers([OPTIMIZER_SWAP_OUT_STATE])
self.optimizer_swapper.swap_out_optimizer_state(
parameter=self.fp32_partitioned_groups_flat[sub_group_id],
async_swap=self.next_swappable_fp32_partitioned_groups[sub_group_id] is not None)
self.stop_timers([OPTIMIZER_SWAP_OUT_STATE])
see_memory_usage(f'post-step After swapping out optimizer tensors {sub_group_id}', force=False)
timer_names.add(OPTIMIZER_SWAP_OUT_STATE)
# get rid of the fp32 gradients. Not needed anymore
self.fp32_partitioned_groups_flat[sub_group_id].grad = None
def _unflatten_partitioned_parameters(self, sub_group_id):
updated_params = self.unflatten(self.fp16_partitioned_groups_flat[sub_group_id],
self.fp16_partitioned_groups[sub_group_id])
for partitioned_param, q in zip(self.fp16_partitioned_groups[sub_group_id], updated_params):
partitioned_param.data = q.data
def _overflow_clean_up(self, prev_scale):
see_memory_usage('After overflow before clearing gradients', force=False)
self.zero_grad(set_to_none=True)
if self.offload_optimizer:
self.reset_cpu_buffers()
else:
self.averaged_gradients = {}
see_memory_usage('After overflow after clearing gradients', force=False)
@instrument_w_nvtx
def _overflow_check_and_loss_scale_update(self):
# First compute norm for all group so we know if there is overflow
self.check_overflow()
#loss scaling related computation
prev_scale = self.loss_scale
self._update_scale(self.overflow)
if self.overflow:
self._overflow_clean_up(prev_scale)
return self.overflow
@instrument_w_nvtx
def _post_step(self, timer_names=set()):
if self.offload_optimizer:
self.reset_cpu_buffers()
#Gathering persisting parameters
if len(self.persistent_parameters) > 0:
self.persistent_parameters[0].all_gather(self.persistent_parameters)
if self.swap_optimizer:
self.optimizer_swapper.log_timers()
self.invalidate_secondary_tensor()
self.log_timers(timer_names)
see_memory_usage('After zero_optimizer step', force=False)
print_rank_0(f"------------------Finishing Step-----------------------")
@instrument_w_nvtx
def _reassign_or_swap_out_partitioned_parameters(self, sub_group_id):
if self.fp16_partitioned_groups_flat[sub_group_id] is not None:
self.fp16_partitioned_groups_flat[sub_group_id].data.copy_(
self.fp32_partitioned_groups_flat[sub_group_id].data)
#unflatten fp16 parameter subgroup
self._unflatten_partitioned_parameters(sub_group_id)
else:
self._partitioned_params_swap_out(sub_group_id)
def override_loss_scale(self, loss_scale):
if loss_scale != self.external_loss_scale:
logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}')
self.custom_loss_scaler = True
self.external_loss_scale = loss_scale
@instrument_w_nvtx
def step(self, closure=None):
"""
Not supporting closure.
"""
self._pre_step()
self._partition_all_parameters()
#checks for overflow, adjust the loss scale accordingly
if self._overflow_check_and_loss_scale_update():
if self.swap_optimizer:
self.optimizer_swapper.log_timers()
return
norm_groups = self._get_norm_groups()
scaled_global_grad_norm = get_global_norm(norm_list=norm_groups)
# Stash unscaled gradient norm
self._global_grad_norm = scaled_global_grad_norm / self.loss_scale
timer_names = set()
timer_names.add('optimizer_step')
self.start_timers(['optimizer_step'])
#update parameters one sub group at a time
for sub_group_id, group in enumerate(self.fp16_groups):
#prepare optimizer states, gradients and fp32 parameters for update
self._prepare_sub_group(sub_group_id, timer_names)
#scale the fp32 gradients
self.unscale_and_clip_grads(sub_group_id, scaled_global_grad_norm)
#apply the optimizer step on the sub group and copy fp32 parameters to fp16
self._optimizer_step(sub_group_id)
#put fp16 parameters in appropriate location
self._reassign_or_swap_out_partitioned_parameters(sub_group_id)
#release memory or swap out optimizer states of fp32 parameters
self._release_sub_group(sub_group_id, timer_names)
self.stop_timers(['optimizer_step'])
self._post_step(timer_names)
# warn user about caching allocator flushes
memory_stats = get_accelerator().memory_stats()
alloc_retries = memory_stats["num_alloc_retries"] if memory_stats is not None else 0
if alloc_retries > self.n_caching_allocator_flushes:
if dist.get_rank() == 0:
logger.warning(
"%d pytorch allocator cache flushes since last step. this happens "
"when there is high memory pressure and is detrimental to "
"performance. if this is happening frequently consider adjusting "
"settings to reduce memory consumption. If you are unable to "
"make the cache flushes go away consider adding "
"get_accelerator().empty_cache() calls in your training loop to ensure "
"that all ranks flush their caches at the same time",
alloc_retries - self.n_caching_allocator_flushes)
self.n_caching_allocator_flushes = alloc_retries
def dump_pre_step_gradients(self, debug_fp32_grads):
# Dump gradient norms for debugging
for i, _ in enumerate(self.fp16_groups):
print(f'Pre-Step Dump Norms for Group {i} FP16P, FP16G, FP32G, FP32GUC')
for fp16_param, fp32_grad in zip(self.fp16_groups[i], debug_fp32_grads[i]):
param_id = self.get_param_id(fp16_param)
fp16_grad_norm = self.debug_fp16_grads[i][param_id]
fp32_grad_norm = [float(t.data.float().norm(2)) for t in fp32_grad]
norm_list = [fp16_grad_norm, fp32_grad_norm]
print(f'Pre-Step Norms {i} {param_id} = {norm_list}')
def dump_post_step_gradients(self):
# Dump gradient norms for debugging
for i, group in enumerate(self.fp16_groups):
print(f'Post-Step Dump Norms for Group {i} FP16P, FP16DS, FP16FLAT, FP32FLAT')
unflat_fp16 = self.unflatten(self.fp16_groups_flat[i], self.fp16_groups[i])
unflat_fp32 = self.unflatten(self.fp32_partitioned_groups_flat[i], self.fp16_groups[i])
for j, p in enumerate(self.fp16_groups[i]):
param_id = self.get_param_id(p)
param_norm = float(p.data.float().norm(2))
ds_norm = float(p.ds_tensor.data.float().norm(2))
unflat_norm = [float(t.data.float().norm(2)) for t in [unflat_fp16[j], unflat_fp32[j]]]
norm_list = [param_norm, ds_norm] + unflat_norm
print(f'Post-Step Norms {i} {param_id} = {norm_list}')
@instrument_w_nvtx
def unscale_and_clip_grads(self, sub_group_id, total_norm):
# compute combined scale factor for this group
combined_scale = self.loss_scale
if self.clip_grad > 0.:
# norm is in fact norm*scale
clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad
if clip > 1:
combined_scale = clip * self.loss_scale
self.fp32_partitioned_groups_flat[sub_group_id].grad.mul_(1. / combined_scale)
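    # Worked example (hypothetical values): with loss_scale=1024, clip_grad=1.0
    # and total_norm=2048 (the norm of the still-scaled gradients), clip is
    # (2048/1024 + 1e-6)/1.0 ~= 2 > 1, so combined_scale = 2 * 1024 = 2048 and the
    # gradients are multiplied by 1/2048 -- unscaled and clipped to norm 1 in one pass.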
def _check_overflow(self, partition_gradients=True):
self.overflow = self.has_overflow(partition_gradients)
# `params` is a list / generator of torch.Variable
def has_overflow_serial(self, params, is_grad_list=False):
for p in params:
if p.grad is not None and self._has_inf_or_nan(p.grad.data):
return True
return False
def has_overflow_partitioned_grads_serial(self):
for i in range(len(self.fp16_groups)):
for j, grad in enumerate(self.averaged_gradients[i]):
if grad is not None and self._has_inf_or_nan(grad.data, j):
return True
return False
@instrument_w_nvtx
def has_overflow(self, partition_gradients=True):
if partition_gradients:
with get_accelerator().stream(self.reduce_and_partition_stream):
if hasattr(self.inf_or_nan_tracker, "logical_or_"):
self.inf_or_nan_tracker.logical_or_(torch.isinf(self.grad_partitions_flat_buffer).any())
self.inf_or_nan_tracker.logical_or_(torch.isnan(self.grad_partitions_flat_buffer).any())
else:
# logical_or_ not available in older versions of pytorch
self.inf_or_nan_tracker += torch.isinf(self.grad_partitions_flat_buffer).any()
self.inf_or_nan_tracker += torch.isnan(self.grad_partitions_flat_buffer).any()
self.inf_or_nan_tracker = self.inf_or_nan_tracker > 0
overflow_gpu = self.inf_or_nan_tracker.clone().to(torch.uint8)
self.inf_or_nan_tracker.zero_()
get_accelerator().default_stream().wait_stream(self.reduce_and_partition_stream)
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.dp_process_group)
else:
params = []
for group in self.fp16_groups:
for param in group:
params.append(param)
overflow = self.has_overflow_serial(params, is_grad_list=partition_gradients)
overflow_gpu = get_accelerator().ByteTensor([overflow])
# Since each model parallel GPU carries only part of the model,
# make sure overflow flag is synced across all the model parallel GPUs
self._model_parallel_all_reduce(tensor=overflow_gpu, op=dist.ReduceOp.MAX)
overflow = overflow_gpu[0].item()
return bool(overflow)
# `x` is a torch.Tensor
@staticmethod
def _has_inf_or_nan(x, j=None):
try:
# if x is half, the .float() incurs an additional deep copy, but it's necessary if
# Pytorch's .sum() creates a one-element tensor of the same type as x
# (which is true for some recent version of pytorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if inst is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
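    # Note on the check above: a NaN is the only float that compares unequal to
    # itself, so `cpu_sum != cpu_sum` flags NaNs (e.g. float('nan') != float('nan')
    # evaluates to True), while an Inf anywhere drives the sum to +/-Inf (or to NaN
    # when +Inf and -Inf meet).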
@instrument_w_nvtx
def backward(self, loss, retain_graph=False):
"""
:attr:`backward` performs the following steps:
1. fp32_loss = loss.float()
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
"""
if self.swap_optimizer:
self.optimizer_swapper.pre_backward()
see_memory_usage(f"Before backward", force=False)
if self.custom_loss_scaler:
scaled_loss = self.external_loss_scale * loss
scaled_loss.backward()
else:
self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)
self._get_param_coordinator(training=True).reset_step()
if self.swap_optimizer:
self.optimizer_swapper.post_backward()
def get_fp32_grad_partitions(self) -> Dict[int, Dict[int, Tensor]]:
"""get fp32 gradient partition dictionary
accessed as grad_dict[parameter_group_index][parameter_index]
"""
self.reduce_and_partition_stream.synchronize()
grad_dict = collections.defaultdict(dict)
if self.offload_optimizer:
for group in self.fp16_groups:
for param_idx, param in enumerate(group):
group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)]
fp32_grad = self.fp32_partitioned_groups_flat[group_idx].grad.narrow(0, dest_offset, num_elements)
grad_dict[group_idx][param_idx] = fp32_grad
else:
for group_idx, group in self.averaged_gradients.items():
for param_idx, gradient in enumerate(group):
grad_dict[group_idx][param_idx] = gradient.float()
return grad_dict
def _fp32_state_allgather(self, param, fp32_state):
reduce_buffer = torch.zeros(self.partition_count * fp32_state.numel(),
dtype=torch.float32,
device=param.device).flatten()
my_rank = dist.get_rank(group=self.dp_process_group)
partitions = [
reduce_buffer.narrow(0,
fp32_state.numel() * i, fp32_state.numel()) for i in range(self.partition_count)
]
partitions[my_rank].data.copy_(fp32_state.data, non_blocking=False)
dist.all_gather(partitions, partitions[my_rank], group=self.dp_process_group)
return reduce_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape)
def get_fp32_grad_for_param(self, param) -> Tensor:
if not param.requires_grad:
return None
self.reduce_and_partition_stream.synchronize()
if self.offload_optimizer:
group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)]
fp32_grad = self.fp32_partitioned_groups_flat[group_idx].grad.narrow(0, dest_offset,
num_elements).to(device=param.device)
else:
fp32_grad = self.__param_id_to_grad_partition[param.ds_id].float()
return self._fp32_state_allgather(param, fp32_grad)
def get_full_hp_param(self, param, optim_state_key=None) -> Tensor:
if not param.requires_grad:
return None
self.reduce_and_partition_stream.synchronize()
group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)]
if self._swappable_optimizer_subgroup(group_idx):
self._optimizer_states_and_gradient_swap_in(group_idx)
fp32_param = self.fp32_partitioned_groups_flat[group_idx]
if optim_state_key is None:
fp32_opt_state = fp32_param.narrow(0, dest_offset, num_elements).to(device=param.device)
else:
fp32_opt_state = self.optimizer.state[fp32_param][optim_state_key].narrow(
0, dest_offset, num_elements).to(device=param.device)
hp_param = self._fp32_state_allgather(param, fp32_opt_state)
if self._swappable_optimizer_subgroup(group_idx):
self._optimizer_states_and_gradient_swap_out(group_idx)
return hp_param
@instrument_w_nvtx
def _partition_all_parameters(self):
self.parameter_offload.partition_all_parameters()
def check_overflow(self, partition_gradients=True):
self._check_overflow(partition_gradients)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
self.trainable_param_groups = self._get_trainable_parameter_groups()
param_groups = property(_get_param_groups, _set_param_groups)
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
if self.custom_loss_scaler:
return self.external_loss_scale
else:
return self.loss_scaler.cur_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
cur_scale = property(_get_loss_scale, _set_loss_scale)
def _get_lean_tensors(self, padded_flattened_tensor, group_tensors, paddings):
# Remove paddings from flattened tensor
individual_tensors = self.unflatten(padded_flattened_tensor, group_tensors)
lean_lengths = [t.numel() - pad for t, pad in zip(group_tensors, paddings)]
lean_tensors = [t[:len] for t, len in zip(individual_tensors, lean_lengths)]
#logger.info(f'rank {dist.get_rank()}: lean_tensors = {[t.numel() for t in lean_tensors]}')
return lean_tensors
#TODO REVISIT this for stage 3
def get_lean_optimizer_state(self):
# Return optimizer states after removing paddings.
# This method assumes that each param group contains a single flattened tensor.
optimizer_groups_state = []
for i, group in enumerate(self.optimizer.param_groups):
p = group['params'][0]
lean_state = {}
for key, value in self.optimizer.state[p].items():
if torch.is_tensor(value):
padded_lens = [t.numel() for t in self.fp16_partitioned_groups[i]]
lean_state[key] = self._get_lean_tensors(value, self.fp16_partitioned_groups[i],
self.groups_padding[i])
lean_flat_len = sum([t.numel() for t in lean_state[key]])
else:
lean_state[key] = value
optimizer_groups_state.append(lean_state)
return optimizer_groups_state
def get_groups_without_padding(self, groups_with_padding):
# Return group tensor after removing paddings added for alignment to DP world size.
groups_without_padding = []
for i, group in enumerate(groups_with_padding):
lean_group = self._get_lean_tensors(group, self.fp16_partitioned_groups[i], self.groups_padding[i])
groups_without_padding.append(lean_group)
return groups_without_padding
def _set_fp32_optimizer_param_groups(self):
for sub_group_id, _ in enumerate(self.fp16_groups):
param_group_id = self.sub_group_to_group_id[sub_group_id]
self.optimizer.param_groups[param_group_id]['params'].append(
self.fp32_partitioned_groups_flat[sub_group_id])
def _clear_fp32_optimizer_param_groups(self):
for param_group in self.optimizer.param_groups:
param_group['params'] = []
def _rigid_state_dict(self):
state_dict = {}
state_dict[ZERO_STAGE] = ZeroStageEnum.weights
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict[PARTITION_COUNT] = self.partition_count
self._set_fp32_optimizer_param_groups()
state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict()
state_dict[FP32_FLAT_GROUPS] = self.fp32_partitioned_groups_flat
self._clear_fp32_optimizer_param_groups()
return state_dict
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
if self.elastic_checkpoint:
raise NotImplementedError("ZeRO-3 does not yet support elastic checkpointing, please disable for now.")
if self.swap_optimizer or self.params_in_nvme_and_cpu:
raise NotImplementedError(
"ZeRO-3 does not yet support checkpointing with NVMe offloading, please disable for now.")
return self._rigid_state_dict()
# Restore base optimizer fp32 weights from checkpoint by:
# 1) Merging fp32 weights from checkpoints of all partitions
# 2) Extracting fp32 weights for current partition from merged weights
# 3) Using extracted weights to update base optimizer weights directly.
def _restore_from_fp32_weights(self, all_state_dict):
flat_local_partition = []
for i in range(len(self.fp32_partitioned_groups_flat)):
merged_partitions = [sd['fp32_groups'][i] for sd in all_state_dict]
flat_local_partition.append(self._get_flattened_partition(merged_partitions))
for current, saved in zip(self.fp32_partitioned_groups_flat, flat_local_partition):
current.data.copy_(saved.data)
# Restore base optimizer fp32 weights from ZeRO fp16 weights
def _restore_from_bit16_weights(self):
for fp16_partitions, fp32_partition in zip(self.fp16_partitioned_groups_flat,
self.fp32_partitioned_groups_flat):
fp32_partition.data.copy_(fp16_partitions.data)
# Refresh the fp32 master params from the fp16 copies.
def refresh_fp32_params(self):
self._restore_from_bit16_weights()
# Extract flattened partition for current rank from all partitions
def _get_flattened_partition(self, all_partition_states):
partition_id = dist.get_rank(group=self.dp_process_group)
alignment = dist.get_world_size(group=self.dp_process_group)
param_partitions = [[] for _ in range(len(all_partition_states[0]))]
for i, partition in enumerate(all_partition_states):
for j, param in enumerate(partition):
param_partitions[j].append(param)
local_state_partitions = []
for param_index, param_slices in enumerate(param_partitions):
flattened_merged_tensor = self.flatten_dense_tensors_aligned(param_slices, alignment)
new_partitions = self.get_data_parallel_partitions(flattened_merged_tensor)
local_state_partitions.append(new_partitions[partition_id])
if torch.is_tensor(local_state_partitions[0]):
return self.flatten_dense_tensors_aligned(local_state_partitions, alignment)
# Assume non-tensor states are not partitioned and equal across ranks, so return first one
return local_state_partitions[0]
# Restore base optimizer state from checkpoint by
# 1) Merging optimizer state from checkpoints of all partitions
# 2) Extracting optimizer state for current partition from the merged state
# 3) Using the extracted value to directly update the base optimizer.
def _restore_base_optimizer_state(self, all_state_dict):
base_optimizer_group_states = []
for i in range(len(self.optimizer.param_groups)):
partition_states = {}
all_partition_group_states = [sd['base_optimizer_state'][i] for sd in all_state_dict]
for key in all_partition_group_states[0].keys():
all_partition_states = [all_states[key] for all_states in all_partition_group_states]
partition_states[key] = self._get_flattened_partition(all_partition_states)
base_optimizer_group_states.append(partition_states)
for i, group in enumerate(self.optimizer.param_groups):
p = group['params'][0]
for key, saved in base_optimizer_group_states[i].items():
if torch.is_tensor(self.optimizer.state[p][key]):
self.optimizer.state[p][key].data.copy_(saved.data)
else:
self.optimizer.state[p][key] = saved
def _rigid_load_state_dict(self, state_dict, load_optimizer_states=True):
# I think it should actually be ok to reload the optimizer before the model.
self.loss_scaler = state_dict['loss_scaler']
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.overflow = state_dict['overflow']
if load_optimizer_states:
self._set_fp32_optimizer_param_groups()
self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT])
self._clear_fp32_optimizer_param_groups()
# restore fp32 partitions
for curr_param, saved_param in zip(self.fp32_partitioned_groups_flat, state_dict[FP32_FLAT_GROUPS]):
curr_param.data.copy_(saved_param.data)
# restore fp16 partitions from fp32
for sub_group_id in range(len(self.fp32_partitioned_groups_flat)):
fp32_param = self.fp32_partitioned_groups_flat[sub_group_id]
fp16_param = self.fp16_partitioned_groups_flat[sub_group_id]
fp16_param.data.copy_(fp32_param.data)
# update fp16 unflattened params
for sub_group_id in range(len(self.fp16_partitioned_groups_flat)):
updated_params = self.unflatten(self.fp16_partitioned_groups_flat[sub_group_id],
self.fp16_partitioned_groups[sub_group_id])
for partitioned_param, q in zip(self.fp16_partitioned_groups[sub_group_id], updated_params):
partitioned_param.data = q.data
# TODO: Support different/changing load/save DP degree.
def load_state_dict(self,
state_dict_list,
load_optimizer_states=True,
load_from_fp32_weights=False,
checkpoint_folder=None):
r"""Loading a ZeRO checkpoint
Arguments:
state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition.
                Note that the number of saved partitions may differ from the number of loading partitions to support
changing GPU count, specifically DP world size, between saving and loading checkpoints.
load_optimizer_states: Boolean indicating whether or not to load base optimizer states
load_from_fp32_weights: Boolean indicating whether to initialize fp32 master weights from fp32
copies in checkpoints (no precision loss) or from model's fp16 copies (with precision loss).
"""
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
if self.elastic_checkpoint:
raise NotImplementedError("ZeRO-3 does not yet support elastic checkpointing, please disable for now.")
if self.swap_optimizer or self.params_in_nvme_and_cpu:
raise NotImplementedError(
"ZeRO-3 does not yet support checkpointing with NVMe offloading, please disable for now.")
self._rigid_load_state_dict(state_dict_list[dist.get_rank(group=self.dp_process_group)],
load_optimizer_states=load_optimizer_states)
if len(self.persistent_parameters) > 0:
self.persistent_parameters[0].partition(self.persistent_parameters)
self.persistent_parameters[0].all_gather(self.persistent_parameters)
def checkpoint_event_prologue(self):
self._partition_all_parameters()
def checkpoint_event_epilogue(self):
if len(self.persistent_parameters) > 0:
self.persistent_parameters[0].all_gather(self.persistent_parameters)
def empty_partition_cache(self):
self.parameter_offload.empty_partition_cache()
def _handle_overflow(cpu_sum, x, i):
import math
rank = dist.get_rank()
if rank == 0:
t_i = -1
for v_i, v in enumerate(x.data.contiguous().view(-1)):
if not math.isfinite(float(v)):
t_i = v_i
break
logger.info(f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}")
def estimate_zero3_model_states_mem_needs(total_params,
largest_layer_params,
num_gpus_per_node=1,
num_nodes=1,
cpu_offload=True,
cpu_offload_params=True,
zero_init=True,
additional_buffer_factor=1.5):
total_gpus = num_nodes * num_gpus_per_node
gpus_factor = 1 / num_nodes
largest_layer_memory = (4 * largest_layer_params)
if cpu_offload:
if cpu_offload_params:
gpu_mem = largest_layer_memory
if zero_init:
cpu_mem = total_params * 18 * gpus_factor * additional_buffer_factor
else:
cpu_mem = total_params * max(4 * num_gpus_per_node, 18 * gpus_factor) * additional_buffer_factor
else:
gpu_mem = largest_layer_memory + int(2 * total_params / total_gpus)
if zero_init:
cpu_mem = total_params * 16 * gpus_factor * additional_buffer_factor
else:
cpu_mem = total_params * max(4 * num_gpus_per_node, 16 * gpus_factor) * additional_buffer_factor
else:
gpu_mem = largest_layer_memory + int(18 * total_params / total_gpus)
if zero_init:
cpu_mem = largest_layer_params * 4 * num_gpus_per_node * additional_buffer_factor
else:
cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor
return int(cpu_mem), int(gpu_mem), largest_layer_memory
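# Worked example of the estimate above (hypothetical model: 1e9 total params with a
# 1e8-param largest layer, 1 node x 8 GPUs, cpu_offload=True, cpu_offload_params=True,
# zero_init=True, additional_buffer_factor=1.5):
#   largest_layer_memory = 4 * 1e8                   = 4e8 bytes   (~0.37 GiB per GPU)
#   gpu_mem  = largest_layer_memory                  = 4e8 bytes
#   cpu_mem  = 1e9 * 18 * (1 / 1) * 1.5              = 2.7e10 bytes (~25.1 GiB of CPU memory)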
def model_to_params(model):
# shared params calculated only once
total_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values())
largest_layer_params = 0
for m in model.modules():
# assuming no shared params within a single layer
layer_params = sum(p.numel() for p in m.parameters(recurse=False))
largest_layer_params = max(largest_layer_params, layer_params)
return total_params, largest_layer_params
def estimate_zero3_model_states_mem_needs_all_live(model,
num_gpus_per_node=1,
num_nodes=1,
additional_buffer_factor=1.5):
"""
Print out estimates on memory usage requirements for ZeRO 3 params, optim states and gradients
for a given ``model`` and hardware setup.
If you have an actual model object, use this function and everything will be derived
automatically.
If it's a hypothetical model, use ``estimate_zero3_model_states_mem_needs_all_cold`` where you have to pass
the ``total_params`` and ``largest_layer_params`` explicitly.
Args:
- ``model``: ``nn.Module`` object
- ``num_gpus_per_node``: how many gpus per node (defaults to 1)
- ``num_nodes``: how many nodes (defaults to 1),
- ``additional_buffer_factor``: estimation factor (defaults to 1.5):
"""
total_params, largest_layer_params = model_to_params(model)
estimate_zero3_model_states_mem_needs_all_cold(total_params=total_params,
largest_layer_params=largest_layer_params,
num_gpus_per_node=num_gpus_per_node,
num_nodes=num_nodes,
additional_buffer_factor=additional_buffer_factor)
def estimate_zero3_model_states_mem_needs_all_cold(total_params,
largest_layer_params,
num_gpus_per_node=1,
num_nodes=1,
additional_buffer_factor=1.5):
"""
Print out estimates on memory usage requirements for ZeRO 3 params, optim states and gradients
for a given ``model`` and hardware setup.
If it's a hypothetical model, use this function where you have to pass
the ``total_params`` and ``largest_layer_params`` explicitly.
If you have an actual model object, use ``estimate_zero3_model_states_mem_needs_all_live`` and everything
will be derived automatically.
Args:
- ``total_params``: total model params
- ``largest_layer_params``: largest layer's params
- ``num_gpus_per_node``: how many gpus per node (defaults to 1)
- ``num_nodes``: how many nodes (defaults to 1),
- ``additional_buffer_factor``: estimation factor (defaults to 1.5):
"""
def format_options(cpu_offload, cpu_offload_params, zero_init):
enabled = []
padded_cpu_str = f'{OffloadDeviceEnum.cpu:4}'
param_device = padded_cpu_str if cpu_offload_params else "none"
enabled.append(f"offload_param={param_device}")
optimizer_device = padded_cpu_str if cpu_offload else "none"
enabled.append(f"offload_optimizer={optimizer_device}")
enabled.append(f"zero_init={1 if zero_init else 0}")
return ", ".join(enabled)
nodes_str = "nodes" if num_nodes > 1 else "node"
gpus_str = "GPUs" if num_gpus_per_node > 1 else "GPU"
print(
"Estimated memory needed for params, optim states and gradients for a:\n"
f"HW: Setup with {num_nodes} {nodes_str}, {num_gpus_per_node} {gpus_str} per node.\n"
f"SW: Model with {int(total_params/1e6)}M total params, {int(largest_layer_params/1e6)}M largest layer params."
)
print(" per CPU | per GPU | Options")
for cpu_offload in [True, False]:
for cpu_offload_params in [True, False]:
if not cpu_offload and cpu_offload_params:
continue
for zero_init in [True, False]:
cpu_mem, gpu_mem, largest_layer_memory = estimate_zero3_model_states_mem_needs(
total_params=total_params,
largest_layer_params=largest_layer_params,
num_gpus_per_node=num_gpus_per_node,
num_nodes=num_nodes,
cpu_offload=cpu_offload,
cpu_offload_params=cpu_offload_params,
zero_init=zero_init,
additional_buffer_factor=additional_buffer_factor)
options_str = format_options(cpu_offload=cpu_offload,
cpu_offload_params=cpu_offload_params,
zero_init=zero_init)
print(f" {cpu_mem/2**30:7.2f}GB | {gpu_mem/2**30:6.2f}GB | {options_str}")
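# A hedged usage sketch of the two estimator entry points above. `MyModel` is a
# placeholder for any ``nn.Module``; the import path assumes these helpers live in
# ``deepspeed.runtime.zero.stage3`` as in upstream DeepSpeed:
#
#   from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live
#   model = MyModel()
#   estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=8, num_nodes=2)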
| 118,239 | 45.241689 | 219 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/runtime/zero/utils.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from typing import List
import torch
from deepspeed import comm as dist
from deepspeed.utils import logger
from deepspeed.ops.adam import DeepSpeedCPUAdam
from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad
from deepspeed.ops.adam import FusedAdam
from deepspeed.utils.nvtx import instrument_w_nvtx
from deepspeed.accelerator import get_accelerator
def _initialize_parameter_parallel_groups(parameter_parallel_size=None):
data_parallel_size = int(dist.get_world_size())
parameter_parallel_size = parameter_parallel_size or data_parallel_size
logger.info("data_parallel_size: %s, parameter_parallel_size: %s", data_parallel_size, parameter_parallel_size)
assert data_parallel_size % parameter_parallel_size == 0, \
'world size should be divisible by parameter parallel size'
rank = dist.get_rank()
my_group = None
for i in range(data_parallel_size // parameter_parallel_size):
ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size)
group = dist.new_group(ranks)
if rank in ranks:
my_group = group
return my_group
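# Worked example (hypothetical setup): with a world size of 8 and
# parameter_parallel_size=4, every rank creates the two groups [0..3] and [4..7]
# (dist.new_group must be called collectively on all ranks), and e.g. rank 5
# returns the handle of the second group.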
class ZeRORuntimeException(Exception):
pass
ZERO_SUPPORTED_OPTIMIZERS = [
torch.optim.Adam, torch.optim.AdamW, FusedAdam, DeepSpeedCPUAdam, torch.optim.Adagrad, DeepSpeedCPUAdagrad
]
# Add apex FusedAdam to supported list if apex is installed
try:
import apex
if hasattr(apex, 'optimizers') and hasattr(apex.optimizers, 'FusedAdam'):
ZERO_SUPPORTED_OPTIMIZERS.append(apex.optimizers.FusedAdam)
except ImportError:
pass
def is_zero_supported_optimizer(optimizer):
if dist.get_rank() == 0:
logger.info(f'Checking ZeRO support for optimizer={optimizer.__class__.__name__} type={type(optimizer)}')
return type(optimizer) in ZERO_SUPPORTED_OPTIMIZERS
def get_lst_from_rank0(lst: List[int]) -> List[int]:
"""
NOTE: creates both communication and synchronization overhead so should be used
sparingly
"""
lst_tensor = torch.tensor(
lst if dist.get_rank() == 0 else [-1] * len(lst),
dtype=int,
# device=get_accelerator().current_device_name(),
device=torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])),
requires_grad=False,
)
dist.broadcast(lst_tensor, src=0, async_op=False)
return list(lst_tensor.cpu().numpy())
@instrument_w_nvtx
def assert_ints_same_as_other_ranks(ints: List[int]) -> None:
"""
NOTE: creates both communication and synchronization overhead so should be
used sparingly
takes a list of ints from each rank and ensures that they are the same
across ranks, throwing an exception if they are not.
"""
rank0_ints = get_lst_from_rank0(ints)
if ints != rank0_ints:
raise RuntimeError(f"disagreement between rank0 and rank{dist.get_rank()}: "
f"rank0: {rank0_ints}, rank{dist.get_rank()}: {ints}")
| 3,033 | 33.477273 | 115 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/runtime/zero/tiling.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
from deepspeed.runtime.utils import partition_uniform as partition
def split_tensor_along_last_dim(tensor, partitions, contiguous_split_chunks=False):
"""Split a tensor along its last dimension. Adapted from Megatron-LM.
Arguments:
tensor: input tensor.
partitions: list of partition sizes to supply to torch.split
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
"""
    # Get the dimension along which to split.
last_dim = tensor.dim() - 1
# Split.
tensor_list = torch.split(tensor, partitions, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
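# A small illustrative example of the helper above (not executed here; shapes are
# hypothetical):
#
#   x = torch.arange(12).reshape(2, 6)
#   a, b = split_tensor_along_last_dim(x, [4, 2])
#   # a.shape == (2, 4), b.shape == (2, 2); both are views of x unless
#   # contiguous_split_chunks=True is passed.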
class TiledLinear(torch.nn.Module):
def __init__(self,
in_features,
out_features,
bias=True,
in_splits=1,
out_splits=1,
input_is_already_split=False,
combine_out_splits=True,
linear_cls=torch.nn.Linear,
init_linear=None,
**kwargs):
"""A replacement for ``torch.nn.Linear`` that works with ZeRO-3 to reduce
memory requirements via tiling.
TiledLinear breaks the input and output dimensions of a linear layer
into tiles that are processed in sequence. This class enables huge
linear layers when combined with ZeRO-3 because inactive tiles can be
partitioned and offloaded.
.. note::
We recommend using as few tiles as necessary. Tiling
significantly reduces memory usage, but can reduce throughput
            for inexpensive layers. This is due to the smaller kernels having
less parallelism and lower arithmetic intensity, while
introducing more frequent synchronization and communication.
Args:
in_features (int): See ``torch.nn.Linear``
out_features (int): See ``torch.nn.Linear``
bias (bool, optional): See ``torch.nn.Linear``
in_splits (int, optional): The number of tiles along the input dimension. Defaults to 1.
out_splits (int, optional): The number of tiles along the output dimension. Defaults to 1.
input_is_already_split (bool, optional): If set to ``True``, assume that the ``input_`` in
to ``forward()`` is already split into ``in_splits`` chunks. Defaults to ``False``.
combine_out_splits (bool, optional): If set to ``False``, do not combine the ``out_splits`` outputs
into a single tensor. Defaults to ``True``.
linear_cls (class, optional): The underlying class to build individual tiles.
Defaults to ``torch.nn.Linear``.
init_linear (``torch.nn.Linear``, optional): If set, copy the parameters of
``init_linear``. Useful for debugging. Defaults to ``None``.
kwargs (dict, optional): additional keyword arguments to provide to ``linear_cls()``.
Raises:
            RuntimeError: ``in_splits`` must be within the range [1, in_features].
            RuntimeError: ``out_splits`` must be within the range [1, out_features].
"""
super().__init__()
if (in_splits < 1) or (in_splits > in_features):
raise RuntimeError('in splits must be in range [1, in_features].')
if (out_splits < 1) or (out_splits > out_features):
raise RuntimeError('out splits must be in range [1, out_features].')
# global, not necessarily local
self.in_features = in_features
self.out_features = out_features
self.use_bias = bias
self.out_splits = out_splits
self.in_splits = in_splits
self.input_is_already_split = input_is_already_split
self.combine_out_splits = combine_out_splits
# Build partition-lists. These are CSR-style splits [0, part0, part1, ..., features]
        # For example, in_parts[p] gives the start of input partition p and in_parts[p+1]
        # is the exclusive end.
self.in_parts = partition(num_items=in_features, num_parts=in_splits)
self.out_parts = partition(num_items=out_features, num_parts=out_splits)
assert len(self.out_parts) == out_splits + 1
assert len(self.in_parts) == in_splits + 1
assert self.out_parts[0] == 0
assert self.out_parts[out_splits] == out_features
assert self.in_parts[in_splits] == in_features
self.linears = torch.nn.ModuleList()
for out_id in range(out_splits):
self.linears.append(torch.nn.ModuleList())
local_out_dim = self.out_parts[out_id + 1] - self.out_parts[out_id]
for in_id in range(in_splits):
#if input_size is split, we only need one bias
local_bias = bias if in_id == (in_splits - 1) else False
local_in_dim = self.in_parts[in_id + 1] - self.in_parts[in_id]
local = linear_cls(local_in_dim, local_out_dim, bias=local_bias, **kwargs)
self.linears[out_id].append(local)
# Optionally initialize with a known tensor
if init_linear is not None:
self.copy_params_from(init_linear)
def forward(self, input_):
if self.in_splits > 1 and not self.input_is_already_split:
input_parts = partition(input_.shape[-1], self.in_splits)
split_sizes = [input_parts[p + 1] - input_parts[p] for p in range(self.in_splits)]
inputs = self._split_global_input(input_, split_sizes)
elif self.in_splits > 1:
inputs = input_
assert len(
inputs) == self.in_splits, f"Col splits {self.in_splits} does not match input splits {len(inputs)}"
else:
# no splits
inputs = [input_]
outputs = [None] * self.out_splits
for out_id in range(self.out_splits):
for in_id in range(self.in_splits):
local_output = self.linears[out_id][in_id](inputs[in_id])
outputs[out_id] = self._reduce_local_output(in_id=in_id,
out_id=out_id,
current_out=outputs[out_id],
new_out=local_output)
if self.combine_out_splits:
return self._combine_output_splits(outputs)
return outputs
def _split_global_input(self, input, split_sizes):
"""Partition an input tensor along the last dimension, aligned with given splits.
Subclasses should override this method to account for new input types.
Args:
            input (Tensor): The tensor to partition along the last dimension.
split_sizes (List[int]): The size of each partition.
Returns:
List[Any]: A list of the chunks of ``input``.
"""
return split_tensor_along_last_dim(input, split_sizes)
def _reduce_local_output(self, in_id, out_id, current_out, new_out):
"""Reduce (sum) a new local result into the existing local results.
Subclasses should override this method.
        For a given ``out_id``, this method is called once for each input split;
        the first call performs a simple assignment instead of a reduction.
Args:
in_id (int): The input split that produced ``new_out``.
out_id (int): The output split that produced ``new_out``.
current_out (Any): The reduced form of all previous ``out_id`` results.
            new_out (Any): The local result from forward (``in_id``, ``out_id``).
Returns:
Any: The combined result of ``current_out`` and ``new_out``.
"""
if current_out is None:
#this clone is necessary to preserve auto grad
#there is some issue with inplace update for outputs that are views
return new_out.clone()
else:
return current_out + new_out
def _combine_output_splits(self, outputs):
"""Join the splits of the output into a single result.
Args:
outputs (List[Any]): The reduced outputs for each output split.
Returns:
Any: The combined outputs.
"""
assert len(outputs) == self.out_splits
return torch.cat(outputs, dim=-1)
@torch.no_grad()
def copy_params_from(self, other):
"""Copy the weight and bias data from ``other``.
This is especially useful for reproducible initialization and testing.
Equivalent to:
.. code-block:: python
with torch.no_grad():
self.weight.copy_(other.weight)
if self.bias is not None:
self.bias.copy_(other.bias)
.. note::
If ZeRO-3 is enabled, this is a collective operation and the
updated parameters of data-parallel rank 0 will be visible on all
ranks. See :class:`deepspeed.zero.GatheredParameters` for more
information.
Args:
other (``torch.nn.Linear``): the linear layer to copy from.
"""
assert hasattr(other, 'weight')
assert other.weight.size() == (self.out_features, self.in_features)
if self.use_bias:
assert hasattr(other, 'bias')
assert other.bias is not None
assert other.bias.size() == (self.out_features, )
else:
assert other.bias is None
for row in range(self.out_splits):
rstart = self.out_parts[row]
rstop = self.out_parts[row + 1]
for col in range(self.in_splits):
cstart = self.in_parts[col]
cstop = self.in_parts[col + 1]
local = self.linears[row][col]
global_weight = other.weight[rstart:rstop, cstart:cstop]
with deepspeed.zero.GatheredParameters(local.weight, modifier_rank=0):
local.weight.copy_(global_weight)
if local.bias is not None:
with deepspeed.zero.GatheredParameters(local.bias, modifier_rank=0):
local.bias.data.copy_(other.bias[rstart:rstop].data)
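# Minimal usage sketch (illustrative only; the sizes and split counts below are
# arbitrary assumptions): a TiledLinear is used like torch.nn.Linear, but each tile
# is a separate sub-module that ZeRO-3 can partition and offload independently.
def _example_tiled_linear():
    tiled = TiledLinear(in_features=512, out_features=1024, in_splits=2, out_splits=4)
    x = torch.randn(8, 512)
    y = tiled(x)  # same output shape as torch.nn.Linear(512, 1024)(x)
    assert y.shape == (8, 1024)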
class TiledLinearReturnBias(TiledLinear):
"""Wrapper for a Linear class that returns its own bias parameter, such as
used by Megatron-LM.
"""
def _reduce_local_output(self, in_id, out_id, current_out, new_out):
"""Reduces output tensors, but not the returned bias. """
if current_out is not None:
old_tensor, old_bias = current_out
else:
old_tensor, old_bias = None, None
assert isinstance(new_out, tuple)
assert len(new_out) == 2
tensor, bias = new_out
assert tensor is not None
tensor = super()._reduce_local_output(in_id=in_id, out_id=out_id, current_out=old_tensor, new_out=tensor)
if bias is None:
bias = old_bias
return tensor, bias
def _combine_output_splits(self, outputs):
# stack output tensors
tensors = [o[0] for o in outputs]
tensor = super()._combine_output_splits(tensors)
# stack biases if applicable
biases = [o[1] for o in outputs if o[1] is not None]
if len(biases) > 0:
bias = super()._combine_output_splits(biases)
else:
bias = None
return tensor, bias
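# Sketch (the _ExampleReturnBiasLinear class below is defined only for this example
# and is not a DeepSpeed class): a Megatron-LM style linear returns (output, bias)
# instead of adding the bias, which is the tile interface TiledLinearReturnBias expects.
class _ExampleReturnBiasLinear(torch.nn.Linear):
    def forward(self, input):
        # defer the bias addition to the caller, Megatron-LM style
        return torch.nn.functional.linear(input, self.weight), self.bias
def _example_tiled_linear_return_bias():
    tiled = TiledLinearReturnBias(in_features=512,
                                  out_features=1024,
                                  in_splits=2,
                                  out_splits=2,
                                  linear_cls=_ExampleReturnBiasLinear)
    out, bias = tiled(torch.randn(8, 512))
    assert out.shape == (8, 1024)
    assert bias.shape == (1024, )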
| 11,727 | 38.488215 | 115 | py |
| DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/partitioned_param_coordinator.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from dataclasses import dataclass
import collections
from collections import UserDict
from typing import Deque, Set
from deepspeed import comm as dist
from deepspeed.utils.logging import logger
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.runtime.zero.partition_parameters import *
from deepspeed.runtime.zero.partitioned_param_profiler import PartitionedParameterProfiler
from deepspeed.runtime.swap_tensor.partitioned_param_swapper import PartitionedParamStatus
from deepspeed.utils.debug import debug_module2name_id, debug_param2name_id
from deepspeed.accelerator import get_accelerator
import logging
def debug_rank0(message: str) -> None:
if dist.get_rank() == 0:
logger.debug(message)
@instrument_w_nvtx
def get_all_parameters(sub_module, recurse=False):
return itertools.chain(sub_module.named_parameters(recurse=recurse), sub_module.ds_external_parameters())
def iter_params(module: Module, recurse=False) -> Iterable[Parameter]:
return map(lambda pair: pair[1], get_all_parameters(module, recurse))
class ZeRoTraceMode(Enum):
# Record trace of the network during a single forward+backward (for training) or forward (for inference)
RECORD = 1
# Use recorded network trace to optimize current forward+backward or forward
COMPLETE = 2
# Recorded trace does not match current forward+backward or forward pass.
INVALID = 3
class InflightParamRegistry(UserDict):
"""registry for parameters in flight"""
def __setitem__(self, param: Parameter, handle: AllGatherCoalescedHandle) -> None:
if param in self.data:
raise RuntimeError(f"{param.ds_summary()} already in registry")
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"attempted to add non-inflight parameter to registry {param.ds_summary()}")
self.data[param] = handle
class PartitionedParameterCoordinator:
FORWARD_FETCH_SUBMIT = 'forward_fetch_submit'
FORWARD_FETCH_WAIT = 'forward_fetch_wait'
FORWARD_PREFETCH_SUBMIT = 'forward_prefetch_submit'
BACKWARD_FETCH_SUBMIT = 'backward_fetch_submit'
BACKWARD_FETCH_WAIT = 'backward_fetch_wait'
    BACKWARD_PREFETCH_SUBMIT = 'backward_prefetch_submit'
FORWARD_ALL_GATHER = 'forward_all_gather'
BACKWARD_ALL_GATHER = 'backward_all_gather'
"""Handles partitioning and gathering of parameters."""
@dataclass
class __ParamInTrace:
param: Parameter
step_id_last_used_at: int
def __init__(
self,
prefetch_bucket_sz: int,
max_reuse_distance_in_numel: int,
max_available_parameters_in_numel: int,
allgather_stream: get_accelerator().Stream,
inflight_param_registry: InflightParamRegistry,
prefetch_nvme: bool = False,
timers=None,
) -> None:
# mapping of param -> handle for each param that is currently in flight
self.__inflight_param_registry = inflight_param_registry
# keeps track of the number of submodules invoked so far.
self.__step_id: int = 0
# network tracing mode
self.__trace_mode: ZeRoTraceMode = ZeRoTraceMode.RECORD
# sequence of submodules/parameters in forward pass + backward pass
self.__submodule_order: Iterable[Module] = []
self.__param_order: Iterable[__class__.__ParamInTrace] = []
self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10))
self.__step_id_module_fetched_for = collections.defaultdict(lambda: collections.deque())
# number of available params, and max number of available params
self.__n_available_params: int = 0
self.__max_n_available_params: int = max_available_parameters_in_numel
        # max distance between two uses of a module beyond which the module is released
self.__max_reuse_dist_in_numel: int = max_reuse_distance_in_numel
# queue for parameters to fetch. parameters will be popped off the left
        # side of the deque as they are fetched
self.__param_queue: Deque[__class__.__ParamInTrace] = None
self.__prefetch_bucket_sz: int = prefetch_bucket_sz
self.__prefetch_nvme: bool = prefetch_nvme
self.hierarchy: int = 0
# stream that will be used for allgather operations
self.__allgather_stream: get_accelerator().Stream = allgather_stream
# limit the number of fetch events that can be queued at once
        # otherwise, memory is allocated by the host thread at the time of the
        # call, but not used until later by the asynchronous cuda stream.
# allowing an infinite number of these to queue up causes a lot of memory
# pressure that then becomes detrimental to performance.
        # this is a much less elegant way of fixing this than something like
        # cudaMallocAsync/cudaFreeAsync. We choose not to expose this to the user
        # for now because ideally it will eventually be replaced by an async
        # allocation mechanism which doesn't require any configuration by the user.
self.__ongoing_fetch_events: Deque[get_accelerator().Event] = collections.deque()
# TODO. make this configurable via JSON
self.__max_ongoing_fetch_events: int = 2
self.__profiler = PartitionedParameterProfiler(timers)
"""Tracing and Tracking
TODO. consider performing trace before initializing PartitionedParameterCoordinator
and passing trace results into constructor. This way all the code in here can
just assume that the trace is complete and the results can be entirely
immutable.
Bookkeeping operations used to track where we are in the forward/backward pass
"""
def _clear_trace_structures(self) -> None:
self.__submodule_order = []
self.__param_order = []
self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10))
self.__param_queue = None
def is_complete_trace(self) -> bool:
return self.__trace_mode == ZeRoTraceMode.COMPLETE
def is_invalid_trace(self) -> bool:
return self.__trace_mode == ZeRoTraceMode.INVALID
def is_record_trace(self) -> bool:
return self.__trace_mode == ZeRoTraceMode.RECORD
def _invalidate_trace(self) -> None:
if self.is_invalid_trace():
raise RuntimeError("attempted to invalidate already invalid trace")
self.__trace_mode = ZeRoTraceMode.INVALID
self._clear_trace_structures()
def trace_prologue(self, sub_module: Module) -> None:
if self.is_complete_trace():
# sub_module must match expectation else invalidate trace cache
if len(self.__submodule_order) <= self.__step_id:
print_rank_0(
f"Invalidate trace cache @ step {self.__step_id} and module {sub_module.id}: "
f"cache has only {len(self.__submodule_order)} modules",
force=True)
self._invalidate_trace()
return
if sub_module != self.__submodule_order[self.__step_id]:
expected_module_id = self.__submodule_order[self.__step_id].id
print_rank_0(
f"Invalidate trace cache @ step {self.__step_id}: "
f"expected module {expected_module_id}, but got module {sub_module.id}",
force=True)
self._invalidate_trace()
def record_module(self, sub_module: Module) -> None:
"""adds sub module to trace"""
if not self.is_record_trace():
raise RuntimeError(f"attempted to record trace when status = {self.__trace_mode}")
self.__submodule_order.append(sub_module)
self.__step_id_module_fetched_for[sub_module.id].append(self.__step_id)
def record_parameters(self, sub_module: Module) -> None:
"""adds sub module to trace"""
if not self.is_record_trace():
raise RuntimeError(f"attempted to record trace when status = {self.__trace_mode}")
step_id = self.__step_id_module_fetched_for[sub_module.id].popleft()
for param in sorted(set(iter_params(sub_module)), key=lambda p: p.ds_id):
self.__param_order.append(__class__.__ParamInTrace(param=param, step_id_last_used_at=step_id))
def construct_parameter_trace_from_module_trace(self):
"""use module trace to construct parameter trace"""
self.__param_order = []
for sub_module in self.__submodule_order:
self.record_parameters(sub_module)
def reset_step(self) -> None:
"""indicate that we have completed one fwd+bwd for the model"""
if self.__inflight_param_registry:
raise RuntimeError(f"still have inflight params "
f"{[p.ds_summary() for p in self.__inflight_param_registry.keys()]}")
if not self.is_complete_trace(): # not self.trace_complete:
# Make sure that recorded submodule orders are identical across ranks
assert_ints_same_as_other_ranks([m.id for m in self.__submodule_order])
if self.is_record_trace():
# Successfully recorded a trace
self.construct_parameter_trace_from_module_trace()
# Make sure that recorded parameter orders are identical across ranks
assert_ints_same_as_other_ranks([p.param.ds_id for p in self.__param_order])
assert_ints_same_as_other_ranks([p.step_id_last_used_at for p in self.__param_order])
self.__submodule_order = tuple(self.__submodule_order) # freeze
self.__param_order = tuple(self.__param_order) # freeze
self.__trace_mode = ZeRoTraceMode.COMPLETE
print_rank_0(
f"completed record trace of {len(self.__submodule_order)} sub modules: {[m.id for m in self.__submodule_order]}",
force=False)
else:
# Enable trace recording for next forward/backward pass
self.__trace_mode = ZeRoTraceMode.RECORD
else:
if self.__profiler is not None:
self.__profiler.log_events()
self.__param_queue = collections.deque(self.__param_order) # reset fetch queue
self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10))
self.__step_id_module_fetched_for = collections.defaultdict(lambda: collections.deque())
self.__step_id = 0
self.__n_available_params = 0
self.__profiler.reset_events()
def _dump_params(self, tag, sub_module, params, step_id=None):
if step_id is None:
step_id = self.__step_id
param_names = [debug_param2name_id(p) for p in params]
print_rank_0(f'{tag} step = {step_id} mod = {debug_module2name_id(sub_module)} p_names = {param_names}',
force=False)
def _dump_param_ids(self, tag, mod_id, p_ids, step_id=None):
if step_id is None:
step_id = self.__step_id
print_rank_0(f'{tag} mod = {mod_id}, step = {step_id}, p_ids = {p_ids}', force=False)
"""Fetch and Release
Fetching, prefetching, and releasing parameters
"""
@instrument_w_nvtx
@torch.no_grad()
def fetch_sub_module(self, current_submodule: Module, forward: bool) -> None:
"""This method does the following (in order):
1. kick off fetch for parameters in immediately required sub module
2. kick off fetch for next few parameters we will need later (prefetch)
3. block on parameters in immediately required sub module
"""
if logger.isEnabledFor(logging.DEBUG):
debug_rank0(
f"{self.__step_id}: M{current_submodule.id}({type(current_submodule).__name__}) P{[p.ds_id for p in iter_params(current_submodule)]} "
+ str({
"avail": f"{self.__n_available_params:.1e}",
"queue_sz": f"{len(self.__param_queue or [])}",
"inflight": [p.ds_id for p in self.__inflight_param_registry],
}))
params_to_fetch = frozenset(iter_params(current_submodule))
fetch_numel = sum(
[p.partition_numel() for p in params_to_fetch if p.ds_status == ZeroParamStatus.NOT_AVAILABLE])
if fetch_numel > 0:
event_name = __class__.FORWARD_FETCH_SUBMIT if forward else __class__.BACKWARD_FETCH_SUBMIT
self._dump_param_ids(event_name, current_submodule.id,
[p.ds_id for p in params_to_fetch if p.ds_status == ZeroParamStatus.NOT_AVAILABLE])
self.__profiler.start_event(event_name)
# kick off all gather for params in the immediately required submodule
#for param in params_to_fetch:
if logger.isEnabledFor(logging.DEBUG):
for param in params_to_fetch:
debug_rank0(f"-fetch: {param.ds_summary()}")
self.__all_gather_params(params_to_fetch, forward)
self.__profiler.stop_event(event_name, fetch_numel)
wait_numel = 0
wait_event_name = __class__.FORWARD_FETCH_WAIT if forward else __class__.BACKWARD_FETCH_WAIT
self.__profiler.start_event(wait_event_name)
# wait for parameters in the immediately needed submodule to become available
for param in params_to_fetch:
param.ds_active_sub_modules.add(current_submodule.id)
if logger.isEnabledFor(logging.DEBUG):
debug_rank0(f"-wait: {param.ds_summary()}")
if param in self.__inflight_param_registry:
wait_numel += param.partition_numel()
with get_accelerator().stream(self.__allgather_stream):
while self.__ongoing_fetch_events and self.__ongoing_fetch_events[0].query():
self.__ongoing_fetch_events.popleft()
if len(self.__ongoing_fetch_events) > self.__max_ongoing_fetch_events:
self.__ongoing_fetch_events.popleft().synchronize()
self.__inflight_param_registry.pop(param).wait()
event = get_accelerator().Event()
event.record()
self.__ongoing_fetch_events.append(event)
assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary()
get_accelerator().current_stream().wait_stream(self.__allgather_stream)
self.__profiler.stop_event(wait_event_name, wait_numel)
# kick off parameter prefetches for upcoming modules
        # don't prefetch if we don't have a completed model trace
if self.is_complete_trace():
# go through the parameters we need for the current module and pop them
# off the fetch queue so that they aren't prefetched later.
# if params have already been popped off the fetch queue by earlier
# prefetches we won't look for them here
discarded_from_prefetch_queue = set()
params_not_already_fetched = set(
filter(lambda p: self.__most_recent_step_id_param_fetched_for[p] < self.__step_id, params_to_fetch))
while self.__param_queue and len(discarded_from_prefetch_queue) < len(params_not_already_fetched):
param_in_trace = self.__param_queue.popleft()
self.__most_recent_step_id_param_fetched_for[
param_in_trace.param] = param_in_trace.step_id_last_used_at
discarded_from_prefetch_queue.add(param_in_trace.param)
if discarded_from_prefetch_queue != params_not_already_fetched:
raise RuntimeError(
f"tracing error at step {self.__step_id}: \n"
f"module id: {current_submodule.id}, training: {current_submodule.training}\n"
f"expected the next {len(params_not_already_fetched)} parameters in the "
f"parameter fetch queue to be {tuple(p.ds_summary(use_debug_name=True) for p in params_not_already_fetched)} \n"
f"but got \n {tuple(p.ds_summary(use_debug_name=True) for p in discarded_from_prefetch_queue)}.")
def _is_currently_on_nvme(param):
if param.nvme_swapper is None:
return False
return param.ds_tensor.final_location == OffloadDeviceEnum.nvme \
and param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE
# kick off all gather for params in the next few submodules (prefetch)
if self.__prefetch_bucket_sz > 0:
max_params_to_prefetch = min(self.__max_n_available_params - self.__n_available_params,
self.__prefetch_bucket_sz)
params_to_prefetch = set()
numel_prefetching = 0
while self.__param_queue and numel_prefetching < max_params_to_prefetch:
param_in_trace: __class__.__ParamInTrace = self.__param_queue.popleft()
if _is_currently_on_nvme(param_in_trace.param):
# nvme prefetch is handled elsewhere. Need to break here to preserve fetch order
self.__param_queue.appendleft(param_in_trace)
break
do_prefetch = param_in_trace.param.ds_status == ZeroParamStatus.NOT_AVAILABLE
if param_in_trace.param in params_to_prefetch:
# Avoid duplicates
do_prefetch = False
self.__most_recent_step_id_param_fetched_for[param_in_trace.param] = \
max(self.__most_recent_step_id_param_fetched_for[param_in_trace.param],
param_in_trace.step_id_last_used_at)
if do_prefetch:
params_to_prefetch.add(param_in_trace.param)
numel_prefetching += param_in_trace.param.ds_numel
if numel_prefetching > 0:
event_name = __class__.FORWARD_PREFETCH_SUBMIT if forward else __class__.BACKWARD_PREFETCH_SUBMIT
self.__profiler.start_event(event_name)
if logger.isEnabledFor(logging.DEBUG):
for param in params_to_prefetch:
debug_rank0(f"-prefetch: {param.ds_summary()}")
self.__all_gather_params(params_to_prefetch, forward)
self.__profiler.stop_event(event_name, numel_prefetching)
if self.__prefetch_nvme:
self.__prefetch_nvme_param_partitions()
self.__step_id += 1
@instrument_w_nvtx
@torch.no_grad()
def release_sub_module(self, submodule: Module, backward: bool) -> None:
"""release the parameters of a sub module, assuming they meet conditions to
be released."""
params_to_release = (self.__params_to_release(submodule, self.__step_id) if self.is_complete_trace() else set(
p.ds_id for p in iter_params(submodule)))
for param in iter_params(submodule):
param.ds_active_sub_modules.discard(submodule.id)
if param.ds_id in params_to_release and not param.is_external_param:
self.__release_param(param, backward)
@instrument_w_nvtx
@torch.no_grad()
def release_and_reset_all(self, module: Module) -> None:
"""release all module parameters"""
for param in iter_params(module, recurse=True):
if param in self.__inflight_param_registry:
raise RuntimeError(f"param {param.ds_summary()} still in flight")
            # TODO. make this throw if there are still active submodules. currently
# there's a hook execution issue
param.ds_active_sub_modules.clear()
self.__release_param(param, backward=False)
for param in iter_params(module, recurse=True):
if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
raise RuntimeError(f"{param.ds_summary()} expected to be released")
@instrument_w_nvtx
def __all_gather_params(self, params: Set[Parameter], forward: bool) -> None:
"""for each partitioned parameter, kick off an async allgather and store
the work handle for the in flight parameters."""
partitioned_params = []
all_gather_numel = 0
for param in params:
if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
partitioned_params.append(param)
all_gather_numel += param.ds_numel
if partitioned_params:
self.__n_available_params += all_gather_numel
with get_accelerator().stream(self.__allgather_stream):
event_name = __class__.FORWARD_ALL_GATHER if forward else __class__.BACKWARD_ALL_GATHER
self.__profiler.start_event(event_name)
handle = partitioned_params[0].all_gather_coalesced(partitioned_params, forward)
self.__profiler.stop_event(event_name, all_gather_numel)
for param in partitioned_params:
assert param.ds_status == ZeroParamStatus.INFLIGHT, param.ds_summary()
self.__inflight_param_registry[param] = handle
# Release swap buffers for persisted params on nvme since they will never be partitioned or evicted from GPU
swap_persisted_params = [
p for p in partitioned_params if p.ds_persist and p.ds_tensor.final_location == OffloadDeviceEnum.nvme
]
if swap_persisted_params:
swap_persisted_params[0].nvme_swapper.remove_partition_and_release_buffers(swap_persisted_params)
@instrument_w_nvtx
def __release_param(self, param: Parameter, backward: bool) -> None:
if param.ds_status == ZeroParamStatus.AVAILABLE and not param.ds_active_sub_modules:
if logger.isEnabledFor(logging.DEBUG):
debug_rank0(f"-release: {param.ds_summary()}")
param.partition(backward=backward)
self.__n_available_params -= param.ds_numel
@instrument_w_nvtx
@functools.lru_cache(maxsize=None)
def __params_to_release(self, submodule_to_release: Module, step_id: int) -> Set[int]:
if not self.is_complete_trace():
raise RuntimeError("expected trace to be complete")
params_to_release = set(p.ds_id for p in iter_params(submodule_to_release) if not p.ds_persist)
# Problem: When prefetcher scans the param trace, it skips AVAILABLE params.
# This creates issues if those params are released before the skipped uses:
# 1) It hurts performance as the skipped uses are never prefetched.
# 2) For nvme params, we run out of swap buffers because the prefetch order
# diverges from the trace.
# Solution: Don't release params whose reuse was skipped by prefetch. This is
# possible because we detect such skips during prefetch and mark those params.
for param in iter_params(submodule_to_release):
if self.__most_recent_step_id_param_fetched_for[param] > step_id:
params_to_release.discard(param.ds_id)
# examine all modules within `max_reuse_dist_in_numel` of the current step,
# if we see any of the candidate parameters to be released reoccur while
# doing this, remove them from the set of parameters to release.
params_traversed = 0
for module in self.__submodule_order[step_id:]:
if params_traversed >= self.__max_reuse_dist_in_numel:
break
for param in iter_params(module):
params_to_release.discard(param.ds_id)
params_traversed += param.ds_numel
return params_to_release
@instrument_w_nvtx
def __prefetch_nvme_param_partitions(self) -> None:
"""swap in parameter partitions from nvme for those parameters that will be used
after the ones that are already being prefetched into full parameters
"""
if not self.is_complete_trace():
return
numel_in_flight = sum(param.ds_numel for param in self.__inflight_param_registry)
numel_considered = 0
swap_in_params = []
for param_in_trace in self.__param_queue:
param = param_in_trace.param
if param.nvme_swapper is None:
continue
if (numel_considered > 2 * numel_in_flight
or len(swap_in_params) >= param.nvme_swapper.available_swap_in_buffers()):
break
if param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE:
swap_in_params.append(param)
numel_considered += param.ds_numel
if swap_in_params:
swap_in_params[0].nvme_swapper.swap_in(swap_in_params, async_op=True)
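# Illustrative call pattern (a sketch of how the ZeRO-3 forward hooks drive the
# coordinator; `coordinator`, `submodule` and `inputs` are assumed to be provided
# by the caller, and this helper is not part of DeepSpeed itself).
def _example_coordinator_forward_step(coordinator: PartitionedParameterCoordinator, submodule: Module, inputs):
    coordinator.trace_prologue(submodule)
    if coordinator.is_record_trace():
        coordinator.record_module(submodule)
    coordinator.fetch_sub_module(submodule, forward=True)  # all-gather + wait on required params
    output = submodule(*inputs)
    coordinator.release_sub_module(submodule, backward=False)  # re-partition params that are no longer needed
    return output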
| 25,138 | 48.978131 | 150 | py |
| DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/linear.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#Linear Module to use with ZeRO Stage 3 to allow for parameter memory release
#after the module execution during forward
#Instead of saving variables using save_for_backward, we save variable ids
#Allowing us to retrieve the variable without creating a pointer to it,
#which allows the underlying tensor to be garbage collected
#when partitioned as needed by the ZeRO Stage 3 optimizer
#TODO instead of patching Linear module, we could patch the ctx.save_for_backward
#ctx.saved_tensors so that this approach works for all nn modules that are built upon
#torch.nn.functional. However, the issue is that many modules use C++ implementations
#which do not have a pytorch implementation, e.g. torch.addmm, which acts as a functional
#when implemented outside of torch.autograd.Function
import math
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn.modules.module import Module
from deepspeed.runtime.utils import noop_decorator
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
def print_rank_0(message, debug=False, force=False):
if dist.get_rank() == 0 and (debug or force):
print(message)
try:
autocast_custom_fwd = get_accelerator().amp().custom_fwd
autocast_custom_bwd = get_accelerator().amp().custom_bwd
except (ImportError, AttributeError) as exp:
autocast_custom_fwd = noop_decorator
autocast_custom_bwd = noop_decorator
class LinearFunctionForZeroStage3(torch.autograd.Function):
# Note that both forward and backward are @staticmethods
@staticmethod
@autocast_custom_fwd
# bias is an optional argument
def forward(ctx, input, weight, bias=None):
ctx.save_for_backward(input, weight, bias)
if input.dim() == 2 and bias is not None:
# fused op is marginally faster
ret = torch.addmm(bias, input, weight.t())
else:
output = input.matmul(weight.t())
if bias is not None:
output += bias
ret = output
return ret
# This function has only a single output, so it gets only one gradient
@staticmethod
@autocast_custom_bwd
def backward(ctx, grad_output):
# This is a pattern that is very convenient - at the top of backward
# unpack saved_tensors and initialize all gradients w.r.t. inputs to
# None. Thanks to the fact that additional trailing Nones are
# ignored, the return statement is simple even when the function has
# optional inputs.
input, weight, bias = ctx.saved_tensors
grad_input = grad_weight = grad_bias = None
#print(f"backward shaped grad_output {grad_output.shape}, input {input.shape}, weight {weight.shape} and bias {bias.shape if bias is not None else None}")
# These needs_input_grad checks are optional and there only to
# improve efficiency. If you want to make your code simpler, you can
# skip them. Returning gradients for inputs that don't require it is
# not an error.
if ctx.needs_input_grad[0]:
#print(f"Computing grad input weight {weight.shape} grad_output {grad_output.shape}")
grad_input = grad_output.matmul(weight)
#print(f"Computed grad input {grad_input.shape}")
if ctx.needs_input_grad[1]:
#print("Computing grad weight")
dim = grad_output.dim()
if dim > 2:
grad_weight = grad_output.reshape(-1,
grad_output.shape[-1]).t().matmul(input.reshape(-1, input.shape[-1]))
else:
grad_weight = grad_output.t().matmul(input)
#print(f"Computed grad weight grad_weight {grad_weight.shape}")
if bias is not None and ctx.needs_input_grad[2]:
#print("Computing grad bias")
grad_bias = grad_output.sum(0)
#print("Done computing grad bias")
#print("needs bias")
#print(f"backward shaped grad_input {grad_input.shape}, grad_weight {grad_weight.shape}, grad_bias {grad_bias.shape if grad_bias is not None else None}")
return grad_input, grad_weight, grad_bias
def zero3_linear_wrap(input, weight, bias=None):
if bias is None:
return LinearFunctionForZeroStage3.apply(input, weight)
else:
return LinearFunctionForZeroStage3.apply(input, weight, bias)
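# Minimal sketch (added for illustration): exercising the autograd function through
# zero3_linear_wrap, the wrapper ZeRO-3 uses to patch torch.nn.functional.linear.
# The shapes are arbitrary assumptions; we only check forward/backward shapes.
def _example_zero3_linear_wrap():
    x = torch.randn(8, 20, requires_grad=True)
    w = torch.randn(30, 20, requires_grad=True)
    b = torch.randn(30, requires_grad=True)
    y = zero3_linear_wrap(x, w, b)  # same semantics as torch.nn.functional.linear(x, w, b)
    y.sum().backward()
    assert y.shape == (8, 30)
    assert x.grad.shape == (8, 20) and w.grad.shape == (30, 20) and b.grad.shape == (30, )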
class LinearModuleForZeroStage3(Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
The weights are pre-transposed and stored as A^T instead of transposing during each
forward. Memory savings proportional to the parameter size.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to ``False``, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
additional dimensions and :math:`H_{in} = \text{in\_features}`
- Output: :math:`(N, *, H_{out})` where all but the last dimension
are the same shape as the input and :math:`H_{out} = \text{out\_features}`.
Attributes:
weight: the learnable weights of the module of shape
:math:`(\text{out\_features}, \text{in\_features})`. The values are
initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
:math:`k = \frac{1}{\text{in\_features}}`
bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
If :attr:`bias` is ``True``, the values are initialized from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{1}{\text{in\_features}}`
Examples::
>>> m = nn.Linear(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
super(LinearModuleForZeroStage3, self).__init__()
print("Building ZeRO module")
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
return LinearFunctionForZeroStage3.apply(input, self.weight, self.bias)
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(self.in_features, self.out_features, self.bias
is not None)
| 7,403 | 41.068182 | 162 | py |
| DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/partitioned_param_profiler.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from dataclasses import dataclass
from deepspeed.utils import log_dist
class PartitionedParameterProfiler(object):
@dataclass
class EventCounter:
name: str
count: int
num_elem: int
def reset(self):
self.count = 0
self.num_elem = 0
def increment(self, numel):
self.count += 1
self.num_elem += numel
def __init__(self, timers):
self.timers = timers
self.event_counters = {}
def reset_events(self):
for event_ctr in self.event_counters.values():
event_ctr.reset()
def start_event(self, name):
if self.timers is None:
return
if name not in self.event_counters:
self.event_counters[name] = __class__.EventCounter(name=name, count=0, num_elem=0)
self.timers(name).start()
def stop_event(self, name, num_elem):
if self.timers is None:
return
assert name in self.event_counters, f'unknown event {name}'
self.event_counters[name].increment(num_elem)
self.timers(name).stop()
def _log_timers(self):
if self.timers is None:
return
self.timers.log(names=list(self.event_counters.keys()))
def _log_event_counters(self):
for event_ctr in self.event_counters.values():
log_dist(
f'{event_ctr.name}: count = {event_ctr.count}, numel = {event_ctr.num_elem}',
#f'{event_ctr.name}: time = {self._log_timers()},count = {event_ctr.count}, numel = {event_ctr.num_elem}',
ranks=[0])
def log_events(self):
self._log_event_counters()
self._log_timers()
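# Usage sketch (illustrative only): the profiler counts elements per named event and
# defers timing to a caller-provided timers object. The stub timer classes below are
# stand-ins defined just for this example; they are not DeepSpeed classes.
class _StubTimer:
    def start(self):
        pass
    def stop(self):
        pass
class _StubTimers:
    def __call__(self, name):
        return _StubTimer()
    def log(self, names):
        pass
def _example_partitioned_param_profiler():
    profiler = PartitionedParameterProfiler(_StubTimers())
    profiler.start_event('forward_fetch_submit')
    profiler.stop_event('forward_fetch_submit', num_elem=1024)
    profiler.log_events()  # emits "forward_fetch_submit: count = 1, numel = 1024"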
| 1,801 | 27.15625 | 122 | py |
| DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/config.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from pydantic import Field, validator
import sys
from typing import Optional
from enum import Enum
from deepspeed.runtime.config_utils import get_scalar_param, pp_int, DeepSpeedConfigModel
from deepspeed.utils import logger
from .offload_config import DeepSpeedZeroOffloadParamConfig, DeepSpeedZeroOffloadOptimizerConfig, OffloadDeviceEnum
# ZeRO optimization. By default, this optimization is not enabled.
# Users have to configure the desired optimization (0 means disabled) in params.json as below example:
ZERO_FORMAT = """
ZeRO optimization should be enabled as:
"session_params": {
"zero_optimization": {
"stage": [0|1|2],
"stage3_max_live_parameters" : 1000000000,
"stage3_max_reuse_distance" : 1000000000,
"allgather_partitions": [true|false],
"allgather_bucket_size": 500000000,
"reduce_scatter": [true|false],
"contiguous_gradients" : [true|false]
"overlap_comm": [true|false],
"reduce_bucket_size": 500000000,
"load_from_fp32_weights": [true|false],
"cpu_offload": [true|false] (deprecated),
"cpu_offload_params" : [true|false] (deprecated),
"cpu_offload_use_pin_memory": [true|false] (deprecated),
"sub_group_size" : 1000000000000,
"offload_param": {...},
"offload_optimizer": {...},
"ignore_unused_parameters": [true|false],
"round_robin_gradients": [true|false],
"zero_hpz_partition_size": 1,
"zero_quantized_weights": [true|false],
"zero_quantized_gradients": [true|false],
"memory_efficient_linear": [true|false]
}
}
"""
ZERO_OPTIMIZATION = "zero_optimization"
def read_zero_config_deprecated(param_dict):
zero_config_dict = {}
zero_config_dict["stage"] = 1 if param_dict[ZERO_OPTIMIZATION] else 0
if zero_config_dict["stage"] > 0:
zero_config_dict["allgather_bucket_size"] = get_scalar_param(param_dict, "allgather_size", 5e8)
logger.warning(
"DeepSpeedConfig: this format of ZeRO optimization setup is deprecated. Please use the following format: {}".
format(ZERO_FORMAT))
return zero_config_dict
def get_zero_config(param_dict):
if ZERO_OPTIMIZATION in param_dict:
zero_config_dict = param_dict[ZERO_OPTIMIZATION]
if isinstance(zero_config_dict, bool):
zero_config_dict = read_zero_config_deprecated(param_dict)
else:
zero_config_dict = {}
return DeepSpeedZeroConfig(**zero_config_dict)
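# Minimal sketch (the values below are arbitrary examples, not recommended settings):
# building a DeepSpeedZeroConfig from the user-facing dictionary format.
def _example_get_zero_config():
    ds_config = {
        "zero_optimization": {
            "stage": 3,
            "stage3_prefetch_bucket_size": 5e7,
            "offload_param": {
                "device": "cpu"
            }
        }
    }
    zero_cfg = get_zero_config(ds_config)
    assert zero_cfg.stage == 3
    assert zero_cfg.prefetch_bucket_size == int(5e7)
    assert zero_cfg.offload_param.device == OffloadDeviceEnum.cpu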
class ZeroStageEnum(int, Enum):
""" Enum class for possible zero stages """
disabled = 0
optimizer_states = 1
gradients = 2
weights = 3
max_stage = 3
class DeepSpeedZeroConfig(DeepSpeedConfigModel):
"""
Sets parameters for ZeRO optimizations.
"""
stage: ZeroStageEnum = 0
"""
Chooses different stages of ZeRO Optimizer. Stage 0, 1, 2, and 3 refer
    to disabled, optimizer state partitioning, optimizer+gradient state
partitioning, and optimizer+gradient+parameter partitioning, respectively.
"""
contiguous_gradients: bool = True
"""
Copies the gradients to a contiguous buffer as they are produced. Avoids
memory fragmentation during backward pass.
"""
reduce_scatter: bool = True
"""
Uses reduce or reduce scatter instead of allreduce to average gradients
"""
reduce_bucket_size: int = Field(pp_int(5e8), ge=0)
"""
Number of elements reduced/allreduced at a time. Limits the memory required
for the allgather for large model sizes
"""
allgather_partitions: bool = True
"""
Chooses between allgather collective or a series of broadcast collectives
to gather updated parameters from all the GPUs at the end of each step
"""
allgather_bucket_size: int = Field(pp_int(5e8), ge=0)
"""
Number of elements allgathered at a time. Limits the memory required for
the allgather for large model sizes
"""
overlap_comm: bool = None # None for dynamic default value (see validator `overlap_comm_valid` below)
"""
Attempts to overlap the reduction of the gradients with backward computation
"""
load_from_fp32_weights: bool = True
"""
Boolean indicating whether to initialize fp32 master weights from fp32
copies in checkpoint (no precision loss) or from model's fp16 copies (with
precision loss). This can be used to initialize optimizer state even when
checkpoint is missing optimizer state.
"""
elastic_checkpoint: bool = False
"""
    Enable loading a checkpoint that was saved by a job with a different GPU count.
No longer supported.
"""
offload_param: Optional[DeepSpeedZeroOffloadParamConfig] = None
"""
Enable offloading of model parameters to CPU or NVMe. This frees up GPU
memory for larger models or batch sizes. Valid only with stage 3. Expects a
dictionary containing values for :any:`DeepSpeedZeroOffloadParamConfig`.
"""
offload_optimizer: Optional[DeepSpeedZeroOffloadOptimizerConfig] = None
"""
Enable offloading of optimizer state to CPU or NVMe, and optimizer
computation to CPU. This frees up GPU memory for larger models or batch
sizes. Valid for ZeRO stage 1, 2, 3. Expects a dictionary containing values
for :any:`DeepSpeedZeroOffloadOptimizerConfig`.
"""
sub_group_size: int = Field(pp_int(1e9), ge=0)
"""
Tile size for parameter processing to fit massive models (with trillions of
parameters). Used by ZeRO3-Offload and ZeRO-Infinity
"""
cpu_offload_param: bool = Field(
None,
deprecated=True,
new_param="offload_param",
new_param_fn=(lambda val: DeepSpeedZeroOffloadParamConfig(device=OffloadDeviceEnum.cpu) if val else None),
)
""" Deprecated, please use ``offload_param`` """
cpu_offload_use_pin_memory: bool = Field(
None,
deprecated=True,
new_param="offload_param or offload_optimizer",
set_new_param=False,
)
""" Deprecated, please use ``offload_param`` or ``offload_optimizer`` """
cpu_offload: bool = Field(
None,
deprecated=True,
new_param="offload_optimizer",
new_param_fn=(lambda val: DeepSpeedZeroOffloadOptimizerConfig(device=OffloadDeviceEnum.cpu) if val else None),
)
""" Deprecated, please use ``offload_optimizer`` """
prefetch_bucket_size: int = Field(pp_int(5e7), ge=0, alias="stage3_prefetch_bucket_size")
"""
Maximum number of parameter elements to fetch ahead of use. Used by ZeRO3,
ZeRO3-Offload, ZeRO-Infinity, and ZeRO-Inference.
"""
param_persistence_threshold: int = Field(pp_int(1e5), ge=0, alias="stage3_param_persistence_threshold")
"""
Do not partition parameters smaller than this threshold. Smaller values use
less memory, but can greatly increase communication (especially
latency-bound messages).
"""
model_persistence_threshold: int = Field(pp_int(sys.maxsize, "sys.maxsize"),
ge=0,
alias="stage3_model_persistence_threshold")
"""
Maximum number of parameter elements that can be persisted in GPU and not
partitioned. This imposes an upper bound on the number of unpartitioned
parameters resulting from param_persistence_threshold setting. Used by
ZeRO3-Offload, ZeRO-Infinity and ZeRO-Inference.
"""
max_live_parameters: int = Field(pp_int(1e9), ge=0, alias="stage3_max_live_parameters")
"""
The maximum number of parameters resident per GPU before releasing. Smaller
values use less memory, but perform more communication.
"""
max_reuse_distance: int = Field(pp_int(1e9), ge=0, alias="stage3_max_reuse_distance")
"""
Do not release a parameter if it will be reused within this threshold of
parameters. Smaller values use less memory, but perform more communication.
"""
gather_16bit_weights_on_model_save: bool = Field(False, alias="stage3_gather_16bit_weights_on_model_save")
"""
Consolidate the weights before saving the model by ``save_16bit_model()``.
Since the weights are partitioned across GPUs, they aren’t part of
``state_dict``, so this function automatically gathers the weights when
this option is enabled and then saves the fp16 model weights.
"""
stage3_gather_fp16_weights_on_model_save: bool = Field(False,
deprecated=True,
new_param="gather_16bit_weights_on_model_save")
""" Deprecated, please use ``gather_16bit_weights_on_model_save`` """
ignore_unused_parameters: bool = True
"""
Unused parameters in modules may be unexpected in static networks, but
could be normal in dynamic networks. This controls whether or not training
should terminate with an error message when unused parameters are detected.
    This is set to ``True`` by default, which means unused parameters are
    ignored and training continues. Currently this is only used in ZeRO stage 2.
"""
legacy_stage1: bool = False
"""
For backward-compatibility enable old ZeRO stage 1 implementation. Use at
your own risk, will be deprecated soon.
"""
round_robin_gradients: bool = False
"""
Stage 1 and 2 optimization for CPU offloading that parallelizes gradient
copying to CPU memory among ranks by fine-grained gradient partitioning.
Performance benefit grows with gradient accumulation steps (more copying
between optimizer steps) or GPU count (increased parallelism).
"""
zero_hpz_partition_size: int = Field(1, ge=0)
"""
Number of ranks in zero parameters partitioning secondary group
"""
zero_quantized_weights: bool = False
"""
    Boolean indicating whether to quantize zero parameters (weights)
for efficient all_gather comm
"""
zero_quantized_gradients: bool = False
"""
Boolean indicating whether to use quantized zero gradients
for efficient all_2_all_reduce comm
"""
mics_shard_size: int = Field(-1, new_param="mics_shard_size")
mics_hierarchical_params_gather: bool = False
memory_efficient_linear: bool = True
"""
Use memory efficient linear implementation, for Stage 3.
"""
# Validators
@validator("overlap_comm")
def overlap_comm_valid(cls, field_value, values):
if field_value is None:
assert ("stage" in values), "DeepSpeedZeroConfig: 'stage' must be defined before 'overlap_comm'"
field_value = values["stage"] == ZeroStageEnum.weights
return field_value
| 10,692 | 36.651408 | 118 | py |
| DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/__init__.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .partition_parameters import ZeroParamType
from .partition_parameters import ZeroParamStatus
from .partition_parameters import Init
from .partition_parameters import GatheredParameters
from .partition_parameters import register_external_parameter
from .tiling import TiledLinear
from .tiling import TiledLinearReturnBias
from .mics import MiCS_Init
| 452 | 27.3125 | 61 | py |
| DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/mics.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import sys
from typing import List
import deepspeed
import torch
from deepspeed import comm as dist
from deepspeed.runtime.zero.mics_utils import (MiCS_CommGroups, create_mics_comm_groups, scale_tensors)
from deepspeed.runtime.zero.parameter_offload import (DeepSpeedZeRoOffload, is_zero_param)
from deepspeed.runtime.zero.partition_parameters import Init, AllGatherCoalescedHandle, ZeroParamStatus
from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3
from deepspeed.utils import instrument_w_nvtx, log_dist
from deepspeed.accelerator import get_accelerator
from torch import Tensor
from torch.nn import Parameter
def has_hierarchical_all_gather_groups(comm_groups: MiCS_CommGroups):
result = False
if comm_groups.param_intra_node_group is not None and comm_groups.param_inter_node_shard_group is not None:
result = True
return result
class MiCS_AllGatherCoalescedHandle(AllGatherCoalescedHandle):
""" This handle assumes that no need to
copy data out from a contiguous tensor
"""
def __init__(self, allgather_handle, params: List[Parameter], partitions: List[Tensor], world_size: int) -> None:
super().__init__(allgather_handle, params, partitions, world_size)
def wait(self) -> None:
"""
"""
        # wait for the allgather op on the current stream
instrument_w_nvtx(self.allgather_handle.wait)()
if self.complete:
return
for _, param in enumerate(self.params):
assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight"
param.ds_status = ZeroParamStatus.AVAILABLE
self.complete = True
class MiCS_Init(Init):
def __init__(self,
module=None,
data_parallel_group=None,
mem_efficient_linear=True,
remote_device=None,
pin_memory=False,
config_dict_or_path=None,
config=None,
enabled=True,
dtype=None,
mpu=None):
"""A context manager to partition the model parameters during the model
construction with MiCS partition strategy. Model states are partitioned
to the number of devices specified via ``mics_shard_size`` field in the
deepspeed config json file. The context manager also introduces
hierarchical communication method to reduce the cost of inter-node
communications, which can be enabled with
``mics_hierarchical_params_gather`` field in deepspeed config.
Args:
module (``torch.nn.Module``, optional): If provided, partition the model as
if it was constructed in the context.
data_parallel_group (``deepspeed.comm`` process group, optional):
The group of processes to partition among. Defaults to all processes.
mem_efficient_linear (bool, optional): Replace
torch.nn.functional.linear with an implementation that allows
DeepSpeed to partition parameters. Defaults to ``True``.
remote_device (string, optional): The initial device to store model
weights e.g., ``cpu``, ``nvme``. Passing ``"cpu"`` will create the model in CPU
memory. The model may still be moved to GPU based on the
offload settings for training. Defaults to param offload device if a config is
defined, otherwise GPU.
pin_memory (bool, optional): Potentially increase performance by
using pinned memory for model weights. ``remote_device`` must be
``"cpu"``. Defaults to pin_memory value in config, otherwise ``False``.
config_dict_or_path (dict or ``json file``, optional): If provided, provides configuration
for swapping fp16 params to NVMe.
config (dict or ``json file``, optional): Deprecated, use config_dict_or_path instead.
enabled (bool, optional): If ``False``, this context has no
effect. Defaults to ``True``.
dtype (``dtype``, optional): Can be used to change the data type of the parameters.
Supported options are ``torch.half`` and ``torch.float``. Defaults to ``None``
mpu (``object``, optional): A model parallelism unit object that implements get_{model,data}_parallel_{rank,group,world_size}.
This context follows the same logic as ``deepspeed.zero.Init()``, but
with the modification for partition size of each parameter.
Examples
--------
#. Allocate a model and partition it among all processes:
.. code-block:: python
# the config_dict_or_path is required to let the context manager know
                # how to partition the parameters.
# The configuration has to include the field ``mics_shard_size``
with deepspeed.zero.MiCS_Init(config_dict_or_path=ds_config):
model = MyLargeModel()
#. Allocate a model in pinned CPU memory and partition it among a subgroup of processes:
.. code-block:: python
with deepspeed.zero.MiCS_Init(data_parallel_group=mpu.get_data_parallel_group(),
remote_device="cpu",
                                              pin_memory=True,
config_dict_or_path=ds_config):
model = MyLargeModel()
#. Partition an already-allocated model in CPU memory:
.. code-block:: python
model = deepspeed.zero.MiCS_Init(module=model,
config_dict_or_path=ds_config)
"""
assert config_dict_or_path is not None, "Must provide configuration for MiCS Initialization"
_ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, mpu)
if not dist.is_initialized():
dist.init_distributed()
assert dist.is_initialized(), "Parameters cannot be scattered without initializing deepspeed.comm"
self.mics_comm_groups = create_mics_comm_groups(
_ds_config.mics_shard_size,
data_parallel_group,
            hierarchical_allgather=_ds_config.mics_hierarchical_params_gather,
mpu=mpu)
super().__init__(module, data_parallel_group, mem_efficient_linear, remote_device, pin_memory,
config_dict_or_path, config, enabled, dtype, mpu)
def _convert_to_deepspeed_param(self, param):
super()._convert_to_deepspeed_param(param)
# attach communication groups to every param
param.comm = self.mics_comm_groups
# record existing all_gather_coalesced implementation
# so that we can fallback later
old_all_gather_coalesced = param.all_gather_coalesced
def _param_all_gather_coalesced(params, safe_mode=False, param_buffers=None):
""""""
mics_comm_groups: MiCS_CommGroups = params[0].comm
hierarchical_all_gather = has_hierarchical_all_gather_groups(mics_comm_groups)
if dist.has_coalescing_manager() and hierarchical_all_gather:
return self._hierarchical_all_gather_params(params, param_buffers)
elif dist.has_coalescing_manager():
return self._flat_all_gather_with_coalescing_manager(params, param_buffers)
else:
return old_all_gather_coalesced(params, safe_mode)
# change the all_gather_coalesced method
param.all_gather_coalesced = _param_all_gather_coalesced
def _pre_all_gather(self, params, params_buffers=None):
# fetches from nvme if the partition is not available and in nvme
self._ensure_availability_of_partitioned_params(params)
for param in params:
if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
raise RuntimeError(param.ds_summary())
param.ds_status = ZeroParamStatus.INFLIGHT
# ensure that each rank has params in same order. the allgather
# is done by flattening the parameter list into a single tensor that
# can be allgathered in a single call - this means that if each rank
# gives a list of the same parameters in a different order we will
        # silently get incorrect parameter values, and run into correctness
        # issues that are very difficult to debug.
params = sorted(params, key=lambda p: p.ds_id)
return params, params_buffers
def _flat_all_gather_with_coalescing_manager(self, params, params_buffers=None):
""""""
        # we must change the status of the params
# and ensure they are on the device
params, params_buffers = self._pre_all_gather(params, params_buffers)
mics_comm_groups: MiCS_CommGroups = params[0].comm
param_shard_size = mics_comm_groups.param_shard_size
output_tensors = []
input_tensors = []
for i, p in enumerate(params):
t_size = p.ds_tensor.ds_numel * param_shard_size
if params_buffers is not None and params_buffers[i] is not None:
assert params_buffers[i].numel(
) == t_size, f'params_to_gather_buffers[{i}] size {params_buffers[i].numel()} does not match with t_size {t_size}'
flat_out = params_buffers[i]
else:
flat_out = torch.empty(t_size, dtype=p.dtype, device=self.local_device, requires_grad=False).view(-1)
output_tensors.append(flat_out)
_flat_input = p.ds_tensor.data.view(-1)
input_tensors.append(_flat_input)
all_gather_handle = dist.all_gather_coalesced(output_tensors,
input_tensors,
group=mics_comm_groups.param_shard_group,
async_op=True)
for idx, param in enumerate(params):
param.data = output_tensors[idx].narrow(0, 0, param.ds_numel).view(param.ds_shape).data
return MiCS_AllGatherCoalescedHandle(allgather_handle=all_gather_handle,
params=params,
partitions=[],
world_size=param_shard_size)
def _hierarchical_all_gather_params(self, params, params_buffers=None):
""""""
params, params_buffers = self._pre_all_gather(params, params_buffers)
mics_comm_groups: MiCS_CommGroups = params[0].comm
local_rank = dist.get_rank(group=mics_comm_groups.param_intra_node_group)
inter_node_comm_group = mics_comm_groups.param_inter_node_shard_group
intra_node_comm_group = mics_comm_groups.param_intra_node_group
param_shard_size = mics_comm_groups.param_shard_size
inter_node_size = dist.get_world_size(group=inter_node_comm_group)
intra_node_size = dist.get_world_size(group=intra_node_comm_group)
param_tensors = []
for i, p in enumerate(params):
param_size = p.ds_tensor.ds_numel * param_shard_size
if params_buffers is not None and params_buffers[i] is not None:
assert params_buffers[i].numel(
) == param_size, f'param_buffers[{i}] size {params_buffers[i].numel()} does not match with param_size {param_size}'
param_tensor = params_buffers[i]
else:
param_tensor = torch.empty(param_size, dtype=p.dtype, device=self.local_device,
requires_grad=False).view(-1)
param_tensors.append(param_tensor)
# inter node all-gather
inter_outputs = []
inter_inputs = []
for i, p in enumerate(params):
inter_size = p.ds_tensor.ds_numel * inter_node_size
_out = param_tensors[i].narrow(0, local_rank * inter_size, inter_size)
inter_outputs.append(_out)
inter_inputs.append(p.ds_tensor.data.view(-1).to(self.local_device))
# sync enqueue
dist.all_gather_coalesced(inter_outputs, inter_inputs, group=inter_node_comm_group, async_op=False)
# intra node all-gather
intra_outputs = []
intra_inputs = []
for i, p in enumerate(params):
# partition param into multiple chunks for allgather
            # because inter-node all-gather outputs are in contiguous memory,
            # while in param memory those inter-node data are placed in
            # different locations.
# each chunk is an intra-node output
param_chunk = param_tensors[i].view(
(inter_node_size, intra_node_size, p.ds_tensor.ds_numel)).narrow(1, local_rank, 1)
param_chunk.copy_(inter_outputs[i].detach().clone().view(param_chunk.size()))
output_chunks = torch.chunk(param_tensors[i], inter_node_size)
for j, _out in enumerate(output_chunks):
intra_chunk_size = intra_node_size * p.ds_tensor.ds_numel
local_offset = local_rank * p.ds_tensor.ds_numel
_in = param_tensors[i].narrow(0, j * intra_chunk_size + local_offset, p.ds_tensor.ds_numel)
intra_outputs.append(_out)
intra_inputs.append(_in)
all_gather_handle = dist.all_gather_coalesced(intra_outputs,
intra_inputs,
group=intra_node_comm_group,
async_op=True)
for i, param in enumerate(params):
param.data = param_tensors[i].narrow(0, 0, param.ds_numel).view(param.ds_shape).data
return MiCS_AllGatherCoalescedHandle(
allgather_handle=all_gather_handle,
params=params,
partitions=[],
world_size=param_shard_size,
)
def get_partition_dp_group(self, param):
return param.comm.param_shard_group
def get_partition_rank(self):
return self.mics_comm_groups.param_shard_rank
@property
def num_partitions(self):
return self.mics_comm_groups.param_shard_size
class MiCS_Offload(DeepSpeedZeRoOffload):
""" Wrapper to change the behavior for parameter sharding
"""
def __init__(self,
module,
timers,
ds_config,
overlap_comm=True,
prefetch_bucket_size=50000000,
max_reuse_distance=1000000000,
max_live_parameters=1000000000,
param_persistence_threshold=100000,
model_persistence_threshold=sys.maxsize,
offload_param_config=None,
mpu=None):
super().__init__(module, timers, ds_config, overlap_comm, prefetch_bucket_size, max_reuse_distance,
max_live_parameters, param_persistence_threshold, model_persistence_threshold,
offload_param_config, mpu)
def _convert_to_zero_parameters(self, ds_config, module, mpu):
""" overload the parent class function for convert the parameters
"""
log_dist(f'Convert to zero parameters from MiCS Offload manager', ranks=[0])
non_zero_params = [p for p in module.parameters() if not is_zero_param(p)]
if non_zero_params:
zero_params = [p for p in module.parameters() if is_zero_param(p)]
if zero_params:
zero_params[0].convert_to_zero_parameters(param_list=non_zero_params)
else:
group = None
if mpu:
group = mpu.get_data_parallel_group()
MiCS_Init(module=module,
data_parallel_group=group,
dtype=self.dtype,
config_dict_or_path=ds_config,
remote_device=self.offload_device,
pin_memory=self.offload_param_pin_memory,
mpu=mpu)
class MiCS_Optimizer(DeepSpeedZeroOptimizer_Stage3):
"""
MiCS Optimizer
"""
def __init__(self,
module,
init_optimizer,
timers,
ds_config,
static_loss_scale=1,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True,
contiguous_gradients=True,
reduce_bucket_size=500000000,
prefetch_bucket_size=50000000,
max_reuse_distance=1000000000,
max_live_parameters=1000000000,
param_persistence_threshold=100000,
model_persistence_threshold=sys.maxsize,
dp_process_group=None,
reduce_scatter=True,
overlap_comm=False,
offload_optimizer_config=None,
offload_param_config=None,
sub_group_size=1000000000000,
mpu=None,
clip_grad=0,
communication_data_type=torch.float16,
postscale_gradients=True,
gradient_predivide_factor=1,
gradient_accumulation_steps=1,
elastic_checkpoint=False,
aio_config=None):
log_dist("Init MiCS optimizer", ranks=[0])
super().__init__(module, init_optimizer, timers, ds_config, static_loss_scale, dynamic_loss_scale,
dynamic_loss_args, verbose, contiguous_gradients, reduce_bucket_size, prefetch_bucket_size,
max_reuse_distance, max_live_parameters, param_persistence_threshold,
model_persistence_threshold, dp_process_group, reduce_scatter, overlap_comm,
offload_optimizer_config, offload_param_config, sub_group_size, mpu, clip_grad,
communication_data_type, postscale_gradients, gradient_predivide_factor,
gradient_accumulation_steps, elastic_checkpoint, aio_config)
first_param = next(module.parameters())
# overload the dp_process_group and partition_count
assert hasattr(first_param, "comm"), " ".join([
"Sharded parameters don't have the MiCS_CommGroups attached.",
"Might due to the use of deepspeed.zero.Init context for initializing the weights.",
"To use MiCS sharding, please use deepspeed.zero.MiCS_Init instead for initializing parameter."
])
self.dp_process_group = first_param.comm.param_shard_group
self.partition_count = first_param.comm.param_shard_size
def initialize_ds_offload(
self,
module,
timers,
ds_config,
overlap_comm,
prefetch_bucket_size,
max_reuse_distance,
max_live_parameters,
param_persistence_threshold,
model_persistence_threshold,
offload_param_config,
mpu,
zpg=None,
zero_quantized_weights=False,
):
assert not zero_quantized_weights and zpg is None, "MiCS is mutually exclusive with ZeRO++"
return MiCS_Offload(module, timers, ds_config, overlap_comm, prefetch_bucket_size, max_reuse_distance,
max_live_parameters, param_persistence_threshold, model_persistence_threshold,
offload_param_config, mpu)
def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None:
grad_buffers = super().partition_grads(params_to_release, grad_partitions)
# perform all-reduce among replication groups
# the function will perform accumulation boundary check
self.allreduce_mics_shard_grads(params_to_release, grad_buffers)
@instrument_w_nvtx
def allreduce_mics_shard_grads(self, params, partitioned_grads_buffers: List[Tensor]):
"""
"""
# TODO: improve the condition check
if not self.is_gradient_accumulation_boundary or \
len(partitioned_grads_buffers) == 0:
return
mics_comm_groups: MiCS_CommGroups = params[0].comm
param_repli_group = mics_comm_groups.param_repli_group
param_repli_size = mics_comm_groups.param_repli_size
if param_repli_size is None or param_repli_size <= 1:
return
if not get_accelerator().on_accelerator(partitioned_grads_buffers[0]):
raise RuntimeError("Local sharding has no support for CPU offloading")
if dist.has_all_reduce_coalesced():
scale_tensors(partitioned_grads_buffers, param_repli_size)
dist.all_reduce_coalesced(tensors=partitioned_grads_buffers, group=param_repli_group)
else:
# manually coalescing all-reduce
aggregated_buffer: Tensor = torch.cat(partitioned_grads_buffers)
aggregated_buffer.div_(param_repli_size)
dist.all_reduce(aggregated_buffer, group=param_repli_group)
offset = 0
for grad_buff in partitioned_grads_buffers:
grad_buff.view(-1).copy_(aggregated_buffer.narrow(0, offset, grad_buff.numel()))
offset += grad_buff.numel()
def load_state_dict(self,
state_dict_list,
load_optimizer_states=True,
load_from_fp32_weights=False,
checkpoint_folder=None):
r""" Loading the ZeRO-3/MiCS partitioned checkpoints
Because the self.dp_process_group is replaced with the communicator for
partition group we can call the load_state_dict logic from ZeRO-3.
"""
super().load_state_dict(state_dict_list, load_optimizer_states, load_from_fp32_weights, checkpoint_folder)
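# --- Illustrative sketch (not part of the upstream implementation) ---
# Without dist.all_reduce_coalesced, allreduce_mics_shard_grads above manually coalesces:
# it concatenates the flat gradient shards, pre-divides by the replication degree,
# all-reduces the single buffer, and copies each slice back. The local (communication-free)
# part of that pattern, with illustrative names:
def _manual_coalesce_copy_back_sketch(flat_grad_buffers, repli_size=2):
    aggregated = torch.cat(flat_grad_buffers)
    aggregated.div_(repli_size)
    # ... a single dist.all_reduce(aggregated, group=param_repli_group) would run here ...
    offset = 0
    for buf in flat_grad_buffers:
        buf.view(-1).copy_(aggregated.narrow(0, offset, buf.numel()))
        offset += buf.numel()
    return flat_grad_buffers
# Example: _manual_coalesce_copy_back_sketch([torch.ones(4), torch.ones(2)]) halves both buffers.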
| 22,259 | 46.061311 | 138 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/runtime/checkpoint_engine/checkpoint_engine.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
class CheckpointEngine(object):
# init checkpoint engine for save/load
def __init__(self, config_params=None):
pass
def create(self, tag):
        # create a checkpoint on the given tag for save/load.
pass
def makedirs(self, path, exist_ok=False):
os.makedirs(path, exist_ok=exist_ok)
def save(self, state_dict, path: str):
pass
def load(self, path: str, map_location=None):
pass
def commit(self, tag):
# to tell checkpoint services if all files are ready.
pass
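# --- Illustrative usage sketch (not part of the upstream implementation) ---
# The call sequence a checkpoint engine is expected to support, as suggested by the
# method comments above: create(tag) once, save(...) once per shard file, then
# commit(tag) to signal that all files are ready. A dry run against the no-op base
# class (paths and file names below are illustrative):
def _example_checkpoint_protocol(engine: CheckpointEngine, ckpt_dir: str = "/tmp/ds_ckpt_example"):
    tag = "global_step0"
    engine.makedirs(os.path.join(ckpt_dir, tag), exist_ok=True)
    engine.create(tag)
    engine.save({"step": 0}, os.path.join(ckpt_dir, tag, "mp_rank_00_model_states.pt"))
    engine.commit(tag)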
| 653 | 20.096774 | 61 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/runtime/checkpoint_engine/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
| 140 | 22.5 | 44 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.utils import logger, log_dist
from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \
CheckpointEngine
class TorchCheckpointEngine(CheckpointEngine):
def __init__(self, config_params=None):
super().__init__(config_params)
def create(self, tag):
log_dist(f"[Torch] Checkpoint {tag} is about to be saved!", ranks=[0])
def save(self, state_dict, path: str):
logger.info(f"[Torch] Saving {path}...")
torch.save(state_dict, path)
logger.info(f"[Torch] Saved {path}.")
return None
def load(self, path: str, map_location=None):
logger.info(f"[Torch] Loading checkpoint from {path}...")
partition = torch.load(path, map_location=map_location)
logger.info(f"[Torch] Loaded checkpoint from {path}.")
return partition
def commit(self, tag):
logger.info(f"[Torch] Checkpoint {tag} is ready now!")
return True
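# --- Illustrative usage sketch (not part of the upstream implementation) ---
# A minimal save/load round trip with the torch-backed engine; the directory and
# file name below are placeholders:
def _example_torch_engine_roundtrip(ckpt_dir: str = "/tmp/ds_torch_ckpt_example"):
    import os
    engine = TorchCheckpointEngine()
    engine.makedirs(ckpt_dir, exist_ok=True)
    path = os.path.join(ckpt_dir, "mp_rank_00_model_states.pt")
    engine.create(tag="global_step0")
    engine.save({"step": 0, "weights": torch.zeros(2)}, path)
    engine.commit(tag="global_step0")
    return engine.load(path, map_location="cpu")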
| 1,060 | 29.314286 | 78 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/runtime/checkpoint_engine/nebula_checkpoint_engine.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import torch_nebula
from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \
CheckpointEngine
from deepspeed.utils import logger, log_dist
from deepspeed.nebula.constants import *
def _get_tag_from_path(path):
return os.path.basename(os.path.dirname(path))
class NebulaCheckpointEngine(CheckpointEngine):
def __init__(self, config_params=None):
super().__init__(config_params)
self.checkpoint = None
self.tag_flag = None
self.enable_nebula_load = config_params.enable_nebula_load
self.nebula_load_path = config_params.load_path
if self.nebula_load_path is None:
self.nebula_load_path = config_params.persistent_storage_path
nebula_config_params = {
NEBULA_PERSISTENT_STORAGE_PATH: config_params.persistent_storage_path,
NEBULA_PERSISTENT_TIME_INTERVAL: config_params.persistent_time_interval,
NEBULA_NUM_OF_VERSION_IN_RETENTION: config_params.num_of_version_in_retention,
}
torch_nebula.init(**nebula_config_params)
def create(self, tag):
log_dist(f"[Nebula] Start Checkpoint for tag:{tag}", ranks=[0])
        # -2 means: the caller must explicitly tell Nebula that the current
        # checkpoint is complete via the commit() method.
self.checkpoint = torch_nebula.Checkpoint(tag, -2)
def save(self, state_dict, path: str):
log_dist(f"[Nebula] Create dummy files for loading.")
torch.save("", path)
tag = _get_tag_from_path(path)
partition_name = os.path.basename(path)
logger.info(f"[Nebula] Saving {partition_name} under tag {tag}...")
self.checkpoint.save(partition_name, state_dict)
logger.info(f"[Nebula] Saved {partition_name} under tag {tag}.")
return None
def load(self, path: str, map_location=None):
tag = _get_tag_from_path(path)
first_load_flag = self.tag_flag is None or self.tag_flag == tag
if not self.enable_nebula_load and first_load_flag:
self.tag_flag = tag
logger.info(f"[Nebula] Disable nebula load. Loading checkpoint from {path} ...")
partition = torch.load(path, map_location=map_location)
logger.info(f"[Nebula] Disable nebula load. Loaded checkpoint from {path} .")
return partition
partition_name = os.path.basename(path)
logger.info(f"[Nebula] Loading {path} under tag {tag} from nebula path {self.nebula_load_path}...")
checkpoint = None
if tag in (None, 'latest', 'latest_universal'):
            # In some cases the tag in the DeepSpeed metadata (the "latest" file) is
            # inconsistent with the Nebula metadata, which makes loading with the DeepSpeed
            # tag fail. In that case we try to load the latest valid checkpoint from Nebula
            # (tier3 before tier1). In summary, when loading for a given tag fails, the
            # priority is: nebula tier3 latest > nebula tier1 latest.
checkpoint = torch_nebula.get_latest_checkpoint(persist_path=self.nebula_load_path)
else:
checkpoint = torch_nebula.get_checkpoint(tag=tag, persist_path=self.nebula_load_path)
if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''):
logger.info(
f"Unable to find valid checkpoint tag:{tag} from Nebula, try to get latest checkpoint again from nebula {self.nebula_load_path} path!"
)
# nebula tier3 latest
checkpoint = torch_nebula.get_latest_checkpoint(persist_path=self.nebula_load_path)
if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''):
logger.info(
f"Unable to find latest checkpoint from Nebula tier3, try to get latest checkpoint again from nebula tier1 path!"
)
# nebula tier1 latest
checkpoint = torch_nebula.get_latest_checkpoint()
logger.warning(f"Unable to find valid checkpoint from Nebula under tag:{tag}.")
return None
tag = checkpoint.tag
self.tag_flag = -1
partition = checkpoint.load(partition_name, map_location=map_location)
logger.info(f"[Nebula] Loaded {path} under tag {tag} from {self.nebula_load_path}.")
return partition
def commit(self, tag):
        # nebula commit will be called when all files under the given tag are ready to be persisted asynchronously.
logger.info(f"[Nebula] all files for {tag} are saved in tier1. It is ready to start persisting")
commit_rls = self.checkpoint.commit()
if not commit_rls:
logger.error(f"[Nebula] failed to commit the checkpoint, please check the log.")
return False
return commit_rls
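# --- Illustrative sketch (not part of the upstream implementation) ---
# The fallback order documented in load() above is: requested tag -> nebula tier3 latest
# -> nebula tier1 latest. Expressed as a pure function with injected lookups so it runs
# without torch_nebula (names are illustrative; note that the code above returns None
# after falling back to tier1, whereas this sketch returns whatever the chain finds):
def _example_resolve_checkpoint(tag, get_by_tag, get_tier3_latest, get_tier1_latest):
    def _valid(ckpt):
        return ckpt is not None and getattr(ckpt, "tag", "") != ""
    ckpt = get_tier3_latest() if tag in (None, "latest", "latest_universal") else get_by_tag(tag)
    if not _valid(ckpt):
        ckpt = get_tier3_latest()
    if not _valid(ckpt):
        ckpt = get_tier1_latest()
    return ckpt if _valid(ckpt) else None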
| 4,975 | 45.074074 | 150 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/runtime/pipe/engine.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from types import MethodType
import torch
from deepspeed import comm as dist
from deepspeed.utils import logger
from deepspeed.utils.timer import ThroughputTimer
from deepspeed.accelerator import get_accelerator
from ..engine import DeepSpeedEngine, MEMORY_OPT_ALLREDUCE_SIZE
from ..utils import PartitionedTensor
from ..dataloader import RepeatingLoader
from ..zero.config import ZeroStageEnum
from ..activation_checkpointing import checkpointing as ds_checkpointing
from .module import PipelineModule, PipelineError
from . import p2p
from . import schedule
TARGET_ID = -2
LOG_STAGE = -2
DATA_PARALLEL_ID = -2
def is_even(number):
return number % 2 == 0
mem_alloced = 0
mem_cached = 0
def _tensor_bytes(tensor):
return tensor.numel() * tensor.element_size()
class PipelineEngine(DeepSpeedEngine):
""" A training engine hybrid pipeline, data, and model parallel training.
This engine is created by ``deepspeed.initialize()`` when a :class:`PipelineModule`
is provided.
"""
ID_TO_DTYPE = [
torch.float32, torch.float64, torch.complex64, torch.complex128, torch.float16, torch.bfloat16, torch.uint8,
torch.int8, torch.int16, torch.int32, torch.int64, torch.bool
]
DTYPE_TO_ID = {dtype: id_ for id_, dtype in enumerate(ID_TO_DTYPE)}
def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs):
super().__init__(*super_args, **super_kwargs)
        assert isinstance(self.module, PipelineModule), "model must be a PipelineModule"
assert self.zero_optimization_stage() < 2, "ZeRO-2 and ZeRO-3 are incompatible with pipeline parallelism"
# We schedule the all-reduces, so disable it in super().backward()
self.enable_backward_allreduce = False
self.has_bool_tensors = has_bool_tensors
self.eval_return_logits = False
self.outputs = None
# used to disable the pipeline all-reduce when used with 1-bit Adam/1-bit LAMB
self.pipeline_enable_backward_allreduce = True
if self.elasticity_enabled():
if not self.is_elastic_model_parallel_supported():
assert not self.elasticity_enabled(), "Elasticity is not currently supported" \
" with pipeline parallelism."
# pipeline step for logging
self.log_batch_step_id = -1
self.micro_batch_size = self.train_micro_batch_size_per_gpu()
self.micro_batches = self.gradient_accumulation_steps()
# Set Grid and Communication Groups
self.grid = self.module._grid
if self.grid.get_global_rank() == 0:
logger.info(f'CONFIG: micro_batches={self.micro_batches} '
f'micro_batch_size={self.micro_batch_size}')
self.global_rank = self.grid.get_global_rank()
assert self.dp_world_size == self.grid.data_parallel_size
assert self.train_batch_size() == \
self.micro_batch_size * self.micro_batches * self.grid.data_parallel_size
        # Set Stage Info
self.num_stages = self.grid.pipe_parallel_size
self.stage_id = self.grid.get_stage_id()
self.prev_stage = self.stage_id - 1
self.next_stage = self.stage_id + 1
self.data_iterator = None
self.batch_fn = None
self._force_grad_boundary = False
self.batch_timer = ThroughputTimer(batch_size=self.train_batch_size(),
logging_fn=self.tput_log,
monitor_memory=False,
steps_per_output=self.steps_per_print())
        # PipelineEngine needs to handle data loading specially due to only the first
        # and last stages loading inputs/labels. We construct a sampler that shards
        # the dataset across the data parallel ranks.
if self.training_data:
self._build_data_iter(self.training_data)
self.is_pipe_parallel = self.grid.pipe_parallel_size > 1
self.is_data_parallel = self.grid.data_parallel_size > 1
self.is_model_parallel = self.grid.model_parallel_size > 1
# Partition input/output buffers
# XXX temporarily disable while I revert some partition hacks.
self.is_pipe_partitioned = self.is_model_parallel
self.is_grad_partitioned = self.is_model_parallel
model_parameters = filter(lambda p: p.requires_grad, self.module.parameters())
num_params = sum([p.numel() for p in model_parameters])
unique_params = num_params
# Subtract tied parameters if we don't own them
if self.module.tied_comms:
tied_params = 0
for key, d in self.module.tied_comms.items():
if self.global_rank != min(d['ranks']):
tied_params += sum(p.numel() for p in d['module'].parameters())
unique_params -= tied_params
params_tensor = torch.LongTensor(data=[num_params, unique_params]).to(self.device)
dist.all_reduce(params_tensor, group=self.grid.get_model_parallel_group())
params_tensor = params_tensor.tolist()
total_params = params_tensor[0]
unique_params = params_tensor[1]
if self.grid.data_parallel_id == 0:
logger.info(f'RANK={self.global_rank} '
f'STAGE={self.stage_id} '
f'LAYERS={self.module._local_stop - self.module._local_start} '
f'[{self.module._local_start}, {self.module._local_stop}) '
f'STAGE_PARAMS={num_params} ({num_params/1e6:0.3f}M) '
f'TOTAL_PARAMS={total_params} ({total_params/1e6:0.3f}M) '
f'UNIQUE_PARAMS={unique_params} ({unique_params/1e6:0.3f}M)')
#initialize peer-2-peer communication and allreduce groups
if self.is_pipe_parallel:
p2p.init_process_groups(self.grid)
# Pipeline buffers
self.num_pipe_buffers = 0
self.pipe_buffers = {
'inputs': [], # batch input and received activations
'labels': [], # labels from batch input
'outputs': [], # activations
'output_tensors': [], # tensor object to preserve backward graph
}
self.pipe_recv_buf = None
self.grad_layer = None
self.meta_buffer = None
self.first_output_send = True
self.first_gradient_send = True
#stores the loss for the current micro batch being processed
self.loss = torch.tensor(0.0).to(self.device)
#stores the loss for the entire batch
self.total_loss = None
self.agg_loss = torch.tensor(0.0, requires_grad=False).to(self.device)
self.dp_group_loss = torch.tensor(0.0, requires_grad=False).to(self.device)
if self._config.pipeline['activation_checkpoint_interval'] > 0:
self.module.activation_checkpoint_interval = self._config.pipeline['activation_checkpoint_interval']
self.module.checkpoint_parallel_write_pipeline = self._config.checkpoint_parallel_write_pipeline
if self.is_last_stage():
self.loss_model = self.module.loss_fn
self.has_attention_mask = self.module.__class__.__name__ == 'GPT2ModelPipe'
# Initialize pipeline communicators. Just send a 0.
if is_even(self.stage_id):
if not self.is_last_stage():
p2p.send(self.loss, self.next_stage)
if not self.is_first_stage():
p2p.recv(self.loss, self.prev_stage)
else:
if not self.is_first_stage():
p2p.recv(self.loss, self.prev_stage)
if not self.is_last_stage():
p2p.send(self.loss, self.next_stage)
# XXX look into timer reporting timing
# Initialize some timers because of early weirdness.
if self.wall_clock_breakdown():
self.timers('forward_microstep').start()
self.timers('forward_microstep').stop()
self.timers('backward_microstep').start()
self.timers('backward_microstep').stop()
self.timers('backward_inner_microstep').start()
self.timers('backward_inner_microstep').stop()
self.timers('backward_allreduce_microstep').start()
self.timers('backward_allreduce_microstep').stop()
self.timers('backward_allreduce').start()
self.timers('backward_allreduce').stop()
self.timers('step_microstep').start()
self.timers('step_microstep').stop()
def set_has_attention_mask(self, value):
assert isinstance(value, bool)
self.has_attention_mask = value
def _build_data_iter(self, dataset):
sampler = torch.utils.data.distributed.DistributedSampler(dataset,
num_replicas=self.dp_world_size,
rank=self.mpu.get_data_parallel_rank(),
shuffle=False)
# Build a loader and make it repeating.
pipe_dataloader = self.deepspeed_io(dataset, data_sampler=sampler)
pipe_dataloader = RepeatingLoader(pipe_dataloader)
self.set_dataloader(pipe_dataloader)
def _exec_reduce_tied_grads(self):
# We need to run this first to write to self.averaged_gradients;
# since this class turns `enable_backward_allreduce` off,
# `self.overlapping_partition_gradients_reduce_epilogue()` defined in the DeepSpeedEngine
# never actually runs. I suspect this is because of efficiency problems; get_flat_partition in
# stage2.py might do something expensive; someone will have to look into that later. But
# in the meantime, this fixes ZeRO2 + Pipelining enough to run a demo. Further profiling
# needed to decide if it actually breaks everything.
# (see https://github.com/EleutherAI/gpt-neox/issues/62#issuecomment-761471944)
if self.zero_optimization_partition_gradients():
self.optimizer.overlapping_partition_gradients_reduce_epilogue()
weight_group_list = self.module.get_tied_weights_and_groups()
for weight, group in weight_group_list:
grad = weight._hp_grad if self.bfloat16_enabled() else weight.grad
dist.all_reduce(grad, group=group)
def _exec_reduce_grads(self):
self._force_grad_boundary = True
if self.pipeline_enable_backward_allreduce:
if self.bfloat16_enabled():
if self.zero_optimization_stage() < ZeroStageEnum.gradients:
self._bf16_reduce_grads()
else:
raise NotImplementedError("PP+BF16 only work for ZeRO Stage 1")
else:
self.allreduce_gradients(bucket_size=MEMORY_OPT_ALLREDUCE_SIZE)
self._force_grad_boundary = False
def _bf16_reduce_grads(self):
# Make our own list of gradients from the optimizer's FP32 grads
grads = []
self.buffered_allreduce_fallback(grads=self.optimizer.get_grads_for_reduction(),
elements_per_buffer=MEMORY_OPT_ALLREDUCE_SIZE)
def _reserve_pipe_buffers(self, num_buffers):
"""Ensure that each pipeline buffer has at least ``num_buffers`` slots.
This method only reserves slots and does not allocate tensors.
Args:
num_buffers (int): The number of buffers to reserve.
"""
if self.num_pipe_buffers >= num_buffers:
return
num_added = num_buffers - self.num_pipe_buffers
for key in self.pipe_buffers:
self.pipe_buffers[key].extend([None] * num_added)
self.num_pipe_buffers = num_buffers
def reset_activation_shape(self):
"""Reset the buffers when the shape of activation and gradient change.
For example, for curriculum learning that changes the seqlen of each
sample, we need to call this whenever the seqlen is going to change.
"""
self.first_output_send = True
self.pipe_recv_buf = None
self.grad_layer = None
self.meta_buffer = None
def train_batch(self, data_iter=None):
"""Progress the pipeline to train the next batch of data. The engine will ingest
``self.train_batch_size()`` total samples collectively across all workers.
        An iterator over training data should be provided as an argument
unless ``deepspeed.initialize()`` was provided a training set. In that event,
the training data will automatically be read.
.. warning::
A total of ``self.gradient_accumulation_steps()`` entries will be pulled
from ``data_iter`` by each pipeline. There must be sufficient
data left in ``data_iter`` or else a ``StopIteration`` will halt training.
DeepSpeed provides a convenience class :class:`deepspeed.utils.RepeatingLoader`
that wraps data loaders to automatically restart upon a ``StopIteration``.
Args:
data_iter (Iterator, optional): Iterator of training data.
Returns:
The arithmetic mean of the losses computed this batch.
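        Example (a minimal sketch; ``engine`` is the object returned by ``deepspeed.initialize()``
        and ``loader``/``num_steps`` are placeholders):
        .. code-block:: python
            data_iter = iter(deepspeed.utils.RepeatingLoader(loader))
            for _ in range(num_steps):
                loss = engine.train_batch(data_iter=data_iter)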
"""
if not torch._C.is_grad_enabled():
raise RuntimeError(f'train_batch() requires gradients enabled. Use eval_batch() instead.')
# Curriculum learning could change activation shape
if self.curriculum_enabled_legacy():
new_difficulty = self.curriculum_scheduler_legacy.update_difficulty( \
self.global_steps + 1)
if self.global_steps == 0 or self.curriculum_scheduler_legacy.first_step:
self.reset_activation_shape()
self.curriculum_scheduler_legacy.first_step = False
elif new_difficulty != self.curriculum_scheduler_legacy.get_difficulty( \
self.global_steps):
self.reset_activation_shape()
if data_iter:
self.set_dataiterator(data_iter)
self.module.train()
self.total_loss = None
self._compute_loss = True
# Do the work
self.timers('train_batch').start()
sched = schedule.TrainSchedule(micro_batches=self.micro_batches,
stages=self.num_stages,
stage_id=self.stage_id)
self._exec_schedule(sched)
self.agg_train_loss = self._aggregate_total_loss()
self.timers('train_batch').stop()
if self.global_steps % self.steps_per_print() == 0:
if self.global_rank == 0:
elapsed = self.timers('train_batch').elapsed(reset=True) / 1000.0
iter_time = elapsed / self.steps_per_print()
tput = self.train_batch_size() / iter_time
print(f'steps: {self.global_steps} '
f'loss: {self.agg_train_loss:0.4f} '
f'iter time (s): {iter_time:0.3f} '
f'samples/sec: {tput:0.3f}')
# Monitoring
if self.global_rank == 0 and self.monitor.enabled:
self.summary_events = [(f'Train/Samples/train_loss', self.agg_train_loss.mean().item(),
self.global_samples)]
self.monitor.write_events(self.summary_events)
if self.wall_clock_breakdown() and self.global_steps % self.steps_per_print() == 0:
self.timers.log(['pipe_send_output', 'pipe_send_grad', 'pipe_recv_input', 'pipe_recv_grad'])
# TODO: should return precisely what loss returned and allow others to be queried?
return self.agg_train_loss
def eval_batch(self, data_iter, return_logits=False, compute_loss=True, reduce_output='avg'):
"""Evaluate the pipeline on a batch of data from ``data_iter``. The
engine will evaluate ``self.train_batch_size()`` total samples
collectively across all workers.
This method is equivalent to:
.. code-block:: python
module.eval()
with torch.no_grad():
output = module(batch)
.. warning::
A total of ``self.gradient_accumulation_steps()`` entries will be pulled
from ``data_iter`` by each pipeline. There must be sufficient
data left in ``data_iter`` or else a ``StopIteration`` will halt training.
DeepSpeed provides a convenience class :class:`deepspeed.utils.RepeatingLoader`
that wraps data loaders to automatically restart upon a ``StopIteration``.
Args:
data_iter (Iterator): Iterator of data to evaluate.
Returns:
The arithmetic mean of the losses computed this batch.
"""
self.eval_return_logits = return_logits
self.module.eval()
# Curriculum learning could change activation shape
if self.curriculum_enabled_legacy():
new_difficulty = self.curriculum_scheduler_legacy.update_difficulty( \
self.global_steps + 1)
if self.global_steps == 0 or self.curriculum_scheduler_legacy.first_step:
self.reset_activation_shape()
self.curriculum_scheduler_legacy.first_step = False
elif new_difficulty != self.curriculum_scheduler_legacy.get_difficulty( \
self.global_steps):
self.reset_activation_shape()
eval_output = None
self._compute_loss = compute_loss
# Use the provided data iterator
train_iterator = self.data_iterator
self.set_dataiterator(data_iter)
# Do the work
sched = schedule.InferenceSchedule(micro_batches=self.micro_batches,
stages=self.num_stages,
stage_id=self.stage_id)
        # prevent deadlock when running multiple evaluations in sequence
dist.barrier()
with torch.no_grad():
self._exec_schedule(sched)
if self.is_last_stage():
eval_output = self._reduce_outputs(self.fwd_outputs, reduce=reduce_output)
if compute_loss:
eval_output = self._bcast_pipe_scalar(eval_output)
if self.global_rank == 0 and self.monitor.enabled:
self.summary_events = [(f'Train/Samples/eval_loss', eval_output.mean().item(), self.global_samples)]
self.monitor.write_events(self.summary_events)
# Restore the training iterator
self.set_dataiterator(train_iterator)
# Reset any buffers that may have been populated during the forward passes.
#ds_checkpointing.reset()
self.eval_return_logits = False
if return_logits:
outputs = self.outputs
self.outputs = None
return eval_output, outputs
return eval_output
def set_train_batch_size(self, train_batch_size):
"""Adjust the global batch size by increasing or decreasing the number of
micro-batches (i.e., gradient accumulation steps). The size of each micro-batch
(i.e., ``train_micro_batch_size_per_gpu``) is not changed.
Args:
train_batch_size (int): The new global batch size for training.
Raises:
ValueError: if ``train_batch_size`` is not divisible by the
configured micro-batch size and data parallelism.
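        Example: with ``train_micro_batch_size_per_gpu=2`` and a data parallel degree of 4,
        ``set_train_batch_size(32)`` yields ``32 / (2 * 4) = 4`` gradient accumulation steps.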
"""
super().set_train_batch_size(train_batch_size)
self.micro_batches = self.gradient_accumulation_steps()
def is_first_stage(self):
"""True if this process is in the first stage in the pipeline."""
return self.stage_id == 0
def is_last_stage(self):
"""True if this process is in the last stage in the pipeline."""
return self.stage_id == self.num_stages - 1
def _reduce_outputs(self, outputs, reduce='avg', reduce_dp=True):
if reduce is None:
return outputs
if reduce.lower() == 'avg':
# first sum over all microbatches
if torch.is_tensor(outputs[0]):
reduced = sum(outputs)
else:
assert isinstance(outputs, (list, tuple))
reduced = [torch.zeros_like(o) for o in outputs[0]]
                for out in outputs:
                    for idx, o in enumerate(out):
                        reduced[idx] += o
# Average over the microbatches
reduced = self._scale_loss_by_gas(reduced)
# Average over DP groups
if reduce_dp and self.is_data_parallel:
if torch.is_tensor(reduced):
dist.all_reduce(reduced, group=self.mpu.get_data_parallel_group())
reduced /= self.dp_world_size
else:
for idx in range(len(reduced)):
dist.all_reduce(reduced[idx], group=self.mpu.get_data_parallel_group())
reduced[idx] /= self.dp_world_size
return reduced
else:
raise NotImplementedError(f'reduction type {reduce} not supported.')
def _bcast_pipe_scalar(self, data, src_rank=None, dtype=torch.float32):
# Default to last stage (e.g., for broadcasting loss)
if src_rank is None:
src_rank = self.grid.stage_to_global(self.num_stages - 1)
assert src_rank in self.grid.pp_group
if self.global_rank == src_rank:
result = data.clone().detach().type(dtype).to(self.device)
else:
result = torch.Tensor([0.]).type(dtype).to(self.device)
dist.broadcast(tensor=result, src=src_rank, group=self.mpu.get_pipe_parallel_group())
return result
def _aggregate_total_loss(self):
        # Scale loss, average among DP ranks, and bcast loss to the rest of my pipeline group
if self.is_last_stage():
loss = self._scale_loss_by_gas(self.total_loss)
self.dp_group_loss = loss.clone().detach()
## Average loss across all data-parallel groups
agg_loss = self.dp_group_loss.clone().detach()
#print(f'RANK={self.global_rank} bcast SENDER src={self.global_rank} group={self.grid.pp_group}', flush=True)
if self.is_data_parallel:
dist.all_reduce(agg_loss, group=self.mpu.get_data_parallel_group())
agg_loss /= self.dp_world_size
assert self.global_rank in self.grid.pp_group
losses = torch.Tensor([self.dp_group_loss, agg_loss]).to(self.device)
if self.is_pipe_parallel:
dist.broadcast(tensor=losses, src=self.global_rank, group=self.mpu.get_pipe_parallel_group())
else:
# Get loss from last stage
src_rank = self.grid.stage_to_global(self.num_stages - 1)
assert src_rank in self.grid.pp_group
losses = torch.Tensor([0., 0.]).to(self.device)
dist.broadcast(tensor=losses, src=src_rank, group=self.grid.get_pipe_parallel_group())
self.dp_group_loss = losses[0].clone().detach()
agg_loss = losses[1].clone().detach()
return agg_loss
def set_dataloader(self, loader):
""""""
if self.is_first_stage() or self.is_last_stage():
self.training_dataloader = loader
self.data_iterator = iter(self.training_dataloader)
def set_dataiterator(self, iterator):
""" Store an iterator to sample for training data. """
if self.is_first_stage() or self.is_last_stage():
self.training_dataloader = None
self.data_iterator = iterator
def set_batch_fn(self, fn):
"""Execute a post-processing function on input data.
Args:
fn (function): The function to run.
"""
self.batch_fn = fn
def is_gradient_accumulation_boundary(self):
"""True if the engine is executing a gradient reduction or optimizer step instruction.
This is overridden from :class:`DeepSpeedEngine` to force reductions
and steps when the pipeline engine is instructed to do so.
Returns:
bool: whether reductions and optimizer steps should occur.
"""
return self._force_grad_boundary
def log_for_device(self, *msg):
if LOG_STAGE == self.stage_id or LOG_STAGE == -1:
if DATA_PARALLEL_ID == self.grid.data_parallel_id or DATA_PARALLEL_ID == -1:
print(
f'RANK={dist.get_rank()} '
f'PIPE-ID={self.stage_id} '
f'DATA-ID={self.grid.data_parallel_id} '
f'MBATCH-ID={self.microbatch_id} '
f'STEP-ID={self.log_batch_step_id} '
'::',
*msg,
flush=True)
def tput_log(self, *msg):
if self.global_rank == 0 and self.global_steps % self.steps_per_print() == 0:
print(*msg)
def _next_batch(self):
# If using 3D parallelism, only some first-stage ranks may do IO
batch = None
if self.data_iterator is not None:
batch = next(self.data_iterator)
# Any post-processing, like broadcasting across a slice-parallel group.
if self.batch_fn:
batch = self.batch_fn(batch)
return batch
def _exec_forward_pass(self, buffer_id):
self.tput_timer.start()
self.mem_status('BEFORE FWD', reset_max=True)
if isinstance(self.pipe_buffers['inputs'][buffer_id], tuple):
inputs = tuple(t.clone() for t in self.pipe_buffers['inputs'][buffer_id])
else:
inputs = self.pipe_buffers['inputs'][buffer_id].clone()
# collect the partitioned input from the previous stage
if self.is_pipe_partitioned and not self.is_first_stage():
part_input = PartitionedTensor.from_meta(meta=inputs[0],
local_part=inputs[1],
group=self.grid.get_slice_parallel_group())
inputs = (part_input.full(), *inputs[2:])
inputs[0].requires_grad = True
# skip mask
#inputs[1].requires_grad = True
part_input = None
inputs = inputs[0] if len(inputs) == 1 else inputs
self.pipe_buffers['inputs'][buffer_id] = inputs
# Zero out the gradients each time we use the tensor because only the data in
# tensor changes across batches
self._zero_grads(inputs)
outputs = super().forward(inputs)
# Reset activation checkpointing buffers.
# Need to call this between evaluation iterations
if not self.module.training:
ds_checkpointing.reset()
# Partition the outputs if we are not the last stage
if self.is_pipe_partitioned and not self.is_last_stage():
if isinstance(outputs, tuple):
first_output = outputs[0]
# TODO: Improve pipe partitioning to pass multiple tensors that require grads
assert all([torch.is_tensor(elt) and elt.requires_grad is False for elt in outputs[1:]])
outputs_tail = outputs[1:]
elif torch.is_tensor(outputs):
first_output = outputs
outputs_tail = []
else:
raise ValueError("expecting a tensor or a tuple of tensors")
part = PartitionedTensor(tensor=first_output, group=self.grid.get_slice_parallel_group())
# Clear the large output data, but save the computation graph
first_output.data = torch.zeros(1)
self.pipe_buffers['output_tensors'][buffer_id] = first_output
# Inject the partitioned tensor into the output before sending
outputs = (part.to_meta(), part.data(), *outputs_tail)
part = None
self.pipe_buffers['outputs'][buffer_id] = outputs
# Optionally compute loss on the last device
if self.is_last_stage():
if self._compute_loss and self.module.loss_fn is not None:
labels = self.pipe_buffers['labels'][buffer_id]
self.loss = self.module.loss_fn(outputs, labels)
else:
# Some models just return loss from forward()
self.loss = outputs
if self.eval_return_logits:
self.outputs = outputs
if isinstance(self.loss, torch.Tensor):
self.fwd_outputs.append(self.loss.detach())
if self.total_loss is None:
self.total_loss = torch.zeros_like(self.loss)
self.total_loss += self.loss.detach()
else:
self.fwd_outputs.append([l.detach() for l in self.loss])
if self.total_loss is None:
self.total_loss = [torch.zeros_like(l) for l in self.loss]
for idx, l in enumerate(self.loss):
self.total_loss[idx] += l.detach()
def _exec_backward_pass(self, buffer_id):
assert self.optimizer is not None, "must provide optimizer during " \
"init in order to use backward"
self.mem_status('BEFORE BWD', reset_max=True)
# The last stage just runs backward on the loss using DeepSpeed's typical
# mechanisms.
if self.is_last_stage():
super().backward(self.loss)
self.mem_status('AFTER BWD')
return
outputs = self.pipe_buffers['outputs'][buffer_id]
if self.wall_clock_breakdown():
self.timers('backward_microstep').start()
self.timers('backward').start()
self.timers('backward_inner_microstep').start()
self.timers('backward_inner').start()
# Reconstruct if we previously partitioned the output. We must be
# careful to also restore the computational graph of the tensors we partitioned.
if self.is_pipe_partitioned:
if self.is_grad_partitioned:
part_output = PartitionedTensor.from_meta(meta=outputs[0],
local_part=outputs[1],
group=self.grid.get_slice_parallel_group())
self.pipe_buffers['output_tensors'][buffer_id].data = part_output.full()
outputs = (self.pipe_buffers['output_tensors'][buffer_id], *outputs[2:])
else:
# Already restored from partition
self.pipe_buffers['output_tensors'][buffer_id].data = outputs[0]
outputs = (self.pipe_buffers['output_tensors'][buffer_id], *outputs[1:])
grad_tensors = self.grad_layer
if self.is_grad_partitioned:
#print(f'RANK={self.global_rank} BEFORE-BWD restoring grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}')
part_grad = PartitionedTensor.from_meta(meta=self.grad_layer[0],
local_part=self.grad_layer[1],
group=self.grid.get_slice_parallel_group())
grad_tensors = (part_grad.full(), *grad_tensors[2:])
part_grad = None
#print(f'RANK={self.global_rank} BEFORE-BWD restored grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}')
if self.bfloat16_enabled() and not self.is_last_stage():
# manually call because we don't call optimizer.backward()
self.optimizer.clear_lp_grads()
# This handles either a single tensor or tuple of tensors.
if isinstance(outputs, tuple):
out_tensors = [t for t in outputs if t.is_floating_point()]
assert len(out_tensors) == len(grad_tensors)
torch.autograd.backward(tensors=out_tensors, grad_tensors=grad_tensors)
else:
torch.autograd.backward(tensors=(outputs, ), grad_tensors=(grad_tensors, ))
if self.bfloat16_enabled() and not self.is_last_stage():
# manually call because we don't call optimizer.backward()
self.optimizer.update_hp_grads(clear_lp_grads=False)
# Free up the memory from the output of forward()
self.pipe_buffers['output_tensors'][buffer_id] = None
self.pipe_buffers['outputs'][buffer_id] = None
grad_tensors = None
if self.wall_clock_breakdown():
self.timers('backward_inner').stop()
self.timers('backward_inner_microstep').stop()
self.timers('backward').stop()
self.timers('backward_microstep').stop()
self.mem_status('AFTER BWD')
def _exec_load_micro_batch(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('batch_input').start()
batch = self._next_batch()
if self.is_first_stage():
loaded = None
if torch.is_tensor(batch[0]):
loaded = batch[0].clone().to(self.device).detach()
loaded.requires_grad = loaded.is_floating_point()
else:
assert isinstance(batch[0], (tuple, list))
# Assume list or tuple
loaded = []
for x in batch[0]:
assert torch.is_tensor(x)
mine = x.clone().detach().to(self.device)
mine.requires_grad = mine.is_floating_point()
loaded.append(mine)
loaded = tuple(loaded)
self.pipe_buffers['inputs'][buffer_id] = loaded
if self.is_last_stage():
loaded = batch[1]
if torch.is_tensor(batch[1]):
loaded = batch[1].to(self.device)
elif isinstance(batch[1], tuple):
loaded = []
for x in batch[1]:
assert torch.is_tensor(x)
x = x.to(self.device).detach()
loaded.append(x)
loaded = tuple(loaded)
self.pipe_buffers['labels'][buffer_id] = loaded
if self.wall_clock_breakdown():
self.timers('batch_input').stop()
def _send_tensor_meta(self, buffer, recv_stage):
""" Communicate metadata about upcoming p2p transfers.
Metadata is communicated in this order:
            * type (0: tensor, 1: list [unused], 2: tuple)
            * num_tensors if type=list or tuple
            foreach tensor in buffer:
                * dtype (tuples only)
                * ndims
                * shape
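        Example (illustrative): for a tuple of two fp16 tensors with shapes (2, 3) and (5,),
        the sender transmits type=2, count=2, and then each tensor's dtype id, ndims, and shape.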
"""
send_bytes = 0
if isinstance(buffer, torch.Tensor):
type_tensor = torch.LongTensor(data=[0]).to(self.device)
p2p.send(type_tensor, recv_stage)
send_shape = torch.LongTensor(data=buffer.size()).to(self.device)
send_ndims = torch.LongTensor(data=[len(buffer.size())]).to(self.device)
p2p.send(send_ndims, recv_stage)
p2p.send(send_shape, recv_stage)
send_bytes += _tensor_bytes(buffer)
elif isinstance(buffer, list):
assert (False)
type_tensor = torch.LongTensor(data=[1]).to(self.device)
p2p.send(type_tensor, recv_stage)
count_tensor = torch.LongTensor(data=[len(buffer)]).to(self.device)
p2p.send(count_tensor, recv_stage)
for tensor in buffer:
assert isinstance(tensor, torch.Tensor)
send_shape = torch.LongTensor(data=tensor.size()).to(self.device)
send_ndims = torch.LongTensor(data=[len(tensor.size())]).to(self.device)
p2p.send(send_ndims, recv_stage)
p2p.send(send_shape, recv_stage)
send_bytes += _tensor_bytes(tensor)
elif isinstance(buffer, tuple):
type_tensor = torch.LongTensor(data=[2]).to(self.device)
p2p.send(type_tensor, recv_stage)
count_tensor = torch.LongTensor(data=[len(buffer)]).to(self.device)
p2p.send(count_tensor, recv_stage)
for idx, tensor in enumerate(buffer):
assert isinstance(tensor, torch.Tensor)
send_shape = torch.LongTensor(data=tensor.size()).to(self.device)
send_ndims = torch.LongTensor(data=[len(tensor.size())]).to(self.device)
send_dtype = torch.LongTensor(data=[self.DTYPE_TO_ID[tensor.dtype]]).to(self.device)
p2p.send(send_dtype, recv_stage)
p2p.send(send_ndims, recv_stage)
p2p.send(send_shape, recv_stage)
# Useful for performance debugging.
'''
new_bytes = _tensor_bytes(tensor)
send_bytes += _tensor_bytes(tensor)
# Useful for performance debugging.
if self.grid.data_parallel_id == 0:
print(
f'STAGE={self.stage_id} pipe-send-volume[{idx}]: shape={send_shape} {new_bytes/1024**2:0.2f}MB'
)
'''
else:
raise NotImplementedError(f'Could not send meta type {type(buffer)}')
# Useful for performance debugging.
'''
if self.grid.data_parallel_id == 0:
print(f'STAGE={self.stage_id} pipe-send-volume: {send_bytes/1024**2:0.2f}MB')
'''
def _recv_tensor_meta(self, send_stage):
"""Receive metadata about upcoming p2p transfers and return allocated buffers.
Metadata is communicated in this order:
            * type (0: tensor, 1: list [unused], 2: tuple)
            * num_tensors if type=list or tuple
            foreach tensor in buffer:
                * dtype (tuples only)
                * ndims
                * shape
Returns:
Allocated buffer for receiving from send_stage.
"""
type_tensor = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(type_tensor, send_stage)
recv_type = type_tensor.item()
# A single tensor will be sent.
if recv_type == 0:
recv_ndims = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(recv_ndims, send_stage)
recv_ndims = recv_ndims.item()
recv_shape = torch.LongTensor([1] * recv_ndims).to(self.device)
p2p.recv(recv_shape, send_stage)
recv_shape = recv_shape.tolist()
return self._allocate_buffer(recv_shape, num_buffers=1)[0]
# List or tuple of tensors
elif recv_type == 1 or recv_type == 2:
count_tensor = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(count_tensor, send_stage)
num_tensors = count_tensor.item()
recv_shapes_and_dtypes = []
for idx in range(num_tensors):
recv_dtype = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(recv_dtype, send_stage)
recv_dtype = self.ID_TO_DTYPE[recv_dtype.item()]
recv_ndims = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(recv_ndims, send_stage)
recv_ndims = recv_ndims.item()
recv_shape = torch.LongTensor([1] * recv_ndims).to(self.device)
p2p.recv(recv_shape, send_stage)
recv_shapes_and_dtypes.append((recv_shape.tolist(), recv_dtype))
buffers = self._allocate_buffers(recv_shapes_and_dtypes, num_buffers=1)[0]
# Convert to tuples if requested.
if recv_type == 2:
buffers = tuple(buffers)
return buffers
else:
            raise NotImplementedError(f'Could not receive meta type {recv_type}')
def _exec_send_activations(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_send_output').start()
outputs = self.pipe_buffers['outputs'][buffer_id]
# NCCL does not like to send torch.BoolTensor types, so cast the mask to half().
# We could do char, but with half() we can eventually flatten with other fp16
# messages (TODO)
if self.has_attention_mask or self.has_bool_tensors:
outputs = list(outputs)
outputs[-1] = outputs[-1].half()
outputs = tuple(outputs)
if self.first_output_send:
self.first_output_send = False
self._send_tensor_meta(outputs, self.next_stage)
if isinstance(outputs, torch.Tensor):
p2p.send(outputs, self.next_stage)
elif isinstance(outputs, tuple):
for idx, buffer in enumerate(outputs):
p2p.send(buffer, self.next_stage)
else:
raise NotImplementedError('Could not send output of type '
f'{type(outputs)}')
# Restore the boolean tensor
if self.has_attention_mask or self.has_bool_tensors:
outputs = list(outputs)
outputs[-1] = outputs[-1].bool()
outputs = tuple(outputs)
if self.wall_clock_breakdown():
self.timers('pipe_send_output').stop()
def _exec_send_grads(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_send_grad').start()
inputs = self.pipe_buffers['inputs'][buffer_id]
# Partition the gradient
if self.is_grad_partitioned:
if isinstance(inputs, tuple):
first_input = inputs[0]
assert all([torch.is_tensor(elt) for elt in inputs[1:]])
inputs_grad_tail = [elt.grad for elt in inputs[1:] if elt.grad is not None]
elif torch.is_tensor(inputs):
first_input = inputs
inputs_grad_tail = []
else:
raise ValueError("expecting a tensor or a tuple of tensors")
assert torch.is_tensor(first_input)
part = PartitionedTensor(tensor=first_input.grad, group=self.grid.get_slice_parallel_group())
inputs = (part.to_meta(), part.data(), *inputs_grad_tail)
# XXX Terrible hack
# Drop the attention mask from the input buffer here. It does not have
# a grad that needs to be communicated. We free the buffer immediately
# after, so no need to restore it. The receiver also has a hack that skips
# the recv. This is because NCCL does not let us send torch.BoolTensor :-(.
if self.has_attention_mask or self.has_bool_tensors:
inputs = list(inputs)
inputs.pop()
inputs = tuple(inputs)
if isinstance(inputs, torch.Tensor):
assert inputs.grad is not None
p2p.send(inputs.grad, self.prev_stage)
else:
# XXX terrible hacky branch
if self.is_grad_partitioned:
# First two sends are partitioned gradient
p2p.send(inputs[0], self.prev_stage)
p2p.send(inputs[1], self.prev_stage)
else:
for idx, buffer in enumerate(inputs):
# Skip tensors that will not produce a grad
if not buffer.is_floating_point():
assert buffer.grad is None
continue
assert buffer.grad is not None
p2p.send(buffer.grad, self.prev_stage)
# We can free up the input buffer now
self.pipe_buffers['inputs'][buffer_id] = None
if self.wall_clock_breakdown():
self.timers('pipe_send_grad').stop()
def _exec_recv_activations(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_recv_input').start()
recvd = None
# Allocate the buffer if necessary
if self.pipe_recv_buf is None:
self.pipe_recv_buf = self._recv_tensor_meta(self.prev_stage)
if isinstance(self.pipe_recv_buf, torch.Tensor):
p2p.recv(self.pipe_recv_buf, self.prev_stage)
recvd = self.pipe_recv_buf.clone().detach()
recvd.requires_grad = recvd.is_floating_point()
else:
assert isinstance(self.pipe_recv_buf, tuple)
recvd = [None] * len(self.pipe_recv_buf)
for idx, buffer in enumerate(self.pipe_recv_buf):
assert torch.is_tensor(buffer)
# XXX hardcode meta type
if self.is_pipe_partitioned and idx == 0 and buffer.dtype != torch.long:
if self.meta_buffer is None:
self.meta_buffer = torch.zeros(buffer.size(), dtype=torch.long, device=self.device)
buffer = self.meta_buffer
p2p.recv(buffer, self.prev_stage)
recvd[idx] = buffer.clone().detach()
# NCCL does not like to send torch.BoolTensor types, so un-cast the
# attention mask
if self.has_attention_mask or self.has_bool_tensors:
recvd[-1] = recvd[-1].bool()
recvd = tuple(recvd)
for buffer in recvd:
buffer.requires_grad = buffer.is_floating_point()
self.pipe_buffers['inputs'][buffer_id] = recvd
if self.wall_clock_breakdown():
self.timers('pipe_recv_input').stop()
def _exec_recv_grads(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_recv_grad').start()
outputs = self.pipe_buffers['outputs'][buffer_id]
# XXX these shapes are hardcoded for Megatron
# Restore partitioned output if it was partitioned and we are sending full gradients
if self.is_pipe_partitioned and not self.is_grad_partitioned:
part_output = PartitionedTensor.from_meta(meta=outputs[0],
local_part=outputs[1],
group=self.grid.get_slice_parallel_group())
outputs[0].data = part_output.full()
outputs = (outputs[0], *outputs[2:])
# save for backward
self.pipe_buffers['outputs'][buffer_id] = outputs
# Allocate gradient if necessary
if self.grad_layer is None:
if isinstance(outputs, torch.Tensor):
s = list(outputs.size())
self.grad_layer = self._allocate_buffer(s, dtype=outputs.dtype, num_buffers=1)[0]
else:
# XXX This is a HACK
# When we exchange activations/gradients, the two pipe stages
# need to issue the send/recv with the same buffer sizes or
# else there is a deadlock. The is_floating_point() filter is
# used to avoid sending gradients for tensors that do not
# produce gradients. When TP>1, we partition the first
# activations/gradients across TP ranks to save communication
# volume and memory. That partitioned tensor is represented as
# two tensors: a 1/TPth chunk of the original data and also a
# small LongTensor storing the metadata used to reconstruct on
# the other side. When combined, the floating point filter also
# filtered out the metadata tensor. This quick (hacky) fix just
# branches on is_grad_partitioned so we don't filter out the
# metadata tensor.
if self.is_grad_partitioned:
sizes_and_dtypes = [(list(t.size()), t.dtype)
for t in outputs[:2]] + [(list(t.size()), t.dtype)
for t in outputs[2:] if t.is_floating_point()]
else:
sizes_and_dtypes = [(list(t.size()), t.dtype) for t in outputs if t.is_floating_point()]
self.grad_layer = self._allocate_buffers(sizes_and_dtypes, num_buffers=1)[0]
if isinstance(self.grad_layer, torch.Tensor):
p2p.recv(self.grad_layer, self.next_stage)
else:
assert isinstance(outputs, tuple)
for idx, buffer in enumerate(self.grad_layer):
# XXX GPT-2 hack
if self.is_grad_partitioned and idx == 0 and buffer.dtype != torch.long:
buffer.data = torch.zeros(buffer.size(), dtype=torch.long, device=self.device)
p2p.recv(buffer, self.next_stage)
if self.wall_clock_breakdown():
self.timers('pipe_recv_grad').stop()
def _exec_optimizer_step(self, lr_kwargs=None):
if self.wall_clock_breakdown():
self.timers('step_microstep').start()
self.timers('step').start()
self.mem_status('BEFORE STEP', reset_max=True)
self._force_grad_boundary = True
self._take_model_step(lr_kwargs)
self._force_grad_boundary = False
self.mem_status('AFTER STEP')
if self.global_rank == 0 and self.monitor.enabled:
self.summary_events = [(f'Train/Samples/lr', self.get_lr()[0], self.global_samples)]
if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'):
self.summary_events.append(
(f'Train/Samples/loss_scale', self.optimizer.cur_scale, self.global_samples))
self.monitor.write_events(self.summary_events)
if self.wall_clock_breakdown():
self.timers('step_microstep').stop()
self.timers('step').stop()
if self.global_steps % self.steps_per_print() == 0:
self.timers.log([
'batch_input', 'forward_microstep', 'backward_microstep', 'backward_inner_microstep',
'backward_allreduce_microstep', 'backward_tied_allreduce_microstep', 'step_microstep'
])
if self.global_steps % self.steps_per_print() == 0:
self.timers.log(['forward', 'backward', 'backward_inner', 'backward_allreduce', 'step'])
def _zero_grads(self, inputs):
if isinstance(inputs, torch.Tensor):
if inputs.grad is not None:
inputs.grad.data.zero_()
else:
for t in inputs:
if t.grad is not None:
t.grad.data.zero_()
def _allocate_zeros(self, shape, **kwargs):
""" Allocate a tensor of zeros on the engine's device.
Arguments:
shape: the shape of the tensor to allocate
kwargs: passed to torch.zeros()
Returns:
A tensor from torch.zeros() allocated on self.device.
"""
if "dtype" not in kwargs:
if self.fp16_enabled():
kwargs["dtype"] = torch.half
if self.bfloat16_enabled():
kwargs["dtype"] = torch.bfloat16
return torch.zeros(shape, device=self.device, **kwargs)
def _allocate_buffer(self, shape, num_buffers=-1, **kwargs):
buffers = []
if num_buffers == -1:
num_buffers = self.num_pipe_buffers
for count in range(num_buffers):
buffers.append(self._allocate_zeros(shape, **kwargs))
return buffers
def _allocate_buffers(self, shapes_and_dtypes, requires_grad=False, num_buffers=-1):
buffers = []
if num_buffers == -1:
num_buffers = self.num_pipe_buffers
for count in range(num_buffers):
buffer = []
for shape, dtype in shapes_and_dtypes:
buffer.append(self._allocate_zeros(shape, dtype=dtype, requires_grad=requires_grad))
buffers.append(buffer)
return buffers
def forward(self, *args, **kwargs):
"""Disabled for pipeline parallel training. See ``train_batch()``. """
raise PipelineError("Only train_batch() is accessible in pipeline mode.")
def backward(self, *args, **kwargs):
"""Disabled for pipeline parallel training. See ``train_batch()``. """
raise PipelineError("Only train_batch() is accessible in pipeline mode.")
def step(self, *args, **kwargs):
"""Disabled for pipeline parallel training. See ``train_batch()``. """
raise PipelineError("Only train_batch() is accessible in pipeline mode.")
def mem_status(self, msg, print_rank=-1, reset_max=False):
return
global mem_alloced, mem_cached
if not self.global_steps == 0 or not self.global_steps == 9:
#return
pass
if self.mpu.get_data_parallel_rank() != 0:
return
if self.global_rank != 0:
return
rank = self.global_rank
if print_rank != -1 and rank != print_rank:
return
get_accelerator().synchronize()
if reset_max:
get_accelerator().reset_max_memory_cached()
get_accelerator().reset_max_memory_allocated()
new_alloced = get_accelerator().memory_allocated()
new_cached = get_accelerator().memory_cached()
delta_alloced = new_alloced - mem_alloced
delta_cached = new_cached - mem_cached
mem_cached = new_cached
mem_alloced = new_alloced
max_alloced = get_accelerator().max_memory_allocated()
max_cached = get_accelerator().max_memory_cached()
# convert to GB for printing
new_alloced /= 1024**3
new_cached /= 1024**3
delta_alloced /= 1024**3
delta_cached /= 1024**3
max_alloced /= 1024**3
max_cached /= 1024**3
print(
f'RANK={rank} STAGE={self.stage_id} STEP={self.global_steps} MEMSTATS', msg,
f'current alloc={new_alloced:0.4f}GB (delta={delta_alloced:0.4f}GB max={max_alloced:0.4f}GB) '
f'current cache={new_cached:0.4f}GB (delta={delta_cached:0.4f}GB max={max_cached:0.4f}GB)')
def module_state_dict(self):
"""Override hack to save a pipe model and return the directory path of the save.
This method should only be called by DeepSpeed's ``save_checkpoint()``. The
recommended way of saving a ``PipelineModule`` outside of ``save_checkpoint()``
is ``save_state_dict()``.
Returns:
None
"""
assert isinstance(self.module, PipelineModule)
assert self._curr_ckpt_path is not None, \
"PipelineEngine expects module_state_dict() to be called from save_checkpoint()"
self.module.save_state_dict(self._curr_ckpt_path, checkpoint_engine=self.checkpoint_engine)
return None
def load_module_state_dict(self, checkpoint, strict=True, custom_load_fn=None):
"""Override hack to instead use a directory path.
This is important because pipeline models checkpoint by layer instead of rank.
        If ``state_dict`` is neither ``None`` nor a ``str``, we revert to ``super()`` expecting a ``dict``.
Args:
state_dict (str, None): unused
strict (bool, optional): Strict state loading. Defaults to True.
"""
assert custom_load_fn is None, "custom_load_fn not supported w. pipeline parallelism"
state_dict = checkpoint['module']
if (state_dict is not None) and (not isinstance(state_dict, str)):
super().load_module_state_dict(state_dict, strict)
return
self.module.load_state_dir(load_dir=self._curr_ckpt_path,
strict=strict,
checkpoint_engine=self.checkpoint_engine)
# A map of PipeInstruction types to methods. Each method will be executed with the
# kwargs provided to the PipeInstruction from the scheduler.
_INSTRUCTION_MAP = {
schedule.OptimizerStep: _exec_optimizer_step,
schedule.ReduceGrads: _exec_reduce_grads,
schedule.ReduceTiedGrads: _exec_reduce_tied_grads,
schedule.LoadMicroBatch: _exec_load_micro_batch,
schedule.ForwardPass: _exec_forward_pass,
schedule.BackwardPass: _exec_backward_pass,
schedule.SendActivation: _exec_send_activations,
schedule.RecvActivation: _exec_recv_activations,
schedule.SendGrad: _exec_send_grads,
schedule.RecvGrad: _exec_recv_grads,
}
def _exec_schedule(self, pipe_schedule):
# Reserve and reset buffers.
self._reserve_pipe_buffers(pipe_schedule.num_pipe_buffers())
self.fwd_outputs = []
# For each step in the schedule
for step_cmds in pipe_schedule:
# For each instruction in the step
for cmd in step_cmds:
if type(cmd) not in self._INSTRUCTION_MAP:
raise RuntimeError(f'{self.__class__.__name__} does not understand instruction {repr(cmd)}')
# Equivalent to: self._exec_forward_pass(buffer_id=0)
self._exec_instr = MethodType(self._INSTRUCTION_MAP[type(cmd)], self)
self._exec_instr(**cmd.kwargs)
| 56,886 | 42.49159 | 128 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/runtime/pipe/p2p.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pickle
import typing
import torch
from deepspeed import comm as dist
# To query whether we have send/recv support
from packaging.version import Version
from deepspeed.git_version_info import torch_info
from deepspeed.accelerator import get_accelerator
_groups = None
_grid = None
_async = []
def can_send_recv() -> bool:
torch_version = Version(torch_info['version'])
sendrecv_min = Version('1.8')
return torch_version >= sendrecv_min
#initializes adjacent process groups
#run this only after deepspeed.init_distributed() has been called
def init_process_groups(grid):
global _groups, _grid
_grid = grid
assert _grid.pipe_parallel_size > 1, "There is no pipeline parallelism"
if not can_send_recv():
_groups = [dist.new_group(ranks=group) for group in _grid.p2p_groups]
def _is_valid_send_recv(src_stage, dest_stage):
first_stage = 0
last_stage = _grid.pipe_parallel_size - 1
assert abs(src_stage-dest_stage) == 1 or \
(src_stage == first_stage and dest_stage == last_stage) or \
(src_stage == last_stage and dest_stage == first_stage), \
"Functionality currently limited to send and receive between adjacent ranks only"
def send(tensor, dest_stage, async_op=False):
global _groups
assert async_op == False, "Doesn't support async_op true"
src_stage = _grid.get_stage_id()
_is_valid_send_recv(src_stage, dest_stage)
dest_rank = _grid.stage_to_global(stage_id=dest_stage)
if async_op:
global _async
op = dist.isend(tensor, dest_rank)
_async.append(op)
else:
if can_send_recv():
return dist.send(tensor, dest_rank)
else:
group = _get_send_recv_group(src_stage, dest_stage)
src_rank = _grid.stage_to_global(stage_id=src_stage)
return dist.broadcast(tensor, src_rank, group=group, async_op=async_op)
def recv(tensor, src_stage, async_op=False):
global _groups
assert async_op == False, "Doesn't support async_op true"
dest_stage = _grid.get_stage_id()
_is_valid_send_recv(src_stage, dest_stage)
src_rank = _grid.stage_to_global(stage_id=src_stage)
if async_op:
global _async
op = dist.irecv(tensor, src_rank)
_async.append(op)
else:
if can_send_recv():
return dist.recv(tensor, src_rank)
else:
group = _get_send_recv_group(src_stage, dest_stage)
return dist.broadcast(tensor, src_rank, group=group, async_op=async_op)
def wait():
global _async
for op in _async:
op.wait()
_async = []
get_accelerator().synchronize()
def send_obj(msg: typing.Any, dest: int):
"""Send an arbitrary python object to ``dest``.
Note: ``msg`` must be pickleable.
WARN: This incurs a CPU -> GPU transfer and should be used sparingly
for performance reasons.
Args:
msg (typing.Any): The object to send.
dest (int): Destination rank.
"""
# serialize the message
msg = pickle.dumps(msg)
# construct a tensor to send
msg = torch.ByteTensor(torch.ByteStorage.from_buffer(msg)).to(get_accelerator().device_name())
# Send meta and message
length_tensor = torch.tensor([len(msg)], dtype=torch.long).to(get_accelerator().device_name())
dist.send(length_tensor, dst=dest)
dist.send(msg, dst=dest)
def recv_obj(sender: int) -> typing.Any:
"""Receive an arbitrary python object from ``sender``.
WARN: This incurs CPU <-> GPU transfers and should be used sparingly
for performance reasons.
Args:
sender (int): The rank sending the message.
"""
# Get message meta
length = torch.tensor([0], dtype=torch.long).to(get_accelerator().device_name())
dist.recv(length, src=sender)
# Receive and deserialize
msg = torch.empty(length.item(), dtype=torch.uint8).to(get_accelerator().device_name())
dist.recv(msg, src=sender)
msg = pickle.loads(msg.cpu().numpy().tobytes())
def _to(x):
"""Recursively move to the current device."""
if torch.is_tensor(x):
return x.to(get_accelerator().device_name())
if isinstance(x, (tuple, list)):
ret = [_to(x_) for x_ in x]
if isinstance(x, tuple):
ret = tuple(ret)
return ret
# handle kwargs
if isinstance(x, dict):
ret = dict()
for key, val in x.items():
ret[_to(key)] = _to(val)
return ret
# Anything else is a no-op
return x
msg = _to(msg)
return msg
def _get_send_recv_group(src_stage, dest_stage):
'''The group id is always the smaller stage id unless it is a wrap-around.'''
stage_id = None
first_stage = 0
last_stage = _grid.pipe_parallel_size - 1
if (src_stage == first_stage and dest_stage == last_stage
or dest_stage == first_stage and src_stage == last_stage):
stage_id = last_stage
elif src_stage > dest_stage:
stage_id = dest_stage
else:
stage_id = src_stage
'''group_id corresponds to the group [group_id, group_id+1],
unless group_id is the rank of the last stage,
in which case it corresponds to the wrap-around group [group_id-num_stages+1, group_id]
'''
group_id = _grid.stage_to_global(stage_id=stage_id)
return _groups[group_id]
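# Illustrative sketch (not part of DeepSpeed): a pure-Python mirror of the
# group-id selection above, showing which adjacent-pair group a send/recv
# falls back to when torch send/recv is unavailable. The helper name
# _example_group_id_for is hypothetical and needs no distributed backend.
def _example_group_id_for(src_stage, dest_stage, num_stages):
    first_stage, last_stage = 0, num_stages - 1
    if (src_stage == first_stage and dest_stage == last_stage
            or dest_stage == first_stage and src_stage == last_stage):
        # wrap-around pair, e.g. stages (3, 0) in a 4-stage pipeline
        return last_stage
    # otherwise the group is keyed by the smaller stage of the pair
    return min(src_stage, dest_stage)

# e.g. _example_group_id_for(1, 2, 4) -> 1 and _example_group_id_for(3, 0, 4) -> 3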
| 5,477 | 28.294118 | 98 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/runtime/pipe/topology.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed import comm as dist
from collections import namedtuple
from itertools import product as cartesian_product
class ProcessTopology:
""" Manages the mapping of n-dimensional Cartesian coordinates to linear
indices. This mapping is used to map the rank of processes to the grid
for various forms of parallelism.
Each axis of the tensor is accessed by its name. The provided ordering
of the axes defines the layout of the topology. ProcessTopology uses a "row-major"
layout of the tensor axes, and so axes=['x', 'y'] would map coordinates (x,y) and
(x,y+1) to adjacent linear indices. If instead axes=['y', 'x'] was used, coordinates
(x,y) and (x+1,y) would be adjacent.
Some methods return ProcessCoord namedtuples.
"""
def __init__(self, axes, dims):
"""Create a mapping of n-dimensional tensor coordinates to linear indices.
Arguments:
axes (list): the names of the tensor axes
dims (list): the dimension (length) of each axis of the topology tensor
"""
self.axes = axes # names of each topology axis
self.dims = dims # length of each topology axis
# This is actually a class that lets us hash {'row':3, 'col':2} mappings
self.ProcessCoord = namedtuple('ProcessCoord', axes)
self.mapping = {}
ranges = [range(d) for d in dims]
# example: 1, (0,0,1)
for global_rank, coord in enumerate(cartesian_product(*ranges)):
key = {axis: coord[self.axes.index(axis)] for axis in self.axes}
key = self.ProcessCoord(**key)
# for example, {ProcessCoord(row=0, col=1) : 1}
self.mapping[key] = global_rank
def get_rank(self, **coord_kwargs):
"""Return the global rank of a process via its coordinates.
Coordinates are specified as kwargs. For example:
>>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
>>> X.get_rank(x=0, y=1)
1
"""
if len(coord_kwargs) != len(self.axes):
raise ValueError('get_rank() does not support slices. Use filter_match().')
key = self.ProcessCoord(**coord_kwargs)
assert key in self.mapping, f'key {coord_kwargs} invalid'
return self.mapping[key]
def get_axis_names(self):
"""Return a list of the axis names in the ordering of the topology. """
return self.axes
def get_rank_repr(self, rank, omit_axes=['data', 'pipe'], inner_sep='_', outer_sep='-'):
"""Return a string representation of a rank.
This method is primarily used for checkpointing model data.
For example:
>>> topo = Topo(axes=['a', 'b'], dims=[2, 2])
>>> topo.get_rank_repr(rank=3)
'a_01-b_01'
>>> topo.get_rank_repr(rank=3, omit_axes=['a'])
'b_01'
Args:
rank (int): A rank in the topology.
omit_axes (list, optional): Axes that should not be in the representation. Defaults to ['data', 'pipe'].
inner_sep (str, optional): Separator between an axis name and its rank. Defaults to '_'.
outer_sep (str, optional): Separator between axis entries. Defaults to '-'.
Returns:
str: A string representation of the coordinate owned by ``rank``.
"""
omit_axes = frozenset(omit_axes)
axes = [a for a in self.get_axis_names() if a not in omit_axes]
names = []
for ax in axes:
ax_rank = getattr(self.get_coord(rank=rank), ax)
names.append(f'{ax}{inner_sep}{ax_rank:02d}')
return outer_sep.join(names)
def get_dim(self, axis):
"""Return the number of processes along the given axis.
For example:
>>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
>>> X.get_dim('y')
3
"""
if axis not in self.axes:
return 0
return self.dims[self.axes.index(axis)]
def get_coord(self, rank):
"""Return the coordinate owned by a process rank.
The axes of the returned namedtuple can be directly accessed as members. For
example:
>>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
>>> coord = X.get_coord(rank=1)
>>> coord.x
0
>>> coord.y
1
"""
for coord, idx in self.mapping.items():
if idx == rank:
return coord
raise ValueError(f'rank {rank} not found in topology.')
def get_axis_comm_lists(self, axis):
""" Construct lists suitable for a communicator group along axis ``axis``.
Example:
>>> topo = Topo(axes=['pipe', 'data', 'model'], dims=[2, 2, 2])
>>> topo.get_axis_comm_lists('pipe')
[
[0, 4], # data=0, model=0
[1, 5], # data=0, model=1
[2, 6], # data=1, model=0
[3, 7], # data=1, model=1
]
Returns:
A list of lists whose coordinates match in all axes *except* ``axis``.
"""
# We don't want to RuntimeError because it allows us to write more generalized
# code for hybrid parallelisms.
if axis not in self.axes:
return []
# Grab all axes but `axis`
other_axes = [a for a in self.axes if a != axis]
lists = []
# Construct all combinations of coords with other_axes
ranges = [range(self.get_dim(a)) for a in other_axes]
for coord in cartesian_product(*ranges):
other_keys = {a: coord[other_axes.index(a)] for a in other_axes}
# now go over all ranks in `axis`.
sub_list = []
for axis_key in range(self.get_dim(axis)):
key = self.ProcessCoord(**other_keys, **{axis: axis_key})
sub_list.append(self.mapping[key])
lists.append(sub_list)
return lists
def filter_match(self, **filter_kwargs):
"""Return the list of ranks whose coordinates match the provided criteria.
Example:
>>> X = ProcessTopology(axes=['pipe', 'data', 'model'], dims=[2, 2, 2])
>>> X.filter_match(pipe=0, data=1)
[2, 3]
>>> [X.get_coord(rank) for rank in X.filter_match(pipe=0, data=1)]
[ProcessCoord(pipe=0, data=1, model=0), ProcessCoord(pipe=0, data=1, model=1)]
Arguments:
**filter_kwargs (dict): criteria used to select coordinates.
Returns:
The list of ranks whose coordinates match filter_kwargs.
"""
def _filter_helper(x):
for key, val in filter_kwargs.items():
if getattr(x, key) != val:
return False
return True
coords = filter(_filter_helper, self.mapping.keys())
return [self.mapping[coord] for coord in coords]
def get_axis_list(self, axis, idx):
"""Returns the list of global ranks whose coordinate in an axis is idx.
For example:
>>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
>>> X.get_axis_list(axis='x', idx=0)
[0, 1, 2]
>>> X.get_axis_list(axis='y', idx=0)
[0, 3]
"""
# This could be faster by generating the desired keys directly instead of
# filtering.
axis_num = self.axes.index(axis)
ranks = [self.mapping[k] for k in self.mapping.keys() if k[axis_num] == idx]
return ranks
def world_size(self):
return len(self.mapping)
def __str__(self):
return str(self.mapping)
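# Illustrative sketch (not part of DeepSpeed): how the row-major coordinate
# mapping above lays out ranks for a small hybrid grid. The helper name
# _example_topology_mapping is hypothetical; it only exercises pure-Python
# methods of ProcessTopology and needs no distributed backend.
def _example_topology_mapping():
    topo = ProcessTopology(axes=['pipe', 'data'], dims=[2, 3])
    # The last axis varies fastest, so ranks 0..2 share pipe=0 and ranks 3..5 share pipe=1.
    assert topo.get_rank(pipe=0, data=2) == 2
    assert topo.get_rank(pipe=1, data=0) == 3
    assert topo.get_axis_list(axis='pipe', idx=1) == [3, 4, 5]
    assert topo.filter_match(data=0) == [0, 3]
    return topo.get_coord(rank=4)  # -> ProcessCoord(pipe=1, data=1)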
def _prime_factors(N):
""" Returns the prime factorization of positive integer N. """
if N <= 0:
raise ValueError("Values must be strictly positive.")
primes = []
while N != 1:
for candidate in range(2, N + 1):
if N % candidate == 0:
primes.append(candidate)
N //= candidate
break
return primes
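# Illustrative sketch (not part of DeepSpeed): how PipelineParallelGrid (below)
# derives a default pipe/data split from the world size when no topology is
# given -- prime factors are assigned alternately to the pipeline and data
# axes. The helper name _example_default_pp_dp is hypothetical.
def _example_default_pp_dp(world_size):
    num_pp, num_dp = 1, 1
    for idx, prime in enumerate(_prime_factors(world_size)):
        if idx % 2 == 0:
            num_pp *= prime
        else:
            num_dp *= prime
    return num_pp, num_dp

# e.g. _example_default_pp_dp(8) -> (4, 2) since _prime_factors(8) == [2, 2, 2]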
class PipeDataParallelTopology(ProcessTopology):
""" A topology specialization for hybrid data and pipeline parallelism.
Uses data parallelism on the last dimension to encourage gradient
reductions to use high-bandwidth intra-node links and lower-volume
pipeline communications to use low-bandwidth inter-node links.
"""
def __init__(self, num_pp, num_dp):
super().__init__(axes=['pipe', 'data'], dims=[num_pp, num_dp])
class PipeModelDataParallelTopology(ProcessTopology):
""" A topology for hybrid pipeline, model, and data parallelism. """
def __init__(self, num_pp, num_mp, num_dp):
super().__init__(axes=['pipe', 'data', 'model'], dims=[num_pp, num_dp, num_mp])
class PipelineParallelGrid:
"""Implements a grid object that stores the data parallel ranks
corresponding to each of the model parallel stages
The grid object organizes the processes in a distributed pytorch job
into a 2D grid, of stage_id and data_parallel_id.
self.stage_id and self.data_parallel_id stores the stage id
and the data parallel id of current process.
self.dp_group groups the processes by stage_id.
self.dp_group[i], is a list containing all process ranks whose
stage_id is i.
self.p2p_groups stores a list of tuple, where each tuple
stores process ranks of adjacent stages for a given data_parallel_id.
For example if num_stage is 5 then a tuple [7,8] represents stages [3, 4],
with data_parallel id = 1. A stage wrap around will appear as non-adjacent ranks,
for example tuple [4,0] with representing wrap-around stage 4 and 0, for
data_parallel_id = 0, or similarly [9,5] represents wrapped around stages [4,0]
for data_parallel_id = 1.
"""
def __init__(self, topology=None, process_group=None):
# TODO use process_group if provided
self.global_rank = dist.get_rank()
self.world_size = dist.get_world_size()
if topology is not None:
if self.global_rank == 0:
print('Using topology:', topology)
self._topo = topology
else:
num_pp = 1
num_dp = 1
for idx, prime in enumerate(_prime_factors(self.world_size)):
if idx % 2 == 0:
num_pp *= prime
else:
num_dp *= prime
self._topo = PipeDataParallelTopology(num_dp=num_dp, num_pp=num_pp)
self.data_parallel_size = max(self._topo.get_dim('data'), 1)
self.pipe_parallel_size = max(self._topo.get_dim('pipe'), 1)
self.model_parallel_size = max(self._topo.get_dim('model'), 1)
self.slice_parallel_size = self.model_parallel_size
assert self._is_grid_valid(), "Invalid Grid"
self.stage_id = self.get_stage_id()
self.data_parallel_id = self.get_data_parallel_id()
# Create new ProcessGroups for all model parallelism. DeepSpeedLight uses these
# to detect overflow, etc.
self.ds_model_proc_group = None
self.ds_model_rank = -1
for dp in range(self.data_parallel_size):
ranks = sorted(self._topo.get_axis_list(axis='data', idx=dp))
if self.global_rank == 0:
#print(f'RANK={self.global_rank} building DeepSpeed model group: {ranks}')
pass
proc_group = dist.new_group(ranks=ranks)
if self.global_rank in ranks:
self.ds_model_proc_group = proc_group
self.ds_model_world_size = len(ranks)
self.ds_model_rank = ranks.index(self.global_rank)
assert self.ds_model_rank > -1
assert self.ds_model_proc_group is not None
# Create new ProcessGroup for gradient all-reduces - these are the data parallel groups
self.dp_group = []
self.dp_groups = self._topo.get_axis_comm_lists('data')
for g in self.dp_groups:
proc_group = dist.new_group(ranks=g)
if self.global_rank in g:
self.dp_group = g
self.dp_proc_group = proc_group
self.is_first_stage = (self.stage_id == 0)
self.is_last_stage = (self.stage_id == (self.pipe_parallel_size - 1))
self.p2p_groups = self._build_p2p_groups()
# Create new ProcessGroup for pipeline collectives - these are pipe parallel groups
self.pp_group = []
self.pp_proc_group = None
self.pipe_groups = self._topo.get_axis_comm_lists('pipe')
for ranks in self.pipe_groups:
if self.global_rank == 0:
#print(f'RANK={self.global_rank} building pipeline group: {ranks}')
pass
proc_group = dist.new_group(ranks=ranks)
if self.global_rank in ranks:
self.pp_group = ranks
self.pp_proc_group = proc_group
assert self.pp_proc_group is not None
# Create new ProcessGroup for model (tensor-slicing) collectives
# Short circuit case without model parallelism.
# TODO: it would be nice if topology had bcast semantics to avoid this branching
# case?
if self.model_parallel_size == 1:
for group_rank in range(self.world_size):
group_rank = [group_rank]
group = dist.new_group(ranks=group_rank)
if group_rank[0] == self.global_rank:
self.slice_group = group_rank
self.slice_proc_group = group
return
else:
self.mp_group = []
self.model_groups = self._topo.get_axis_comm_lists('model')
for g in self.model_groups:
proc_group = dist.new_group(ranks=g)
if self.global_rank in g:
self.slice_group = g
self.slice_proc_group = proc_group
def get_stage_id(self):
return self._topo.get_coord(rank=self.global_rank).pipe
def get_data_parallel_id(self):
return self._topo.get_coord(rank=self.global_rank).data
def _build_p2p_groups(self):
"""Groups for sending and receiving activations and gradients across model
parallel stages.
"""
comm_lists = self._topo.get_axis_comm_lists('pipe')
p2p_lists = []
for rank in range(self.world_size):
for l in comm_lists:
assert len(l) == self.pipe_parallel_size
if rank in l:
idx = l.index(rank)
buddy_rank = l[(idx + 1) % self.pipe_parallel_size]
p2p_lists.append([rank, buddy_rank])
break # next global rank
assert len(p2p_lists) == self.world_size
return p2p_lists
def _is_grid_valid(self):
ranks = 1
for ax in self._topo.get_axis_names():
ranks *= self._topo.get_dim(ax)
return ranks == dist.get_world_size()
#returns the global rank of the process with the provided stage id
#which has the same data_parallel_id as caller process
def stage_to_global(self, stage_id, **kwargs):
me = self._topo.get_coord(self.global_rank)
transform = me._replace(pipe=stage_id, **kwargs)._asdict()
return self._topo.get_rank(**transform)
def topology(self):
return self._topo
# MPU functions for DeepSpeed integration
def get_global_rank(self):
return self.global_rank
def get_pipe_parallel_rank(self):
""" The stage of the pipeline this rank resides in. """
return self.get_stage_id()
def get_pipe_parallel_world_size(self):
""" The number of stages in the pipeline. """
return self.pipe_parallel_size
def get_pipe_parallel_group(self):
""" The group of ranks within the same pipeline. """
return self.pp_proc_group
def get_data_parallel_rank(self):
""" Which pipeline this rank resides in. """
return self.data_parallel_id
def get_data_parallel_world_size(self):
""" The number of pipelines. """
return self.data_parallel_size
def get_data_parallel_group(self):
""" The group of ranks within the same stage of all pipelines. """
return self.dp_proc_group
# These are model parallel groups across all types of model parallelism.
# Deepspeed uses them to detect overflow, etc.
def get_model_parallel_rank(self):
return self.ds_model_rank
def get_model_parallel_world_size(self):
return self.ds_model_world_size
def get_model_parallel_group(self):
return self.ds_model_proc_group
# For Megatron-style tensor slicing
def get_slice_parallel_rank(self):
if 'model' in self._topo.get_axis_names():
return self._topo.get_coord(rank=self.global_rank).model
else:
return 0
def get_slice_parallel_world_size(self):
return self.slice_parallel_size
def get_slice_parallel_group(self):
return self.slice_proc_group
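# Illustrative sketch (not part of DeepSpeed): the buddy-rank pairing that
# _build_p2p_groups produces, reconstructed with only ProcessTopology so it
# runs without a distributed backend. The helper name _example_p2p_pairs is
# hypothetical.
def _example_p2p_pairs(num_pp=2, num_dp=2):
    topo = PipeDataParallelTopology(num_pp=num_pp, num_dp=num_dp)
    pipe_lists = topo.get_axis_comm_lists('pipe')
    pairs = []
    for rank in range(topo.world_size()):
        for l in pipe_lists:
            if rank in l:
                idx = l.index(rank)
                # each rank is paired with the next stage in its pipeline,
                # wrapping around from the last stage to the first
                pairs.append([rank, l[(idx + 1) % num_pp]])
                break
    return pairs

# e.g. _example_p2p_pairs(2, 2) -> [[0, 2], [1, 3], [2, 0], [3, 1]]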
| 17,167 | 36.56674 | 116 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/runtime/pipe/module.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import glob
import re as regex
from functools import partial
import torch
import torch.nn as nn
from deepspeed import comm as dist
from deepspeed.utils import logger
from .. import utils as ds_utils
from ..activation_checkpointing import checkpointing
from .topology import PipeDataParallelTopology, PipelineParallelGrid
from deepspeed.runtime.state_dict_factory import SDLoaderFactory
from deepspeed.accelerator import get_accelerator
class PipelineError(Exception):
"""Errors related to the use of deepspeed.PipelineModule """
class LayerSpec:
"""Building block for specifying pipeline-parallel modules.
LayerSpec stores the type information and parameters for each stage in a
PipelineModule. For example:
.. code-block:: python
nn.Sequential(
torch.nn.Linear(self.in_dim, self.hidden_dim, bias=False),
torch.nn.Linear(self.hidden_dim, self.out_dim)
)
becomes
.. code-block:: python
layer_specs = [
LayerSpec(torch.nn.Linear, self.in_dim, self.hidden_dim, bias=False),
LayerSpec(torch.nn.Linear, self.hidden_dim, self.out_dim)
]
"""
def __init__(self, typename, *module_args, **module_kwargs):
self.typename = typename
self.module_args = module_args
self.module_kwargs = module_kwargs
if not issubclass(typename, nn.Module):
raise RuntimeError('LayerSpec only supports torch.nn.Module types.')
if dist.is_initialized():
self.global_rank = dist.get_rank()
else:
self.global_rank = -1
def __repr__(self):
return ds_utils.call_to_str(self.typename.__name__, self.module_args, self.module_kwargs)
def build(self, log=False):
"""Build the stored specification."""
if log:
logger.info(f'RANK={self.global_rank} building {repr(self)}')
return self.typename(*self.module_args, **self.module_kwargs)
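# Illustrative sketch (not part of DeepSpeed): LayerSpec defers allocation
# until build() is called, so a full pipeline can be described cheaply and
# each rank only materializes the layers it owns. The helper name
# _example_layer_spec_build is hypothetical and needs no distributed backend
# (LayerSpec then simply records global_rank == -1).
def _example_layer_spec_build():
    spec = LayerSpec(nn.Linear, 4, 8, bias=False)
    layer = spec.build()  # the nn.Linear is only allocated here
    assert isinstance(layer, nn.Linear)
    assert tuple(layer.weight.shape) == (8, 4)
    return layer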
class TiedLayerSpec(LayerSpec):
def __init__(self, key, typename, *module_args, forward_fn=None, tied_weight_attr='weight', **module_kwargs):
super().__init__(typename, *module_args, **module_kwargs)
self.key = key
self.forward_fn = forward_fn
self.tied_weight_attr = tied_weight_attr
class PipelineModule(nn.Module):
"""Modules to be parallelized with pipeline parallelism.
The key constraint that enables pipeline parallelism is the
representation of the forward pass as a sequence of layers
and the enforcement of a simple interface between them. The
forward pass is implicitly defined by the module ``layers``. The key
assumption is that the output of each layer can be directly fed as
input to the next, like a ``torch.nn.Sequential``. The forward pass is
implicitly:
.. code-block:: python
def forward(self, inputs):
x = inputs
for layer in self.layers:
x = layer(x)
return x
.. note::
Pipeline parallelism is not compatible with ZeRO-2 and ZeRO-3.
Args:
layers (Iterable): A sequence of layers defining pipeline structure. Can be a ``torch.nn.Sequential`` module.
num_stages (int, optional): The degree of pipeline parallelism. If not specified, ``topology`` must be provided.
topology (``deepspeed.runtime.pipe.ProcessTopology``, optional): Defines the axes of parallelism for training. Must be provided if ``num_stages`` is ``None``.
loss_fn (callable, optional): Loss is computed ``loss = loss_fn(outputs, label)``
seed_layers (bool, optional): Use a different seed for each layer. Defaults to False.
seed_fn (callable, optional): The custom seed generating function. Defaults to random seed generator.
base_seed (int, optional): The starting seed. Defaults to 1234.
partition_method (str, optional): The method by which the layers are partitioned. Defaults to 'parameters'.
activation_checkpoint_interval (int, optional): The granularity of activation checkpointing in terms of number of layers. 0 disables activation checkpointing.
activation_checkpoint_func (callable, optional): The function to use for activation checkpointing. Defaults to ``deepspeed.checkpointing.checkpoint``.
checkpointable_layers (list, optional): Layer class names that are eligible for activation checkpointing; layers whose class name is not listed will not be checkpointed. Defaults to None, which applies no additional filtering.
"""
def __init__(self,
layers,
num_stages=None,
topology=None,
loss_fn=None,
seed_layers=False,
seed_fn=None,
base_seed=1234,
partition_method='parameters',
activation_checkpoint_interval=0,
activation_checkpoint_func=checkpointing.checkpoint,
checkpointable_layers=None):
super().__init__()
if num_stages is None and topology is None:
raise RuntimeError('must provide num_stages or topology')
self.micro_offset = 0
self.loss_fn = loss_fn
self.checkpointable_layers = checkpointable_layers
if checkpointable_layers is not None:
assert isinstance(checkpointable_layers, list), "param `checkpointable_layers` must be type of list."
self.seed_layers = seed_layers
self.seed_fn = seed_fn
self.base_seed = base_seed
if dist.get_rank() == 0:
try:
seed_str = self.seed_fn.__name__
except AttributeError:
seed_str = None
print(f'SEED_LAYERS={self.seed_layers} BASE_SEED={self.base_seed} SEED_FN={seed_str}')
# Setup world info
self.world_group = dist.new_group(ranks=range(dist.get_world_size()))
self.global_rank = dist.get_rank(group=self.world_group)
self.world_size = dist.get_world_size(group=self.world_group)
self.local_rank = int(os.environ.get("LOCAL_RANK", None))
assert self.local_rank is not None
if topology:
self._topo = topology
self.num_stages = self._topo.get_dim('pipe')
else:
self.num_stages = num_stages
if topology is None:
if self.world_size % self.num_stages != 0:
raise RuntimeError(
f'num_stages ({self.num_stages}) must divide distributed world size ({self.world_size})')
dp = self.world_size // num_stages
topology = PipeDataParallelTopology(num_pp=num_stages, num_dp=dp)
self._topo = topology
# Construct communicators for pipeline topology
self._grid = PipelineParallelGrid(process_group=self.world_group, topology=self._topo)
self.stage_id = self._topo.get_coord(self.global_rank).pipe
# Initialize partition information
self._layer_specs = list(layers)
self._num_layers = len(self._layer_specs)
self._local_start = 0
self._local_stop = None
self._partition_layers(method=partition_method)
self.forward_funcs = []
self.fwd_map = {}
self.tied_modules = nn.ModuleDict()
self.tied_weight_attrs = {}
# Offset the random seed by the stage ID.
#newseed = get_accelerator().initial_seed() + self._grid.get_stage_id()
#ds_utils.set_random_seed(newseed)
#with torch.random.fork_rng(devices=[get_accelerator().current_device_name()]):
self._build()
self.to(get_accelerator().device_name(self.local_rank))
self.tied_comms = self._index_tied_modules()
self._synchronize_tied_weights()
self.activation_checkpoint_interval = activation_checkpoint_interval
self.activation_checkpoint_func = activation_checkpoint_func
def _build(self):
specs = self._layer_specs
for local_idx, layer in enumerate(specs[self._local_start:self._local_stop]):
layer_idx = local_idx + self._local_start
if self.seed_layers:
if self.seed_fn:
self.seed_fn(self.base_seed + layer_idx)
else:
ds_utils.set_random_seed(self.base_seed + layer_idx)
# Recursively build PipelineModule objects
if isinstance(layer, PipelineModule):
raise NotImplementedError('RECURSIVE BUILD NOT YET IMPLEMENTED')
# LayerSpec objects contain an nn.Module that should be allocated now.
elif isinstance(layer, nn.Module):
name = str(layer_idx)
self.forward_funcs.append(layer)
self.fwd_map.update({name: len(self.forward_funcs) - 1})
self.add_module(name, layer)
# TiedLayerSpec objects contain an nn.Module that should be allocated now.
elif isinstance(layer, TiedLayerSpec):
# Build and register the module if we haven't seen it before.
if layer.key not in self.tied_modules:
self.tied_modules[layer.key] = layer.build()
self.tied_weight_attrs[layer.key] = layer.tied_weight_attr
if layer.forward_fn is None:
# Just use forward()
self.forward_funcs.append(self.tied_modules[layer.key])
else:
# User specified fn with args (module, input)
self.forward_funcs.append(partial(layer.forward_fn, self.tied_modules[layer.key]))
# LayerSpec objects contain an nn.Module that should be allocated now.
elif isinstance(layer, LayerSpec):
module = layer.build()
name = str(layer_idx)
self.forward_funcs.append(module)
self.fwd_map.update({name: len(self.forward_funcs) - 1})
self.add_module(name, module)
# Last option: layer may be a functional (e.g., lambda). We do nothing in
# that case and just use it in forward()
else:
self.forward_funcs.append(layer)
# All pipeline parameters should be considered as model parallel in the context
# of our FP16 optimizer
for p in self.parameters():
p.ds_pipe_replicated = False
def _count_layer_params(self):
"""Count the trainable parameters in individual layers.
This routine will only build one layer at a time.
Returns:
A list of the number of parameters in each layer.
"""
param_counts = [0] * len(self._layer_specs)
for idx, layer in enumerate(self._layer_specs):
if isinstance(layer, LayerSpec):
l = layer.build()
params = filter(lambda p: p.requires_grad, l.parameters())
param_counts[idx] = sum(p.numel() for p in params)
elif isinstance(layer, nn.Module):
params = filter(lambda p: p.requires_grad, layer.parameters())
param_counts[idx] = sum(p.numel() for p in params)
return param_counts
def _find_layer_type(self, layername):
idxs = []
typeregex = regex.compile(layername, regex.IGNORECASE)
for idx, layer in enumerate(self._layer_specs):
name = None
if isinstance(layer, LayerSpec):
name = layer.typename.__name__
elif isinstance(layer, nn.Module):
name = layer.__class__.__name__
else:
try:
name = layer.__name__
except AttributeError:
continue
if typeregex.search(name):
idxs.append(idx)
if len(idxs) == 0:
raise RuntimeError(f"Partitioning '{layername}' found no valid layers to partition.")
return idxs
def forward(self, forward_input):
# We need to offset the seed by the microbatch ID. Save it in a local var to
# ensure it is preserved in the closure. Otherwise checkpointed forward funcs
# will see a different offset.
self.micro_offset += 1
def exec_range_func(start, end):
''' Helper function to be used with checkpoint()
Adapted from torch.utils.checkpoint:checkpoint_sequential()
'''
local_micro_offset = self.micro_offset + 1
def exec_func(*inputs):
# Single tensor inputs need to be unwrapped
if len(inputs) == 1:
inputs = inputs[0]
for idx, layer in enumerate(self.forward_funcs[start:end]):
self.curr_layer = idx + self._local_start
if self.seed_layers:
new_seed = (self.base_seed * local_micro_offset) + self.curr_layer
if self.seed_fn:
self.seed_fn(new_seed)
else:
ds_utils.set_random_seed(new_seed)
inputs = layer(inputs)
return inputs
return exec_func
if self.activation_checkpoint_interval == 0:
func = exec_range_func(0, len(self.forward_funcs))
x = func(forward_input)
else:
num_layers = len(self.forward_funcs)
x = forward_input
for start_idx in range(0, num_layers, self.activation_checkpoint_interval):
end_idx = min(start_idx + self.activation_checkpoint_interval, num_layers)
funcs = self.forward_funcs[start_idx:end_idx]
# Since we either pass tensors or tuples of tensors without unpacking, we
# need to be careful not to double-wrap tensors with tuple.
if not isinstance(x, tuple):
x = (x, )
if self._is_checkpointable(funcs):
x = self.activation_checkpoint_func(exec_range_func(start_idx, end_idx), *x)
else:
x = exec_range_func(start_idx, end_idx)(*x)
return x
def _partition_layers(self, method='uniform'):
num_stages = self._topo.get_dim('pipe')
stage_id = self._topo.get_coord(self.global_rank).pipe
if self.global_rank == 0:
logger.info(f'Partitioning pipeline stages with method {method}')
method = method.lower()
# Each stage gets a simple uniform number of layers.
if method == 'uniform':
num_layers = len(self._layer_specs)
self.parts = ds_utils.partition_uniform(num_items=num_layers, num_parts=num_stages)
elif method == 'parameters':
param_counts = self._count_layer_params()
self.parts = ds_utils.partition_balanced(weights=param_counts, num_parts=num_stages)
elif method.startswith('type:'):
layertype = method.split(':')[1]
binary_weights = [0] * len(self._layer_specs)
for idx in self._find_layer_type(layertype):
binary_weights[idx] = 1
self.parts = ds_utils.partition_balanced(weights=binary_weights, num_parts=num_stages)
elif method == 'profile':
raise NotImplementedError(f'Partitioning method {method} not implemented.')
else:
raise NotImplementedError(f'Partitioning method {method} not implemented.')
# Print some information on the partitioning.
if self.global_rank == 0:
for stage in range(num_stages):
start = self.parts[stage]
stop = self.parts[stage + 1]
print(f'stage={stage} layers={stop - start}')
for idx, layer in enumerate(self._layer_specs[start:stop]):
name = str(layer)
if isinstance(layer, LayerSpec):
name = layer.typename.__name__
elif isinstance(layer, nn.Module):
name = layer.__class__.__name__
else:
try:
name = layer.__name__
except AttributeError:
pass
print(f' {idx+start:2d}: {name}')
if self.loss_fn:
try:
print(f' loss: {self.loss_fn.__name__}')
except AttributeError:
print(f' loss: {self.loss_fn.__class__.__name__}')
self._set_bounds(start=self.parts[stage_id], stop=self.parts[stage_id + 1])
def allreduce_tied_weight_gradients(self):
'''All reduce the gradients of the tied weights between tied stages'''
for key, comm in self.tied_comms.items():
weight = getattr(self.tied_modules[key], comm['weight_attr'])
dist.all_reduce(weight.grad, group=comm['group'])
def get_tied_weights_and_groups(self):
weight_group_list = []
for key, comm in self.tied_comms.items():
weight = getattr(self.tied_modules[key], comm['weight_attr'])
weight_group_list.append((weight, comm['group']))
return weight_group_list
def _synchronize_tied_weights(self):
for key, comm in self.tied_comms.items():
dist.broadcast(
getattr(comm['module'], comm['weight_attr']),
src=min(comm['ranks']),
group=comm['group'],
)
def _index_tied_modules(self):
''' Build communication structures for tied modules. '''
tied_comms = {}
if self._topo.get_dim('pipe') == 1:
return tied_comms
specs = self._layer_specs
tie_keys = set(s.key for s in specs if isinstance(s, TiedLayerSpec))
for key in tie_keys:
# Find the layers that the tied module appears in
tied_layers = []
for idx, layer in enumerate(specs):
if isinstance(layer, TiedLayerSpec) and layer.key == key:
tied_layers.append(idx)
# Find all stages with this tied module
# TODO: Would be nice to remove the nested data/model parallelism loops and
# TODO: instead generalize in some way, since we really just care about the
# TODO: stage that owns the tied layer. Then loop over each (dp, mp, ...)
# TODO: fiber to generate process groups.
tied_stages = set(self.stage_owner(idx) for idx in tied_layers)
for dp in range(self._grid.data_parallel_size):
for mp in range(self._grid.get_slice_parallel_world_size()):
tied_ranks = []
for s in sorted(tied_stages):
if self._grid.get_slice_parallel_world_size() > 1:
tied_ranks.append(self._grid.stage_to_global(stage_id=s, data=dp, model=mp))
else:
tied_ranks.append(self._grid.stage_to_global(stage_id=s, data=dp))
group = dist.new_group(ranks=tied_ranks)
# Record this tied module if we own a local copy of it.
if self.global_rank in tied_ranks:
assert key in self.tied_modules
if key in self.tied_modules:
tied_comms[key] = {
'ranks': tied_ranks,
'group': group,
'weight_attr': self.tied_weight_attrs[key],
'module': self.tied_modules[key],
}
# Only count the tied module once in the eyes of the FP16 optimizer
if self.global_rank != tied_ranks[0]:
for p in self.tied_modules[key].parameters():
p.ds_pipe_replicated = True
'''
if len(tied_comms) > 0:
print(f'RANK={self.global_rank} tied_comms={tied_comms}')
'''
return tied_comms
def partitions(self):
return self.parts
def stage_owner(self, layer_idx):
assert 0 <= layer_idx < self._num_layers
for stage in range(self._topo.get_dim('pipe')):
if self.parts[stage] <= layer_idx < self.parts[stage + 1]:
return stage
raise RuntimeError(f'Layer {layer_idx} not owned? parts={self.parts}')
def _set_bounds(self, start=None, stop=None):
"""Manually define the range of layers that will be built on this process.
These boundaries are treated as list slices and so start is inclusive and stop is
exclusive. The default of None for both results in all layers being built
locally.
"""
self._local_start = start
self._local_stop = stop
def set_checkpoint_interval(self, interval):
assert interval >= 0
self.activation_checkpoint_interval = interval
def topology(self):
""" ProcessTopology object to query process mappings. """
return self._topo
def mpu(self):
return self._grid
def num_pipeline_stages(self):
return self._topo.get_dim('pipe')
def ckpt_prefix(self, checkpoints_path, tag):
"""Build a prefix for all checkpoint files written by this module. """
# All checkpoint files start with this
rank_name = 'module'
# Data parallelism is omitted from the naming convention because we are agnostic
# to this in the checkpoint.
omit_dims = frozenset(['data'])
axes = [a for a in self._grid._topo.get_axis_names() if a not in omit_dims]
for dim in axes:
rank = getattr(self._grid._topo.get_coord(rank=self.global_rank), dim)
rank_name += f'-{dim}_{rank:02d}'
ckpt_name = os.path.join(checkpoints_path, str(tag), rank_name)
return ckpt_name
def ckpt_layer_path(self, ckpt_dir, local_layer_idx):
"""Customize a prefix for a specific pipeline module layer. """
idx = local_layer_idx + self._local_start
layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}')
rank_repr = self._grid._topo.get_rank_repr(rank=self.global_rank)
if rank_repr != '':
layer_ckpt_path += f'-{rank_repr}'
layer_ckpt_path += '-model_states.pt'
return layer_ckpt_path
def ckpt_layer_path_list(self, ckpt_dir, local_layer_idx):
"""Get all ckpt file list for a specific pipeline module layer. """
idx = local_layer_idx + self._local_start
layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}-')
layer_ckpt_path += "*model_states.pt"
ckpt_files = glob.glob(layer_ckpt_path)
ckpt_files.sort()
return ckpt_files
def save_state_dict(self, save_dir, checkpoint_engine):
# Processes having the same model parallel rank on different data parallel instances
# have identical layer weights. We can distribute the task of saving the layer weights
# among the data parallel ranks. For example, if a pipeline stage has 9 layers and
# if there are 2 data parallel instances, rank 0 will save the first 5 layers and
# rank 1 will save the last 4.
dp_rank = self._grid.data_parallel_id
dp_size = self._grid.data_parallel_size
num_layers = len(self.forward_funcs)
if self.checkpoint_parallel_write_pipeline:
# spread layers evenly across data parallel ranks
offsets = ds_utils.partition_uniform(num_layers, dp_size)
start, end = offsets[dp_rank], offsets[dp_rank + 1]
else:
# data parallel rank 0 writes all layers
if dp_rank != 0:
return
start, end = 0, num_layers
layer_list = self.forward_funcs[start:end]
checkpoint_engine.makedirs(save_dir, exist_ok=True)
for idx, layer in enumerate(layer_list):
model_ckpt_path = self.ckpt_layer_path(save_dir, start + idx)
if not hasattr(layer, 'state_dict'):
continue
# We pass cloned tensors to torch.save() to avoid checkpoint bloat which occurs because torch.save()
# saves the underlying storage rather than the slice of the storage corresponding to individual tensors.
# This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers.
# Tensor cloning helps to avoid this problem because the storage of cloned tensors are closer to the true size.
# It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat.
# See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing
orig_state_dict = layer.state_dict()
final_state_dict = type(orig_state_dict)({k: v.clone() for k, v in orig_state_dict.items()})
checkpoint_engine.save(final_state_dict, model_ckpt_path)
def load_state_dir(self, load_dir, checkpoint_engine, strict=True):
for idx, layer in enumerate(self.forward_funcs):
# Functions, etc. will not have state_dicts
if not hasattr(layer, 'load_state_dict'):
continue
# get all checkpoint files for the layer.
model_ckpt_list = self.ckpt_layer_path_list(load_dir, idx)
mp_rank = self._grid.get_slice_parallel_rank()
mp_world_size = self._grid.get_slice_parallel_world_size()
sd_loader = SDLoaderFactory.get_sd_loader(model_ckpt_list,
version=2.0,
checkpoint_engine=checkpoint_engine)
load_path, checkpoint, _ = sd_loader.load(mp_world_size, mp_rank, module_key=None, is_pipe_parallel=True)
layer.load_state_dict(checkpoint)
# if self._grid.data_parallel_id == 0:
# logger.info(
# f'RANK={self.global_rank} Loaded layer={idx+self._local_start} file={load_path}'
# )
self._synchronize_tied_weights()
def _is_checkpointable(self, funcs):
# This is an unfortunate hack related to torch and deepspeed activation checkpoint implementations.
# Some layers like torch.nn.Embedding will not receive grads if checkpointed, which breaks things.
# I presume it's related to the discrete inputs that cannot require_grad? Need to revisit.
if self.__class__.__name__ in ('GPTModelPipe', 'GPT2ModelPipe'):
return all('ParallelTransformerLayerPipe' in f.__class__.__name__ for f in funcs)
if self.checkpointable_layers is not None:
return all(f.__class__.__name__ in self.checkpointable_layers for f in funcs)
params = [f.parameters() for f in funcs if isinstance(f, torch.nn.Module)]
return any(len(list(p)) > 0 for p in params)
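# Illustrative sketch (not part of DeepSpeed): a minimal way to describe a
# pipeline with LayerSpec and wrap it in PipelineModule. This assumes
# deepspeed.init_distributed() has already been called under a launcher
# (e.g. `deepspeed train.py`); the dimensions and the helper name
# _example_build_pipeline are hypothetical.
def _example_build_pipeline(num_stages=2, in_dim=32, hidden_dim=64, n_classes=10):
    specs = [
        LayerSpec(nn.Linear, in_dim, hidden_dim),
        LayerSpec(nn.ReLU),
        LayerSpec(nn.Linear, hidden_dim, n_classes),
    ]
    # Balance stages by parameter count (the default partition_method).
    return PipelineModule(layers=specs,
                          num_stages=num_stages,
                          loss_fn=torch.nn.CrossEntropyLoss(),
                          partition_method='parameters')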
| 27,190 | 42.998382 | 171 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/runtime/pipe/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .module import PipelineModule, LayerSpec, TiedLayerSpec
from .topology import ProcessTopology
| 195 | 23.5 | 60 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/runtime/pipe/schedule.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from ..utils import call_to_str
from abc import ABC, abstractmethod
class PipeSchedule(ABC):
"""Directs the execution of a pipeline engine by generating sequences of
:class:`PipeInstruction`.
Schedules are generators that yield sequences of
:class:`PipeInstruction` to process the micro-batches in one batch.
Each yielded step is atomic in the sense that a barrier
synchronization can be placed between successive steps without
deadlock.
Below is an example schedule that implements data parallelism with gradient accumulation:
.. code-block:: python
class DataParallelSchedule(PipeSchedule):
def steps(self):
for step_id in range(self.micro_batches):
cmds = [
LoadMicroBatch(buffer_id=0),
ForwardPass(buffer_id=0),
BackwardPass(buffer_id=0),
]
if step_id == self.micro_batches - 1:
cmds.extend([
ReduceGrads(),
OptimizerStep(),
])
yield cmds
def num_pipe_buffers(self):
return 1
Args:
micro_batches (int): The number of micro-batches that comprise a batch.
stages (int): The number of pipeline stages.
stage_id (int): The pipe stage that will execute the generated schedule.
"""
def __init__(self, micro_batches, stages, stage_id):
super().__init__()
self.micro_batches = micro_batches
self.stages = stages
self.stage_id = stage_id
self.prev_stage = self.stage_id - 1
self.next_stage = self.stage_id + 1
@abstractmethod
def steps(self):
"""Yield a list of :class:`PipeInstruction` for each step in the schedule.
.. note::
Schedules must implement ``steps()`` to define the schedule.
Returns:
Instructions to be executed as one step of the pipeline
"""
pass
def num_pipe_buffers(self):
"""The number of pipeline buffers that will be used by this stage.
.. note::
Schedules should specialize ``num_pipe_buffers()`` for memory savings at scale.
Returns:
The number of buffers for the engine to allocate.
"""
return self.micro_batches
def _valid_micro_batch(self, micro_batch_id):
return 0 <= micro_batch_id < self.micro_batches
def _valid_stage(self, stage_id):
return 0 <= stage_id < self.stages
@property
def stage(self):
"""Stage index used to configure this schedule."""
return self.stage_id
@property
def num_stages(self):
"""The number of total pipeline stages used to configure this schedule."""
return self.stages
@property
def num_micro_batches(self):
"""The number of total micro_batches used to configure this schedule."""
return self.micro_batches
@property
def is_first_stage(self):
"""True if the configured ``stage_id`` is the first stage in the pipeline."""
return self.stage_id == 0
@property
def is_last_stage(self):
"""True if the configured ``stage_id`` is the last stage in the pipeline."""
return self.stage_id == self.stages - 1
def _buffer_idx(self, micro_batch_id):
"""Map a micro-batch index to a pipeline buffer index.
This method uses a cyclic allocation strategy.
Args:
micro_batch_id (int): The micro-batch index relative to the beginning of the schedule.
Returns:
int: The index of the buffer that should store data.
"""
assert self._valid_micro_batch(micro_batch_id)
return micro_batch_id % self.num_pipe_buffers()
def __iter__(self):
self.it = None
return self
def __next__(self):
if self.it is None:
self.it = self.steps()
return next(self.it)
class InferenceSchedule(PipeSchedule):
"""A schedule for inferencing batches using pipeline parallelism.
"""
def steps(self):
""""""
prev_micro_batch_id = -1
total_steps = self.micro_batches + self.stages - 1
for step_id in range(total_steps):
cmds = []
micro_batch_id = step_id - self.stage_id
# Alternate send/recv buffers
if _is_even(self.stage_id):
recv_buf = step_id % 2
send_buf = (step_id + 1) % 2
else:
recv_buf = (step_id + 1) % 2
send_buf = step_id % 2
if self.is_first_stage or self.is_last_stage:
if self._valid_micro_batch(micro_batch_id):
cmds.append(LoadMicroBatch(recv_buf))
if _is_even(self.stage_id):
if self._valid_stage(self.next_stage):
if self._valid_micro_batch(micro_batch_id - 1):
cmds.append(SendActivation(send_buf))
if self._valid_stage(self.prev_stage):
if self._valid_micro_batch(micro_batch_id):
cmds.append(RecvActivation(recv_buf))
else:
if self._valid_stage(self.prev_stage):
if self._valid_micro_batch(micro_batch_id):
cmds.append(RecvActivation(recv_buf))
if self._valid_stage(self.next_stage):
if self._valid_micro_batch(micro_batch_id - 1):
cmds.append(SendActivation(send_buf))
if self._valid_micro_batch(micro_batch_id):
cmds.append(ForwardPass(recv_buf))
yield cmds
def num_pipe_buffers(self):
"""Only two pipeline buffers are required for inferencing.
Returns:
``2``
"""
return 2
class TrainSchedule(PipeSchedule):
"""A schedule for training a batch using hybrid parallelism.
Pipeline parallelism is extracted through gradient accumulation and thus
convergence follows that of a data parallel approach with the same batch
size.
"""
def steps(self):
""""""
prev_micro_batch_id = -1
total_steps = 2 * (self.micro_batches + self.stages - 1)
for step_id in range(total_steps):
# Map the step of the pipeline to the micro-batch id and also whether it is a
# forward or backward pass step.
micro_batch_id, is_forward = self._step_to_micro_batch(step_id)
if self._valid_micro_batch(prev_micro_batch_id):
prev_buffer = self._buffer_idx(prev_micro_batch_id)
if self._valid_micro_batch(micro_batch_id):
curr_buffer = self._buffer_idx(micro_batch_id)
cmds = []
# Exchange activations
if is_forward:
if self._valid_micro_batch(prev_micro_batch_id) and self._valid_stage(self.prev_stage):
cmds.append(SendGrad(prev_buffer))
if self._valid_micro_batch(micro_batch_id) and self._valid_stage(self.prev_stage):
cmds.append(RecvActivation(curr_buffer))
else:
if self._valid_micro_batch(micro_batch_id) and self._valid_stage(self.next_stage):
cmds.append(RecvGrad(curr_buffer))
if self._valid_micro_batch(prev_micro_batch_id) and self._valid_stage(self.next_stage):
cmds.append(SendActivation(prev_buffer))
# First/last stage loads
if self.stage_id == 0 or self.stage_id == self.stages - 1:
if is_forward and self._valid_micro_batch(micro_batch_id):
cmds.append(LoadMicroBatch(curr_buffer))
# Computation
if self._valid_micro_batch(micro_batch_id):
if is_forward:
cmds.append(ForwardPass(curr_buffer))
else:
cmds.append(BackwardPass(curr_buffer))
# Model step at the end of the batch
if step_id == total_steps - 1:
cmds.append(ReduceTiedGrads())
cmds.append(ReduceGrads())
cmds.append(OptimizerStep())
# Prepare state for next time
prev_micro_batch_id = micro_batch_id
yield cmds
def num_pipe_buffers(self):
"""Return the number of pipeline buffers required for this stage.
This is equivalent to the maximum number of in-flight forward passes,
since we need to remember the activations of forward passes in order
to run backpropagation. For synchronous 1F1B, this is equivalent to
the index difference between this stage and the last stage.
"""
buffers = min(self.stages - self.stage_id, self.micro_batches)
return max(2, buffers)
def _step_to_micro_batch(self, step_id):
if _is_even(step_id) and _is_even(self.stage_id):
micro_batch_id = self._even_step_forward_id(step_id)
is_forward = True
elif _is_odd(step_id) and _is_odd(self.stage_id):
micro_batch_id = self._odd_step_forward_id(step_id)
is_forward = True
elif _is_even(step_id) and _is_odd(self.stage_id):
micro_batch_id = self._even_step_backward_id(step_id)
is_forward = False
elif _is_odd(step_id) and _is_even(self.stage_id):
micro_batch_id = self._odd_step_backward_id(step_id)
is_forward = False
else:
assert False
return micro_batch_id, is_forward
def _even_step_forward_id(self, step_id):
base = step_id // 2
micro_batch_id = int(base - self.stage_id // 2)
return micro_batch_id
def _odd_step_forward_id(self, step_id):
base = (step_id - 1) // 2
micro_batch_id = int(base - self.stage_id // 2)
return micro_batch_id
def _even_step_backward_id(self, step_id):
base = step_id // 2
micro_batch_id = int(base - self.stages + (self.stage_id + 1) // 2)
return micro_batch_id
def _odd_step_backward_id(self, step_id):
base = ((step_id - 1) // 2) - self.stages + 1
micro_batch_id = int(base + self.stage_id // 2)
return micro_batch_id
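# Illustrative sketch (not part of DeepSpeed): TrainSchedule can be iterated
# on its own (no engine or distributed backend needed) to inspect the 1F1B
# command stream a stage would execute. The helper name
# _example_train_schedule is hypothetical.
def _example_train_schedule(micro_batches=2, stages=2, stage_id=0):
    sched = TrainSchedule(micro_batches=micro_batches, stages=stages, stage_id=stage_id)
    # Each yielded step is a list of PipeInstruction objects; the final step
    # ends with ReduceTiedGrads/ReduceGrads/OptimizerStep.
    return [[type(cmd).__name__ for cmd in step] for step in sched.steps()]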
class DataParallelSchedule(PipeSchedule):
"""An example schedule that trains using traditional data parallelism with gradient
accumulation.
"""
def steps(self):
""""""
for step_id in range(self.micro_batches):
cmds = [
LoadMicroBatch(buffer_id=0),
ForwardPass(buffer_id=0),
BackwardPass(buffer_id=0),
]
if step_id == self.micro_batches - 1:
cmds.extend([
ReduceGrads(),
OptimizerStep(),
])
yield cmds
def num_pipe_buffers(self):
"""Only one pipeline buffer needed.
"""
return 1
class PipeInstruction:
"""Base class for all instructions to be executed by the pipeline engine.
All keyword arguments are stored as members similar to a ``namedtuple``. These are
then accessible to the :class:`PipeEngine` during execution.
Args:
kwargs (optional): keyword arguments to store as members
"""
def __init__(self, **kwargs):
self.name = self.__class__.__name__
self.kwargs = kwargs
for key, val in kwargs.items():
setattr(self, key, val)
def __repr__(self):
return call_to_str(self.name, **self.kwargs)
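# Illustrative sketch (not part of DeepSpeed): keyword arguments given to a
# PipeInstruction become attributes and are echoed by repr(), which is how
# the engine forwards them to the matching _exec_* handler. The helper name
# _example_instruction_kwargs is hypothetical.
def _example_instruction_kwargs():
    cmd = PipeInstruction(buffer_id=3)
    assert cmd.buffer_id == 3 and cmd.kwargs == {'buffer_id': 3}
    return repr(cmd)  # roughly 'PipeInstruction(buffer_id=3)'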
class OptimizerStep(PipeInstruction):
"""Performs one step with the optimizer and zeros gradients.
.. note:: Should be issued after :class:`ReduceGrads` and :class:`ReduceTiedGrads`.
.. note:: Can be a synchronization point among data-parallel ranks.
"""
pass
class ReduceGrads(PipeInstruction):
"""Reduce the computed gradients among data-parallel processes within the stage.
"""
pass
class ReduceTiedGrads(PipeInstruction):
"""Reduce the computed gradients of tied modules within a pipeline-parallel group.
.. warning::
The stages included in this synchronization point are not known until
the model is partitioned among pipeline stages. In the worst case, it
includes all pipeline stages. This instruction should be scheduled
carefully to avoid deadlocks.
"""
pass
class BufferOpInstruction(PipeInstruction):
"""A pipeline instruction that operates on pipeline buffer(s).
Args:
buffer_id (int): the index of the pipeline buffer(s) to modify.
"""
def __init__(self, buffer_id, **kwargs):
super().__init__(buffer_id=buffer_id, **kwargs)
# IO
class LoadMicroBatch(BufferOpInstruction):
"""Load a micro-batch into a buffer.
Roughly:
.. code-block:: python
buffers['inputs'][buffer_id] = next(data_iter)
"""
pass
# Compute
class ForwardPass(BufferOpInstruction):
"""Compute a forward pass.
Roughly:
.. code-block:: python
buffers['outputs'][buffer_id] = forward(buffers['inputs'][buffer_id])
"""
pass
class BackwardPass(BufferOpInstruction):
"""Compute a backward pass and accumulate gradients.
Roughly:
.. code-block:: python
outputs = buffers['outputs'][buffer_id]
gradients = buffers['gradients'][buffer_id]
torch.autograd.backward(tensors=outputs,
grad_tensors=gradients)
"""
pass
# Communication
class SendActivation(BufferOpInstruction):
"""Send activations to the next stage in the pipeline.
Roughly:
.. code-block:: python
send(buffers['outputs'][buffer_id])
.. note::
The communication is blocking and must be paired with a :class:`RecvActivation`
on the next pipeline stage to avoid deadlock.
"""
pass
class RecvActivation(BufferOpInstruction):
"""Receive activations from the previous stage in the pipeline.
Roughly:
.. code-block:: python
buffers['inputs'][buffer_id] = recv()
.. note::
The communication is blocking and must be paired with a :class:`SendActivation`
on the previous pipeline stage to avoid deadlock.
"""
pass
class SendGrad(BufferOpInstruction):
"""Send computed gradients to the previous pipeline stage.
with respect to the received activations
.. note::
Only received tensors with ``requires_grad==True`` will produce gradients.
Missing gradients will be replaced with ``None`` on the receiving stage.
.. note::
The communication is blocking and must be paired with a :class:`RecvGrad`
on the previous pipeline stage to avoid deadlock.
"""
pass
class RecvGrad(BufferOpInstruction):
"""Receive computed gradients the next pipeline stage.
.. note::
Only activations with ``requires_grad==True`` will produce gradients.
Missing gradients will be replaced with ``None``.
.. note::
The communication is blocking and must be paired with a :class:`SendGrad`
on the next pipeline stage to avoid deadlock.
"""
pass
def _is_even(x):
return x % 2 == 0
def _is_odd(x):
return x % 2 != 0
| 15,546 | 30.408081 | 103 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/moe/layer.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.utils import log_dist
from deepspeed.utils import groups
from .sharded_moe import MOELayer, TopKGate
from .experts import Experts
import typing
class MoE(torch.nn.Module):
"""Initialize an MoE layer.
Arguments:
hidden_size (int): the hidden dimension of the model, importantly this is also the input and output dimension.
expert (torch.nn.Module): the torch module that defines the expert (e.g., MLP, torch.nn.Linear).
num_experts (int, optional): default=1, the total number of experts per layer.
ep_size (int, optional): default=1, number of ranks in the expert parallel world or group.
k (int, optional): default=1, top-k gating value, only supports k=1 or k=2.
capacity_factor (float, optional): default=1.0, the capacity of the expert at training time.
eval_capacity_factor (float, optional): default=1.0, the capacity of the expert at eval time.
min_capacity (int, optional): default=4, the minimum capacity per expert regardless of the capacity_factor.
use_residual (bool, optional): default=False, make this MoE layer a Residual MoE (https://arxiv.org/abs/2201.05596) layer.
noisy_gate_policy (str, optional): default=None, noisy gate policy, valid options are 'Jitter', 'RSample' or 'None'.
drop_tokens (bool, optional): default=True, whether to drop tokens - (setting to False is equivalent to infinite capacity).
use_rts (bool, optional): default=True, whether to use Random Token Selection.
use_tutel (bool, optional): default=False, whether to use Tutel optimizations (if installed).
enable_expert_tensor_parallelism (bool, optional): default=False, whether to use tensor parallelism for experts
"""
def __init__(self,
hidden_size,
expert,
num_experts=1,
ep_size=1,
k=1,
capacity_factor=1.,
eval_capacity_factor=1.,
min_capacity=4,
use_residual=False,
noisy_gate_policy: typing.Optional[str] = None,
drop_tokens: bool = True,
use_rts=True,
use_tutel: bool = False,
enable_expert_tensor_parallelism: bool = False):
super(MoE, self).__init__()
self.use_residual = use_residual
self.enable_expert_tensor_parallelism = enable_expert_tensor_parallelism
assert num_experts % ep_size == 0, f"Number of experts ({num_experts}) should be divisible by expert parallel size ({ep_size})"
self.ep_size = ep_size
self.expert_group_name = f"ep_size_{self.ep_size}"
self.num_experts = num_experts
self.num_local_experts = num_experts // self.ep_size
log_dist(
f'Creating MoE layer with num_experts: {num_experts} | num_local_experts: {self.num_local_experts} | expert_parallel_size: {self.ep_size}',
[0])
assert noisy_gate_policy is None or noisy_gate_policy in ['None', 'Jitter', 'RSample'], \
'Unsupported noisy_gate_policy: ' + noisy_gate_policy
experts = Experts(expert, self.num_local_experts, self.expert_group_name)
self.deepspeed_moe = MOELayer(TopKGate(hidden_size, num_experts, k, capacity_factor, eval_capacity_factor,
min_capacity, noisy_gate_policy, drop_tokens, use_rts),
experts,
self.expert_group_name,
self.ep_size,
self.num_local_experts,
use_tutel=use_tutel)
if self.use_residual:
self.mlp = expert
# coefficient is used for weighted sum of the output of expert and mlp
self.coefficient = torch.nn.Linear(hidden_size, 2)
def set_deepspeed_parallelism(self):
self._create_process_groups()
def _create_process_groups(self):
# Create process group for a layer if needed
if self.expert_group_name not in groups._get_expert_parallel_group_dict():
print(f"No existing process group found, creating a new group named: {self.expert_group_name}")
if (groups.mpu is None) or (not self.enable_expert_tensor_parallelism):
# Condition 1 - no groups.mpu means no tensor parallelism
# Condition 2 - disabling expert tensor parallelism on purpose
groups._create_expert_and_data_parallel(self.ep_size)
else:
# expert tensor parallelism is enabled
groups._create_expert_data_and_model_parallel(self.ep_size, mpu=groups.mpu)
# Set the group handle for the MOELayer (deepspeed_moe) object
self.deepspeed_moe._set_ep_group(groups._get_expert_parallel_group(self.expert_group_name))
def forward(self, hidden_states, used_token=None):
""" MoE forward
Arguments:
hidden_states (Tensor): input to the layer
used_token (Tensor, optional): default: None, mask only used tokens
Returns:
A tuple including output, gate loss, and expert count.
* output (Tensor): output of the model
* l_aux (Tensor): gate loss value
* exp_counts (int): expert count
"""
output = self.deepspeed_moe(hidden_states, used_token)
if self.use_residual:
# Residual MoE
output_mlp = self.mlp(hidden_states)
if type(output_mlp) is tuple:
output_mlp = output_mlp[0] # Ignore the bias term for now
coef = self.coefficient(hidden_states)
coef = torch.nn.functional.softmax(coef, dim=-1)
output = output * coef[..., 0:1] + output_mlp * coef[..., 1:]
return output, self.deepspeed_moe.l_aux, self.deepspeed_moe.exp_counts
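# Illustrative sketch (not part of DeepSpeed): wrapping a small MLP expert in
# an MoE layer. This assumes deepspeed.init_distributed() has already been
# called and that the world size is divisible by ep_size; the dimensions and
# the helper name _example_moe_block are hypothetical.
def _example_moe_block(hidden_size=128, num_experts=4, ep_size=1):
    expert = torch.nn.Sequential(
        torch.nn.Linear(hidden_size, 4 * hidden_size),
        torch.nn.ReLU(),
        torch.nn.Linear(4 * hidden_size, hidden_size),
    )
    moe = MoE(hidden_size=hidden_size, expert=expert, num_experts=num_experts, ep_size=ep_size, k=1)
    moe.set_deepspeed_parallelism()  # creates/looks up the expert-parallel group
    return moe
# Forward usage: output, l_aux, exp_counts = moe(hidden_states)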
| 6,082 | 47.664 | 151 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/moe/mappings.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# The file has been adapted from the following Megatron-LM file:
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/mpu/mappings.py
# Git commit hash: 9dc3c42a84aa656f583703cf8b6b4f79f712b796
# We retain the following copyright from the original files:
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import deepspeed
def _gather_tokens(input_, dim=0):
"""Gather tensors and concatenate them along a dimension"""
mpu = deepspeed.utils.groups.mpu
input_ = input_.contiguous()
# Size and dimension.
rank = mpu.get_tensor_model_parallel_rank()
tensor_list = [torch.empty_like(input_) for _ in range(mpu.get_tensor_model_parallel_world_size())]
tensor_list[rank] = input_
deepspeed.comm.all_gather(tensor_list, input_, group=mpu.get_tensor_model_parallel_group())
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=dim).contiguous()
return output
def _drop_tokens(input_, dim=0):
"""Divide a tensor among the tensor parallel ranks"""
mpu = deepspeed.utils.groups.mpu
total_chunks = mpu.get_tensor_model_parallel_world_size()
this_chunk = mpu.get_tensor_model_parallel_rank()
assert input_.shape[
dim] % total_chunks == 0, f"input dimension {dim} ({input_.shape[dim]}) is not divisible by tensor parallel world size ({total_chunks})"
chunk_size = input_.shape[dim] // total_chunks
return torch.narrow(input_, dim, this_chunk * chunk_size, chunk_size)
class _GatherTokens(torch.autograd.Function):
"""All gather tokens among the tensor parallel ranks"""
@staticmethod
def symbolic(graph, input_, dim):
return _gather_tokens(input_, dim)
@staticmethod
def forward(ctx, input_, dim):
ctx.dim = dim
return _gather_tokens(input_, dim)
@staticmethod
def backward(ctx, grad_output):
return _drop_tokens(grad_output, ctx.dim), None
class _DropTokens(torch.autograd.Function):
"Divide tokens equally among the tensor parallel ranks"
@staticmethod
def symbolic(graph, input_, dim):
return _drop_tokens(input_, dim)
@staticmethod
def forward(ctx, input_, dim):
ctx.dim = dim
return _drop_tokens(input_, dim)
@staticmethod
def backward(ctx, input_):
return _gather_tokens(input_, ctx.dim), None
def gather_tokens(input_, dim=0):
mpu = deepspeed.utils.groups.mpu
if mpu is None or mpu.get_tensor_model_parallel_world_size() == 1:
# no tensor parallelism for non-experts
return input_
return _GatherTokens.apply(input_, dim)
def drop_tokens(input_, dim=0):
mpu = deepspeed.utils.groups.mpu
if mpu is None or mpu.get_tensor_model_parallel_world_size() == 1:
# no tensor parallelism for non-experts
return input_
return _DropTokens.apply(input_, dim)
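# Hedged sketch (editor's illustration): with a 2-way tensor-model-parallel group,
# drop_tokens keeps this rank's contiguous slice along `dim` and gather_tokens
# all-gathers the slices back:
#   x = torch.randn(4, 8)        # dim 0 divisible by the TP world size (2)
#   y = drop_tokens(x, dim=0)    # shape (2, 8) on each rank
#   z = gather_tokens(y, dim=0)  # shape (4, 8) again on every rank
# When groups.mpu is None or the TP world size is 1, both calls return the input unchanged.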
| 3,529 | 31.990654 | 144 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/moe/sharded_moe.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
The file has been adapted from two fairscale files:
(1) https://github.com/facebookresearch/fairscale/blob/master/fairscale/nn/moe/moe_layer.py
(2) https://github.com/facebookresearch/fairscale/blob/master/fairscale/nn/moe/top2gate.py
Git commit hash: 34df606902a240567a0d898037ece55c2f1336cf
We retain the following license from the original files:
"""
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from deepspeed.utils.timer import SynchronizedWallClockTimer
from deepspeed.utils import logger
from typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import Module
import torch.nn.functional as F
from deepspeed.utils import groups
from .mappings import drop_tokens, gather_tokens
if TYPE_CHECKING:
Base = Module[Tensor]
else:
Base = Module
uniform_map: Dict[torch.device, Callable] = {}
gumbel_map: Dict[torch.device, Callable] = {}
exp_selection_uniform_map: Dict[torch.device, Callable] = {}
try:
# To enable Tutel MoE optimizations:
# python3 -m pip install --user --upgrade git+https://github.com/microsoft/[email protected]
from tutel import moe as tutel_moe
TUTEL_INSTALLED = True
except:
# Fail silently so we don't spam logs unnecessarily if user isn't using tutel
TUTEL_INSTALLED = False
pass
def multiplicative_jitter(x, device: torch.device, epsilon=1e-2):
"""
    Modified from the Switch Transformer paper and the Mesh Transformers implementation.
Multiply values by a random number between 1-epsilon and 1+epsilon.
Makes models more resilient to rounding errors introduced by bfloat16.
This seems particularly important for logits.
Args:
x: a torch.tensor
device: torch.device
epsilon: a floating point value
Returns:
a jittered x.
"""
if epsilon == 0:
return x
uniform = uniform_map.get(device)
if uniform is None:
uniform = torch.distributions.uniform.Uniform(low=torch.tensor(1.0 - epsilon, device=device),
high=torch.tensor(1.0 + epsilon,
device=device)).rsample # type: ignore
uniform_map[device] = uniform
return x * uniform(x.shape)
def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor:
gumbel = gumbel_map.get(device)
if gumbel is None:
one = torch.tensor(1.0, device=device)
zero = torch.tensor(0.0, device=device)
gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore
gumbel_map[device] = gumbel
return gumbel(shape)
from deepspeed import comm as dist
# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity
# See https://arxiv.org/pdf/2006.16668.pdf for details.
# Based on https://github.com/pytorch/pytorch/pull/40762
class _AllToAll(torch.autograd.Function):
@staticmethod
def forward(
ctx: Any,
# TODO: replace with DS process group
group: torch.distributed.ProcessGroup,
input: Tensor) -> Tensor: # type: ignore
ctx.group = group
input = input.contiguous()
output = torch.empty_like(input)
dist.all_to_all_single(output, input, group=group)
return output
@staticmethod
def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor]:
return (None, _AllToAll.apply(ctx.group, *grad_output))
# The einsum rewrites below are on par with or more performant than torch.einsum;
# the USE_EINSUM switch can be bubbled up in the future.
USE_EINSUM = True
# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity
# See https://arxiv.org/pdf/2006.16668.pdf for details.
def einsum(rule, a, b):
if USE_EINSUM:
return torch.einsum(rule, a, b)
elif rule == 's,se->se':
return a.reshape(a.shape[0], -1) * b
elif rule == 'se,sc->sec':
return a.unsqueeze(2) * b.unsqueeze(1)
elif rule == 'se,se->s':
return torch.bmm(a.unsqueeze(1), b.unsqueeze(2)).reshape(-1)
elif rule == 'sec,sm->ecm':
s = a.shape[0]
e = a.shape[1]
c = a.shape[2]
m = b.shape[1]
return torch.matmul(a.reshape(s, -1).t(), b).reshape(e, c, m)
elif rule == 'sec,ecm->sm':
return torch.matmul(a.reshape(a.shape[0], -1), b.reshape(-1, b.shape[-1]))
elif rule == 'ks,ksm->sm':
k = b.shape[0]
s = b.shape[1]
m = b.shape[2]
# [k, s] -> [s, k] -> [s, 1, k]
a = a.t().unsqueeze(1)
# [k,s,m] -> [k, sm] -> [sm, k] -> [s, m, k]
b = b.reshape(k, -1).t().reshape(s, m, k)
# bmm([s, 1, k], [s, m, k]^t) -> [s, m, 1]
return torch.bmm(a, b.transpose(1, 2)).squeeze(2)
else:
return torch.einsum(rule, a, b)
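# Hedged sketch (editor's illustration): the hand-written rewrites above match
# torch.einsum for the rules they cover, e.g. for 'se,sc->sec':
#   a = torch.randn(5, 3)  # [s, e]
#   b = torch.randn(5, 7)  # [s, c]
#   torch.allclose(a.unsqueeze(2) * b.unsqueeze(1), torch.einsum('se,sc->sec', a, b))  # True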
# The following functions are extracted and scripted
# because otherwise during a torch.jit.trace, the non-Tensor
# values used in the calculations get recorded as constants.
# torch.jit.script coerces them into Tensors and preserves
# their dynamic shapes. This enables ONNX export.
# We can't script the entire top1gating function because it
# includes stateful caching logic which is incompatible with ONNX.
@torch.jit.script
def _capacity(gates: Tensor, capacity_factor: Tensor, min_capacity: Tensor) -> Tensor:
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
# to(torch.int64) works around a bug in torch.onnx.export:
# it should cast k to int64 when converting torch.topk but it doesn't.
capacity = torch.ceil((num_tokens / num_experts) * capacity_factor).to(torch.int64)
if capacity < min_capacity:
capacity = min_capacity.to(torch.int64)
return capacity
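# Hedged worked example (editor's illustration): with 8 tokens routed over 4 experts
# and capacity_factor=1.0, capacity = ceil(8 / 4 * 1.0) = 2; with min_capacity=4 the
# result is bumped up to 4.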
@torch.jit.script
def _top_idx(source, k):
return torch.topk(source, k=k, dim=0)[1]
@torch.jit.script
def _one_hot_to_float(x, num_classes):
return F.one_hot(x, num_classes=num_classes).float()
def top1gating(logits: Tensor,
capacity_factor: float,
min_capacity: int,
used_token: Tensor = None,
noisy_gate_policy: Optional[str] = None,
drop_tokens: bool = True,
use_rts: bool = True,
use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Implements Top1Gating on logits."""
if noisy_gate_policy == 'RSample':
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
# everything is in fp32 in this function
gates = F.softmax(logits, dim=1)
capacity = _capacity(gates, torch.tensor(capacity_factor), torch.tensor(min_capacity))
# Create a mask for 1st's expert per token
# noisy gating
indices1_s = torch.argmax(logits_w_noise if noisy_gate_policy == 'RSample' else gates, dim=1)
num_experts = int(gates.shape[1])
mask1 = F.one_hot(indices1_s, num_classes=num_experts)
# mask only used tokens
if used_token is not None:
mask1 = einsum("s,se->se", used_token, mask1)
# gating decisions
exp_counts = torch.sum(mask1, dim=0).detach().to('cpu')
# if we don't want to drop any tokens
if not drop_tokens:
new_capacity = torch.max(exp_counts).to(logits.device)
dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=dist.get_world_group())
capacity = new_capacity
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.float(), dim=0)
l_aux = torch.sum(me * ce) * num_experts
# Random Token Selection
if use_rts:
uniform = exp_selection_uniform_map.get(logits.device)
if uniform is None:
uniform = torch.distributions.uniform.Uniform(low=torch.tensor(0.0, device=logits.device),
high=torch.tensor(1.0, device=logits.device)).rsample
exp_selection_uniform_map[logits.device] = uniform
mask1_rand = mask1 * uniform(mask1.shape)
else:
mask1_rand = mask1
assert logits.shape[
        0] >= min_capacity, "No. of tokens (batch-size) must be at least min_capacity. Either set min_capacity to 0 or increase your batch size."
top_idx = _top_idx(mask1_rand, capacity)
new_mask1 = mask1 * torch.zeros_like(mask1).scatter_(0, top_idx, 1)
mask1 = new_mask1
if use_tutel:
# Tutel doesn't support index values masked with zero
# so we need to replace masked indices with -1
indices_mask = mask1.sum(dim=1) * num_experts - 1
indices1_s = torch.min(indices1_s, indices_mask)
# Compute locations in capacity buffer
if use_tutel:
locations1 = tutel_moe.fast_cumsum_sub_one(mask1)
else:
locations1 = torch.cumsum(mask1, dim=0) - 1
if use_tutel:
gates1_s = (gates * mask1).sum(dim=1)
locations1_s = torch.sum(locations1 * mask1, dim=1)
return l_aux, capacity, num_experts, [
indices1_s,
], [
locations1_s,
], [
gates1_s,
], exp_counts
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
# Normalize gate probabilities
mask1_float = mask1.float()
gates = gates * mask1_float
locations1_sc = _one_hot_to_float(locations1_s, capacity)
combine_weights = einsum("se,sc->sec", gates, locations1_sc)
dispatch_mask = combine_weights.bool()
return l_aux, combine_weights, dispatch_mask, exp_counts
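# Hedged shape summary (editor's note) for the non-Tutel return path above, with
# s tokens, e experts and capacity c:
#   l_aux           - scalar auxiliary load-balancing loss
#   combine_weights - [s, e, c] float weights used to re-combine expert outputs
#   dispatch_mask   - [s, e, c] bool mask used to dispatch tokens into expert capacity slots
#   exp_counts      - [e] per-expert token counts, detached and moved to CPU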
def top2gating(logits: Tensor, capacity_factor: float, min_capacity: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Implements Top2Gating on logits."""
# everything is in fp32 in this function
gates = F.softmax(logits, dim=1)
capacity = _capacity(gates, torch.tensor(capacity_factor * 2), torch.tensor(min_capacity))
# Create a mask for 1st's expert per token
indices1_s = torch.argmax(gates, dim=1)
num_experts = int(gates.shape[1])
mask1 = F.one_hot(indices1_s, num_classes=num_experts)
# Create a mask for 2nd's expert per token using Gumbel-max trick
# https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
# Replace top-expert with min value
logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float("-inf"))
indices2_s = torch.argmax(logits_except1, dim=1)
mask2 = F.one_hot(indices2_s, num_classes=num_experts)
# Compute locations in capacity buffer
locations1 = torch.cumsum(mask1, dim=0) - 1
locations2 = torch.cumsum(mask2, dim=0) - 1
# Update 2nd's location by accounting for locations of 1st
locations2 += torch.sum(mask1, dim=0, keepdim=True)
# gating decisions
exp_counts = torch.sum(mask1, dim=0).detach().to('cpu')
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.float(), dim=0)
l_aux = torch.mean(me * ce) * num_experts * num_experts
# Remove locations outside capacity from mask
mask1 *= torch.lt(locations1, capacity)
mask2 *= torch.lt(locations2, capacity)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
locations2_s = torch.sum(locations2 * mask2, dim=1)
# Normalize gate probabilities
mask1_float = mask1.float()
mask2_float = mask2.float()
gates1_s = einsum("se,se->s", gates, mask1_float)
gates2_s = einsum("se,se->s", gates, mask2_float)
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s /= denom_s
gates2_s /= denom_s
# Calculate combine_weights and dispatch_mask
gates1 = einsum("s,se->se", gates1_s, mask1_float)
gates2 = einsum("s,se->se", gates2_s, mask2_float)
locations1_sc = _one_hot_to_float(locations1_s, capacity)
locations2_sc = _one_hot_to_float(locations2_s, capacity)
combine1_sec = einsum("se,sc->sec", gates1, locations1_sc)
combine2_sec = einsum("se,sc->sec", gates2, locations2_sc)
combine_weights = combine1_sec + combine2_sec
dispatch_mask = combine_weights.bool()
return l_aux, combine_weights, dispatch_mask, exp_counts
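# Hedged worked example (editor's illustration) of the normalization above: if a token's
# softmax probabilities for its top-2 experts are 0.5 and 0.3, the combine weights become
# 0.5/0.8 = 0.625 and 0.3/0.8 = 0.375, so the two expert outputs are mixed with weights
# that sum to 1.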
class TopKGate(Module):
"""Gate module which implements Top2Gating as described in Gshard_.
::
gate = TopKGate(model_dim, num_experts)
l_aux, combine_weights, dispatch_mask = gate(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
model_dim (int):
size of model embedding dimension
        num_experts (int):
number of experts in model
"""
wg: torch.nn.Linear
def __init__(self,
model_dim: int,
num_experts: int,
k: int = 1,
capacity_factor: float = 1.0,
eval_capacity_factor: float = 1.0,
min_capacity: int = 8,
noisy_gate_policy: Optional[str] = None,
drop_tokens: bool = True,
use_rts: bool = True) -> None:
super().__init__()
# Only top-1 and top-2 are supported at the moment.
if k != 1 and k != 2:
raise ValueError('Only top-1 and top-2 gatings are supported.')
self.wg = torch.nn.Linear(model_dim, num_experts, bias=False).float()
self.k = k
self.capacity_factor = capacity_factor
self.eval_capacity_factor = eval_capacity_factor
self.min_capacity = min_capacity
self.noisy_gate_policy = noisy_gate_policy
self.timers = SynchronizedWallClockTimer()
self.wall_clock_breakdown = False
self.gate_time = 0.0
self.drop_tokens = drop_tokens
self.use_rts = use_rts
def forward(self,
input: torch.Tensor,
used_token: torch.Tensor = None,
use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor]: # type: ignore
if self.wall_clock_breakdown:
self.timers('TopKGate').start()
if self.wg.weight.dtype != torch.float32:
self.wg = self.wg.float()
input_fp32 = input.float()
# input jittering
if self.noisy_gate_policy == 'Jitter' and self.training:
input_fp32 = multiplicative_jitter(input_fp32, device=input.device)
logits = self.wg(input_fp32)
if self.k == 1:
gate_output = top1gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor,
self.min_capacity, used_token, self.noisy_gate_policy if self.training else None,
self.drop_tokens, self.use_rts, use_tutel)
else:
gate_output = top2gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor,
self.min_capacity)
if self.wall_clock_breakdown:
self.timers('TopKGate').stop()
self.gate_time = self.timers('TopKGate').elapsed(reset=False)
return gate_output
class MOELayer(Base):
"""MOELayer module which implements MixtureOfExperts as described in Gshard_.
::
gate = TopKGate(model_dim, num_experts)
moe = MOELayer(gate, expert)
output = moe(input)
l_aux = moe.l_aux
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
gate (torch.nn.Module):
gate network
expert (torch.nn.Module):
expert network
"""
def __init__(self,
gate: Module,
experts: Module,
ep_group_name,
ep_size,
num_local_experts: int,
use_tutel: bool = False) -> None:
super().__init__()
self.gate = gate
self.experts = experts
self.ep_group = None
self.ep_size = ep_size
self.ep_group_name = ep_group_name
self.num_local_experts = num_local_experts
self.time_falltoall = 0.0
self.time_salltoall = 0.0
self.time_moe = 0.0
self.timers = SynchronizedWallClockTimer()
self.wall_clock_breakdown = False
self.use_tutel = use_tutel and TUTEL_INSTALLED and gate.k == 1
if self.use_tutel:
logger.info('Using Tutel optimizations.')
elif use_tutel and not TUTEL_INSTALLED:
logger.warning("Tutel optimization requested but not installed. "
"Proceeding without Tutel.")
elif use_tutel and TUTEL_INSTALLED and gate.k != 1:
logger.warning("To enable Tutel optimization, use top-1 instead of top-2 gate. "
"Proceeding without Tutel.")
def _set_ep_group(self, ep_group):
self.ep_group = ep_group
def forward(self, *input: Tensor, **kwargs: Any) -> Tensor:
if self.wall_clock_breakdown:
self.timers('moe').start()
# Implement Algorithm 2 from GShard paper.
d_model = input[0].shape[-1]
# Initial implementation -> Reshape into S tokens by dropping sequence dimension.
# Reshape into G groups so that each group can distribute tokens equally
# group_size = kwargs['group_size'] if 'group_size' in kwargs.keys() else 1
reshaped_input = input[0].reshape(-1, d_model)
if self.use_tutel:
self.l_aux, C, E, indices_, locations_, gates_, self.exp_counts = self.gate(reshaped_input, input[1], True)
S, M = reshaped_input.size(0), reshaped_input.size(1)
if not hasattr(self, '_tutel_dispatcher'):
self._tutel_dispatcher = tutel_moe.fast_dispatcher(E, C, M, dispatch_dtype=reshaped_input.dtype)
self._tutel_dispatcher.update(indices_, locations_, gates_, capacity=C)
dispatched_input = self._tutel_dispatcher.encode(reshaped_input)
else:
self.l_aux, combine_weights, dispatch_mask, self.exp_counts = self.gate(reshaped_input, input[1])
dispatched_input = einsum("sec,sm->ecm", dispatch_mask.type_as(input[0]), reshaped_input)
if self.wall_clock_breakdown:
self.timers('falltoall').start()
if groups._get_expert_model_parallel_world_size() == 1:
# If the non-expert is tensor-parallel, it will create
# duplicate tokens on the tensor-parallel ranks.
# Since our experts are not tensor-parallel, these duplicates
# need to be dropped to ensure correctness.
# this also doubles up as a communication optimization as we are
# reducing the all-to-all communication volume.
dispatched_input = drop_tokens(dispatched_input, dim=1)
dispatched_input = _AllToAll.apply(self.ep_group, dispatched_input)
if self.wall_clock_breakdown:
self.timers('falltoall').stop()
self.time_falltoall = self.timers('falltoall').elapsed(reset=False)
# Re-shape after all-to-all: ecm -> gecm
dispatched_input = dispatched_input.reshape(self.ep_size, self.num_local_experts, -1, d_model)
expert_output = self.experts(dispatched_input)
if self.wall_clock_breakdown:
self.timers('salltoall').start()
expert_output = _AllToAll.apply(self.ep_group, expert_output)
if self.wall_clock_breakdown:
self.timers('salltoall').stop()
self.time_salltoall = self.timers('salltoall').elapsed(reset=False)
# Re-shape back: gecm -> ecm
expert_output = expert_output.reshape(self.ep_size * self.num_local_experts, -1, d_model)
if groups._get_expert_model_parallel_world_size() == 1:
# the dropped duplicate tokens need to be gathered on each
# tensor parallel rank again for the tensor-parallel
# non-expert of the next layer.
expert_output = gather_tokens(expert_output, dim=1)
if self.use_tutel:
combined_output = self._tutel_dispatcher.decode(expert_output.view(E * C, M))
else:
combined_output = einsum("sec,ecm->sm", combine_weights.type_as(input[0]), expert_output)
a = combined_output.reshape(input[0].shape)
if self.wall_clock_breakdown:
self.timers('moe').stop()
self.time_moe = self.timers('moe').elapsed(reset=False)
return a
| 20,621 | 36.494545 | 151 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/moe/utils.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from typing import List, Tuple, Dict
import torch
from .layer import MoE
def has_moe_layers(m):
has_moe = False
num_experts = 0
for _, module in m.named_modules():
if isinstance(module, MoE):
has_moe = True
num_experts = module.num_experts
break
return has_moe, num_experts
def is_moe_param(param: torch.Tensor) -> bool:
if hasattr(param, "allreduce") and not param.allreduce:
return True
return False
def split_params_into_shared_and_expert_params(
        params: List[torch.nn.Parameter]) -> Tuple[List[torch.nn.Parameter], List[torch.nn.Parameter]]:
shared_params, expert_params = [], []
for p in params:
if is_moe_param(p):
expert_params.append(p)
else:
shared_params.append(p)
return shared_params, expert_params
def split_params_grads_into_shared_and_expert_params(
        group: List[torch.nn.Parameter]) -> Tuple[List[torch.nn.Parameter], List[torch.nn.Parameter]]:
"""Split grad of parameters into grads of non-expert params
and grads of expert params. This is useful while computing
grad-norms for clipping and overflow detection
    Args:
        group (List[torch.nn.Parameter]):
            The group of parameters to split
Returns:
Tuple[List[torch.nn.Parameter], List[torch.nn.Parameter]]:
list of gradients for non MoE params, list of gradients of MoE params
"""
expert_grads = []
shared_grads = []
for p in group:
if p.grad is not None:
if is_moe_param(p):
expert_grads.append(p.grad.to(p.dtype))
else:
shared_grads.append(p.grad.to(p.dtype))
return shared_grads, expert_grads
def split_params_into_different_moe_groups_for_optimizer(param_groups: Tuple[Dict],
max_group_size=178956971) -> Tuple[Dict]:
"""Split parameters into different MoE groups for optimizer
Args:
param_groups (Tuple[Dict]):
The list of parameter groups to split
Returns:
Tuple[Dict]:
list of MoE/non-MoE groups for optimizer
"""
if isinstance(param_groups, tuple):
param_groups = list(param_groups) # Tuple cannot be modified
elif isinstance(param_groups, dict):
param_groups = [param_groups]
elif not isinstance(param_groups, list):
raise ValueError(f"Unknown param group type of {type(param_groups)}")
# gather all data parallel group names
data_parallel_group_names = set()
for param_group in param_groups:
for param in param_group["params"]:
if is_moe_param(param):
data_parallel_group_names.add(param.group_name)
data_parallel_group_names = list(data_parallel_group_names)
group_moe = {}
# Create the param MoE groups, leave param assign to next step
for param_group in param_groups:
group_moe[param_group['name']] = {}
for key in data_parallel_group_names:
group_moe[param_group['name']][key] = {}
group_moe[param_group['name']][key]['name'] = key
group_moe[param_group['name']][key]['moe'] = True
for ori_key in param_group.keys():
if ori_key != 'name':
if ori_key == 'params':
group_moe[param_group['name']][key][ori_key] = []
else:
group_moe[param_group['name']][key][ori_key] = param_group[ori_key]
# Assign param
for param_group in param_groups:
new_params = []
for param in param_group['params']:
if is_moe_param(param):
group_moe[param_group['name']][param.group_name]['params'].append(param)
# param_group['params'].remove(param)
else:
new_params.append(param)
param_group['params'] = new_params
# Flatten the moe groups
if max_group_size is not None:
for k, v in group_moe.items():
for k1, v1 in v.items():
cur_group = []
all_groups = []
size_of_cur_group = 0
for param in v1['params']:
if size_of_cur_group + param.numel() <= max_group_size:
cur_group.append(param)
size_of_cur_group += param.numel()
else:
all_groups.append(cur_group)
cur_group = [param]
size_of_cur_group = param.numel()
if cur_group:
all_groups.append(cur_group)
for group in all_groups:
new_dict = {}
for key, val in v1.items():
if key != 'params':
new_dict[key] = val
new_dict['params'] = group
param_groups.append(new_dict)
else:
for k, v in group_moe.items():
for k1, v1 in v.items():
param_groups.append(v1)
return tuple(param_groups)
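# Hedged usage sketch (editor's illustration; `model` and the hyperparameters are
# assumptions): split an existing group so each MoE expert-parallel group gets its own
# optimizer param group before constructing the optimizer:
#   param_groups = [{'name': 'dense', 'params': list(model.parameters()), 'lr': 1e-4}]
#   param_groups = split_params_into_different_moe_groups_for_optimizer(param_groups)
#   optimizer = torch.optim.AdamW(list(param_groups), lr=1e-4)
# Every input group must carry a 'name' key; MoE parameters are recognized via the
# `allreduce`/`group_name` attributes set in experts.py.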
| 5,214 | 34.719178 | 98 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/moe/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
| 140 | 22.5 | 44 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/moe/experts.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import copy
class Experts(torch.nn.Module):
def __init__(self, expert, num_local_experts=1, expert_group_name=None):
super(Experts, self).__init__()
self.deepspeed_experts = torch.nn.ModuleList([copy.deepcopy(expert) for i in range(num_local_experts)])
self.num_local_experts = num_local_experts
# TODO: revisit allreduce for moe.gate...
for expert in self.deepspeed_experts:
# TODO: Create param groups to handle expert + data case (e.g. param.group = moe_group)
for name, param in expert.named_parameters():
param.allreduce = False
param.group_name = expert_group_name
def forward(self, inputs):
chunks = inputs.chunk(self.num_local_experts, dim=1)
expert_outputs = []
for chunk, expert in zip(chunks, self.deepspeed_experts):
out = expert(chunk)
if type(out) is tuple:
out = out[0] # Ignore the bias term for now
expert_outputs += [out]
expert_output = torch.cat(expert_outputs, dim=1)
return expert_output
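    # Hedged shape note (editor's illustration): in MOELayer the input arrives as
    # [ep_size, num_local_experts, capacity, d_model], so chunking on dim=1 hands each
    # local expert its own [ep_size, 1, capacity, d_model] slice; outputs are then
    # concatenated back along the same dimension.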
| 1,223 | 33 | 111 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/launcher/constants.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
PDSH_LAUNCHER = 'pdsh'
PDSH_MAX_FAN_OUT = 1024
OPENMPI_LAUNCHER = 'openmpi'
MPICH_LAUNCHER = 'mpich'
IMPI_LAUNCHER = 'impi'
SLURM_LAUNCHER = 'slurm'
MVAPICH_LAUNCHER = 'mvapich'
MVAPICH_TMP_HOSTFILE = '/tmp/deepspeed_mvapich_hostfile'
ELASTIC_TRAINING_ID_DEFAULT = "123456789"
| 375 | 21.117647 | 56 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/launcher/launch.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
DeepSpeed launcher. This is similar to torch's distributed.launch but supports
additional features such as arbitrary GPU exclusion.
deepspeed.launcher.launch is intended to be run on a single worker node and
will spawn several worker sub-processes depending on how many devices/ranks
are on the worker.
"""
import sys
import subprocess
import os
import json
import base64
import time
import signal
import psutil
from collections import defaultdict
from typing import Dict
from argparse import ArgumentParser, REMAINDER
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT
from ..nebula.constants import DLTS_POD_ENV_PATH
from ..utils import logger, get_numactl_cmd
from ..elasticity import is_torch_elastic_compatible
from .constants import ELASTIC_TRAINING_ID_DEFAULT
PID_FILE_BASEPATH = "/tmp"
def parse_args():
parser = ArgumentParser(description="DeepSpeed distributed training launch"
" utility that creates multiple distributed"
" processes on a single node")
# Optional arguments for the launch helper
parser.add_argument("--node_rank",
type=int,
default=0,
help="The rank of the node for multi-node distributed "
"training")
parser.add_argument("--master_addr",
default="127.0.0.1",
type=str,
help="Master node (rank 0)'s address, should be either"
" the IP address or the hostname of node 0, for"
" single node multi-proc training, the"
" --master_addr can simply be 127.0.0.1")
parser.add_argument("--master_port",
default=TORCH_DISTRIBUTED_DEFAULT_PORT,
type=int,
help="Master node (rank 0)'s free port that needs to "
"be used for communication during distributed "
"training")
parser.add_argument("--world_info", default="None", type=str, help="world info base64 encoded dictionary")
parser.add_argument("--module",
action="store_true",
help="Change each process to interpret the launch "
"script as a Python module, executing with the same "
"behavior as 'python -m'.")
parser.add_argument("--no_python",
action="store_true",
help="Skip prepending the training script with "
"'python' - just execute it directly.")
parser.add_argument("--enable_elastic_training", action="store_true", help="Enable elastic training support.")
parser.add_argument("--min_elastic_nodes", type=int, default=-1, help="Min number of nodes in elastic training.")
parser.add_argument("--max_elastic_nodes", type=int, default=-1, help="Max number of nodes in elastic training.")
parser.add_argument("--no_local_rank",
action="store_true",
help="Do not pass local_rank as an argument when calling "
"the user's training script.")
parser.add_argument("--save_pid",
type=int,
default=0,
help="main launching process pid, for internal pid tracking")
parser.add_argument("--enable_each_rank_log",
default="None",
type=str,
help="redirect the stdout and stderr from each rank into different log files")
parser.add_argument("--bind_cores_to_rank",
action="store_true",
help="Bind each rank to different cores of the host. "
"This improves host efficiency especially for CPU backend")
parser.add_argument("--bind_core_list",
type=str,
default=None,
help="List of cores to bind to with comma separated list of "
"numbers and range. i.e. 1,3-5,7 => [1,3,4,5,7]. When not "
"specified, all cores on system would be used rank binding")
# positional
parser.add_argument("training_script",
type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script")
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
# Adapted from https://psutil.readthedocs.io/en/latest/#kill-process-tree
def terminate_process_tree(pid):
process = psutil.Process(pid)
children = process.children(recursive=True)
children.append(process)
for child in children:
try:
child.terminate()
except psutil.NoSuchProcess:
pass
gone, alive = psutil.wait_procs(children, timeout=30)
for p in alive:
p.kill()
def main():
args = parse_args()
current_env = os.environ.copy()
for k in current_env.keys():
if "NCCL" in k:
logger.info(f"{args.node_rank} {k}={current_env[k]}")
if args.world_info == "None":
raise ValueError("world_info can not be None")
world_info = base64.urlsafe_b64decode(args.world_info)
world_info = json.loads(world_info)
logger.info(f"WORLD INFO DICT: {world_info}")
node_list = list(world_info.keys())
args.nnodes = len(node_list)
local_node = node_list[args.node_rank]
local_gpu_ids = world_info[local_node]
num_local_procs = len(local_gpu_ids)
logger.info(f"nnodes={args.nnodes}, num_local_procs={num_local_procs}, node_rank={args.node_rank}")
global_rank_mapping = defaultdict(list)
curr_global_rank = 0
dist_world_size = 0
for node_id in node_list:
gids = world_info[node_id]
dist_world_size += len(gids)
for gid in gids:
global_rank_mapping[node_id].append(curr_global_rank)
curr_global_rank += 1
logger.info(f"global_rank_mapping={global_rank_mapping}")
logger.info(f"dist_world_size={dist_world_size}")
current_env["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, local_gpu_ids))
logger.info(f"Setting CUDA_VISIBLE_DEVICES={current_env['CUDA_VISIBLE_DEVICES']}")
# set PyTorch distributed related environmental variables
current_env["MASTER_ADDR"] = args.master_addr
current_env["MASTER_PORT"] = str(args.master_port)
current_env["WORLD_SIZE"] = str(dist_world_size)
current_env["CROSS_RANK"] = str(args.node_rank)
current_env["CROSS_SIZE"] = str(args.nnodes)
current_env["LOCAL_SIZE"] = str(num_local_procs)
if args.save_pid:
print(f"launcher pid: {os.getpid()}")
pid_file = None
if args.save_pid:
launcher_pid = os.getpid()
pid_file = os.path.join(PID_FILE_BASEPATH, f"{args.save_pid}.deepspeed")
assert not os.path.isfile(pid_file), "pid file exists but shouldn't"
with open(pid_file, 'w') as fd:
fd.write(f"{launcher_pid}")
if not is_torch_elastic_compatible():
if args.enable_elastic_training:
logger.info(f"Disabling elastic training support as \
PyTorch version should be greater than 1.11.x")
args.enable_elastic_training = False
if os.path.exists(DLTS_POD_ENV_PATH):
with open(DLTS_POD_ENV_PATH) as file:
lines = file.readlines()
lines = [line.rstrip() for line in lines]
for line in lines:
if line.startswith('export FC_TASKROLE_NAME') or line.startswith('export FC_TASK_INDEX'):
key_val = line.split()[1]
key, val = key_val.split('=')
current_env[key] = val
processes = []
cmd = []
if not args.enable_elastic_training:
if args.enable_each_rank_log != "None":
# prepare the log path and the file name prefix
if os.path.isfile(args.enable_each_rank_log):
raise ValueError(f"{args.enable_each_rank_log} should not be a file, it should be a directory.")
if not os.path.exists(args.enable_each_rank_log):
try:
os.makedirs(args.enable_each_rank_log)
except Exception as e:
print(e)
raise ValueError(f"unable to create directory {args.enable_each_rank_log} for each rank log.")
log_name_prefix = time.strftime("%Y%m%d%H%M%S", time.localtime())
for local_proc in range(0, num_local_procs):
# each process's rank
dist_rank = global_rank_mapping[local_node][local_proc]
local_rank = dist_rank % num_local_procs
current_env["RANK"] = str(dist_rank)
current_env["LOCAL_RANK"] = str(local_rank)
# spawn the processes
cmd = []
if args.bind_cores_to_rank:
cores_per_rank, numactl_cmd = get_numactl_cmd(args.bind_core_list, num_local_procs, local_rank)
current_env["OMP_NUM_THREADS"] = f"{cores_per_rank}"
cmd = cmd + numactl_cmd
if not args.no_python:
cmd.append(sys.executable)
cmd.append("-u")
if args.module:
cmd.append("-m")
else:
if args.module:
raise ValueError("Don't use both the '--no_python' flag"
" and the '--module' flag at the same time.")
cmd.append(args.training_script)
# A user may not want to pass local_rank as a keyword arg so we make this optional.
if not args.no_local_rank:
cmd.append(f"--local_rank={local_rank}")
cmd += args.training_script_args
if args.enable_each_rank_log != "None":
log_file = os.path.join(args.enable_each_rank_log, f"{log_name_prefix}_rank{dist_rank}.log")
log_fd = open(log_file, 'w')
process = subprocess.Popen(cmd, env=current_env, stdout=log_fd, stderr=log_fd)
else:
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
else:
from ..elasticity import DSElasticAgent
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.agent.server.api import WorkerSpec
import torch.distributed.elastic.rendezvous.registry as rdzv_registry
from torch.distributed.elastic.multiprocessing import Std
if args.min_elastic_nodes == -1:
args.min_elastic_nodes = 1
if args.max_elastic_nodes == -1:
args.max_elastic_nodes = args.nnodes
assert args.max_elastic_nodes > 0 and args.min_elastic_nodes > 0, "Max and Min nodes should be positive"
current_env["NCCL_ASYNC_ERROR_HANDLING"] = str(1)
# Get config and arguments
cmd = []
if not args.no_python:
cmd = [sys.executable, "-u"]
if args.module:
cmd.append("-m")
else:
if args.module:
raise ValueError("Don't use both the '--no_python' flag"
" and the '--module' flag at the same time.")
cmd.append(args.training_script)
cmd += args.training_script_args
cmd_args = cmd[1:]
rdzv_configs: Dict[str, str] = {'timeout': 100}
run_id = os.environ.get("ELASTIC_RUN_ID", ELASTIC_TRAINING_ID_DEFAULT)
# Creating config for rendezvous class
rdzv_parameters = RendezvousParameters(backend='c10d',
endpoint=args.master_addr + ":" + str(args.master_port),
run_id=run_id,
min_nodes=args.min_elastic_nodes,
max_nodes=args.max_elastic_nodes,
**rdzv_configs)
spec = WorkerSpec(
role='trainer',
local_world_size=num_local_procs,
entrypoint=cmd[0],
args=cmd[1:],
rdzv_handler=rdzv_registry.get_rendezvous_handler(rdzv_parameters),
max_restarts=100,
monitor_interval=5,
redirects=Std.from_str("0"),
tee=Std.from_str("0"),
master_addr=None,
master_port=None,
)
agent = DSElasticAgent(spec, current_env)
agent.run()
sig_names = {2: "SIGINT", 15: "SIGTERM"}
last_return_code = None
def sigkill_handler(signum, frame):
for process in processes:
logger.info(f"Killing subprocess {process.pid}")
try:
terminate_process_tree(process.pid)
except Exception:
pass
if last_return_code is not None:
logger.error(f"{cmd} exits with return code = {last_return_code}")
sys.exit(last_return_code)
if signum in sig_names:
logger.info(f"Main process received {sig_names[signum]}, exiting")
if args.save_pid:
if os.path.isfile(pid_file):
os.remove(pid_file)
sys.exit(1)
# pass SIGINT/SIGTERM to children if the parent is being terminated
signal.signal(signal.SIGINT, sigkill_handler)
signal.signal(signal.SIGTERM, sigkill_handler)
alive_processes = set(processes)
while len(alive_processes):
finished_processes = []
for process in alive_processes:
if process.poll() is None:
# the process is still running
continue
else:
if process.returncode != 0:
last_return_code = process.returncode # for sigkill_handler
sigkill_handler(signal.SIGTERM, None) # not coming back
else:
# exited cleanly
logger.info(f"Process {process.pid} exits successfully.")
finished_processes.append(process)
alive_processes = set(alive_processes) - set(finished_processes)
time.sleep(1)
if __name__ == "__main__":
main()
| 14,654 | 40.16573 | 117 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/launcher/runner.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
DeepSpeed runner is the main front-end for launching multi-worker
training jobs with DeepSpeed. By default it uses pdsh to ssh into
multiple worker nodes in parallel and launch all the necessary
processes per rank for training.
"""
import os
import re
import sys
import json
import base64
import argparse
import subprocess
import collections
from copy import deepcopy
import signal
import time
from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner, SlurmRunner, MPICHRunner, IMPIRunner
from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER, SLURM_LAUNCHER, MPICH_LAUNCHER, IMPI_LAUNCHER
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT
from ..nebula.constants import NEBULA_EXPORT_ENVS
from ..utils import logger
from ..autotuning import Autotuner
from deepspeed.accelerator import get_accelerator
DLTS_HOSTFILE = "/job/hostfile"
EXPORT_ENVS = ['MLFLOW', 'NCCL', 'PYTHON', 'MV2', 'UCX']
EXPORT_ENVS += NEBULA_EXPORT_ENVS
DEEPSPEED_ENVIRONMENT_NAME = ".deepspeed_env"
DEEPSPEED_ENVIRONMENT_PATHS = [os.path.expanduser("~"), '.']
PDSH_MAX_FAN_OUT = 1024
# On AISC compute, each node sets environment variables independently, so we want to
# prevent exporting rank-0 env variables in case of heterogeneous compute.
EXCLUDE_ENVS = {'AISC_JOB_NAME': ['NCCL_IB_HCA', 'UCX_NET_DEVICES']}
def parse_args(args=None):
parser = argparse.ArgumentParser(description="DeepSpeed runner to help launch distributed "
"multi-node/multi-gpu training jobs.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-H",
"--hostfile",
type=str,
default=DLTS_HOSTFILE,
help="Hostfile path (in MPI style) that defines the "
"resource pool available to the job (e.g., "
"worker-0 slots=4)")
parser.add_argument("-i",
"--include",
type=str,
default="",
help='''Specify hardware resources to use during execution.
String format is
NODE_SPEC[@NODE_SPEC ...],
where
NODE_SPEC=NAME[:SLOT[,SLOT ...]].
If :SLOT is omitted, include all slots on that host.
Example: -i "worker-0@worker-1:0,2" will use all slots
on worker-0 and slots [0, 2] on worker-1.
''')
parser.add_argument("-e",
"--exclude",
type=str,
default="",
help='''Specify hardware resources to NOT use during execution.
Mutually exclusive with --include. Resource formatting
is the same as --include.
Example: -e "worker-1:0" will use all available
resources except slot 0 on worker-1.
''')
parser.add_argument("--num_nodes",
type=int,
default=-1,
help="Total number of worker nodes to run on, this will use "
"the top N hosts from the given hostfile.")
parser.add_argument("--min_elastic_nodes",
type=int,
default=-1,
help="Minimum number of nodes to run elastic training on. "
"Default is 1 when elastic training is enabled")
parser.add_argument("--max_elastic_nodes",
type=int,
default=-1,
help="Maximum number of nodes to run elastic training on. "
"Default is num_nodes when elastic training is enabled")
parser.add_argument("--num_gpus",
"--num_accelerators",
type=int,
default=-1,
help="Max number of GPUs to use on each node, will use "
"[0:N) GPU ids on each node.")
parser.add_argument("--master_port",
default=TORCH_DISTRIBUTED_DEFAULT_PORT,
type=int,
help="(optional) Port used by PyTorch distributed for "
"communication during training.")
parser.add_argument("--master_addr",
default="",
type=str,
help="(optional) IP address of node 0, will be "
"inferred via 'hostname -I' if not specified.")
parser.add_argument("--launcher",
default=PDSH_LAUNCHER,
type=str,
help="(optional) choose launcher backend for multi-node "
"training. Options currently include PDSH, OpenMPI, MVAPICH, SLURM, MPICH, IMPI.")
parser.add_argument("--launcher_args",
default="",
type=str,
help="(optional) pass launcher specific arguments as a "
"single quoted argument.")
parser.add_argument("--module",
action="store_true",
help="Change each process to interpret the launch "
"script as a Python module, executing with the same "
"behavior as 'python -m'.")
parser.add_argument("--no_python",
action="store_true",
help="Skip prepending the training script with "
"'python' - just execute it directly.")
parser.add_argument("--no_local_rank",
action="store_true",
help="Do not pass local_rank as an argument when calling "
"the user's training script.")
parser.add_argument("--no_ssh_check",
action="store_true",
help="Do not perform ssh check in multi-node launcher model")
parser.add_argument("--force_multi",
action="store_true",
help="Force multi-node launcher mode, helps in cases where user "
"wants to launch on single remote node.")
parser.add_argument("--save_pid",
action="store_true",
help="Save file containing launcher process id (pid) at /tmp/<main-pid>.ds, "
"where <main-pid> is the pid of the first process that invoked `deepspeed`. "
"Useful when launching deepspeed processes programmatically.")
parser.add_argument("--enable_each_rank_log",
default="None",
type=str,
help="redirect the stdout and stderr from each rank into different log files")
parser.add_argument("--autotuning",
default="",
choices=["tune", "run"],
type=str,
help="Run DeepSpeed autotuner to discover optimal configuration parameters "
"before running job.")
parser.add_argument("--elastic_training",
action="store_true",
help="Enable elastic training support in DeepSpeed.")
parser.add_argument("user_script", type=str, help="User script to launch, followed by any required "
"arguments.")
parser.add_argument('user_args', nargs=argparse.REMAINDER)
parser.add_argument("--bind_cores_to_rank",
action="store_true",
help="Bind each rank to different cores of the host")
parser.add_argument("--bind_core_list",
type=str,
default=None,
help="List of cores to bind to with comma separated list of "
"numbers and range. i.e. 1,3-5,7 => [1,3,4,5,7]. When not "
"specified, all cores on system would be used rank binding")
return parser.parse_args(args=args)
def fetch_hostfile(hostfile_path):
if not os.path.isfile(hostfile_path):
logger.warning("Unable to find hostfile, will proceed with training "
"with local resources only.")
return None
# e.g., worker-0 slots=16
with open(hostfile_path, 'r') as fd:
hostfile_text = fd.readlines()
return _parse_hostfile(hostfile_text)
def _parse_hostfile(hostfile_lines):
# Regex matches one or more non-whitespace characters (\S+) at the start of
# the line, followed by one or more whitespace characters (\s+), followed
# by the string "slots=", followed by one or more digits (\d+).
pattern = r'^(\S+)\s+slots=(\d+)'
resource_pool = collections.OrderedDict()
for line in hostfile_lines:
line = line.strip()
match = re.search(pattern, line)
if line.startswith("#") or line == "":
# hostfile comment or empty line, ignore
continue
elif match:
host = match.group(1)
num_slots = int(match.group(2))
if host in resource_pool:
logger.error(f"Bad hostfile text: {hostfile_lines}")
raise ValueError(f"Hostfile contains multiple entries for {host}, unable to proceed with launching")
resource_pool[host] = num_slots
else:
logger.error(f"Bad hostfile text: {hostfile_lines}")
raise ValueError(f"Hostfile contains a bad entry: {line}, unable to proceed with launching")
if len(resource_pool) == 0:
logger.error(f"Bad hostfile text: {hostfile_lines}")
raise ValueError("Hostfile is empty or not formatted correctly, unable to proceed with launching.")
return resource_pool
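# Hedged example (editor's illustration): comments and blank lines are skipped, every
# other line must match "<host> slots=<n>":
#   _parse_hostfile(["worker-0 slots=4", "worker-1 slots=4", "# comment", ""])
#   -> OrderedDict([('worker-0', 4), ('worker-1', 4)])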
def _stable_remove_duplicates(data):
# Create a new list in the same order as original but with duplicates
# removed, should never be more than ~16 elements so simple is best
new_list = []
for x in data:
if x not in new_list:
new_list.append(x)
return new_list
def parse_resource_filter(host_info, include_str="", exclude_str=""):
'''Parse an inclusion or exclusion string and filter a hostfile dictionary.
String format is NODE_SPEC[@NODE_SPEC ...], where
NODE_SPEC = NAME[:SLOT[,SLOT ...]].
If :SLOT is omitted, include/exclude all slots on that host.
Examples:
include_str="worker-0@worker-1:0,2" will use all slots on worker-0 and
slots [0, 2] on worker-1.
exclude_str="worker-1:0" will use all available resources except
slot 0 on worker-1.
'''
# Constants that define our syntax
NODE_SEP = '@'
SLOT_LIST_START = ':'
SLOT_SEP = ','
# Ensure include/exclude are mutually exclusive
if (include_str != "") and (exclude_str != ""):
raise ValueError('include_str and exclude_str are mutually exclusive.')
# no-op
if (include_str == "") and (exclude_str == ""):
return host_info
# Either build from scratch or remove items
filtered_hosts = dict()
if include_str:
parse_str = include_str
if exclude_str != "":
filtered_hosts = deepcopy(host_info)
parse_str = exclude_str
# foreach node in the list
for node_config in parse_str.split(NODE_SEP):
# Node can either be alone or node:slot,slot,slot
if SLOT_LIST_START in node_config:
hostname, slots = node_config.split(SLOT_LIST_START)
slots = [int(x) for x in slots.split(SLOT_SEP)]
# sanity checks
if hostname not in host_info:
raise ValueError(f"Hostname '{hostname}' not found in hostfile")
for slot in slots:
if slot not in host_info[hostname]:
raise ValueError(f"No slot '{slot}' specified on host '{hostname}'")
# If include string, build the list from here
if include_str:
filtered_hosts[hostname] = slots
elif exclude_str:
for slot in slots:
logger.info(f'removing {slot} from {hostname}')
filtered_hosts[hostname].remove(slot)
# User just specified the whole node
else:
hostname = node_config
# sanity check hostname
if hostname not in host_info:
raise ValueError(f"Hostname '{hostname}' not found in hostfile")
if include_str:
filtered_hosts[hostname] = host_info[hostname]
elif exclude_str:
filtered_hosts[hostname] = []
# Post-processing to remove duplicates and empty nodes
del_keys = []
for hostname in filtered_hosts:
# Remove duplicates
filtered_hosts[hostname] = _stable_remove_duplicates(filtered_hosts[hostname])
# Remove empty hosts
if len(filtered_hosts[hostname]) == 0:
del_keys.append(hostname)
for name in del_keys:
del filtered_hosts[name]
# Lastly, go over filtered_hosts and convert to a OrderedDict() to ensure
# we map ranks to nodes correctly by maintaining host_info ordering.
ordered_hosts = collections.OrderedDict()
for host in host_info:
if host in filtered_hosts:
ordered_hosts[host] = filtered_hosts[host]
return ordered_hosts
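# Hedged examples (editor's illustration), starting from
# host_info = {'worker-0': [0, 1, 2, 3], 'worker-1': [0, 1, 2, 3]}:
#   include_str="worker-0@worker-1:0,2" -> {'worker-0': [0, 1, 2, 3], 'worker-1': [0, 2]}
#   exclude_str="worker-1:0"            -> {'worker-0': [0, 1, 2, 3], 'worker-1': [1, 2, 3]}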
def parse_inclusion_exclusion(resource_pool, inclusion, exclusion):
active_resources = collections.OrderedDict()
for hostname, slots in resource_pool.items():
active_resources[hostname] = list(range(slots))
return parse_resource_filter(active_resources, include_str=inclusion, exclude_str=exclusion)
def encode_world_info(world_info):
world_info_json = json.dumps(world_info).encode('utf-8')
world_info_base64 = base64.urlsafe_b64encode(world_info_json).decode('utf-8')
return world_info_base64
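# Hedged round-trip example (editor's illustration); launch.py decodes this with
# base64.urlsafe_b64decode followed by json.loads:
#   info = {'worker-0': [0, 1]}
#   json.loads(base64.urlsafe_b64decode(encode_world_info(info))) == info  # True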
def run_autotuning(args, active_resources):
tuner = Autotuner(args, active_resources)
logger.info("[Start] Running autotuning")
tuner.tune()
tuner.print_tuning_results()
logger.info("[End] Running autotuning")
tuner.write_optimal_config()
if args.autotuning == "run":
tuner.run_after_tuning()
def parse_num_nodes(str_num_nodes: str, elastic_training: bool):
node_list = str_num_nodes.split(":")
if len(node_list) == 1:
min_nodes, max_nodes = int(node_list[0]), -1
elif len(node_list) == 2 and elastic_training:
min_nodes, max_nodes = int(node_list[0]), int(node_list[1])
elif len(node_list) == 2 and not elastic_training:
raise RuntimeError("MIN:MAX format is only supported in elastic training")
else:
raise RuntimeError("num_nodes {} is not in MIN:MAX format".format(str_num_nodes))
return min_nodes, max_nodes
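# Hedged examples (editor's illustration):
#   parse_num_nodes("4", elastic_training=False)  -> (4, -1)
#   parse_num_nodes("2:8", elastic_training=True) -> (2, 8)
# The MIN:MAX form raises a RuntimeError when elastic training is disabled.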
def main(args=None):
args = parse_args(args)
if args.elastic_training:
assert args.master_addr != "", "Master Addr is required when elastic training is enabled"
resource_pool = fetch_hostfile(args.hostfile)
# respect CUDA_VISIBLE_DEVICES for a single node and no explicit resource filters
cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "")
if not resource_pool and len(cuda_visible_devices):
detected_str = f"Detected CUDA_VISIBLE_DEVICES={cuda_visible_devices}"
if len(args.include) or len(args.exclude) or args.num_nodes > 1 or args.num_gpus > 0:
print(
f"{detected_str} but ignoring it because one or several of --include/--exclude/--num_gpus/--num_nodes cl args were used. If you want to use CUDA_VISIBLE_DEVICES don't pass any of these arguments to deepspeed."
)
else:
args.include = f"localhost:{cuda_visible_devices}"
print(f"{detected_str}: setting --include={args.include}")
del os.environ["CUDA_VISIBLE_DEVICES"]
if args.num_nodes >= 0 or args.num_gpus >= 0:
if args.include != "" or args.exclude != "":
raise ValueError("Cannot specify num_nodes/gpus with include/exclude")
multi_node_exec = True
if not resource_pool:
resource_pool = {}
device_count = get_accelerator().device_count()
if device_count == 0:
raise RuntimeError("Unable to proceed, no GPU resources available")
resource_pool['localhost'] = device_count
args.master_addr = "127.0.0.1"
multi_node_exec = False
if not multi_node_exec and args.num_nodes > 1:
raise ValueError("Num nodes is >1 but no extra nodes available via hostfile")
active_resources = parse_inclusion_exclusion(resource_pool, args.include, args.exclude)
env = os.environ.copy()
    # validate that passwordless-ssh is working properly with this hostfile
if multi_node_exec and not args.no_ssh_check:
first_host = list(active_resources.keys())[0]
try:
subprocess.check_call(f'ssh -o PasswordAuthentication=no {first_host} hostname',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True)
except subprocess.CalledProcessError:
raise RuntimeError(
f"Using hostfile at {args.hostfile} but host={first_host} was not reachable via ssh. If you are running with a single node please remove {args.hostfile} or setup passwordless ssh."
)
if not args.master_addr:
assert multi_node_exec
first_host = list(active_resources.keys())[0]
hostname_cmd = [f"ssh {first_host} hostname -I"]
try:
result = subprocess.check_output(hostname_cmd, shell=True)
except subprocess.CalledProcessError as err:
logger.error(
"Unable to detect suitable master address via `hostname -I`, please manually specify one via --master_addr"
)
raise err
args.master_addr = result.decode('utf-8').split()[0]
if not args.master_addr:
raise RuntimeError(
f"Unable to detect suitable master address via `hostname -I`, please manually specify one via --master_addr"
)
logger.info(f"Using IP address of {args.master_addr} for node {first_host}")
if args.autotuning != "":
run_autotuning(args, active_resources)
return
if args.num_nodes > 0:
updated_active_resources = collections.OrderedDict()
for count, hostname in enumerate(active_resources.keys()):
if args.num_nodes == count:
break
updated_active_resources[hostname] = active_resources[hostname]
active_resources = updated_active_resources
if args.num_gpus > 0:
updated_active_resources = collections.OrderedDict()
for hostname in active_resources.keys():
updated_active_resources[hostname] = list(range(args.num_gpus))
active_resources = updated_active_resources
if args.elastic_training:
assert not args.no_local_rank, "--no_local_rank argument is not supported in Elastic training"
# encode world info as base64 to make it easier to pass via command line
world_info_base64 = encode_world_info(active_resources)
multi_node_exec = args.force_multi or len(active_resources) > 1
if not multi_node_exec:
deepspeed_launch = [
sys.executable, "-u", "-m", "deepspeed.launcher.launch", f"--world_info={world_info_base64}",
f"--master_addr={args.master_addr}", f"--master_port={args.master_port}"
]
if args.no_python:
deepspeed_launch.append("--no_python")
if args.module:
deepspeed_launch.append("--module")
if args.no_local_rank:
deepspeed_launch.append("--no_local_rank")
if args.save_pid:
deepspeed_launch += ["--save_pid", f"{os.getpid()}"]
if args.enable_each_rank_log:
deepspeed_launch.append(f"--enable_each_rank_log={args.enable_each_rank_log}")
if args.elastic_training:
deepspeed_launch.append("--enable_elastic_training")
deepspeed_launch.append(f"--max_elastic_nodes={args.max_elastic_nodes}")
deepspeed_launch.append(f"--min_elastic_nodes={args.min_elastic_nodes}")
if args.bind_cores_to_rank:
deepspeed_launch.append("--bind_cores_to_rank")
if args.bind_core_list is not None:
deepspeed_launch.append(f"--bind_core_list={args.bind_core_list}")
cmd = deepspeed_launch + [args.user_script] + args.user_args
else:
args.launcher = args.launcher.lower()
if args.launcher == PDSH_LAUNCHER:
runner = PDSHRunner(args, world_info_base64)
elif args.launcher == OPENMPI_LAUNCHER:
runner = OpenMPIRunner(args, world_info_base64, resource_pool)
elif args.launcher == MPICH_LAUNCHER:
runner = MPICHRunner(args, world_info_base64, resource_pool)
elif args.launcher == IMPI_LAUNCHER:
runner = IMPIRunner(args, world_info_base64, resource_pool)
elif args.launcher == MVAPICH_LAUNCHER:
runner = MVAPICHRunner(args, world_info_base64, resource_pool)
elif args.launcher == SLURM_LAUNCHER:
runner = SlurmRunner(args, world_info_base64, resource_pool)
else:
raise NotImplementedError(f"Unknown launcher {args.launcher}")
if not runner.backend_exists():
raise RuntimeError(f"launcher '{args.launcher}' not installed.")
curr_path = os.path.abspath('.')
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = curr_path + ":" + env['PYTHONPATH']
else:
env['PYTHONPATH'] = curr_path
excluded_vars = []
for exclude_key, var_list in EXCLUDE_ENVS.items():
if exclude_key in env.keys():
# key exists in launcher env -> var list should be used
excluded_vars += var_list
exports = ""
for var in env.keys():
if any([var.startswith(name) for name in EXPORT_ENVS]):
if not any([var == name for name in excluded_vars]):
runner.add_export(var, env[var])
for environ_path in DEEPSPEED_ENVIRONMENT_PATHS:
environ_file = os.path.join(environ_path, DEEPSPEED_ENVIRONMENT_NAME)
if os.path.isfile(environ_file):
with open(environ_file, 'r') as fd:
for var in fd.readlines():
key, val = var.split('=', maxsplit=1)
runner.add_export(key, val)
if args.launcher == PDSH_LAUNCHER:
cmd, kill_cmd = runner.get_cmd(env, active_resources)
else:
cmd = runner.get_cmd(env, active_resources)
logger.info(f"cmd = {' '.join(cmd)}")
result = subprocess.Popen(cmd, env=env)
def sigkill_handler(signum, frame):
result.send_signal(signal.SIGINT)
time.sleep(0.1)
result.send_signal(signal.SIGTERM)
result_kill = subprocess.Popen(kill_cmd, env=env)
result_kill.wait()
time.sleep(1)
sys.exit(1)
if args.launcher == PDSH_LAUNCHER and multi_node_exec:
signal.signal(signal.SIGINT, sigkill_handler)
result.wait()
# In case of failure must propagate the error-condition back to the caller (usually shell). The
# actual error and traceback should have been printed in the subprocess, so in order to avoid
# unnecessary noise we just quietly exit here with the same code as the subprocess
if result.returncode > 0:
sys.exit(result.returncode)
if __name__ == "__main__":
main()
| 24,150 | 40.567986 | 225 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/launcher/multinode_runner.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import sys
import shutil
import subprocess
import warnings
from shlex import split
from abc import ABC, abstractmethod
from deepspeed.accelerator import get_accelerator
from ..utils import logger, get_numactl_cmd
from .constants import PDSH_MAX_FAN_OUT, MVAPICH_TMP_HOSTFILE
class MultiNodeRunner(ABC):
def __init__(self, args, world_info_base64):
self.args = args
self.validate_args()
self.user_arguments = self.parse_user_args()
self.user_script = args.user_script
self.world_info_base64 = world_info_base64
self.exports = {}
@abstractmethod
def backend_exists(self):
"""Return whether the corresponding backend exists"""
@abstractmethod
def get_cmd(self, environment, active_resources):
"""Return the command to execute on node"""
def add_export(self, key, var):
self.exports[key.strip()] = var.strip()
def parse_user_args(self):
return self.args.user_args
@property
def name(self):
"""Return the name of the backend"""
return self.__class__.__name__
def validate_args(self):
"""Validate self.args"""
class PDSHRunner(MultiNodeRunner):
def __init__(self, args, world_info_base64):
super().__init__(args, world_info_base64)
def backend_exists(self):
return shutil.which('pdsh')
@property
def name(self):
return "pdsh"
def parse_user_args(self):
return list(map(lambda x: x if x.startswith("-") else f"'{x}'", self.args.user_args))
def get_cmd(self, environment, active_resources):
environment['PDSH_RCMD_TYPE'] = 'ssh'
active_workers = ",".join(active_resources.keys())
logger.info("Running on the following workers: %s" % active_workers)
# PDSH flags for max node fan out and specific hosts to launch on
# See https://linux.die.net/man/1/pdsh for flag details
pdsh_cmd_args = ['pdsh', '-S', '-f', str(PDSH_MAX_FAN_OUT), '-w', active_workers] + split(
self.args.launcher_args)
exports = ""
for key, val in self.exports.items():
exports += "export {}={}; ".format(key, val)
# https://linux.die.net/man/1/pdsh
# %n will be replaced by pdsh command
deepspeed_launch = [
exports, f"cd {os.path.abspath('.')};", sys.executable, "-u", "-m", "deepspeed.launcher.launch",
f'--world_info={self.world_info_base64}', "--node_rank=%n", f"--master_addr={self.args.master_addr}",
f"--master_port={self.args.master_port}"
]
if self.args.no_python:
deepspeed_launch.append("--no_python")
if self.args.module:
deepspeed_launch.append("--module")
if self.args.no_local_rank:
deepspeed_launch.append("--no_local_rank")
if self.args.save_pid:
deepspeed_launch += ["--save_pid", f"{os.getpid()}"]
if self.args.elastic_training:
deepspeed_launch.append("--enable_elastic_training")
deepspeed_launch.append(f"--max_elastic_nodes={self.args.max_elastic_nodes}")
deepspeed_launch.append(f"--min_elastic_nodes={self.args.min_elastic_nodes}")
cmd_to_search = [i + "\\" for i in deepspeed_launch[2:6]]
kill_command = pdsh_cmd_args + ["pkill -f ", " ".join(cmd_to_search)[:-2]]
return pdsh_cmd_args + deepspeed_launch + [self.user_script] + self.user_arguments, kill_command
class OpenMPIRunner(MultiNodeRunner):
def __init__(self, args, world_info_base64, resource_pool):
super().__init__(args, world_info_base64)
self.resource_pool = resource_pool
self.add_export('UCX_TLS', 'tcp')
def backend_exists(self):
        #TODO: if IB is available we should suggest mvapich
return shutil.which('ompi_info')
@property
def name(self):
return "openmpi"
def validate_args(self):
super().validate_args()
#TODO: Allow for include/exclude at node-level but not gpu-level
if self.args.include != "" or self.args.exclude != "":
raise ValueError(f"{self.name} backend does not support worker include/exclusion")
if self.args.num_nodes != -1 or self.args.num_gpus != -1:
raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus")
def get_cmd(self, environment, active_resources):
total_process_count = sum(self.resource_pool.values())
mpirun_cmd = [
'mpirun',
'-n',
f'{total_process_count}',
'-hostfile',
f'{self.args.hostfile}',
'--mca',
'btl',
'^openib',
'--mca',
'btl_tcp_if_include',
'eth0',
] + split(self.args.launcher_args)
export_cmd = []
for k, v in self.exports.items():
export_cmd += ['-x', "{}={}".format(k, v)]
python_exec = []
if not self.args.no_python:
python_exec = [sys.executable, "-u"]
if self.args.module:
python_exec.append("-m")
return mpirun_cmd + export_cmd + python_exec + [self.user_script] + self.user_arguments
class MPICHRunner(MultiNodeRunner):
def __init__(self, args, world_info_base64, resource_pool):
super().__init__(args, world_info_base64)
self.resource_pool = resource_pool
def backend_exists(self):
        #TODO: if IB is available we should suggest mpich
return shutil.which('mpirun') #mpich_info
@property
def name(self):
return "mpich"
def validate_args(self):
super().validate_args()
#TODO: Allow for include/exclude at node-level but not gpu-level
if self.args.include != "" or self.args.exclude != "":
raise ValueError(f"{self.name} backend does not support worker include/exclusion")
if self.args.num_nodes != -1 or self.args.num_gpus != -1:
raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus")
def get_cmd(self, environment, active_resources):
devices_per_node = self.resource_pool.values()
total_process_count = sum(devices_per_node)
process_per_node = list(devices_per_node)[0]
if not all([n == process_per_node for n in devices_per_node]):
raise ValueError("MPICH requires same number of devices per node")
mpirun_cmd = [
'mpirun',
] + split(self.args.launcher_args)
export_cmd = []
for k, v in self.exports.items():
export_cmd += ['-genv', "{}={}".format(k, v)]
export_cmd += ['-genv', 'MASTER_ADDR', str(self.args.master_addr)]
export_cmd += ['-genv', 'MASTER_PORT', str(self.args.master_port)]
export_cmd += ['-genv', 'WORLD_SIZE', str(total_process_count)]
export_cmd += ['-genv', 'LOCAL_SIZE', str(process_per_node)]
hosts = list(self.resource_pool.keys())
per_host_cmd = []
host_id = 0
host_count = 0
for i in range(total_process_count):
local_rank = i % process_per_node
python_exec = []
if not self.args.no_python:
python_exec += [sys.executable, "-u"]
if self.args.module:
python_exec.append("-m")
env_mapping = ['-env', 'RANK', str(i)]
env_mapping += ['-env', 'LOCAL_RANK', str(local_rank)]
if i == 0:
per_host_cmd = ['-n', '1', '-host', hosts[host_id]
] + env_mapping + python_exec + [self.user_script] + self.user_arguments
else:
per_host_cmd = per_host_cmd + [':', '-n', '1', '-host', hosts[host_id]
] + env_mapping + python_exec + [self.user_script] + self.user_arguments
host_count = host_count + 1
if host_count == process_per_node:
host_id = host_id + 1
host_count = 0
return mpirun_cmd + export_cmd + per_host_cmd
class IMPIRunner(MultiNodeRunner):
def __init__(self, args, world_info_base64, resource_pool):
super().__init__(args, world_info_base64)
self.resource_pool = resource_pool
def backend_exists(self):
        #TODO: if IB is available we should suggest mpich
return shutil.which('mpirun') #mpich_info
@property
def name(self):
return "impi"
def validate_args(self):
super().validate_args()
#TODO: Allow for include/exclude at node-level but not gpu-level
if self.args.include != "" or self.args.exclude != "":
raise ValueError(f"{self.name} backend does not support worker include/exclusion")
if self.args.num_nodes != -1 or self.args.num_gpus != -1:
raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus")
def get_cmd(self, environment, active_resources):
devices_per_node = self.resource_pool.values()
total_process_count = sum(devices_per_node)
process_per_node = list(devices_per_node)[0]
if not all([n == process_per_node for n in devices_per_node]):
raise ValueError("Intel MPI requires same number of devices per node")
mpirun_cmd = [
'mpirun',
'-ppn',
f'{process_per_node}',
] + split(self.args.launcher_args)
export_cmd = []
for k, v in self.exports.items():
export_cmd += ['-genv', f'{k}', f'{v}']
if self.args.bind_cores_to_rank:
cores_per_rank, _ = get_numactl_cmd(self.args.bind_core_list, process_per_node, 0)
export_cmd += ['-genv', 'OMP_NUM_THREADS', str(cores_per_rank)]
export_cmd += ['-genv', 'MASTER_ADDR', str(self.args.master_addr)]
export_cmd += ['-genv', 'MASTER_PORT', str(self.args.master_port)]
export_cmd += ['-genv', 'WORLD_SIZE', str(total_process_count)]
export_cmd += ['-genv', 'LOCAL_SIZE', str(process_per_node)]
export_cmd += ['-hosts']
hosts = ""
for i, host in enumerate(self.resource_pool.keys()):
if i == 0:
hosts = f"{host}"
else:
hosts += f",{host}"
export_cmd += [hosts]
per_host_cmd = []
for i in range(total_process_count):
local_rank = i % process_per_node
python_exec = []
if self.args.bind_cores_to_rank:
_, numactl_cmd = get_numactl_cmd(self.args.bind_core_list, process_per_node, local_rank)
python_exec += numactl_cmd
if not self.args.no_python:
python_exec += [sys.executable, "-u"]
if self.args.module:
python_exec.append("-m")
env_mapping = ['-env', 'RANK', str(i)]
env_mapping += ['-env', 'LOCAL_RANK', str(local_rank)]
if i == 0:
per_host_cmd = ['-n', '1'] + env_mapping + python_exec + [self.user_script] + self.user_arguments
else:
per_host_cmd = per_host_cmd + [':', '-n', '1'] + env_mapping + python_exec + [self.user_script
] + self.user_arguments
        logger.info(f"cmd = {' '.join(mpirun_cmd + export_cmd + per_host_cmd)}")
return mpirun_cmd + export_cmd + per_host_cmd
class SlurmRunner(MultiNodeRunner):
def __init__(self, args, world_info_base64, resource_pool):
super().__init__(args, world_info_base64)
self.resource_pool = resource_pool
def backend_exists(self):
return shutil.which('sinfo')
@property
def name(self):
return 'slurm'
def get_cmd(self, environment, active_resources):
assert not getattr(self.args, 'detect_nvlink_pairs',
False), "slurm backend does not support remapping visible devices"
total_process_count = sum(self.resource_pool.values())
srun_cmd = [
'srun',
'-n',
f'{total_process_count}',
] + split(self.args.launcher_args)
if getattr(self.args, 'slurm_comment', ''):
srun_cmd += ['--comment', self.args.slurm_comment]
if self.args.include != "":
srun_cmd.append('--include')
srun_cmd.append(f'{self.args.include}')
if self.args.exclude != "":
srun_cmd.append('--exclude')
srun_cmd.append(f'{self.args.exclude}')
if self.args.num_nodes > 0:
srun_cmd.append('--nodes')
srun_cmd.append(f'{self.args.num_nodes}')
if self.args.num_gpus > 0:
srun_cmd.append('--gpus')
srun_cmd.append(f'{self.args.num_gpus}')
exports = '--export=ALL'
for key, val in self.exports.items():
exports += f",{key}={val}"
python_exec = [sys.executable, "-u"]
command = srun_cmd + [exports] + python_exec + [self.user_script] + self.user_arguments
return command
class MVAPICHRunner(MultiNodeRunner):
def __init__(self, args, world_info_base64, resource_pool):
super().__init__(args, world_info_base64)
self.resource_pool = resource_pool
# Disable the CMA kernel module, not available on Ubuntu systems
self.add_export('MV2_SMP_USE_CMA', '0')
# If we fail this will output more verbose logging
self.add_export('MV2_DEBUG_SHOW_BACKTRACE', '1')
        # Enable cuda-aware communication
if get_accelerator().device_name() == 'cuda':
self.add_export('MV2_USE_CUDA', '1')
# Support deep learning frameworks: http://hidl.cse.ohio-state.edu/userguide/horovod/
self.add_export('MV2_SUPPORT_DL', '1')
# Support MPI_THREAD_MULTIPLE
self.add_export('MV2_ENABLE_AFFINITY', '0')
# Performance tuning flags for allgather
self.add_export('MV2_INTER_ALLGATHER_TUNING', '5')
self.add_export('MV2_CUDA_USE_NAIVE', '0')
def backend_exists(self):
        #TODO: if IB is available we should suggest mvapich
mpiname_exists = shutil.which('mpiname')
exists = False
if not mpiname_exists:
warnings.warn("mpiname does not exist, mvapich is not installed properly")
else:
results = subprocess.check_output('mpiname', shell=True)
mpiname_results = results.decode('utf-8').strip()
if "MVAPICH2-GDR" in mpiname_results:
exists = True
else:
warnings.warn(f"Expected MVAPICH2-GDR as return for mpiname but received {mpiname_results}")
return exists
@property
def name(self):
return "mvapich"
def validate_args(self):
super().validate_args()
#TODO: Allow for include/exclude at node-level but not gpu-level
if self.args.include != "" or self.args.exclude != "":
raise ValueError(f"{self.name} backend does not support worker include/exclusion")
if self.args.num_nodes != -1 or self.args.num_gpus != -1:
raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus")
def get_cmd(self, environment, active_resources):
devices_per_node = self.resource_pool.values()
total_process_count = sum(devices_per_node)
process_per_node = list(devices_per_node)[0]
if not all([n == process_per_node for n in devices_per_node]):
raise ValueError("mvapich requires same number of devices per node")
with open(MVAPICH_TMP_HOSTFILE, 'w') as fd:
for host in self.resource_pool.keys():
fd.write(f'{host}\n')
mpirun_cmd = [
'mpirun',
'-np',
f'{total_process_count}',
'-ppn',
f'{process_per_node}',
'--hostfile',
f'{MVAPICH_TMP_HOSTFILE}',
] + split(self.args.launcher_args)
export_cmd = []
for k, v in self.exports.items():
export_cmd += ['-env', "{}={}".format(k, v)]
python_exec = []
if not self.args.no_python:
python_exec = [sys.executable, "-u"]
if self.args.module:
python_exec.append("-m")
return mpirun_cmd + export_cmd + python_exec + [self.user_script] + self.user_arguments
| 16,568 | 36.150224 | 119 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/launcher/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
| 140 | 22.5 | 44 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/module_quantize.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
def quantize_transformer_layer(orig_layer_impl, model, megatron=False, preln=False):
""" Quantize bert-style transformer layers with DeepSpeed's transformer layer
Arguments:
orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for,
e.g., transformers.modeling_bert.BertLayer.
model (torch.nn.Module): user's nn.module representing their model
megatron (bool): megatron model-parallel implementation (this is supported for inference only)
preln (bool): does the original layer implementation do pre or post layer norm?
    Note: For BERT-style models, the layer layout is assumed to follow the DeepSpeed-Examples models unless the HuggingFace flag is set.
Returns:
Updated nn.module with quantized transformer layers
"""
def quantize_weight(weight):
return weight.to(torch.int8)
def megatron_layer_quantize(layer):
layer.attention.query_key_value.weight.data = quantize_weight(layer.attention.query_key_value.weight.data)
layer.attention.dense.weight.data = quantize_weight(layer.attention.dense.weight.data)
layer.mlp.dense_h_to_4h.weight.data = quantize_weight(layer.mlp.dense_h_to_4h.weight.data)
layer.mlp.dense_4h_to_h.weight.data = quantize_weight(layer.mlp.dense_4h_to_h.weight.data)
def bert_layer_quantize(layer):
layer.attention.self.query.weight.data = quantize_weight(layer.attention.self.query.weight.data)
layer.attention.self.key.weight.data = quantize_weight(layer.attention.self.key.weight.data)
layer.attention.self.value.weight.data = quantize_weight(layer.attention.self.value.weight.data)
layer.attention.output.dense.weight.data = quantize_weight(layer.attention.output.dense.weight.data)
if preln:
layer.intermediate.dense_act.weight.data = quantize_weight(layer.intermediate.dense_act.weight.data)
else:
layer.intermediate.dense.weight.data = quantize_weight(layer.intermediate.dense.weight.data)
layer.output.dense.weight.data = quantize_weight(layer.output.dense.weight.data)
def quantize_fn(child):
if megatron:
# Quantize megatron GPT2 / GPT3 trained model
megatron_layer_quantize(child)
else:
# Quantize either DeepSpeed or HuggingFace trained model
bert_layer_quantize(child)
return child
return quantize_module(model=model, orig_class=orig_layer_impl, quantize_fn=quantize_fn)
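# Hedged usage sketch (an assumption, not part of the original file): quantizing the BertLayer
# modules of a HuggingFace BERT model. The `transformers` classes and the checkpoint name are
# illustration-only, and this helper is never called by DeepSpeed itself.
def _example_quantize_hf_bert():
    from transformers import BertModel
    from transformers.models.bert.modeling_bert import BertLayer
    model = BertModel.from_pretrained("bert-base-uncased")
    # HF BERT applies post-layer-norm, so preln=False; it is not a Megatron model
    return quantize_transformer_layer(BertLayer, model, megatron=False, preln=False)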
def quantize_module(model, orig_class, quantize_fn):
policy = {orig_class: quantize_fn}
return _quantize_module(model, policy)
def _quantize_module(model, policies):
for name, child in model.named_children():
if child.__class__ in policies:
orig = repr(child)
setattr(model, name, policies[child.__class__](child))
new = getattr(model, name)
else:
_quantize_module(child, policies)
return model
| 3,107 | 41.575342 | 120 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/load_checkpoint.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from torch import nn
from deepspeed.model_implementations.transformers.ds_bloom import DeepSpeedBloomInference
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference
from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference
from deepspeed.model_implementations.transformers.ds_opt import DeepSpeedOPTInference
import deepspeed.ops.transformer as transformer_inference
from .layers import LinearLayer, Normalize, EmbeddingLayer, OPTEmbedding, RMSNormalize
import torch
import gc
from deepspeed.accelerator import get_accelerator
import re
def load_model_with_checkpoint(r_module,
sd,
mp_replace,
ckpt_type,
ckpt_mp_size,
weight_quantizer=None,
rank=0,
container=None):
error_msgs = []
def prefix_check():
# if keys start with 'model.' or 'transformer.', don't skip level 0 prefix
for key in sd[0].keys():
# OPT models
if re.match("^model[.]", key):
return False
# BLOOM models
if re.match("^transformer[.]", key):
return False
return True
skip_level_0_prefix = prefix_check() and container.policy.use_load_prefix
def transpose(data):
with torch.no_grad():
data = data.contiguous()
data1 = data.transpose(-1, -2).reshape(-1)
data.reshape(-1).copy_(data1)
data1 = None
return data.reshape(data.shape[-1], data.shape[-2])
def load(module, prefix):
args = (sd[0], prefix, {}, True, [], [], error_msgs)
if hasattr(module, 'weight'):
module.weight = mp_replace.copy(module.weight.data, sd[0][prefix + 'weight'])
if prefix + 'bias' in sd[0].keys():
if module.bias.data.is_meta:
                # a meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.bias.data, device="cpu"),
requires_grad=module.bias.data.requires_grad)
module.bias = mp_replace.copy(module.bias.data, sd[0][prefix + 'bias'])
args = None
gc.collect()
def load_transformer_layer(module, prefix):
if ckpt_type == "tp":
def load_parameters(module, prefix):
for n, p in module.named_parameters():
if prefix + n in sd[0] and len(n.split('.')) == 1:
if type(sd[0][prefix + n]) is list:
tmp_data, scale = sd[0][prefix + n]
tmp_data = tmp_data
scale = scale.to(get_accelerator().current_device_name())
# set the quantizer number of groups using the checkpoint scale shape
weight_quantizer.num_groups = scale.shape[0]
else:
tmp_data = sd[0][prefix + n].to(get_accelerator().current_device_name())
scale = None
src_shape = tmp_data.shape
dst_shape = p.shape
inner_dim = 1 if tmp_data.dtype == torch.int8 else 0
outer_dim = 0 if tmp_data.dtype == torch.int8 else 1
if (len(src_shape) == 2 and len(dst_shape) == 2):
if (src_shape[inner_dim] == dst_shape[0] and src_shape[outer_dim] == dst_shape[1]):
if tmp_data.dtype != torch.int8:
p = weight_quantizer.quantize(
transpose(tmp_data) if weight_quantizer.q_int8 else tmp_data)
else:
p = torch.nn.parameter.Parameter(tmp_data, requires_grad=False)
p.scale = scale
setattr(module, n, p)
else:
dim = inner_dim if src_shape[inner_dim] != dst_shape[0] else outer_dim
dim1 = 0 if src_shape[inner_dim] != dst_shape[0] else 1
if src_shape[dim] > dst_shape[dim1]:
weight_partition = torch.split(tmp_data, dst_shape[dim1], dim=dim)[rank].to(
get_accelerator().current_device_name())
assert tmp_data.dtype != torch.int8 or scale.numel() > weight_quantizer.num_groups * (rank+1), \
'''ERROR: We require the quantization scales for larger TP-size when loading INT8 checkpoint!\
Please use the FP16 checkpoint to generate INT8 checkpoint with the sharding parameters!'''
scale = scale.view(-1)[weight_quantizer.num_groups * (rank + 1):].reshape(
weight_quantizer.num_groups, -1).contiguous()
else:
assert tmp_data.dtype != torch.int8, \
                                        '''Merging of the checkpoints is not supported when using an INT8 checkpoint! \
                                        Please use as many GPUs as the TP-size of the checkpoint'''
all_data = [
sd[j][prefix + n] if type(sd[j][prefix + n]) is list else sd[j][prefix + n].to(
get_accelerator().current_device_name()) for j in range(len(sd))
]
# Check if the weight tensor is for the QKV parameter
if src_shape[1] == (3 * src_shape[0]) // ckpt_mp_size:
qkv_size = src_shape[outer_dim] // 3
src_split = [
torch.split(src[0].data, qkv_size, dim=outer_dim) for src in all_data
]
weight_partition = torch.cat([
torch.cat([qkv_s[i] for qkv_s in src_split], axis=outer_dim)
for i in range(len(src_split[0]))
],
dim=dim)
else:
weight_partition = torch.cat([
ad[0].to(get_accelerator().current_device_name())
if type(ad) is list else ad for ad in all_data
],
dim=dim)
if tmp_data.dtype == torch.int8:
scale = torch.cat(
[ad[1].to(get_accelerator().current_device_name()) for ad in all_data],
dim=dim)
if tmp_data.dtype != torch.int8:
weight_partition = weight_quantizer.quantize(
transpose(weight_partition), \
parallel_dim=(0 if dim == 1 else 1)) if weight_quantizer.q_int8 else \
weight_quantizer.quantize(weight_partition)
else:
weight_partition = torch.nn.parameter.Parameter(weight_partition,
requires_grad=False)
weight_partition.scale = scale
setattr(module, n, weight_partition)
else:
if src_shape[0] == dst_shape[0]:
p.data.copy_(tmp_data)
else:
if src_shape[0] > dst_shape[0]:
bias_split = torch.split(tmp_data, dst_shape[-1])[rank].to(
get_accelerator().current_device_name()).contiguous()
p.data.copy_(bias_split)
else:
# Check if the weight tensor is for the QKV parameter
if src_shape[0] == (3 * r_module.config.hidden_size) // ckpt_mp_size:
qkv_size = src_shape[0] // 3
src_split = [
torch.split(sd[j][prefix + n], qkv_size, dim=0) for j in range(len(sd))
]
p.data.copy_(
torch.cat([
torch.cat([qkv_s[i] for qkv_s in src_split], axis=0)
for i in range(len(src_split[0]))
],
dim=0).to(get_accelerator().current_device_name()).contiguous())
else:
p.data.copy_(
torch.cat([sd[j][prefix + n] for j in range(len(sd))],
dim=0).to(get_accelerator().current_device_name()).contiguous())
load_parameters(module, prefix)
for n, child in module.named_children():
load_parameters(child, prefix + n + '.')
else:
container.load_params(module, sd[0], weight_quantizer, mp_replace, prefix)
try:
import transformers
OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding
if hasattr(transformers.models, "llama"):
LlamaRMSNorm = transformers.models.llama.modeling_llama.LlamaRMSNorm
else:
LlamaRMSNorm = None
    except:
        OPTLearnedPositionalEmbedding = None
        LlamaRMSNorm = None
layer_policies = {
nn.Linear: load,
nn.Embedding: load,
nn.LayerNorm: load,
EmbeddingLayer: load,
LinearLayer: load,
Normalize: load,
transformer_inference.DeepSpeedTransformerInference: load_transformer_layer,
DeepSpeedBloomInference: load_transformer_layer,
DeepSpeedGPTInference: load_transformer_layer,
DeepSpeedBERTInference: load_transformer_layer,
DeepSpeedMegatronGPTInference: load_transformer_layer,
DeepSpeedOPTInference: load_transformer_layer,
OPTLearnedPositionalEmbedding: load,
OPTEmbedding: load,
LlamaRMSNorm: load,
RMSNormalize: load
}
all_ds_ids = {}
def load_module_recursive(module, prefix='', level=0):
for name, child in module.named_children():
if child.__class__ in layer_policies:
checking_key = prefix + name + '.'
if not any(checking_key in item for item in sd[0].keys()):
if hasattr(child, 'weight') and \
(hasattr(child.weight, 'ds_id') and \
child.weight.ds_id in all_ds_ids):
prefix1 = all_ds_ids[child.weight.ds_id]
if child.__class__ is nn.Linear:
child = LinearLayer(weight=all_ds_ids[child.weight.ds_id])
setattr(module, name, child)
continue
child_params = list(child.parameters())
if len(child_params) > 0 and (child_params[0].numel() == 0 or child_params[0].is_meta):
if child.weight.is_meta:
ds_shape = child.weight.shape
else:
ds_shape = child.weight.ds_shape
if child.__class__ is nn.LayerNorm:
child = Normalize(dim=ds_shape[-1], dtype=child.weight.dtype, eps=child.eps)
setattr(module, name, child)
elif child.__class__ is nn.Linear:
child = LinearLayer(weight_shape=child.weight.shape, bias=child.bias)
setattr(module, name, child)
elif child.__class__ is OPTLearnedPositionalEmbedding:
child = OPTEmbedding(weight_shape=ds_shape)
setattr(module, name, child)
elif child.__class__ is LlamaRMSNorm:
child = RMSNormalize(dim=ds_shape[-1], dtype=child.weight.dtype, eps=child.variance_epsilon)
setattr(module, name, child)
else:
ds_id = None
if hasattr(child.weight, 'ds_id'):
ds_id = child.weight.ds_id
child = EmbeddingLayer(weight_shape=ds_shape, dtype=child.weight.dtype)
if ds_id is not None:
all_ds_ids[ds_id] = child.weight
setattr(module, name, child)
layer_policies[child.__class__](child, prefix + name + '.')
else:
load_module_recursive(
child,
prefix if (level == 0 and ckpt_type == 'pp') and skip_level_0_prefix else \
prefix + name + '.',
level + 1)
load_module_recursive(r_module)
embedding_weight = None
for n, p in r_module.named_parameters():
if "word_embeddings." in n or "embed_tokens." in n or "wte." in n:
embedding_weight = p
if embedding_weight is not None and r_module.lm_head.weight.is_meta:
r_module.lm_head.weight = embedding_weight
for sd_ in sd:
del sd_
sd = None
gc.collect()
| 14,745 | 53.817844 | 134 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/utils.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.utils import log_dist
# helper function to map between DS policies and DS containers
def policy_to_ds_container(**kwargs):
from .containers import HFGPT2LayerPolicy, DS_GPT2Container
from .containers import HFBertLayerPolicy, DS_BERTContainer
from .containers import BLOOMLayerPolicy, DS_BloomContainer
from .containers import HFGPTJLayerPolicy, DS_GPTJContainer
from .containers import HFGPTNEOLayerPolicy, DS_GPTNEOContainer
from .containers import GPTNEOXLayerPolicy, DS_GPTNEOXContainer
from .containers import HFOPTLayerPolicy, DS_OPTContainer
from .containers import MegatronLayerPolicy, DS_MegatronGPTContainer
from .containers import HFDistilBertLayerPolicy, DS_DistilBERTContainer
from .containers import LLAMALayerPolicy, DS_LLAMAContainer
policy_to_container = {
HFGPT2LayerPolicy: DS_GPT2Container,
HFBertLayerPolicy: DS_BERTContainer,
BLOOMLayerPolicy: DS_BloomContainer,
HFGPTJLayerPolicy: DS_GPTJContainer,
HFGPTNEOLayerPolicy: DS_GPTNEOContainer,
GPTNEOXLayerPolicy: DS_GPTNEOXContainer,
HFOPTLayerPolicy: DS_OPTContainer,
MegatronLayerPolicy: DS_MegatronGPTContainer,
HFDistilBertLayerPolicy: DS_DistilBERTContainer,
LLAMALayerPolicy: DS_LLAMAContainer,
}
container = None
policy = kwargs['policy']
assert policy is not None, "Policy cannot be None"
policy_type = type(policy)
if policy_type not in policy_to_container:
log_dist(f"Policy type {policy_type} not supported", [0])
else:
container = policy_to_container[policy_type](**kwargs)
return container
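# Hedged sketch (an assumption that mirrors how replace_module.py calls this helper): every
# keyword argument below is an object supplied by the inference engine at injection time.
def _example_build_container(policy, config, model_config, child, layer_id=0):
    return policy_to_ds_container(policy=policy,
                                  config=config,
                                  model_config=model_config,
                                  layer_id=layer_id,
                                  child=child)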
| 1,762 | 37.326087 | 75 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/policy.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import ABC, abstractmethod
from deepspeed.utils.types import ActivationFuncType, NormType
import torch
from deepspeed.accelerator import get_accelerator
transformer_param_names = (
'attn_qkvw', \
'attn_qkvb', \
'attn_ow' , \
'attn_ob', \
'inter_w', \
'inter_b', \
'output_w', \
'output_b', \
'attn_nw', \
'attn_nb', \
'norm_w', \
'norm_b')
class DSPolicy(ABC):
_orig_layer_class = None
def __init__(self):
self.cuda_graph_supported = False
@abstractmethod
def attention(self):
"""
Returns attention qkv and dense parameters
weight: (3*hidden, hidden) and (hidden, hidden)
bias: (3*hidden) and (hidden)
"""
raise NotImplementedError
class TransformerPolicy(DSPolicy):
# a static class variable containing the HuggingFace model configuration.
# see e.g., transformers.models.opt.configuration_opt.OPTConfig
hf_model_config = None
def __init__(
self,
inference=True,
linear_layer=True,
scale_attention=True,
megatron_v2=False,
use_mup=False,
# the type of activation function used in MLP
mlp_act_func_type=ActivationFuncType.GELU,
# applies layer norm before attention if `pre_attn_norm` is set to True
pre_attn_norm=True,
            # this flag shows whether or not to use a prefix when loading the checkpoint
use_load_prefix=False,
# whether or not the qkv is stored in the split-format
split_qkv=True,
# Type of normalization to perform
norm_type=NormType.LayerNorm):
super().__init__()
self.cuda_graph_supported = False
self.inference = inference
self.linear_layer = linear_layer
self.scale_attention = scale_attention
self.is_megatron_v2 = megatron_v2
self.use_mup = use_mup
self.mlp_act_func_type = mlp_act_func_type
self.pre_attn_norm = pre_attn_norm
self.use_load_prefix = use_load_prefix
self.split_qkv = split_qkv
self.norm_type = norm_type
@abstractmethod
def attention(self):
"""
Returns attention qkv and dense parameters
weight: (3*hidden, hidden) and (hidden, hidden)
bias: (3*hidden) and (hidden)
"""
raise NotImplementedError
@abstractmethod
def get_hidden_heads(self):
"""
return hidden_size and number of heads
"""
raise NotImplementedError
@abstractmethod
def mlp(self):
"""
Returns mlp intermediate and output
weight: (intermediate, hidden) and (hidden, intermediate)
bias: (intermediate) and (hidden)
"""
raise NotImplementedError
@abstractmethod
def layernorm(self):
"""
Returns LayerNorms used in transformer layer
Post-Attention and pre/post layer norm
gamma and beta with shape: (hidden)
"""
raise NotImplementedError
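# Hedged illustration (not a shipped policy): a concrete TransformerPolicy only has to hand back
# the raw parameter tensors of the wrapped layer. The `client_module` attribute names used below
# (hidden_size, num_heads, qkv, dense, fc1, fc2, ln_attn, ln_mlp) are hypothetical.
class _ExampleLayerPolicy(TransformerPolicy):
    def __init__(self, client_module, inference=True):
        super().__init__(inference)
        self.client_module = client_module
    def get_hidden_heads(self):
        m = self.client_module
        return m.hidden_size, m.num_heads
    def attention(self):
        m = self.client_module
        return m.qkv.weight, m.qkv.bias, m.dense.weight, m.dense.bias
    def mlp(self):
        m = self.client_module
        return m.fc1.weight, m.fc1.bias, m.fc2.weight, m.fc2.bias
    def layernorm(self):
        m = self.client_module
        return m.ln_mlp.weight, m.ln_mlp.bias, m.ln_attn.weight, m.ln_attn.bias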
# TODO (lekurile): This function exists in base container as well, consolidate at some point
def transpose(data):
with torch.no_grad():
data = data.contiguous()
data1 = data.transpose(-1, -2).reshape(-1)
data.reshape(-1).copy_(data1)
data1 = None
return data.reshape(data.shape[-1], data.shape[-2])
# TODO (lekurile): This function exists in megatron feature container as well, consolidate at some point
def _transpose(x, heads=1, mp_replace=None):
heads = heads // mp_replace.mp_size # type: ignore
outer_dim = -1
attention_head_size = x.shape[outer_dim] // heads
new_x_shape = x.size()[:outer_dim] + (heads, attention_head_size)
x_1 = x.view(*new_x_shape)
(q, k, v) = torch.split(x_1, (x_1.shape[-1] // 3), dim=-1)
if len(q.shape) > 2:
new_shape = (q.shape[0], ) + (-1, )
return torch.cat((q.reshape(new_shape), k.reshape(new_shape), v.reshape(new_shape)),
dim=outer_dim).reshape(x.shape)
else:
return torch.cat((q.reshape(-1), k.reshape(-1), v.reshape(-1)), dim=-1).reshape(x.shape)
# This checks if the parameter exists in the checkpoint file and maybe copies it into the corresponding destination tensor.
# Note that not all parameters are saved in one checkpoint, that's why we always need to check if they exist!
def maybe_copy(module,
sd,
weight_quantizer,
mp_replace,
dst_name,
src_name,
qkv=False,
megatron_v2=False,
split_qkv=False,
heads=1):
if src_name in sd:
dst = getattr(module, dst_name)
tmp = sd[src_name]
if len(dst.shape) == 1:
if split_qkv:
dst = mp_replace.strided_copy(dst, tmp, num_splits=3)
else:
dst = mp_replace.copy(dst, tmp)
if qkv and megatron_v2:
dst = torch.nn.parameter.Parameter(_transpose(dst, heads=heads, mp_replace=mp_replace).contiguous())
else:
if split_qkv:
dst = mp_replace.strided_copy(dst, weight_quantizer.quantize(tmp if weight_quantizer.q_int8 else \
(transpose(tmp).contiguous())), num_splits=3, int8=weight_quantizer.q_int8)
else:
if qkv and megatron_v2:
tmp = _transpose(transpose(tmp), heads=heads, mp_replace=mp_replace).contiguous()
if weight_quantizer.q_int8:
tmp = transpose(tmp)
dst = mp_replace.copy(dst, weight_quantizer.quantize(tmp if weight_quantizer.q_int8 else \
transpose(tmp)), int8=weight_quantizer.q_int8)
setattr(module, dst_name, dst)
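# Hedged illustration (an assumption modeled on how the model-specific containers use this
# helper): copy the attention output projection of one layer from a checkpoint shard into the
# fused inference module. All arguments, and the 'attention.dense.weight' naming, come from the caller.
def _example_maybe_copy(module, sd, weight_quantizer, mp_replace, layer_prefix):
    maybe_copy(module.attention, sd, weight_quantizer, mp_replace, 'attn_ow',
               layer_prefix + 'attention.dense.weight')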
# Extending the maybe_copy function for when the q, k, and v are in separate parameters!
def maybe_copy_qkv(module, sd, weight_quantizer, mp_replace, dst_name, src_names, split_qkv=False):
if src_names[0] in sd:
q = sd[src_names[0]]
k = sd[src_names[1]]
v = sd[src_names[2]]
qkv_data = torch.cat((q, k, v), dim=0)
dst = getattr(module, dst_name)
if len(dst.shape) == 1:
if split_qkv:
dst = mp_replace.strided_copy(dst, qkv_data.contiguous(), num_splits=3)
else:
dst = mp_replace.copy(dst, qkv_data)
else:
if split_qkv:
dst = mp_replace.strided_copy(dst, weight_quantizer.quantize(qkv_data.to(get_accelerator().device_name()) if weight_quantizer.q_int8 else \
((transpose(qkv_data)).contiguous())), num_splits=3, int8=weight_quantizer.q_int8)
else:
dst = mp_replace.copy(dst, weight_quantizer.quantize(qkv_data.to(get_accelerator().device_name()) if weight_quantizer.q_int8 else \
transpose(qkv_data)), int8=weight_quantizer.q_int8)
setattr(module, dst_name, dst)
# Extending the `maybe_copy` function for when mlp1 is in separate parameters for GeGLU
def maybe_copy_geglu(module, sd, weight_quantizer, mp_replace, dst_name, src_names):
if src_names[0] in sd:
reg_proj = sd[src_names[0]]
gate_proj = sd[src_names[1]]
mlp1_data = torch.cat((reg_proj, gate_proj), dim=0)
dst = getattr(module, dst_name)
dst = mp_replace.strided_copy(dst, weight_quantizer.quantize(mlp1_data.to(get_accelerator().device_name()) if weight_quantizer.q_int8 else \
transpose(mlp1_data)), num_splits=2, int8=weight_quantizer.q_int8)
setattr(module, dst_name, dst)
def pack_lora_weights(p):
return [
p.lora_right_weight, \
p.lora_left_weight, \
p.lora_scaling
]
def maybe_get_lora(p):
if hasattr(p, 'lora_right_weight'):
lora_param = pack_lora_weights(p)
else:
lora_param = []
return lora_param
| 8,259 | 35.711111 | 155 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/auto_tp.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Automatic Tensor Parallelism
import re
from torch import nn
from .replace_policy import replace_policies
class AutoTP():
def in_module_list(module, module_list):
for item in module_list:
if type(item).__name__ == type(module).__name__:
return True
return False
def get_module_list(model):
mlist = []
for child in model.children():
if isinstance(child, nn.ModuleList):
for module in child.children():
if not mlist:
mlist = [module]
elif not AutoTP.in_module_list(module, mlist):
mlist = mlist + [module]
else:
mlist = mlist + AutoTP.get_module_list(child)
return mlist
def supported(model):
unsupported = ['codegen', 'deberta', 'flaubert', 'fsmt', 'gpt2', 'led', 'longformer', 'xlm', 'xlnet']
model = str(model)
key = re.search(r": (.*?)Model", model)
if key is None:
key = re.search(r": (.*?)Stack", model)
if key is None:
key = re.match(r"(.*?)Model", model)
assert key is not None, "Not able to determine model policy automatically. Please provide policy."
if key.group(1).lower() in unsupported:
return False
return True
def get_layers(parent, module):
layer_list = []
for key, submodule in module._modules.items():
if isinstance(submodule, nn.Linear):
layer_list = layer_list + [parent + "." + key]
elif isinstance(submodule, nn.LayerNorm) or key == 'LayerNorm' or key == 'layer_norm':
layer_list = layer_list + ["ln"]
else:
layer_list = layer_list + AutoTP.get_layers(key, submodule)
return layer_list
def update_policy_list(policy_list, new_module, new_gems):
if len(policy_list):
for i, policy in enumerate(policy_list):
# if module already exists in policy, combine gems and remove duplicates
if policy[0] == type(new_module):
new_gems = set(new_gems + policy[1])
policy_list[i] = tuple([type(new_module), new_gems])
return policy_list
policy_list.append(tuple([type(new_module), new_gems]))
return policy_list
def kernel_supported(module_list):
policy = []
for plcy in replace_policies:
# instantiate a throw-away policy in order to populate the _orig_layer_class
_ = plcy(None)
if isinstance(plcy._orig_layer_class, list):
for orig_layer_class in plcy._orig_layer_class:
policy.append(orig_layer_class)
elif plcy._orig_layer_class is not None:
policy.append(plcy._orig_layer_class)
for child in module_list:
if child.__class__ in policy:
return True
return False
def tp_parser(model):
policy_list = []
module_list = []
layer_list = []
gem_list = []
module_list = AutoTP.get_module_list(model)
assert AutoTP.supported(model), "AutoTP not supported for model. Please use kernel injection since container policy for model exists." \
if AutoTP.kernel_supported(module_list) else "AutoTP not supported for model. Please provide policy."
for module in module_list:
for key, submodule in module._modules.items():
if isinstance(submodule, nn.Linear):
layer_list = layer_list + ["." + key]
elif isinstance(submodule, nn.LayerNorm) or key == 'LayerNorm' or key == 'layer_norm':
layer_list = layer_list + ["ln"]
else:
layer_list = layer_list + AutoTP.get_layers(key, submodule)
for i, layer in enumerate(layer_list):
if layer == 'ln':
if layer_list[i - 1] != 'ln':
gem_list = gem_list + [layer_list[i - 1]]
elif 'out_proj' in layer:
gem_list = gem_list + [layer]
elif 'o_proj' in layer:
gem_list = gem_list + [layer]
elif 'down_proj' in layer:
gem_list = gem_list + [layer]
elif 'attention.dense' in layer and 'GPTNeoX' in str(model):
gem_list = gem_list + [layer]
elif 'self_attention.dense' in layer and 'falcon' in str(
type(module)): # this is a hack to get the right linear layer for this model!
gem_list = gem_list + [layer]
layer_list = []
if gem_list != []:
gem_list = list(set(gem_list))
policy_list = AutoTP.update_policy_list(policy_list, module, gem_list)
gem_list = []
assert len(policy_list), "AutoTP not supported for model. Please use kernel injection since container policy for model exists." \
if AutoTP.kernel_supported(module_list) else "Not able to determine model policy automatically. Please provide policy."
return policy_list
| 5,353 | 41.832 | 144 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/layers.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed import comm as dist
from torch import nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from deepspeed.accelerator import get_accelerator
class LinearAllreduce(nn.Module):
def __init__(self, weight, bias=None, mp_group=None):
super(LinearAllreduce, self).__init__()
self.weight = weight
self.bias = bias
self.mp_group = mp_group
def forward(self, input):
output = torch.matmul(input, self.weight.transpose(-1, -2))
if self.mp_group is not None:
dist.all_reduce(output, group=self.mp_group)
if self.bias is not None:
output += self.bias
return output
class LinearLayer(nn.Module):
def __init__(self, weight_shape=None, dtype=torch.half, weight=None, bias=None):
super(LinearLayer, self).__init__()
if weight is not None:
self.weight = weight
self.bias = bias
else:
self.weight = Parameter(
torch.empty(weight_shape, dtype=dtype, device=get_accelerator().current_device_name()))
self.bias = Parameter(
torch.empty(weight_shape[0],
dtype=dtype,
device=get_accelerator().current_device_name())) \
if bias is not None else None
def forward(self, input):
output = torch.matmul(input, self.weight.transpose(-1, -2))
if self.bias is not None:
output += self.bias
return output
class Normalize(nn.Module):
def __init__(self, dim=None, dtype=torch.float, eps=1e-5, weight=None, bias=None):
super(Normalize, self).__init__()
if weight is not None:
self.weight = weight
self.bias = bias
else:
self.norm = nn.LayerNorm(dim, eps=eps).to(dtype).to(get_accelerator().current_device_name())
self.weight = self.norm.weight
self.bias = self.norm.bias
self.eps = eps
def forward(self, input):
return nn.functional.layer_norm(input, input.shape[-1:], self.weight, self.bias, eps=self.eps)
class EmbeddingLayer(nn.Module):
def __init__(self, weight_shape=None, dtype=torch.half, weight=None, bias=None):
super(EmbeddingLayer, self).__init__()
if weight is None:
self.weight = Parameter(
torch.empty(weight_shape[0],
weight_shape[1],
dtype=dtype,
device=get_accelerator().current_device_name()))
else:
self.weight = weight
def forward(self, input):
return F.embedding(input, self.weight)
class OPTEmbedding(EmbeddingLayer):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, weight_shape=None, weight=None, bias=None):
# OPT is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(weight_shape, weight=weight)
def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
attention_mask = attention_mask.long()
# create positions depending on attention_mask
positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1
# cut positions if `past_key_values_length` is > 0
positions = positions[:, past_key_values_length:]
return super().forward(positions + self.offset)
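# Hedged illustration (not part of the original file): how OPTEmbedding derives offset position
# ids from an attention mask. Shapes and values are arbitrary assumptions; passing an explicit
# `weight=` skips the accelerator allocation path, so the sketch runs on any device.
def _example_opt_positions():
    emb = OPTEmbedding(weight=Parameter(torch.randn(16, 8)))   # 14 usable positions + offset of 2
    mask = torch.ones(2, 5, dtype=torch.long)                  # [bsz, seqlen] with no padding
    return emb(mask)                                           # looks up positions 2..6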
class RMSNormalize(nn.Module):
def __init__(self, dim=None, dtype=torch.float, eps=1e-5, weight=None):
super(RMSNormalize, self).__init__()
if weight is not None:
self.weight = weight
else:
self.weight = nn.Parameter(torch.ones(dim, dtype=dtype, device=get_accelerator().current_device_name()))
self.eps = eps
def forward(self, hidden_states):
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return hidden_states * self.weight
| 4,592 | 33.276119 | 116 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .replace_module import replace_transformer_layer, revert_transformer_layer, ReplaceWithTensorSlicing, GroupQuantizer, generic_injection
from .module_quantize import quantize_transformer_layer
from .replace_policy import HFBertLayerPolicy
from .layers import LinearAllreduce, LinearLayer, EmbeddingLayer, Normalize
from .policy import DSPolicy
| 444 | 39.454545 | 140 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/inject.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import copy
import torch
from deepspeed.ops.transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
def module_inject(layer_obj, model, config, micro_batch_size, max_seq_length, seed, preln, fp16=True):
for name, child in model.named_children():
if isinstance(child, layer_obj):
print('REPLACING BertLayer')
cuda_config = DeepSpeedTransformerConfig(batch_size=micro_batch_size,
max_seq_length=max_seq_length,
hidden_size=config.hidden_size,
heads=config.num_attention_heads,
attn_dropout_ratio=config.attention_probs_dropout_prob,
hidden_dropout_ratio=config.hidden_dropout_prob,
num_hidden_layers=config.num_hidden_layers,
initializer_range=config.initializer_range,
seed=seed,
fp16=fp16,
pre_layer_norm=preln)
new_module = DeepSpeedTransformerLayer(cuda_config)
# copy relevant state from child -> new module
qw = child.attention.self.query.weight
qb = child.attention.self.query.bias
kw = child.attention.self.key.weight
kb = child.attention.self.key.bias
vw = child.attention.self.value.weight
vb = child.attention.self.value.bias
qkvw = torch.cat((qw, kw, vw), 0)
qkvb = torch.cat((qb, kb, vb), 0)
new_module.attn_qkvw.data = qkvw
new_module.attn_qkvb.data = qkvb
new_module.attn_ow.data = child.attention.output.dense.weight
new_module.attn_ob.data = child.attention.output.dense.bias
if preln:
attention_layerNorm = child.PostAttentionLayerNorm
else:
attention_layerNorm = child.attention.output.LayerNorm
new_module.attn_nw.data = attention_layerNorm.weight
new_module.attn_nb.data = attention_layerNorm.bias
if preln:
intermediate_FF = child.intermediate.dense_act
else:
intermediate_FF = child.intermediate.dense
new_module.inter_w.data = intermediate_FF.weight
new_module.inter_b.data = intermediate_FF.bias
new_module.output_w.data = child.output.dense.weight
new_module.output_b.data = child.output.dense.bias
if preln:
transformer_LayerNorm = child.PreAttentionLayerNorm
else:
transformer_LayerNorm = child.output.LayerNorm
new_module.norm_w.data = transformer_LayerNorm.weight
new_module.norm_b.data = transformer_LayerNorm.bias
setattr(model, name, copy.deepcopy(new_module))
else:
module_inject(layer_obj, child, config, micro_batch_size, max_seq_length, seed, preln, fp16)
return model
def test_hi():
from turing.nvidia_modelingpreln import BertConfig as BertConfigPreLN
from turing.nvidia_modelingpreln import BertForQuestionAnswering as BertForQuestionAnsweringPreLN
from turing.nvidia_modelingpreln import BertLayer
bert_model_config = {
"vocab_size_or_config_json_file": 119547,
"hidden_size": 1024,
"num_hidden_layers": 1,
"num_attention_heads": 16,
"intermediate_size": 4096,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"initializer_range": 0.02
}
bert_config = BertConfigPreLN(**bert_model_config)
base_model = BertForQuestionAnsweringPreLN(bert_config, args=None)
#base_model = LinearStack()
test_model = copy.deepcopy(base_model)
    test_model = module_inject(BertLayer, test_model, bert_config, 4, 384, 1234, preln=True)
print('BASE', base_model)
print('TEST', test_model)
#base_model.eval()
#test_model.eval()
#test_input = torch.rand(1, base_model.input_dim)
#base_output = base_model(test_input)
#test_output = test_model(test_input)
#
#assert torch.allclose(base_output, test_output, atol=3e-8)
| 4,719 | 40.769912 | 108 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/replace_module.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from typing import Optional
import torch
import tqdm
import deepspeed
import deepspeed.ops.transformer as transformer_inference
from deepspeed.ops.transformer.inference.diffusers_attention import DeepSpeedDiffusersAttention
from deepspeed.ops.transformer.inference.diffusers_transformer_block import DeepSpeedDiffusersTransformerBlock
from deepspeed.ops.transformer.inference.diffusers_2d_transformer import Diffusers2DTransformerConfig
from deepspeed.accelerator import get_accelerator
from .replace_policy import HFGPT2LayerPolicy
from .replace_policy import replace_policies, generic_policies
from deepspeed import comm as dist
from torch import nn
from .layers import LinearAllreduce, LinearLayer
from .load_checkpoint import load_model_with_checkpoint
import time
from .utils import policy_to_ds_container
import gc
class ReplaceWithTensorSlicing:
def __init__(self, mp_group=None, mp_size=1, out_dim=1, in_dim=0):
if mp_group is not None:
self.gpu_index = dist.get_rank(group=mp_group)
else:
self.gpu_index = 0
self.out_dim = out_dim
self.in_dim = in_dim
self.mp_size = mp_size
def merge_assert(self, dim1, dim2):
assert dim1 > dim2, \
'Merging tensors is not allowed here! Please use deepspeed load_checkpoint\
for merging your checkpoints before replacing the transformer layer with\
inference-kernels'
def strided_copy(self,
dst: Optional[torch.Tensor],
src: Optional[torch.Tensor],
num_splits: int,
int8: bool = False,
allocate_tensor: bool = False):
if src is None:
return src
src_shape = src.shape
dst_shape = dst.shape
outer_dim = 0 if int8 else -1
if allocate_tensor:
dst = torch.empty_like(dst)
src_split = torch.split(src.data, src.shape[outer_dim] // num_splits, dim=outer_dim)
if (len(src_shape) == 2 and len(dst_shape) == 2):
if src_shape[outer_dim] == dst_shape[self.out_dim]:
dst = dst.reshape(-1).data.copy_(src.data.reshape(-1)).reshape(src.shape)
dst = torch.nn.parameter.Parameter(dst, requires_grad=False)
if hasattr(src, 'scale'):
dst.scale = src.scale
return dst
self.merge_assert(src_shape[outer_dim], dst_shape[self.out_dim])
qkv_size = dst_shape[self.out_dim] // num_splits
qkv_split = [torch.split(src_s, qkv_size, dim=outer_dim) for src_s in src_split]
weight_split = [
torch.cat([qkv_s[i] for qkv_s in qkv_split], axis=outer_dim) for i in range(len(qkv_split[0]))
]
dst = dst.reshape(-1).data.copy_(weight_split[self.gpu_index].contiguous().reshape(-1)).reshape(
weight_split[self.gpu_index].shape)
else:
if src_shape[0] == dst_shape[0]:
return torch.nn.parameter.Parameter(src)
qkv_size = dst_shape[0] // num_splits
qkv_split = [torch.split(src_s, qkv_size, dim=0) for src_s in src_split]
bias_split = [torch.cat([qkv_s[i] for qkv_s in qkv_split], axis=0) for i in range(len(qkv_split[0]))]
dst.data.copy_(bias_split[self.gpu_index].contiguous())
dst = torch.nn.parameter.Parameter(dst, requires_grad=False)
if hasattr(src, 'scale'):
dst.scale = src.scale
return dst
def copy(self, dst, src, int8=False, allocate_tensor=False):
if src is None:
return src
assert not dst.data.is_meta # the torch.Tensor.copy_ method used below will silently fail on meta tensors
if allocate_tensor:
dst = torch.empty_like(dst)
outer_dim = 0 if int8 else 1
inner_dim = 1 if int8 else 0
src_shape = src.shape
dst_shape = dst.shape
if (len(src_shape) == 2 and len(dst_shape) == 2):
if src_shape[inner_dim] == dst_shape[self.in_dim] and src_shape[outer_dim] == dst_shape[self.out_dim]:
dst = dst.reshape(-1).data.copy_(src.data.reshape(-1)).reshape(src.shape)
else:
if src_shape[inner_dim] != dst_shape[self.in_dim]:
self.merge_assert(src_shape[inner_dim], dst_shape[self.in_dim])
dst.data.copy_(src[:, self.gpu_index * dst_shape[self.in_dim]: (self.gpu_index + 1) * dst_shape[self.in_dim]] if inner_dim == 1 else \
src[self.gpu_index * dst_shape[self.in_dim]: (self.gpu_index + 1) * dst_shape[self.in_dim], :])
else:
self.merge_assert(src_shape[outer_dim], dst_shape[self.out_dim])
dst.data.copy_(src[:, self.gpu_index * dst_shape[self.out_dim]: (self.gpu_index + 1) * dst_shape[self.out_dim]] if outer_dim == 1 else \
src[self.gpu_index * dst_shape[self.out_dim]: (self.gpu_index + 1) * dst_shape[self.out_dim], :])
else:
if src_shape[0] == dst_shape[0]:
dst = src if src.dtype == dst.dtype else dst.data.copy_(src)
else:
dst.data.copy_(src[self.gpu_index * dst_shape[-1]:(self.gpu_index + 1) * dst_shape[-1]])
dst = torch.nn.parameter.Parameter(dst, requires_grad=False)
if hasattr(src, 'scale'):
dst.scale = src.scale
return dst
def get_transformer_name(replaced_module):
from .containers import supported_models
from torch.nn import ModuleList
transformer_name = ''
for n, c in replaced_module.named_children():
if c.__class__ in supported_models:
transformer_name += n + '.'
for name, child in c.named_children():
if child.__class__ is ModuleList:
transformer_name += name
break
break
return transformer_name
class GroupQuantizer:
def __init__(self, q_int8=True, group_size=1, num_bits=8, num_groups=0):
self.group_size = group_size
self.num_bits = num_bits
self.q_int8 = q_int8
self.num_groups = num_groups
def quantize(self, inputs, qkv=True, count=1, parallel_dim=0):
if not self.q_int8 or not qkv:
inputs = torch.nn.Parameter(inputs, requires_grad=False)
inputs.scale = torch.empty(1)
return inputs
q_range = 2**self.num_bits
num_groups = self.num_groups if self.num_groups > 0 else inputs.shape[0] // self.group_size
inputs = inputs.to(get_accelerator().current_device_name())
input_flat = inputs.reshape(num_groups, -1).contiguous()
input_min = torch.min(input_flat, dim=1, keepdim=True)[0].float()
input_max = torch.max(input_flat, dim=1, keepdim=True)[0].float()
scale = torch.max(input_min.abs(), input_max.abs()) * 2.0 / (q_range)
input_flat = (input_flat / scale).round().clamp(-q_range // 2, q_range // 2 - 1)
inputs_q = input_flat.reshape(inputs.shape).to(torch.int8).contiguous()
out = torch.nn.Parameter(inputs_q, requires_grad=False)
inputs_split = inputs.split(inputs.shape[parallel_dim] // 2, dim=parallel_dim)
input_flat = [inputs_split[i].reshape(num_groups, -1).contiguous() for i in range(2)]
input_min = [torch.min(input_flat[i], dim=1, keepdim=True)[0].float() for i in range(2)]
input_max = [torch.max(input_flat[i], dim=1, keepdim=True)[0].float() for i in range(2)]
scale1 = [(torch.max(input_min[i].abs(), input_max[i].abs()) * 2.0 / (q_range)).squeeze().unsqueeze(0)
for i in range(2)]
out.scale = torch.cat([scale.squeeze().unsqueeze(0), scale1[0], scale1[1]], dim=0).reshape(num_groups,
-1).contiguous()
return out
def _module_match(module):
for policy in generic_policies:
policy = policy()
if policy.match(module):
return policy
return None
def generic_injection(module, fp16=False, bf16=False, enable_cuda_graph=True):
def replace_attn(child, policy):
policy_attn = policy.attention(child)
if policy_attn is None:
return child
if len(policy_attn) == 5:
qkvw, attn_ow, attn_ob, hidden_size, heads = policy_attn
else:
qw, kw, vw, attn_ow, attn_ob, hidden_size, heads = policy_attn
config = transformer_inference.DeepSpeedInferenceConfig(
hidden_size=hidden_size,
heads=heads,
fp16=fp16,
bf16=bf16,
triangular_masking=False,
max_out_tokens=4096,
)
attn_module = DeepSpeedDiffusersAttention(config)
def transpose(data):
data = data.contiguous()
data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1))
data = data.reshape(data.shape[-1], data.shape[-2])
data.to(get_accelerator().current_device_name())
return data
if len(policy_attn) == 5:
attn_module.attn_qkvw.data = transpose(qkvw.data)
else:
attn_module.attn_qkvw = None
attn_module.attn_qw.data = transpose(qw.data)
attn_module.attn_kw.data = transpose(kw.data)
attn_module.attn_vw.data = transpose(vw.data)
attn_module.attn_qkvb = None
attn_module.attn_ow.data = transpose(attn_ow.data)
attn_module.attn_ob.data.copy_(attn_ob.data.to(get_accelerator().current_device_name()))
return attn_module
def replace_attn_block(child, policy):
config = Diffusers2DTransformerConfig()
return DeepSpeedDiffusersTransformerBlock(child, config)
if isinstance(module, torch.nn.Module):
pass
else:
if fp16 is False and bf16 is False:
raise ValueError("Generic injection only supported with FP16 or BF16")
try:
import diffusers
if hasattr(diffusers.models.attention, 'CrossAttention'):
cross_attention = diffusers.models.attention.CrossAttention
else:
cross_attention = diffusers.models.attention_processor.Attention
attention_block = diffusers.models.attention.BasicTransformerBlock
new_policies = {
cross_attention: replace_attn,
attention_block: replace_attn_block,
}
except ImportError:
new_policies = {}
#replace_transformer_layer(None,
# module.text_encoder,
# training=False,
# replace_with_kernel_inject=True,
# triangular_masking=True,
# max_out_tokens=8192)
from ..model_implementations.transformers.clip_encoder import DSClipEncoder
cg_encoder = DSClipEncoder(module.text_encoder, enable_cuda_graph=enable_cuda_graph)
setattr(module, 'text_encoder', cg_encoder)
for name in module.__dict__.keys():
sub_module = getattr(module, name)
policy = _module_match(sub_module)
if policy is not None:
def _replace_module(module, policy):
for name, child in module.named_children():
_replace_module(child, policy)
if child.__class__ in new_policies:
replaced_module = new_policies[child.__class__](child, policy)
setattr(module, name, replaced_module)
_replace_module(sub_module, policy)
new_module = policy.apply(sub_module, enable_cuda_graph=enable_cuda_graph)
print(f"**** found and replaced {name} w. {type(new_module)}")
setattr(module, name, new_module)
container_g = None
def replace_transformer_layer(orig_layer_impl, model, checkpoint_dict, config, model_config):
""" Replace bert-style transformer layers with DeepSpeed's transformer layer
Arguments:
orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for,
e.g., transformers.modeling_bert.BertLayer.
model (torch.nn.Module): user's nn.module representing their model
checkpoint_dict: Dictionary for checkpoint passed from the Inference Engine
config: top-level DS Inference config defined in inference/config.py
model_config: HuggingFace model config passed from the inference/engine.py
Returns:
Updated nn.module with replaced transformer layers
"""
# defining globals as internally defined functions inherit these everywhere
quantize = (config.dtype == torch.int8)
# todo: Refactor later. In future, let's minimize the style used above and use config.** instead
linear_layer_setting = None
'''
linear_layer_setting (tuple of modules) [Optional]: shows which two classes are used for linear layers and embedding layers
'''
micro_batch_size = -1
seed = -1
local_rank = -1
mp_replace = ReplaceWithTensorSlicing(mp_group=config.tensor_parallel.tp_group,
mp_size=config.tensor_parallel.tp_size) #, out_dim=0, in_dim=1)
def replace_with_policy(child, policy_cls, triangular_masking, inference=False, layer_id=0):
policy = policy_cls(child, inference=inference)
if not policy.cuda_graph_supported:
            # policy says cuda graph is not supported; raise an error if it is set in the config
assert not config.enable_cuda_graph, "cuda graph is not supported with this model, please disable"
from deepspeed.moe.layer import MoE
moe = False
if hasattr(child, 'mlp') and isinstance(child.mlp, MoE):
num_experts = child.mlp.num_experts
moe = True
# 1. Create a model-specific container object using the policy object.
_container = policy_to_ds_container(policy=policy,
config=config,
model_config=model_config,
layer_id=layer_id,
child=child)
_container.set_moe(moe)
# 2. Set the tensor parallelism config
_container.set_tensor_parallel_config(config.tensor_parallel.tp_size, config.tensor_parallel.tp_group)
# 3. Initialize tensors
_container.initialize_tensors()
# 4. deal with data types -- needs refactor to use dtype instead of fp16
if config.dtype in [torch.float16, torch.bfloat16, torch.int8]:
_container.convert_to_required_dtype()
# 5. Set the quantization config
quantizer = GroupQuantizer(q_int8=quantize)
_container.set_quantization_config(quantizer)
# 6. create a DS Inference config object
_container.create_ds_model_config()
# 7. use the config and create the module
_container.create_module()
# 8. transpose the weights and bias if needed
_container.transpose()
# 9. deal with tensor parallelism.
_container.apply_tensor_parallelism(mp_replace)
# 10. copy the tensors from the model-specific container to the new module
_container.copy_data_to_new_module()
# 11. set global for generic checkpoint loading
global container_g
if container_g is None:
container_g = _container
return _container.module
def replace_wo_policy(module, all_reduce_linears, prefix="", state_dict=None):
mp_size = config.tensor_parallel.tp_size
mp_group = config.tensor_parallel.tp_group
def _replace(child, name, conv_linear_layer):
            if getattr(child, "replaced", False):
return
mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
weight_shape = child.weight.shape
if name in all_reduce_linears:
new_weight = torch.empty((
weight_shape[1] if conv_linear_layer else weight_shape[0],
(weight_shape[0] if conv_linear_layer else weight_shape[1]) // mp_size,
),
device=child.weight.device,
dtype=child.weight.dtype)
if conv_linear_layer:
child.weight.data = child.weight.data.transpose(-1, -2).contiguous()
data = mp_replace.copy(new_weight, child.weight.data)
new_bias = torch.empty((weight_shape[0]), device=child.weight.device, dtype=child.weight.dtype)
if child.bias is not None:
new_bias.data.copy_(child.bias.data)
setattr(child, "replaced", True)
return LinearAllreduce(data, child.bias if child.bias is None else \
torch.nn.parameter.Parameter(new_bias.to(get_accelerator().current_device_name())), mp_group)
else:
new_weight = torch.empty((
(weight_shape[1] if conv_linear_layer else weight_shape[0]) // mp_size,
weight_shape[0] // mp_size if conv_linear_layer else weight_shape[1],
),
device=child.weight.device,
dtype=child.weight.dtype)
if conv_linear_layer:
child.weight.data = child.weight.data.transpose(-1, -2).contiguous()
data = mp_replace.copy(new_weight, child.weight.data)
new_bias = torch.empty((weight_shape[0] // mp_size),
device=child.weight.device,
dtype=child.weight.dtype)
bias_data = None if child.bias is None else mp_replace.copy(new_bias, child.bias.data).to(
get_accelerator().current_device_name())
setattr(child, "replaced", True)
return LinearLayer(weight=data.to(get_accelerator().current_device_name()), bias=bias_data)
def _slice_embedding(child, name, conv_linear_layer):
            if getattr(child, "replaced", False):
return
mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
new_weight = torch.empty((child.weight.shape[0], child.weight.shape[1] // mp_size),
device=child.weight.device,
dtype=child.weight.dtype)
data = mp_replace.copy(new_weight,
child.weight.ds_tensor.data if hasattr(child.weight, 'ds_tensor') else \
child.weight.data)
new_embedding = nn.Embedding(child.weight.shape[0], child.weight.shape[1] // mp_size)
new_embedding.weight.data.copy_(data)
setattr(child, "replaced", True)
return new_embedding
def update_mp_params(child):
            if getattr(child, "replaced", False):
return
for param in [
"n_heads", "inner_dim", "num_heads", "num_kv", "num_attention_heads", "num_attn_heads",
"all_head_size", "embed_dim", "hidden_size"
]:
if hasattr(child, param):
param_val = getattr(child, param)
assert param_val % mp_size == 0, f"{param} ({param_val}) must be divisible by mp_size ({mp_size})"
setattr(child, param, param_val // mp_size)
setattr(child, "replaced", True)
conv_linear_layer = False
if linear_layer_setting is not None:
linear_policies = {linear_layer_setting[0]: _replace}
if len(linear_layer_setting) == 2:
linear_policies.update({linear_layer_setting[1]: _slice_embedding})
else:
if orig_layer_impl is HFGPT2LayerPolicy._orig_layer_class:
try:
import transformers
conv_linear_layer = True
linear_policies = {transformers.model_utils.Conv1D: _replace}
except ImportError:
linear_policies = {nn.Linear: _replace}
else:
linear_policies = {nn.Linear: _replace, nn.Embedding: _slice_embedding}
def _replace_module(r_module, prev_name='', prev_class_name=''):
for name, child in r_module.named_children():
if prev_class_name == "":
class_name = prev_name
elif prev_name == "":
class_name = prev_class_name
else:
class_name = prev_class_name + '.' + prev_name
checking_key = prefix + '.' + class_name + '.' + name + '.' if class_name != "" else prefix + '.' + name + '.'
if child.__class__ in [nn.Linear, nn.Embedding, nn.LayerNorm] and state_dict is not None:
if any(checking_key in item for item in state_dict):
load(child, state_dict, checking_key, mp_group)
else:
continue
if len(child._buffers) != 0 and state_dict is not None:
load_buffer(child, state_dict, checking_key)
if child.__class__ in linear_policies:
setattr(r_module, name, linear_policies[child.__class__](child, prev_name + '.' + name,
conv_linear_layer))
elif any(isinstance(child, lp) for lp in linear_policies):
# Added for falcon model support
# Note: isinstance will account for class inheritance, child.__class__ does not
key = None
for lp in linear_policies:
if isinstance(child, lp):
key = lp
break
assert key is not None
setattr(r_module, name, linear_policies[key](child, prev_name + '.' + name, conv_linear_layer))
else:
update_mp_params(child)
_replace_module(child, name, class_name)
return r_module
return _replace_module(module)
def replace_fn(child, _policy, layer_id=0, prefix="", state_dict=None):
training = False # todo: refactor this part to go in the config
if training:
# copy relevant state from child -> new module
new_module = replace_with_policy(child, _policy, config.triangular_masking)
else:
# copy relevant state from child -> new module
if config.replace_with_kernel_inject:
new_module = replace_with_policy(child,
_policy,
config.triangular_masking,
inference=True,
layer_id=layer_id)
else:
new_module = replace_wo_policy(child, _policy, prefix=prefix, state_dict=state_dict)
return new_module
if checkpoint_dict is not None and not config.replace_with_kernel_inject:
# AutoTP shard loading
checkpoint = checkpoint_dict["checkpoints"]
pbar = tqdm.tqdm(total=len(checkpoint), desc=f"Loading {len(checkpoint)} checkpoint shards")
for i in range(len(checkpoint)):
replaced_module = replace_module(model=model,
orig_class=orig_layer_impl,
replace_fn=replace_fn,
_replace_policy=config.injection_policy_tuple,
checkpoint=checkpoint[i])
pbar.update(1)
gc.collect()
else:
replaced_module = replace_module(model=model,
orig_class=orig_layer_impl,
replace_fn=replace_fn,
_replace_policy=config.injection_policy_tuple)
quantizer = GroupQuantizer(q_int8=quantize)
world_size = dist.get_world_size() if dist.is_initialized() else 1
rank = dist.get_rank() if dist.is_initialized() else 0
if checkpoint_dict is not None and config.replace_with_kernel_inject:
assert container_g.ckpt_load_enabled, \
f"Meta Tensor checkpoint loading not supported in {container_g.__class__.__name__} container"
start_time = time.time()
checkpoint = checkpoint_dict['checkpoints']
ckpt_list = checkpoint["tp"] if type(checkpoint) is dict else checkpoint
ckpt_type = checkpoint_dict.get('parallelization', 'pp')
ckpt_mp_size = checkpoint_dict.get('tp_size', len(ckpt_list))
ckpt_mp_size = checkpoint_dict.get('mp_size', ckpt_mp_size)
base_dir1 = checkpoint_dict.get('base_dir', config.base_dir)
if ckpt_type == 'pp' and type(checkpoint) is list:
pbar = tqdm.tqdm(total=len(checkpoint), desc=f"Loading {len(checkpoint)} checkpoint shards")
for i in range(len(checkpoint)):
sd = [torch.load(os.path.join(base_dir1, checkpoint[i]), map_location='cpu')]
load_model_with_checkpoint(replaced_module,
sd,
mp_replace,
ckpt_type,
ckpt_mp_size,
quantizer,
container=container_g)
pbar.update(1)
else:
num_checkpoints = len(ckpt_list) // ckpt_mp_size
tp_split_size = (world_size / ckpt_mp_size)
sd_offset = int(rank / tp_split_size)
sd_count = int((rank + max(1, tp_split_size)) / tp_split_size) - sd_offset
pbar = tqdm.tqdm(total=num_checkpoints, desc=f"Loading {num_checkpoints} checkpoint shards")
for i in range(num_checkpoints):
pbar.update(1)
ckpt_index = i * ckpt_mp_size + sd_offset
ckpt_files = [
os.path.join(base_dir1, ckpt_list[ckpt_index + j]) if base_dir1 else ckpt_list[ckpt_index + j]
for j in range(sd_count)
]
sds = [torch.load(ckpt_file, map_location='cpu') for ckpt_file in ckpt_files]
load_model_with_checkpoint(replaced_module,
sds,
mp_replace,
ckpt_type,
ckpt_mp_size,
quantizer,
int(rank % tp_split_size),
container=container_g)
sds = [None for _ in sds]
gc.collect()
if "non_tp" in checkpoint:
pbar = tqdm.tqdm(total=len(checkpoint["non_tp"]),
desc=f"Loading {len(checkpoint['non_tp'])} checkpoint shards")
for i in range(len(checkpoint["non_tp"])):
pbar.update(1)
ckpt_file = os.path.join(base_dir1,
checkpoint["non_tp"][i]) if base_dir1 else checkpoint["non_tp"][i]
sds = [torch.load(ckpt_file, map_location='cpu')]
load_model_with_checkpoint(replaced_module,
sds,
mp_replace,
ckpt_type,
ckpt_mp_size,
quantizer,
int(rank % tp_split_size),
container=container_g)
sds = [None for _ in sds]
gc.collect()
print(f"checkpoint loading time at rank {rank}: {time.time()-start_time} sec")
if config.save_mp_checkpoint_path is not None:
from collections import OrderedDict
import json
num_partitions = 8
if checkpoint_dict is None:
ckpt_name = "ds_model"
try:
from transformers.models.bloom.modeling_bloom import BloomForCausalLM
if isinstance(model, BloomForCausalLM):
ckpt_name = "bloom"
except ImportError:
ckpt_name = "ds_model"
else:
ckpt_name = checkpoint_dict['type']
if dist.is_initialized():
dist.barrier()
transformer_name = get_transformer_name(replaced_module)
        non_tp_ckpt_name = 'non-tp.pt'
ckpt_files = [non_tp_ckpt_name]
os.makedirs(config.save_mp_checkpoint_path, exist_ok=True)
if not dist.is_initialized() or dist.get_rank() == 0:
print("Saving tp-sharded checkpoints")
torch.save(
OrderedDict({k: v
for k, v in dict(replaced_module.state_dict()).items()
if transformer_name not in k}), f'{config.save_mp_checkpoint_path}/{non_tp_ckpt_name}')
dtype_reprs = {
torch.float32: 'float32',
torch.float16: 'float16',
torch.int8: 'int8',
torch.bfloat16: 'bfloat16'
}
ckpt_config = json.dumps({
'type': ckpt_name,
'base_dir': f'{config.save_mp_checkpoint_path}',
'checkpoints': {
"non_tp": ckpt_files,
"tp": [f'tp_{r:0>2d}_{m:0>2d}.pt' for m in range(num_partitions) for r in range(world_size)]
},
'version': 1.0,
'parallelization': 'tp',
'tp_size': world_size,
'dtype': dtype_reprs[config.dtype]
})
with open(f"{config.save_mp_checkpoint_path}/ds_inference_config.json", "w") as cfg:
cfg.write(ckpt_config)
rep_sd = replaced_module.state_dict()
for n, p in replaced_module.named_parameters():
if hasattr(p, 'scale'):
rep_sd[n] = [p, p.scale]
keys = list(rep_sd.keys())
partition_size = (len(keys) // num_partitions + 1)
for m in range(num_partitions):
torch.save(
OrderedDict({
k: [rep_sd[k], rep_sd[k].scale] if hasattr(rep_sd[k], 'scale') else rep_sd[k]
for k in keys[m * partition_size:(m + 1) * partition_size] if transformer_name in k
}), f'{config.save_mp_checkpoint_path}/tp_{rank:0>2d}_{m:0>2d}.pt')
return replaced_module
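# A minimal usage sketch for the kernel-injection path above: it is normally reached through
# deepspeed.init_inference rather than by calling replace_transformer_layer directly. The model
# name and dtype below are hypothetical placeholders, and the helper itself is illustrative only.
def _sketch_kernel_injection_usage():
    import torch
    import deepspeed
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m")
    # init_inference builds the DS inference config and, with replace_with_kernel_inject=True,
    # ends up in replace_transformer_layer(), which runs the numbered container workflow in
    # replace_with_policy() for every matched transformer layer.
    engine = deepspeed.init_inference(model, dtype=torch.float16, replace_with_kernel_inject=True)
    return engine.module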
def revert_transformer_layer(orig_layer_impl, model, config, preln=False):
""" Revert DeepSpeed's transformer layer back to original bert-style transformer layer
Arguments:
orig_layer_impl (torch.nn.Module): the original transformer layer implementation that was replaced,
e.g., transformers.modeling_bert.BertLayer.
model (torch.nn.Module): user's nn.module representing their model
config (dict): model config containing hidden size, attention heads, etc.
Returns:
Updated nn.module with original bert-style transformer layers
"""
def replace_fn(child, _replace_policy, layer_id):
#from turing.nvidia_modelingpreln import BertLayer
orig_module = orig_layer_impl(config)
# copy relevant state from child -> original module
qkvw = child.attn_qkvw.data
qkvb = child.attn_qkvb.data
        qw, kw, vw = torch.chunk(qkvw, 3, dim=0)
        qb, kb, vb = torch.chunk(qkvb, 3, dim=0)
orig_module.attention.self.query.weight.data = qw
orig_module.attention.self.query.bias.data = qb
orig_module.attention.self.key.weight.data = kw
orig_module.attention.self.key.bias.data = kb
orig_module.attention.self.value.weight.data = vw
orig_module.attention.self.value.bias.data = vb
orig_module.attention.output.dense.weight.data = child.attn_ow.data
orig_module.attention.output.dense.bias.data = child.attn_ob.data
attn_ln_w = child.attn_nw.data
attn_ln_b = child.attn_nb.data
if preln:
orig_module.PostAttentionLayerNorm.weight.data = attn_ln_w
orig_module.PostAttentionLayerNorm.bias.data = attn_ln_b
else:
orig_module.attention.output.LayerNorm.weight.data = attn_ln_w
orig_module.attention.output.LayerNorm.bias.data = attn_ln_b
inter_ff_w = child.inter_w.data
inter_ff_b = child.inter_b.data
if preln:
orig_module.intermediate.dense_act.weight.data = inter_ff_w
orig_module.intermediate.dense_act.bias.data = inter_ff_b
else:
orig_module.intermediate.dense.weight.data = inter_ff_w
orig_module.intermediate.dense.bias.data = inter_ff_b
orig_module.output.dense.weight.data = child.output_w.data
orig_module.output.dense.bias.data = child.output_b.data
transformer_ln_w = child.norm_w.data
transformer_ln_b = child.norm_b.data
if preln:
orig_module.PreAttentionLayerNorm.weight.data = transformer_ln_w
orig_module.PreAttentionLayerNorm.bias.data = transformer_ln_b
else:
orig_module.output.LayerNorm.weight.data = transformer_ln_w
orig_module.output.LayerNorm.bias.data = transformer_ln_b
return orig_module
return replace_module(model=model,
orig_class=deepspeed.DeepSpeedTransformerLayer,
replace_fn=replace_fn,
_replace_policy=None)
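# A minimal sketch of the inverse of QKV fusion used by replace_fn above: the fused attn_qkvw is
# split back into per-projection weights with torch.chunk along dim 0. The tiny tensor below is
# arbitrary and only illustrates the shapes involved.
def _sketch_split_fused_qkv():
    import torch

    hidden = 4
    qkvw = torch.randn(3 * hidden, hidden)  # fused [3*hidden, hidden] weight
    qw, kw, vw = torch.chunk(qkvw, 3, dim=0)
    assert qw.shape == kw.shape == vw.shape == (hidden, hidden)
    return qw, kw, vw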
def replace_module(model, orig_class, replace_fn, _replace_policy, checkpoint=None):
""" Scan the model for instances of ``orig_clas:`` to replace using ``replace_fn``.
Arguments:
model (torch.nn.Module): the model to augment
orig_class (torch.nn.Module): the module to search for
replace_fn (method): a method to convert instances of ``orig_class`` to the
desired type and return a new instance.
Returns:
A modified ``model``.
"""
sd = None
if checkpoint is not None:
sd = torch.load(checkpoint, map_location='cpu')
policy = {}
if orig_class is not None:
policy.update({orig_class: (replace_fn, _replace_policy)})
else:
for plcy in replace_policies:
# instantiate a throw-away policy in order to populate the _orig_layer_class
_ = plcy(None)
if isinstance(plcy._orig_layer_class, list):
for orig_layer_class in plcy._orig_layer_class:
policy.update({orig_layer_class: (replace_fn, plcy)})
elif plcy._orig_layer_class is not None:
policy.update({plcy._orig_layer_class: (replace_fn, plcy)})
assert len(policy.items()) > 0,\
"No default policy found! Please specify your policy injection_policy (like {BertLayer:HFBEertLayerPolicy})." +\
"You can find some samples here: https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/replace_policy.py"
replaced_module, _ = _replace_module(model, policy, state_dict=sd)
if checkpoint is not None:
embedding_weight = None
for n, p in replaced_module.named_parameters():
if "word_embeddings." in n or "embed_tokens." in n or "wte." in n:
embedding_weight = p
if embedding_weight is not None and hasattr(replaced_module, "lm_head") and hasattr(
replaced_module.lm_head, "weight") and replaced_module.lm_head.weight.is_meta:
replaced_module.lm_head.weight = embedding_weight
return replaced_module
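# A minimal sketch of driving replace_module() directly with a custom replace_fn. The wrapper
# module and replace function below are hypothetical and only show the calling convention; real
# callers go through replace_transformer_layer()/revert_transformer_layer().
def _sketch_replace_module_usage(model):
    import torch.nn as nn

    class _Wrapped(nn.Module):  # hypothetical replacement module

        def __init__(self, child):
            super().__init__()
            self.child = child

        def forward(self, *args, **kwargs):
            return self.child(*args, **kwargs)

    def _replace_fn(child, _policy, layer_id=0, prefix="", state_dict=None):
        return _Wrapped(child)

    # Every nn.LayerNorm instance in `model` is handed to _replace_fn and swapped in place.
    return replace_module(model=model, orig_class=nn.LayerNorm, replace_fn=_replace_fn, _replace_policy=None)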
from ..pipe import PipelineModule
import re
def skip_level_0_prefix(model, name):
model = str(model)
key = re.search(r": (.*?)Model", model)
if key is None:
key = re.search(r": (.*?)Stack", model)
if key is None:
key = re.match(r"(.*?)Model", model)
if key is not None and key.group(1).lower() in "bloom":
# if keys start with 'model.', don't skip level 0 prefix
if not re.match("^model[.]", name):
return True
return False
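# A minimal sketch of skip_level_0_prefix() in action. It keys off the textual repr of the model,
# so for a BLOOM-style model the first capture group is "Bloom" and names that do not start with
# "model." skip the level-0 prefix when checkpoint keys are built in _replace_module(). The repr
# string and parameter names below are truncated, hypothetical examples.
def _sketch_skip_level_0_prefix():
    fake_repr = "BloomForCausalLM(\n  (transformer): BloomModel("
    assert skip_level_0_prefix(fake_repr, "transformer.word_embeddings.weight")
    assert not skip_level_0_prefix(fake_repr, "model.decoder.layers.0")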
def load_buffer(module, state_dict, prefix):
for name in module._buffers.keys():
if module._buffers[name].data.is_meta:
module._buffers[name] = torch.nn.parameter.Parameter(
data=torch.empty_like(module._buffers[name].data, device="cpu"),
requires_grad=module._buffers[name].data.requires_grad)
if prefix + name in state_dict.keys():
module._buffers[name].data.copy_(state_dict[prefix + name])
def _replace_module(model, policies, prefix='', layer_id=0, level_id=0, state_dict=None):
""" Traverse model's children recursively and apply any transformations in ``policies``.
Arguments:
model (torch.nn.Module): model to augment
policies (dict): Mapping of source class to replacement function.
Returns:
Modified ``model``.
"""
try:
import transformers
OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding
except:
OPTLearnedPositionalEmbedding = None
load_layers = [nn.Linear, nn.Embedding, nn.LayerNorm, OPTLearnedPositionalEmbedding]
for name, child in model.named_children():
if child.__class__ in policies:
replaced_module = policies[child.__class__][0](child,
policies[child.__class__][-1],
layer_id,
prefix=prefix + name,
state_dict=state_dict)
setattr(model, name, replaced_module)
if isinstance(model, PipelineModule):
assert hasattr(model, 'forward_funcs'),\
"we require pipe-module to have the list of fwd_functions"
model.forward_funcs[model.fwd_map[name]] = replaced_module
layer_id += 1
else:
checking_key = prefix + name + '.'
if child.__class__ in load_layers and state_dict is not None:
if any(checking_key in item for item in state_dict):
load(
child,
state_dict,
checking_key,
)
else:
continue
if len(child._buffers) != 0 and state_dict is not None:
load_buffer(child, state_dict, checking_key)
_, layer_id = _replace_module(child,
policies,
prefix if level_id == 0 and skip_level_0_prefix(model, name) else \
prefix + name + '.',
layer_id=layer_id,
level_id=level_id + 1,
state_dict=state_dict)
    # Attach the reset_cache function to the model so it can be called at the beginning of text generation.
model.reset_cache = transformer_inference.DeepSpeedTransformerInference.reset_cache
return model, layer_id
def load(module, state_dict, prefix, mp_group=None):
mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
if hasattr(module, 'weight'):
if module.weight.data.is_meta:
            # a meta tensor cannot be cast or copied to, so replace it with a normal tensor here
module.weight = torch.nn.parameter.Parameter(data=torch.empty_like(module.weight.data, device="cpu"),
requires_grad=module.weight.data.requires_grad)
if 'query_key_value' in prefix:
module.weight = mp_replace.strided_copy(module.weight.data,
state_dict[prefix + 'weight'],
num_splits=3)
else:
module.weight = mp_replace.copy(module.weight.data, state_dict[prefix + 'weight'])
else:
if hasattr(module, 'norm') and hasattr(module.norm, 'weight'):
if module.norm.weight.data.is_meta:
                # a meta tensor cannot be cast or copied to, so replace it with a normal tensor here
module.norm.weight = torch.nn.parameter.Parameter(data=torch.empty_like(module.norm.weight.data,
device="cpu"),
requires_grad=module.norm.weight.data.requires_grad)
module.norm.weight = mp_replace.copy(module.norm.weight.data, state_dict[prefix + 'weight'])
if prefix + 'bias' in state_dict.keys():
if hasattr(module, 'bias'):
if module.bias.data.is_meta:
                # a meta tensor cannot be cast or copied to, so replace it with a normal tensor here
module.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.bias.data, device="cpu"),
requires_grad=module.bias.data.requires_grad)
module.bias = mp_replace.copy(module.bias, state_dict[prefix + 'bias'])
else:
if hasattr(module, 'norm') and hasattr(module.norm, 'bias'):
if module.norm.bias.data.is_meta:
                    # a meta tensor cannot be cast or copied to, so replace it with a normal tensor here
module.norm.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.norm.bias.data,
device="cpu"),
requires_grad=module.norm.bias.data.requires_grad)
module.norm.bias = mp_replace.copy(module.norm.bias, state_dict[prefix + 'bias'])
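# A minimal sketch of the meta-tensor materialization pattern used by load() and load_buffer()
# above: a meta tensor has no storage, so an empty CPU tensor of the same shape is created first
# and the real values are copied into it. Assumes a torch version that supports constructing
# modules on the meta device; the layer below is a stand-in.
def _sketch_meta_tensor_materialization():
    import torch

    layer = torch.nn.Linear(4, 4, device="meta")  # weight/bias allocated without storage
    assert layer.weight.is_meta
    # Re-create the parameter on CPU, then copy real values into it (as load() does).
    layer.weight = torch.nn.Parameter(torch.empty_like(layer.weight, device="cpu"),
                                      requires_grad=layer.weight.requires_grad)
    layer.weight.data.copy_(torch.zeros(4, 4))
    return layer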
# File: DeepSpeed-master/deepspeed/module_inject/replace_policy.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .containers import HFGPT2LayerPolicy
from .containers import HFBertLayerPolicy
from .containers import BLOOMLayerPolicy
from .containers import HFGPTJLayerPolicy
from .containers import HFGPTNEOLayerPolicy
from .containers import GPTNEOXLayerPolicy
from .containers import HFOPTLayerPolicy
from .containers import MegatronLayerPolicy
from .containers import HFDistilBertLayerPolicy
from .containers import HFCLIPLayerPolicy
from .containers import LLAMALayerPolicy
from .containers import UNetPolicy
from .containers import VAEPolicy
# transformer-based policies
replace_policies = [
HFBertLayerPolicy, HFGPTNEOLayerPolicy, GPTNEOXLayerPolicy, HFGPTJLayerPolicy, MegatronLayerPolicy,
HFGPT2LayerPolicy, BLOOMLayerPolicy, HFOPTLayerPolicy, HFCLIPLayerPolicy, HFDistilBertLayerPolicy, LLAMALayerPolicy
]
# non-transformer-based policies
generic_policies = [UNetPolicy, VAEPolicy]
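# A minimal sketch of how replace_module() in replace_module.py consumes the list above when no
# injection_policy is given: each policy is instantiated with client_module=None just to populate
# _orig_layer_class, and every original layer class is mapped to its policy.
def _sketch_build_default_policy_map():
    policy_map = {}
    for plcy in replace_policies:
        _ = plcy(None)  # throw-away instance; populates plcy._orig_layer_class
        orig = plcy._orig_layer_class
        if orig is None:
            continue  # the corresponding framework is not installed
        for orig_layer_class in (orig if isinstance(orig, list) else [orig]):
            policy_map[orig_layer_class] = plcy
    return policy_map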
# File: DeepSpeed-master/deepspeed/module_inject/containers/distil_bert.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
class DS_DistilBERTContainer(BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
self.triangular_masking = False
self.return_single_tuple = True
self.use_triton = kwargs['config'].use_triton and deepspeed.HAS_TRITON
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedBERTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
class HFDistilBertLayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self, client_module, inference=False, preln=False):
super().__init__(inference)
self.client_module = client_module
self.preln = preln
self.cuda_graph_supported = True
if HFDistilBertLayerPolicy._orig_layer_class is None:
try:
import transformers
HFDistilBertLayerPolicy._orig_layer_class = [
transformers.models.distilbert.modeling_distilbert.TransformerBlock,
]
except:
HFDistilBertLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attention.q_lin.weight.shape[1], \
self.client_module.attention.n_heads, \
self.client_module.sa_layer_norm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.attention.q_lin.weight
qb = self.client_module.attention.q_lin.bias
kw = self.client_module.attention.k_lin.weight
kb = self.client_module.attention.k_lin.bias
vw = self.client_module.attention.v_lin.weight
vb = self.client_module.attention.v_lin.bias
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)
return qkvw, \
qkvb, \
self.client_module.attention.out_lin.weight, \
self.client_module.attention.out_lin.bias
def mlp(self, enable_training=False):
intermediate_ff = self.client_module.ffn.lin1
return intermediate_ff.weight, intermediate_ff.bias, \
self.client_module.ffn.lin2.weight, \
self.client_module.ffn.lin2.bias
def layernorm(self):
attention_layernorm = self.client_module.sa_layer_norm
transformer_layernorm = self.client_module.output_layer_norm
return attention_layernorm.weight, \
attention_layernorm.bias, \
transformer_layernorm.weight, \
transformer_layernorm.bias
# File: DeepSpeed-master/deepspeed/module_inject/containers/bert.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
class DS_BERTContainer(BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
self.return_tuple = True
self.triangular_masking = False
self.use_triton = kwargs['config'].use_triton and deepspeed.HAS_TRITON
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedBERTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
class HFBertLayerPolicy(TransformerPolicy):
def __init__(self, client_module, inference=False):
super().__init__(inference, pre_attn_norm=False)
self.client_module = client_module
self.cuda_graph_supported = True
if HFBertLayerPolicy._orig_layer_class is None:
try:
import transformers
HFBertLayerPolicy._orig_layer_class = [
transformers.models.bert.modeling_bert.BertLayer,
transformers.models.roberta.modeling_roberta.RobertaLayer
]
except:
HFBertLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
if self.pre_attn_norm:
attention_layernorm = self.client_module.PostAttentionLayerNorm
else:
attention_layernorm = self.client_module.attention.output.LayerNorm
return self.client_module.attention.self.query.weight.shape[1], \
self.client_module.attention.self.num_attention_heads, \
attention_layernorm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.attention.self.query.weight
qb = self.client_module.attention.self.query.bias
kw = self.client_module.attention.self.key.weight
kb = self.client_module.attention.self.key.bias
vw = self.client_module.attention.self.value.weight
vb = self.client_module.attention.self.value.bias
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)
return qkvw, \
qkvb, \
self.client_module.attention.output.dense.weight, \
               self.client_module.attention.output.dense.bias
def mlp(self, enable_training=False):
if self.pre_attn_norm:
intermediate_ff = self.client_module.intermediate.dense_act
else:
intermediate_ff = self.client_module.intermediate.dense
return intermediate_ff.weight, intermediate_ff.bias, \
self.client_module.output.dense.weight, \
self.client_module.output.dense.bias
def layernorm(self):
if self.pre_attn_norm:
attention_layernorm = self.client_module.PostAttentionLayerNorm
transformer_layernorm = self.client_module.PreAttentionLayerNorm
else:
attention_layernorm = self.client_module.attention.output.LayerNorm
transformer_layernorm = self.client_module.output.LayerNorm
return attention_layernorm.weight, \
attention_layernorm.bias, \
transformer_layernorm.weight, \
transformer_layernorm.bias
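# A minimal sketch of the QKV fusion performed by attention() above: the separate q/k/v weights
# are concatenated along dim 0 into one [3*hidden, hidden] projection, and the fused output can be
# chunked back into the three heads. Dimensions below are arbitrary.
def _sketch_qkv_fusion():
    import torch
    import torch.nn.functional as F

    hidden = 8
    q, k, v = (torch.nn.Linear(hidden, hidden) for _ in range(3))
    qkvw = torch.cat((q.weight, k.weight, v.weight), dim=0)
    qkvb = torch.cat((q.bias, k.bias, v.bias), dim=0)
    x = torch.randn(2, hidden)
    qo, ko, vo = F.linear(x, qkvw, qkvb).chunk(3, dim=-1)
    # The fused projection reproduces the three separate projections.
    assert torch.allclose(qo, q(x), atol=1e-6)
    assert torch.allclose(ko, k(x), atol=1e-6)
    assert torch.allclose(vo, v(x), atol=1e-6)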
# File: DeepSpeed-master/deepspeed/module_inject/containers/base.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Create a container object to save model-specific tensors using the policy file above.
from abc import ABC
import torch
import deepspeed
from deepspeed.ops.transformer.inference.config import DeepSpeedInferenceConfig
from deepspeed.accelerator import get_accelerator
# If the intermediate size attribute is set to DEFAULT_INTERMEDIATE_SIZE,
# the intermediate size is assumed to be 4x the embedding dimension.
DEFAULT_INTERMEDIATE_SIZE = -1
class BaseConvolutionContainer(ABC):
# not implemented
def __init__(self):
pass
class BaseTransformerContainer(ABC):
def __init__(self, policy, config, model_config, layer_id, child):
self.policy = policy
self.config = config
self.model_config = model_config
self.layer_id = layer_id
self.child = child
self.megatron_v2 = self.policy.is_megatron_v2
self.scale_attention = self.policy.scale_attention
self.ckpt_load_enabled = False
# configuration for models. todo: can this be moved to a pydantic model config?
self.hidden_size = None
self.intermediate_size = None
self.num_attention_heads = None
self.mp_size = self.config.tensor_parallel.tp_size
self.pre_layer_norm = self.model_config.do_layer_norm_before if \
hasattr(self.model_config, 'do_layer_norm_before') else self.policy.pre_attn_norm
self.dtype = self.config.dtype
self.attn_linear_layer = self.policy.linear_layer
self.mlp_linear_layer = self.policy.linear_layer
self.return_tuple = self.config.return_tuple
self.triangular_masking = True
self.local_attention = ((self.model_config.attention_layers[self.layer_id] == "local") if hasattr(
self.model_config, 'attention_layers') else False)
self.window_size = getattr(self.model_config, "window_size", 1)
self.mlp_act_func_type = self.policy.mlp_act_func_type
self.norm_type = self.policy.norm_type
self.training_mp_size = self.config.training_mp_size
self.bigscience_bloom = False
self.max_out_tokens = self.config.max_out_tokens
self.min_out_tokens = self.config.min_out_tokens
self.scale_attn_by_inverse_layer_idx = getattr(self.config, "scale_attn_by_inverse_layer_idx", False)
self.use_mup = self.policy.use_mup
self.return_single_tuple = False
self.rotary_dim = self.get_rotary_dim()
self.mlp_after_attn = (self.rotary_dim is None or self.rotary_dim < 0)
# Attention tensors
self.qkvw = None
self.qkvb = None
self.dense_w = None
self.dense_b = None
# MLP tensors
self._h4h_w = None
self._h4h_b = None
self._4hh_w = None
self._4hh_b = None
# LayerNorm tensors
self.attn_nw = None
self.attn_nb = None
self.input_nw = None
self.input_nb = None
self.mp_group = None
self.use_triton = False
# Triton
self.use_triton = config.use_triton and deepspeed.HAS_TRITON
def create_ds_model_config(self):
self.set_hidden_heads(*self.policy.get_hidden_heads())
        assert self.num_attention_heads % self.mp_size == 0,\
                "To run the model in parallel across GPUs, the number of attention heads must be divisible by the world size! " +\
                "This is because the attention computation is partitioned evenly among the parallel GPUs."
self.ds_model_config = DeepSpeedInferenceConfig(
hidden_size=self.hidden_size,
intermediate_size=self.intermediate_size,
heads=self.num_attention_heads,
layer_norm_eps=self.layernorm_epsilon,
dtype=self.dtype,
pre_layer_norm=self.pre_layer_norm,
norm_type=self.norm_type,
mp_size=self.mp_size,
return_tuple=self.return_tuple,
triangular_masking=self.triangular_masking,
local_attention=self.local_attention,
window_size=self.window_size,
rotary_dim=self.rotary_dim,
mlp_after_attn=self.mlp_after_attn,
mlp_act_func_type=self.mlp_act_func_type,
training_mp_size=self.training_mp_size,
bigscience_bloom=self.bigscience_bloom,
max_out_tokens=self.max_out_tokens,
min_out_tokens=self.min_out_tokens,
scale_attn_by_inverse_layer_idx=self.scale_attn_by_inverse_layer_idx,
use_mup=self.use_mup,
return_single_tuple=self.return_single_tuple,
set_empty_params=self.config.set_empty_params,
transposed_mode=self.config.transposed_mode,
use_triton=self.use_triton,
triton_autotune=self.config.triton_autotune)
if self.use_triton and deepspeed.HAS_TRITON:
if not self.config.triton_autotune:
from deepspeed.ops.transformer.inference.triton.matmul_ext import fp16_matmul
fp16_matmul.skip_autotune()
return self.ds_model_config
def check_meta_tensor_support(self):
if hasattr(self.qkvw, 'is_meta'):
if self.qkvw.is_meta:
assert self.ckpt_load_enabled, "Meta tensors are not supported for this model currently."
else:
raise NotImplementedError("Meta tensor support is not available, please upgrade to torch 1.10+")
def initialize_tensors(self, enable_training=False):
# Set the tensors from policy (user module) to container (DS module)
self.set_attention(*self.policy.attention(enable_training=enable_training))
self.set_mlp(*self.policy.mlp(enable_training=enable_training))
self.set_layernorm(*self.policy.layernorm())
self.check_meta_tensor_support()
def convert_to_required_dtype(self):
# Note: converting tensors to fp16 requires that we do it in-place using self.__dict__ and not make a list/dict copy
if self.dtype in [torch.half, torch.bfloat16]:
for k, v in self.__dict__.items():
# The list comprehension is used for MoE tensor lists
if isinstance(v, list) and all((isinstance(tensor, torch.Tensor) \
or isinstance(tensor, torch.nn.Parameter)) for tensor in v):
self.__dict__[k] = [moe_tensor.to(self.dtype) for moe_tensor in v]
if isinstance(v, torch.Tensor) or isinstance(v, torch.nn.Parameter):
self.__dict__[k] = v.to(self.dtype)
def get_rotary_dim(self):
if hasattr(self.model_config, 'rotary_dim'):
return self.model_config.rotary_dim
if hasattr(self.child, 'attention') and hasattr(self.child.attention, 'rotary_ndims'):
return self.child.attention.rotary_ndims
return -1
def set_moe(self, moe=False):
self.moe = moe
def set_tensor_parallel_config(self, mp_size, mp_group):
self.mp_size = mp_size
self.mp_group = mp_group
def set_quantization_config(self, quantizer):
self.quantizer = quantizer
def set_hidden_heads(self, hidden_size, num_attention_heads, epsilon, intermediate_size):
"""
Args:
hidden_size: embedding dimension of the model
num_attention_heads: number of attention heads in the model
epsilon: epsilon value for layer norm (same value used for all norms)
intermediate_size: Size of MLP projection. If `DEFAULT_INTERMEDIATE_SIZE` is passed
it is assumed to be `4 * hidden_size`
"""
self.hidden_size = hidden_size
if intermediate_size == DEFAULT_INTERMEDIATE_SIZE:
self.intermediate_size = 4 * hidden_size
else:
self.intermediate_size = intermediate_size
self.num_attention_heads = num_attention_heads
self.layernorm_epsilon = epsilon
def set_attention(self, qkvw, qkvb, dense_w, dense_b):
self.qkvw = qkvw
self.qkvb = qkvb
self.dense_w = dense_w
self.dense_b = dense_b
def set_mlp(self, _h4h_w, _h4h_b, _4hh_w, _4hh_b):
self._h4h_w = _h4h_w
self._h4h_b = _h4h_b
self._4hh_w = _4hh_w
self._4hh_b = _4hh_b
def set_layernorm(self, attn_nw, attn_nb, input_nw, input_nb):
self.attn_nw = attn_nw
self.attn_nb = attn_nb
self.input_nw = input_nw
self.input_nb = input_nb
def apply_weight_quantization(self):
# quantize attention weights
self.attention_quantization()
# quantize mlp weights
self.mlp_quantization()
def attention_quantization(self):
self.module.attention.attn_qkvw = self.quantizer.quantize(self.module.attention.attn_qkvw)
self.module.attention.attn_ow = self.quantizer.quantize(self.module.attention.attn_ow)
def mlp_quantization(self):
self.module.mlp.inter_w = self.quantizer.quantize(self.module.mlp.inter_w)
self.module.mlp.output_w = self.quantizer.quantize(self.module.mlp.output_w)
def apply_tensor_parallelism(self, mp_replace):
# setup the new Attention module
self.attention_qkv_mp(mp_replace)
self.attention_o_mp(mp_replace)
# setup the new MLP module
self.mlp_inter_mp(mp_replace)
self.mlp_output_mp(mp_replace)
# Apply weight quantization
# TODO(cmikeh2): Re-enable this once verified
#self.apply_weight_quantization()
def attention_qkv_mp(self, mp_replace, reversed_dim=False):
self.module.attention.attn_qkvw = mp_replace.strided_copy(self.module.attention.attn_qkvw,
self.qkvw,
num_splits=3,
int8=reversed_dim)
self.module.attention.attn_qkvb = mp_replace.strided_copy(self.module.attention.attn_qkvb,
self.qkvb,
num_splits=3,
int8=reversed_dim)
def attention_o_mp(self, mp_replace, reversed_dim=False):
self.module.attention.attn_ow = mp_replace.copy(self.module.attention.attn_ow, self.dense_w, int8=reversed_dim)
self.module.attention.attn_ob = mp_replace.copy(self.module.attention.attn_ob,
self.dense_b,
int8=reversed_dim,
allocate_tensor=reversed_dim)
def mlp_inter_mp(self, mp_replace, reversed_dim=False):
self.module.mlp.inter_w = mp_replace.copy(self.module.mlp.inter_w, self._h4h_w, int8=reversed_dim)
self.module.mlp.inter_b = mp_replace.copy(self.module.mlp.inter_b, self._h4h_b, int8=reversed_dim)
def mlp_output_mp(self, mp_replace, reversed_dim=False):
self.module.mlp.output_w = mp_replace.copy(self.module.mlp.output_w, self._4hh_w, int8=reversed_dim)
self.module.mlp.output_b = mp_replace.copy(self.module.mlp.output_b,
self._4hh_b,
int8=reversed_dim,
allocate_tensor=reversed_dim)
def copy_data_to_new_module(self):
params = {'attn_nw': self.attn_nw, 'attn_nb': self.attn_nb}
for key in params:
if params[key] is None:
setattr(self.module.mlp, key, None)
else:
setattr(self.module.mlp, key,
torch.nn.parameter.Parameter(params[key].to(get_accelerator().current_device_name())))
params = {'norm_w': self.input_nw, 'norm_b': self.input_nb}
for key in params:
if params[key] is None:
setattr(self.module, key, None)
else:
setattr(self.module, key,
torch.nn.parameter.Parameter(params[key].to(get_accelerator().current_device_name())))
def transpose(self):
self.transpose_attention()
self.transpose_mlp()
def transpose_attention(self):
if self.attn_linear_layer:
self.qkvw = self.transpose_impl(self.qkvw.data)
self.dense_w = self.transpose_impl(self.dense_w.data)
def transpose_mlp(self):
if self.mlp_linear_layer:
self._h4h_w = self.transpose_impl(self._h4h_w.data)
self._4hh_w = self.transpose_impl(self._4hh_w.data)
def transpose_impl(self, data):
data = data.contiguous()
data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1))
data = data.reshape(data.shape[-1], data.shape[-2])
        data = data.to(get_accelerator().current_device_name())  # .to() is not in-place; keep the moved tensor
return data
def get_all_params(self):
params = [
self.attn_nw,
self.attn_nb,
self.input_nw,
self.input_nb,
]
params.extend(self.get_attn_params())
params.extend(self.get_mlp_params())
return params
def get_attn_params(self):
return [self.qkvw, self.qkvb, self.dense_w, self.dense_b]
def get_mlp_params(self):
return [self._h4h_w, self._h4h_b, self._4hh_w, self._4hh_b]
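# A minimal sketch of the in-place transpose trick used by transpose_impl() above: the flattened
# transpose is written back into the same storage and the shape is then reinterpreted, avoiding a
# second persistent copy. The small tensor below is arbitrary.
def _sketch_inplace_transpose():
    import torch

    w = torch.arange(6, dtype=torch.float32).reshape(2, 3)
    expected = w.t().clone()
    w = w.contiguous()
    w.reshape(-1).copy_(w.transpose(-1, -2).contiguous().reshape(-1))
    w = w.reshape(w.shape[-1], w.shape[-2])
    assert torch.equal(w, expected)
    return w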
# File: DeepSpeed-master/deepspeed/module_inject/containers/llama.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features import HybridSplitQKVContainer, HybridGatedMLPContainer
from deepspeed.utils.types import ActivationFuncType, NormType
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import (
TransformerPolicy,
transformer_param_names,
maybe_copy,
maybe_copy_qkv,
maybe_copy_geglu,
maybe_get_lora,
)
class DS_LLAMAContainer(HybridGatedMLPContainer, HybridSplitQKVContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
_config.rotate_half = True
_config.rotate_every_two = False
_config.rotary_dim = self.hidden_size // self.num_attention_heads
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
return self.module
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.mlp.up_proj.weight, self.policy.client_module.mlp.gate_proj.weight,
self.policy.client_module.mlp.down_proj.weight, self.policy.client_module.self_attn.q_proj.weight,
self.policy.client_module.self_attn.k_proj.weight, self.policy.client_module.self_attn.v_proj.weight,
self.policy.client_module.self_attn.o_proj.weight
]
]
def get_lora_matched_pair(self):
up_proj_lora, gate_proj_lora, down_proj_lora, q_lora, k_lora, v_lora, out_lora = self.get_lora_params()
ret = [(up_proj_lora, self.inter_up_w), (gate_proj_lora, self.inter_gate_w), (down_proj_lora, self._4hh_w),
(out_lora, self.dense_w), (q_lora, self.qw), (k_lora, self.kw), (v_lora, self.vw)]
return ret
def set_q_k_v(self):
"""
Necessary to implement for `HybridSplitQKVContainer`
"""
self.qw = self.policy.client_module.self_attn.q_proj.weight
self.qb = None
self.kw = self.policy.client_module.self_attn.k_proj.weight
self.kb = None
self.vw = self.policy.client_module.self_attn.v_proj.weight
self.vb = None
def set_mlp_gate(self):
"""
Necessary to implement for `HybridGatedMLPContainer`
"""
self.inter_up_w = self.policy.client_module.mlp.up_proj.weight
self.inter_up_b = None
self.inter_gate_w = self.policy.client_module.mlp.gate_proj.weight
self.inter_gate_b = None
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'self_attn.q_proj.weight', \
'self_attn.k_proj.weight', \
'self_attn.v_proj.weight', \
'self_attn.o_proj.weight', \
'mlp.up_proj.weight', \
'mlp.gate_proj.weight', \
'mlp.down_proj.weight', \
'input_layernorm.weight', \
'post_attention_layernorm.weight'
)
maybe_copy_qkv(module.attention,
sd,
weight_quantizer,
mp_replace,
'attn_qkvw', [prefix + param_names[0], prefix + param_names[1], prefix + param_names[2]],
split_qkv=self.policy.split_qkv)
for i in range(3, 4):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
maybe_copy_geglu(module.mlp, sd, weight_quantizer, mp_replace, 'inter_w',
[prefix + param_names[4], prefix + param_names[5]])
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, 'output_w', prefix + param_names[6])
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[8], prefix + param_names[7])
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[10], prefix + param_names[8])
class LLAMALayerPolicy(TransformerPolicy):
def __init__(self, client_module, inference=True):
super().__init__(
inference,
mlp_act_func_type=ActivationFuncType.GATED_SILU,
norm_type=NormType.RMSNorm,
)
self.client_module = client_module
try:
import transformers
LLAMALayerPolicy._orig_layer_class = transformers.models.llama.modeling_llama.LlamaDecoderLayer # type: ignore
except:
LLAMALayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.self_attn.q_proj.weight.shape[1], \
self.client_module.self_attn.num_heads, \
self.client_module.input_layernorm.variance_epsilon, \
self.client_module.mlp.gate_proj.weight.shape[0]
def attention(self, enable_training=False):
qw = self.client_module.self_attn.q_proj.weight
kw = self.client_module.self_attn.k_proj.weight
vw = self.client_module.self_attn.v_proj.weight
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
return qkvw, \
None, \
self.client_module.self_attn.o_proj.weight, \
None
def mlp(self, enable_training=False):
mlp1_up = self.client_module.mlp.up_proj.weight
mlp1_gate = self.client_module.mlp.gate_proj.weight
mlp2 = self.client_module.mlp.down_proj.weight
mlp1 = Parameter(torch.cat((mlp1_up, mlp1_gate), dim=0), requires_grad=enable_training)
return mlp1, None, mlp2, None
def layernorm(self):
return self.client_module.post_attention_layernorm.weight, \
None, \
self.client_module.input_layernorm.weight, \
None
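# A minimal sketch of the gated-SiLU MLP implied by the GATED_SILU activation above, assuming the
# usual LLaMA formulation down(silu(gate(x)) * up(x)): mlp() concatenates up_proj and gate_proj so
# a fused kernel can produce both halves from a single GEMM. Dimensions below are arbitrary.
def _sketch_gated_silu_mlp():
    import torch
    import torch.nn.functional as F

    hidden, inter = 8, 16
    up = torch.nn.Linear(hidden, inter, bias=False)
    gate = torch.nn.Linear(hidden, inter, bias=False)
    down = torch.nn.Linear(inter, hidden, bias=False)
    x = torch.randn(2, hidden)
    reference = down(F.silu(gate(x)) * up(x))
    # Fused form: one [2*inter, hidden] projection, split into the up/gate halves afterwards.
    fused_w = torch.cat((up.weight, gate.weight), dim=0)
    up_h, gate_h = F.linear(x, fused_w).chunk(2, dim=-1)
    assert torch.allclose(down(F.silu(gate_h) * up_h), reference, atol=1e-6)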
# File: DeepSpeed-master/deepspeed/module_inject/containers/clip.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
class DS_CLIPContainer(BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
class HFCLIPLayerPolicy(TransformerPolicy):
def __init__(self, client_module, inference=False):
super().__init__(inference, pre_attn_norm=True, scale_attention=True)
self.client_module = client_module
self.cuda_graph_supported = True
if HFCLIPLayerPolicy._orig_layer_class is None:
try:
import transformers
HFCLIPLayerPolicy._orig_layer_class = transformers.models.clip.modeling_clip.CLIPEncoderLayer
except:
HFCLIPLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.self_attn.q_proj.weight.shape[1], \
self.client_module.self_attn.num_heads, \
self.client_module.layer_norm1.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.self_attn.q_proj.weight
qb = self.client_module.self_attn.q_proj.bias
kw = self.client_module.self_attn.k_proj.weight
kb = self.client_module.self_attn.k_proj.bias
vw = self.client_module.self_attn.v_proj.weight
vb = self.client_module.self_attn.v_proj.bias
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)
return qkvw, \
qkvb, \
self.client_module.self_attn.out_proj.weight, \
self.client_module.self_attn.out_proj.bias
def mlp(self, enable_training=False):
return self.client_module.mlp.fc1.weight, \
self.client_module.mlp.fc1.bias, \
self.client_module.mlp.fc2.weight, \
self.client_module.mlp.fc2.bias
def layernorm(self):
return self.client_module.layer_norm2.weight, \
self.client_module.layer_norm2.bias, \
self.client_module.layer_norm1.weight, \
self.client_module.layer_norm1.bias
# File: DeepSpeed-master/deepspeed/module_inject/containers/vae.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from ..policy import DSPolicy
from ...model_implementations.diffusers.vae import DSVAE
class VAEPolicy(DSPolicy):
def __init__(self):
super().__init__()
try:
import diffusers
if hasattr(diffusers.models.vae, "AutoencoderKL"):
self._orig_layer_class = diffusers.models.vae.AutoencoderKL
else:
# Diffusers >= 0.12.0 changes location of AutoencoderKL
self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL
except ImportError:
self._orig_layer_class = None
def match(self, module):
return isinstance(module, self._orig_layer_class)
def match_replaced(self, module):
return isinstance(module, DSVAE)
def apply(self, module, enable_cuda_graph=True):
# TODO(cmikeh2): Enable cuda graph should be an inference configuration
return DSVAE(module, enable_cuda_graph=enable_cuda_graph)
# NOTE (lekurile): Should we have a diffusers policy class?
def attention(self):
pass
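# A minimal sketch of how a generic diffusers policy like VAEPolicy is applied: scan a pipeline's
# attributes, match on the original class, and swap in the wrapped module, mirroring the loop in
# replace_module.py. `pipeline` is a hypothetical diffusers pipeline object.
def _sketch_apply_vae_policy(pipeline, enable_cuda_graph=False):
    import torch

    policy = VAEPolicy()
    if policy._orig_layer_class is None:
        return pipeline  # diffusers is not installed
    for name, sub_module in list(vars(pipeline).items()):
        if isinstance(sub_module, torch.nn.Module) and policy.match(sub_module):
            setattr(pipeline, name, policy.apply(sub_module, enable_cuda_graph=enable_cuda_graph))
    return pipeline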
# File: DeepSpeed-master/deepspeed/module_inject/containers/opt.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features import MetaTensorContainer, HybridSplitQKVContainer
from deepspeed.model_implementations.transformers.ds_opt import DeepSpeedOPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from ..policy import maybe_copy_qkv
from ..policy import maybe_get_lora
from deepspeed.utils.types import ActivationFuncType
class DS_OPTContainer(MetaTensorContainer, HybridSplitQKVContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedOPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.fc1,
self.policy.client_module.fc2,
self.policy.client_module.self_attn.q_proj,
self.policy.client_module.self_attn.k_proj,
self.policy.client_module.self_attn.v_proj,
self.policy.client_module.self_attn.out_proj,
]
]
def set_q_k_v(self):
"""
Necessary to implement for `HybridSplitQKVContainer`
"""
self.qw = self.policy.client_module.self_attn.q_proj.weight
self.qb = self.policy.client_module.self_attn.q_proj.bias
self.kw = self.policy.client_module.self_attn.k_proj.weight
self.kb = self.policy.client_module.self_attn.k_proj.bias
self.vw = self.policy.client_module.self_attn.v_proj.weight
self.vb = self.policy.client_module.self_attn.v_proj.bias
def get_lora_matched_pair(self):
fc1_lora, fc2_lora, q_lora, k_lora, v_lora, out_lora = self.get_lora_params()
ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (out_lora, self.dense_w), (q_lora, self.qw),
(k_lora, self.kw), (v_lora, self.vw)]
return ret
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'self_attn.q_proj.weight', \
'self_attn.k_proj.weight', \
'self_attn.v_proj.weight', \
'self_attn.q_proj.bias', \
'self_attn.k_proj.bias', \
'self_attn.v_proj.bias', \
'self_attn.out_proj.weight', \
'self_attn.out_proj.bias', \
'fc1.weight', \
'fc1.bias', \
'fc2.weight', \
'fc2.bias', \
'final_layer_norm.weight', \
'final_layer_norm.bias', \
'self_attn_layer_norm.weight', \
'self_attn_layer_norm.bias'
)
for i in range(0, 6, 3):
maybe_copy_qkv(module.attention,
sd,
weight_quantizer,
mp_replace,
transformer_param_names[i // 3],
[prefix + param_names[i], prefix + param_names[i + 1], prefix + param_names[i + 2]],
split_qkv=self.policy.split_qkv)
for i in range(6, 8):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 4],
prefix + param_names[i])
for i in range(8, 14):
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i - 4],
prefix + param_names[i])
for i in range(14, 16):
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i - 4],
prefix + param_names[i])
class HFOPTLayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self, client_module, inference=True, use_load_prefix=True):
super().__init__(inference, linear_layer=True, pre_attn_norm=True, use_load_prefix=use_load_prefix)
self.client_module = client_module
try:
import transformers
HFOPTLayerPolicy._orig_layer_class = transformers.models.opt.modeling_opt.OPTDecoderLayer
except:
HFOPTLayerPolicy._orig_layer_class = None
if hasattr(TransformerPolicy, "hf_model_config") and hasattr(TransformerPolicy.hf_model_config,
"activation_function"):
if TransformerPolicy.hf_model_config.activation_function == "relu":
self.mlp_act_func_type = ActivationFuncType.ReLU
elif TransformerPolicy.hf_model_config.activation_function in ["gelu", "gelu_new"]:
self.mlp_act_func_type = ActivationFuncType.GELU
else:
raise ValueError("Unsupported activation function: {}".format(
TransformerPolicy.hf_model_config.activation_function))
else:
self.mlp_act_func_type = ActivationFuncType.ReLU # default
def get_hidden_heads(self):
return self.client_module.self_attn.embed_dim, \
self.client_module.self_attn.num_heads, \
self.client_module.self_attn_layer_norm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.self_attn.q_proj.weight
qb = self.client_module.self_attn.q_proj.bias
kw = self.client_module.self_attn.k_proj.weight
kb = self.client_module.self_attn.k_proj.bias
vw = self.client_module.self_attn.v_proj.weight
vb = self.client_module.self_attn.v_proj.bias
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)
return qkvw, \
qkvb, \
self.client_module.self_attn.out_proj.weight, \
self.client_module.self_attn.out_proj.bias
def mlp(self, enable_training=False):
return self.client_module.fc1.weight, \
self.client_module.fc1.bias, \
self.client_module.fc2.weight, \
self.client_module.fc2.bias
def layernorm(self):
return self.client_module.final_layer_norm.weight, \
self.client_module.final_layer_norm.bias, \
self.client_module.self_attn_layer_norm.weight, \
self.client_module.self_attn_layer_norm.bias
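# A minimal sketch of the activation selection done in HFOPTLayerPolicy.__init__ above: the HF
# config's activation_function string is mapped onto the fused MLP activation type, and unknown
# strings raise. Written out as a standalone helper for clarity.
def _sketch_select_opt_activation(activation_function: str):
    if activation_function == "relu":
        return ActivationFuncType.ReLU
    if activation_function in ["gelu", "gelu_new"]:
        return ActivationFuncType.GELU
    raise ValueError("Unsupported activation function: {}".format(activation_function))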
# File: DeepSpeed-master/deepspeed/module_inject/containers/megatron_gpt.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features.megatron import MegatronContainer
from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference
import torch
from ..policy import TransformerPolicy
from packaging import version as pkg_version
class DS_MegatronGPTContainer(MegatronContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedMegatronGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
if self.megatron_v2:
self.module.config.rotate_half = True
self.module.config.rotate_every_two = False
return self.module
# TODO: Megatron GPT MoE inherits from Megatron policy and replaces mlp
# TODO: Generalize MoE overall goal, expand beyond Megatron
class MegatronLayerPolicy(TransformerPolicy):
_orig_layer_class = None
version = 0
moe_type = 'standard'
megatron_v2 = True
use_mup = False
def __init__(self, client_module, inference=True):
super().__init__(inference, megatron_v2=MegatronLayerPolicy.megatron_v2, use_mup=MegatronLayerPolicy.use_mup)
self.client_module = client_module
# we use megatron version to differentiate between the old and new
# megatron-lm source code
if MegatronLayerPolicy._orig_layer_class is None:
if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"):
MegatronLayerPolicy._orig_layer_class = None
else:
try:
from megatron.model.transformer import ParallelTransformerLayer
MegatronLayerPolicy._orig_layer_class = ParallelTransformerLayer
except ImportError:
MegatronLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attention.query_key_value.weight.shape[1], \
self.client_module.attention.num_attention_heads, \
self.client_module.input_layernorm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
if self.inference:
if MegatronLayerPolicy.version == 0:
attention = self.client_module.attention
else:
attention = self.client_module.self_attention
return attention.query_key_value.weight, \
attention.query_key_value.bias, \
attention.dense.weight, \
attention.dense.bias
def mlp(self, moe_type='standard', enable_training=False):
from deepspeed.moe.utils import has_moe_layers
moe, _ = has_moe_layers(self.client_module)
if moe:
moe_experts = self.client_module.mlp.deepspeed_moe.experts.deepspeed_experts if moe_type == 'standard' else \
self.client_module.mlp.moe.deepspeed_moe.experts.deepspeed_experts
num_experts = len(moe_experts)
if moe_type == 'standard':
return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \
[moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)]
else:
return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \
[moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)], \
self.client_module.mlp.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.mlp.dense_4h_to_h.bias, \
self.client_module.mlp.coefficient.weight
else:
return self.client_module.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.dense_4h_to_h.bias
def layernorm(self):
return self.client_module.post_attention_layernorm.weight, \
self.client_module.post_attention_layernorm.bias, \
self.client_module.input_layernorm.weight, \
self.client_module.input_layernorm.bias
# File: DeepSpeed-master/deepspeed/module_inject/containers/gptneox.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features.meta_tensor import MetaTensorContainer
from .features.hybrid_megatron import HybridMegatronContainer
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from packaging import version as pkg_version
from ..policy import maybe_get_lora
class DS_GPTNEOXContainer(MetaTensorContainer, HybridMegatronContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
if self.megatron_v2:
self.module.config.rotate_half = True
self.module.config.rotate_every_two = False
return self.module
def get_lora_matched_pair(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
fc1_lora, fc2_lora, qkv_lora, out_lora = self.get_lora_params()
ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (qkv_lora, self.qkvw), (out_lora, self.dense_w)]
return ret
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
if GPTNEOXLayerPolicy.version == 0:
attention = self.policy.client_module.attention
else:
attention = self.policy.client_module.self_attention
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.mlp.dense_h_to_4h, self.policy.client_module.mlp.dense_4h_to_h,
attention.query_key_value, attention.dense
]
]
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'attention.query_key_value.weight', \
'attention.query_key_value.bias', \
'attention.dense.weight', \
'attention.dense.bias', \
'mlp.dense_h_to_4h.weight', \
'mlp.dense_h_to_4h.bias', \
'mlp.dense_4h_to_h.weight', \
'mlp.dense_4h_to_h.bias', \
'post_attention_layernorm.weight', \
'post_attention_layernorm.bias', \
'input_layernorm.weight', \
'input_layernorm.bias'
)
for i in range(0, 2):
maybe_copy(module.attention,
sd,
weight_quantizer,
mp_replace,
transformer_param_names[i],
prefix + param_names[i],
qkv=True,
megatron_v2=self.policy.is_megatron_v2,
split_qkv=self.policy.split_qkv,
heads=self.policy.client_module.attention.num_attention_heads)
for i in range(2, 4):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i],
prefix + param_names[i])
for i in range(4, 10):
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i],
prefix + param_names[i])
for i in range(10, 12):
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i], prefix + param_names[i])
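    # Index bookkeeping for load_params() above: indices 0-1 are the fused QKV weight/bias
    # (qkv=True handles the Megatron-style head ordering), 2-3 the attention output
    # projection, 4-7 the MLP projections, 8-9 the post-attention layernorm (routed to
    # module.mlp), and 10-11 the input layernorm (routed to the module itself); each name
    # is matched positionally against transformer_param_names.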
class GPTNEOXLayerPolicy(TransformerPolicy):
_orig_layer_class = None
version = 0
def __init__(self, client_module, inference=True, megatron_v2=True, split_qkv=False):
super().__init__(inference, megatron_v2=megatron_v2, split_qkv=split_qkv)
self.client_module = client_module
if GPTNEOXLayerPolicy._orig_layer_class is None:
if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"):
GPTNEOXLayerPolicy._orig_layer_class = None
else:
try:
from transformers import GPTNeoXLayer
GPTNEOXLayerPolicy._orig_layer_class = GPTNeoXLayer
except ImportError:
GPTNEOXLayerPolicy._orig_layer_class = None
    def get_hidden_heads(self):
        if GPTNEOXLayerPolicy.version == 0:
            attention = self.client_module.attention
        else:
            attention = self.client_module.self_attention
        return attention.hidden_size, \
               attention.num_attention_heads, \
               self.client_module.input_layernorm.eps, \
               DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
if GPTNEOXLayerPolicy.version == 0:
attention = self.client_module.attention
else:
attention = self.client_module.self_attention
return attention.query_key_value.weight, \
attention.query_key_value.bias, \
attention.dense.weight, \
attention.dense.bias
def mlp(self, enable_training=False):
return self.client_module.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.dense_4h_to_h.bias
def layernorm(self):
return self.client_module.post_attention_layernorm.weight, \
self.client_module.post_attention_layernorm.bias, \
self.client_module.input_layernorm.weight, \
self.client_module.input_layernorm.bias
| 5,897 | 39.122449 | 117 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/gptneo.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features.meta_tensor import MetaTensorContainer
from .features.split_qkv import HybridSplitQKVContainer
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from ..policy import maybe_copy_qkv
from ..policy import maybe_get_lora
class DS_GPTNEOContainer(MetaTensorContainer, HybridSplitQKVContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.mlp.c_fc, self.policy.client_module.mlp.c_proj,
self.policy.client_module.attn.attention.q_proj, self.policy.client_module.attn.attention.k_proj,
self.policy.client_module.attn.attention.v_proj, self.policy.client_module.attn.attention.out_proj
]
]
def set_q_k_v(self):
"""
Necessary to implement for `HybridSplitQKVContainer`
"""
self.qw = self.policy.client_module.attn.attention.q_proj.weight
self.qb = None
self.kw = self.policy.client_module.attn.attention.k_proj.weight
self.kb = None
self.vw = self.policy.client_module.attn.attention.v_proj.weight
self.vb = None
def get_lora_matched_pair(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
fc1_lora, fc2_lora, q_lora, k_lora, v_lora, out_lora = self.get_lora_params()
ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (out_lora, self.dense_w), (q_lora, self.qw),
(k_lora, self.kw), (v_lora, self.vw)]
return ret
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'attn.attention.q_proj.weight', \
'attn.attention.k_proj.weight', \
'attn.attention.v_proj.weight', \
'attn.attention.out_proj.weight', \
'attn.attention.out_proj.bias', \
'mlp.c_fc.weight', \
'mlp.c_fc.bias', \
'mlp.c_proj.weight', \
'mlp.c_proj.bias', \
'ln_2.weight', \
'ln_2.bias', \
'ln_1.weight', \
'ln_1.bias'
)
maybe_copy_qkv(module.attention,
sd,
weight_quantizer,
mp_replace,
'attn_qkvw', [prefix + param_names[0], prefix + param_names[1], prefix + param_names[2]],
split_qkv=self.policy.split_qkv)
for i in range(3, 5):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
for i in range(5, 11):
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
for i in range(11, 13):
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
class HFGPTNEOLayerPolicy(TransformerPolicy):
def __init__(self, client_module, inference=True):
super().__init__(inference, scale_attention=False)
self.client_module = client_module
try:
import transformers
HFGPTNEOLayerPolicy._orig_layer_class = transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoBlock
except:
HFGPTNEOLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attn.attention.embed_dim, \
self.client_module.attn.attention.num_heads, \
self.client_module.ln_1.eps, \
DEFAULT_INTERMEDIATE_SIZE
def get_q_k_v(self):
return self.client_module.attn.attention.q_proj.weight, \
None, \
self.client_module.attn.attention.k_proj.weight, \
None, \
self.client_module.attn.attention.v_proj.weight, \
None
def attention(self, enable_training=False):
qw = self.client_module.attn.attention.q_proj.weight
kw = self.client_module.attn.attention.k_proj.weight
vw = self.client_module.attn.attention.v_proj.weight
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
return qkvw, \
None, \
self.client_module.attn.attention.out_proj.weight, \
self.client_module.attn.attention.out_proj.bias
def mlp(self, enable_training=False):
return self.client_module.mlp.c_fc.weight, \
self.client_module.mlp.c_fc.bias, \
self.client_module.mlp.c_proj.weight, \
self.client_module.mlp.c_proj.bias
def layernorm(self):
return self.client_module.ln_2.weight, \
self.client_module.ln_2.bias, \
self.client_module.ln_1.weight, \
self.client_module.ln_1.bias
| 5,790 | 38.664384 | 114 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/unet.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from torch.nn.parameter import Parameter
from ..policy import DSPolicy
from ...model_implementations.diffusers.unet import DSUNet
class UNetPolicy(DSPolicy):
def __init__(self):
super().__init__()
try:
import diffusers
self._orig_layer_class = diffusers.models.unet_2d_condition.UNet2DConditionModel
except ImportError:
self._orig_layer_class = None
def match(self, module):
return isinstance(module, self._orig_layer_class)
def match_replaced(self, module):
return isinstance(module, DSUNet)
def apply(self, module, enable_cuda_graph=True):
# TODO(cmikeh2): Enable cuda graph should be an inference configuration
return DSUNet(module, enable_cuda_graph=enable_cuda_graph)
def attention(self, client_module):
qw = client_module.to_q.weight
kw = client_module.to_k.weight
vw = client_module.to_v.weight
if qw.shape[1] == kw.shape[1]:
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=False)
return qkvw, \
client_module.to_out[0].weight, \
client_module.to_out[0].bias, \
qw.shape[-1], \
client_module.heads
else:
#return None
#kvw = Parameter(torch.cat((kw, vw), dim=0), requires_grad=False)
return qw, \
kw, vw, \
client_module.to_out[0].weight, \
client_module.to_out[0].bias, \
qw.shape[-1], \
client_module.heads
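    # Descriptive note: the shape check above distinguishes self-attention, where the
    # query/key/value projections share an input width and can be fused into a single
    # qkvw, from cross-attention, where the key/value projections consume the conditioning
    # (typically text-encoder) hidden states and have a different input width, so the
    # weights are returned unfused.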
| 1,732 | 30.509091 | 92 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/gptj.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features.meta_tensor import MetaTensorContainer
from .features.split_qkv import HybridSplitQKVContainer
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from ..policy import maybe_copy_qkv
from ..policy import maybe_get_lora
class DS_GPTJContainer(MetaTensorContainer, HybridSplitQKVContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.mlp.fc_in, self.policy.client_module.mlp.fc_out,
self.policy.client_module.attn.q_proj, self.policy.client_module.attn.k_proj,
self.policy.client_module.attn.v_proj, self.policy.client_module.attn.out_proj
]
]
def get_lora_matched_pair(self):
fc1_lora, fc2_lora, q_lora, k_lora, v_lora, out_lora = self.get_lora_params()
ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (out_lora, self.dense_w), (q_lora, self.qw),
(k_lora, self.kw), (v_lora, self.vw)]
return ret
def set_q_k_v(self):
"""
Necessary to implement for `HybridSplitQKVContainer`
"""
self.qw = self.policy.client_module.attn.q_proj.weight
self.qb = None
self.kw = self.policy.client_module.attn.k_proj.weight
self.kb = None
self.vw = self.policy.client_module.attn.v_proj.weight
self.vb = None
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'attn.q_proj.weight', \
'attn.k_proj.weight', \
'attn.v_proj.weight', \
'attn.out_proj.weight', \
'mlp.fc_in.weight', \
'mlp.fc_in.bias', \
'mlp.fc_out.weight', \
'mlp.fc_out.bias', \
'ln_1.weight', \
'ln_1.bias'
)
maybe_copy_qkv(module.attention,
sd,
weight_quantizer,
mp_replace,
'attn_qkvw', [prefix + param_names[0], prefix + param_names[1], prefix + param_names[2]],
split_qkv=self.policy.split_qkv)
for i in range(3, 4):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
for i in range(4, 8):
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i],
prefix + param_names[i])
for i in range(8, 10):
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i + 2],
prefix + param_names[i])
class HFGPTJLayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self, client_module, inference=True):
super().__init__(inference, scale_attention=True)
self.client_module = client_module
try:
import transformers
HFGPTJLayerPolicy._orig_layer_class = transformers.models.gptj.modeling_gptj.GPTJBlock
except:
HFGPTJLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attn.embed_dim, \
self.client_module.attn.num_attention_heads, \
self.client_module.ln_1.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.attn.q_proj.weight
kw = self.client_module.attn.k_proj.weight
vw = self.client_module.attn.v_proj.weight
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
return qkvw, \
None, \
self.client_module.attn.out_proj.weight, \
None,
def mlp(self, enable_training=False):
return self.client_module.mlp.fc_in.weight, \
self.client_module.mlp.fc_in.bias, \
self.client_module.mlp.fc_out.weight, \
self.client_module.mlp.fc_out.bias
def layernorm(self):
return None, \
None, \
self.client_module.ln_1.weight, \
self.client_module.ln_1.bias
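    # Descriptive note: GPT-J applies a single pre-attention LayerNorm (ln_1) and runs
    # attention and MLP in parallel, so there is no separate post-attention layernorm;
    # the first two entries returned by layernorm() are therefore None, as is the
    # attention output-projection bias above.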
| 5,074 | 37.157895 | 112 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/megatron_gpt_moe.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .base_moe import *
from .features.megatron import MegatronContainer
from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference
import torch
from .megatron_gpt import MegatronLayerPolicy
from packaging import version as pkg_version
class DS_MegatronGPTMoEContainer(MegatronContainer, BaseTransformerMoEContainer):
def __init__(self, policy, config, model_config, layer_id):
super().__init__(policy, config, model_config, layer_id)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedMegatronGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
if self.megatron_v2:
self.module.config.rotate_half = True
self.module.config.rotate_every_two = False
return self.module
# TODO: Megatron GPT MoE inherits from Megatron policy and replaces mlp
# TODO: Generalize MoE overall goal, expand beyond Megatron
class MegatronMoELayerPolicy(MegatronLayerPolicy):
_orig_layer_class = None
version = 0
moe_type = 'standard'
num_experts = 1
def __init__(self, client_module, inference=True):
super().__init__(inference)
self.client_module = client_module
# we use megatron version to differentiate between the old and new
# megatron-lm source code
if MegatronMoELayerPolicy._orig_layer_class is None:
if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"):
MegatronMoELayerPolicy._orig_layer_class = None
else:
try:
from megatron.model.transformer import ParallelTransformerLayer
MegatronMoELayerPolicy._orig_layer_class = ParallelTransformerLayer
except ImportError:
MegatronMoELayerPolicy._orig_layer_class = None
def get_num_experts(self):
return self.num_experts
def mlp(self, moe_type='standard', enable_training=False):
# for now, all of this is tightly coupled to megatron-deepspeed moe implementation
# todo: think and refactor this to be more general
#from deepspeed.moe.utils import has_moe_layers
#moe, _ = has_moe_layers(self.client_module)
moe_experts = self.client_module.mlp.deepspeed_moe.experts.deepspeed_experts if moe_type == 'standard' else \
self.client_module.mlp.moe.deepspeed_moe.experts.deepspeed_experts
num_experts = len(moe_experts)
self.num_experts = num_experts
if moe_type == 'standard':
return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \
[moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)]
else:
return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \
[moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)], \
self.client_module.mlp.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.mlp.dense_4h_to_h.bias, \
self.client_module.mlp.coefficient.weight
| 3,936 | 44.252874 | 117 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/gpt2.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
from ..policy import TransformerPolicy
class DS_GPT2Container(BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
class HFGPT2LayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self, client_module, inference=True):
# HuggingFace GPT2 uses convolutional layer instead of linear layer
super().__init__(inference, linear_layer=False)
self.client_module = client_module
try:
import transformers
HFGPT2LayerPolicy._orig_layer_class = transformers.models.gpt2.modeling_gpt2.GPT2Block
except:
HFGPT2LayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attn.embed_dim, \
self.client_module.attn.num_heads, \
self.client_module.ln_1.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
return self.client_module.attn.c_attn.weight, \
self.client_module.attn.c_attn.bias, \
self.client_module.attn.c_proj.weight, \
self.client_module.attn.c_proj.bias
def mlp(self, enable_training=False):
return self.client_module.mlp.c_fc.weight, \
self.client_module.mlp.c_fc.bias, \
self.client_module.mlp.c_proj.weight, \
self.client_module.mlp.c_proj.bias
def layernorm(self):
return self.client_module.ln_2.weight, \
self.client_module.ln_2.bias, \
self.client_module.ln_1.weight, \
self.client_module.ln_1.bias
| 2,221 | 35.42623 | 98 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .bert import DS_BERTContainer, HFBertLayerPolicy
from .bloom import DS_BloomContainer, BLOOMLayerPolicy, supported_models
from .distil_bert import DS_DistilBERTContainer, HFDistilBertLayerPolicy
from .gpt2 import DS_GPT2Container, HFGPT2LayerPolicy
from .gptj import DS_GPTJContainer, HFGPTJLayerPolicy
from .gptneo import DS_GPTNEOContainer, HFGPTNEOLayerPolicy
from .gptneox import DS_GPTNEOXContainer, GPTNEOXLayerPolicy
from .llama import DS_LLAMAContainer, LLAMALayerPolicy
from .megatron_gpt import DS_MegatronGPTContainer, MegatronLayerPolicy
from .megatron_gpt_moe import DS_MegatronGPTMoEContainer, MegatronMoELayerPolicy
from .opt import DS_OPTContainer, HFOPTLayerPolicy
from .clip import DS_CLIPContainer, HFCLIPLayerPolicy
from .unet import UNetPolicy
from .vae import VAEPolicy
| 893 | 43.7 | 80 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/base_moe.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Create a container object to save model-specific tensors using the policy file above.
from .base import *
from deepspeed import comm as dist
import deepspeed.ops.transformer as transformer_inference
from deepspeed.accelerator import get_accelerator
class BaseTransformerMoEContainer(BaseTransformerContainer):
def __init__(self, **kwargs):
# Call the init function of the parent class to initialize the tensors and configs from parent class
super().__init__(**kwargs)
self.num_experts = self.policy.get_num_experts()
self.ep_world_size = dist.get_world_size()
self.local_ep_size = 1 if self.num_experts < self.ep_world_size else self.num_experts // self.ep_world_size
        self.layer_norm_eps = self.config.layer_norm_eps if hasattr(self.config, 'layer_norm_eps') else 1e-12
# MoE models will have a list of mlp related tensors
self._h4h_w = []
self._h4h_b = []
self._4hh_w = []
self._4hh_b = []
# Residual MoE needs extra parameters
self._res_h4h_w = None
self._res_h4h_b = None
self._res_4hh_w = None
self._res_4hh_b = None
self._res_coef = None
def create_ds_model_config(self):
self.set_hidden_heads(*self.policy.get_hidden_heads())
        assert self.num_attention_heads % self.mp_size == 0,\
                "To run the model parallel across the GPUs, the number of attention heads must be divisible by the world size!" +\
                " This is because the attention computation is partitioned evenly among the parallel GPUs."
self.ds_model_config = transformer_inference.DeepSpeedMoEInferenceConfig(
hidden_size=self.hidden_size,
heads=self.num_attention_heads,
layer_norm_eps=self.layer_norm_eps,
fp16=self.fp16,
pre_layer_norm=self.pre_layer_norm,
mp_size=self.mp_size,
q_int8=self.quantize,
moe_experts=self.local_ep_size,
global_experts=self.num_experts,
mlp_type=self.config.moe.type,
scale_attn_by_inverse_layer_idx=self.scale_attn_by_inverse_layer_idx,
)
return self.ds_model_config
def initialize_tensors(self):
# Set the tensors from policy (user module) to container (DS module)
self.set_attention(*self.policy.attention())
self.set_mlp(self.config.moe.type)
self.set_layernorm(*self.policy.layernorm())
def set_mlp(self, config_moe_type):
if config_moe_type == 'standard':
self._h4h_w, self._h4h_b, \
self._4hh_w, self._4hh_b = self.policy.mlp()
else:
self._h4h_w, self._h4h_b, self._4hh_w, \
self._4hh_b, self._res_h4h_w, self._res_h4h_b, \
self._res_4hh_w, self._res_4hh_b, \
self._res_coef = self.policy.mlp(config_moe_type)
def transpose(self):
self.transpose_attention()
self.transpose_mlp()
if self.config.moe.type == 'residual':
self.transpose_residual()
def transpose_mlp(self):
self._h4h_w = [self.transpose_impl(moe_w1.data) for moe_w1 in self._h4h_w]
self._4hh_w = [self.transpose_impl(moe_w1.data) for moe_w1 in self._4hh_w]
def transpose_residual(self):
self._res_h4h_w.data = self.transpose_impl(self._res_h4h_w.data)
self._res_4hh_w.data = self.transpose_impl(self._res_4hh_w.data)
self._res_coef.data = self.transpose_impl(self._res_coef.data)
def apply_tensor_parallelism(self, mp_replace):
# setup the new Attention module
self.attention_qkv_mp(mp_replace)
self.attention_o_mp(mp_replace)
# quantize attention weights
self.attention_quantization()
# setup the new MLP module
self.mlp_mp()
def mlp_mp(self):
gpu_index = dist.get_rank()
for ep_index in range(self.local_ep_size):
# mlp inter
self.module.mlp[ep_index].inter_w.data = self._h4h_w[gpu_index * self.local_ep_size + ep_index].to(
get_accelerator().current_device_name())
self.module.mlp[ep_index].inter_b.data = self._h4h_b[gpu_index * self.local_ep_size + ep_index].to(
get_accelerator().current_device_name())
# mlp output
self.module.mlp[ep_index].output_w.data = self._4hh_w[gpu_index * self.local_ep_size + ep_index].to(
get_accelerator().current_device_name())
self.module.mlp[ep_index].output_b.data = self._4hh_b[gpu_index * self.local_ep_size + ep_index].to(
get_accelerator().current_device_name())
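    # Expert placement sketch (assuming num_experts >= ep_world_size): experts are laid
    # out contiguously across ranks, so rank r owns expert indices
    # [r * local_ep_size, (r + 1) * local_ep_size). For example, with 8 experts on 4
    # ranks, local_ep_size == 2 and rank 1 copies experts 2 and 3 into its local
    # self.module.mlp slots above.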
def copy_data_to_new_module(self):
self.module.attn_nw.data = self.attn_nw.to(get_accelerator().current_device_name())
self.module.attn_nb.data = self.attn_nb.to(get_accelerator().current_device_name())
self.module.norm_w.data.copy_(self.input_nw.to(get_accelerator().current_device_name()))
self.module.norm_b.data.copy_(self.input_nb.to(get_accelerator().current_device_name()))
if self.config.moe.type == 'residual':
self.module.res_mlp.inter_w.data = self._res_h4h_w.to(get_accelerator().current_device_name())
self.module.res_mlp.inter_b.data = self._res_h4h_b.to(get_accelerator().current_device_name())
self.module.res_mlp.output_w.data = self._res_4hh_w.to(get_accelerator().current_device_name())
self.module.res_mlp.output_b.data = self._res_4hh_b.to(get_accelerator().current_device_name())
self.module.res_coef.data = self._res_coef.to(get_accelerator().current_device_name())
| 5,756 | 42.946565 | 126 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/bloom.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features.meta_tensor import MetaTensorContainer
from .features.hybrid_engine import HybridEngineContainer
from deepspeed.model_implementations.transformers.ds_bloom import DeepSpeedBloomInference
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from ..policy import maybe_get_lora
supported_models = {None}
class DS_BloomContainer(MetaTensorContainer, HybridEngineContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
self.bigscience_bloom = True
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedBloomInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
def attention_qkv_mp(self, mp_replace, reversed_dim=False):
self.module.attention.attn_qkvw = mp_replace.copy(self.module.attention.attn_qkvw, self.qkvw)
self.module.attention.attn_qkvb = mp_replace.copy(self.module.attention.attn_qkvb, self.qkvb)
def get_lora_matched_pair(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
fc1_lora, fc2_lora, qkv_lora, out_lora = self.get_lora_params()
ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (qkv_lora, self.qkvw), (out_lora, self.dense_w)]
return ret
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.mlp.dense_h_to_4h, self.policy.client_module.mlp.dense_4h_to_h, self.policy.
client_module.self_attention.query_key_value, self.policy.client_module.self_attention.dense
]
]
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'self_attention.query_key_value.weight', \
'self_attention.query_key_value.bias', \
'self_attention.dense.weight', \
'self_attention.dense.bias', \
'mlp.dense_h_to_4h.weight', \
'mlp.dense_h_to_4h.bias', \
'mlp.dense_4h_to_h.weight', \
'mlp.dense_4h_to_h.bias', \
'post_attention_layernorm.weight', \
'post_attention_layernorm.bias', \
'input_layernorm.weight', \
'input_layernorm.bias'
)
for i in range(0, 2):
maybe_copy(module.attention,
sd,
weight_quantizer,
mp_replace,
transformer_param_names[i],
prefix + param_names[i],
qkv=True,
megatron_v2=self.policy.is_megatron_v2,
split_qkv=self.policy.split_qkv)
for i in range(2, 4):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i],
prefix + param_names[i])
for i in range(4, 10):
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i],
prefix + param_names[i])
for i in range(10, 12):
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i], prefix + param_names[i])
class BLOOMLayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self, client_module, inference=True, use_load_prefix=True, split_qkv=False):
super().__init__(inference, linear_layer=True, use_load_prefix=use_load_prefix, split_qkv=split_qkv)
self.client_module = client_module
try:
import transformers
BLOOMLayerPolicy._orig_layer_class = transformers.models.bloom.modeling_bloom.BloomBlock
global supported_models
supported_models.update({transformers.models.bloom.modeling_bloom.BloomModel})
except Exception as e:
print(f"WARNING! Setting BLOOMLayerPolicy._orig_layer_class to None due to Exception: {e}")
BLOOMLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.self_attention.hidden_size, \
self.client_module.self_attention.num_heads, \
self.client_module.input_layernorm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
return self.client_module.self_attention.query_key_value.weight, \
self.client_module.self_attention.query_key_value.bias, \
self.client_module.self_attention.dense.weight, \
self.client_module.self_attention.dense.bias,
def mlp(self, enable_training=False):
return self.client_module.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.dense_4h_to_h.bias
def layernorm(self):
return self.client_module.post_attention_layernorm.weight, \
self.client_module.post_attention_layernorm.bias, \
self.client_module.input_layernorm.weight, \
self.client_module.input_layernorm.bias
| 5,615 | 42.2 | 118 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/features/hybrid_megatron.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from .hybrid_engine import HybridEngineContainer
from .megatron import MegatronContainer
class HybridMegatronContainer(MegatronContainer, HybridEngineContainer):
def _align_qkv(self, x: torch.Tensor):
"""
Internal helper for accepting the head-contiguous weight matrix and chunking
the query, key, and value components.
"""
attention_head_size = x.shape[0] // self.num_attention_heads
new_x_shape = (self.num_attention_heads, attention_head_size) + x.size()[1:]
x_1 = x.view(*new_x_shape)
div_dim = len(x_1.size()) - 2 if len(x.shape) == 2 else -1
(q, k, v) = torch.split(x_1, (x_1.shape[div_dim] // 3), dim=div_dim)
if len(q.shape) > 2:
x.data.copy_(
torch.cat((q.reshape(-1, q.shape[-1]), k.reshape(-1, q.shape[-1]), v.reshape(-1, q.shape[-1])),
dim=0).reshape(x.shape))
else:
x.data.copy_(torch.cat((q.reshape(-1), k.reshape(-1), v.reshape(-1)), dim=-1).reshape(x.shape))
def transform_for_inference(self) -> None:
"""
Overrides the HybridEngineContainer implementation.
        The Megatron training layout of the fused QKV matrix keeps each head's Q, K, and V
        rows sequential in memory, whereas the inference path expects all of the Q rows,
        followed by all of the K rows, and then all of the V rows. Here, we take the
        head-contiguous training layout and rewrite it in place into the inference layout.
"""
if hasattr(self.qkvw, 'ds_id'):
from deepspeed.runtime.zero import GatheredParameters
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
param_list = [self.qkvw, self.qkvb]
non_active_params = [param for param in param_list if (hasattr(param, 'ds_id') and \
param.ds_status == ZeroParamStatus.NOT_AVAILABLE)]
with GatheredParameters(non_active_params):
self._align_qkv(self.qkvw)
self._align_qkv(self.qkvb)
else:
self._align_qkv(self.qkvw)
self._align_qkv(self.qkvb)
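    # Layout sketch (hypothetical 2-head example, each block being head_dim rows):
    #   training / head-contiguous layout:  [q0 k0 v0 q1 k1 v1]
    #   inference layout after _align_qkv:  [q0 q1 k0 k1 v0 v1]
    # _partition_qkv below applies the inverse permutation when switching back to training.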
def _partition_qkv(self, x: torch.Tensor):
"""
Internal helper for taking contiguous QKV and partitioning it for contiguous
heads.
"""
q_k_v = torch.split(x, (x.shape[0] // 3), dim=0)
attention_head_size = q_k_v[0].shape[0] // self.num_attention_heads
new_x_shape = (self.num_attention_heads, attention_head_size) + x.size()[1:]
q, k, v = [data.view(*new_x_shape) for data in q_k_v]
if len(q.shape) > 2:
x.data.copy_(torch.cat((q, k, v), dim=-2).reshape(-1, q.shape[-1]))
else:
x.data.copy_(torch.cat((q, k, v), dim=-1).reshape(-1))
def transform_for_training(self):
"""
Overrides the HybridEngineContainer implementation.
        The Megatron training layout of the fused QKV matrix keeps each head's Q, K, and V
        rows sequential in memory, whereas the inference layout stores all of the Q rows,
        followed by all of the K rows, and then all of the V rows. This function takes the
        inference format and reverts it back to the head-contiguous training format.
"""
# If parameter is distributed, handle gathering it
if hasattr(self.qkvw, 'ds_id'):
from deepspeed.runtime.zero import GatheredParameters
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
param_list = [self.qkvw, self.qkvb]
non_active_params = [param for param in param_list if (hasattr(param, 'ds_id') and \
param.ds_status == ZeroParamStatus.NOT_AVAILABLE)]
with GatheredParameters(non_active_params):
self._partition_qkv(self.qkvw)
self._partition_qkv(self.qkvb)
else:
self._partition_qkv(self.qkvw)
self._partition_qkv(self.qkvb)
| 4,129 | 45.931818 | 158 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/features/meta_tensor.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import ABC, abstractmethod
from packaging import version as pkg_version
import torch
class MetaTensorContainer(ABC):
"""
NOTE: If you are using this feature with a container that
also inherits from `HybridEngineContainer`, ensure that `MetaTensorContainer`
is inherited before `HybridEngineContainer` in the class definition.
"""
def __init__(self, **kwargs):
if pkg_version.parse('1.10') > pkg_version.parse(torch.__version__):
raise NotImplementedError("Meta tensor support is not available, please upgrade to torch 1.10+")
super().__init__(**kwargs)
self.is_meta = False
self.ckpt_load_enabled = True
def initialize_tensors(self, enable_training=False):
super().initialize_tensors(enable_training=enable_training)
self.is_meta = self.qkvw.is_meta
def apply_tensor_parallelism(self, mp_replace, **kwargs):
if self.is_meta:
if self.qkvb is None:
self.module.attention.attn_qkvb = None
if self.dense_b is None:
self.module.attention.attn_ob = None
else:
super().apply_tensor_parallelism(mp_replace, **kwargs)
def copy_data_to_new_module(self):
if self.is_meta:
if self.attn_nw is None:
self.module.mlp.attn_nw = self.attn_nw
self.module.mlp.attn_nb = self.attn_nb
else:
super().copy_data_to_new_module()
def transpose(self):
if not self.is_meta:
super().transpose()
@abstractmethod
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
"""
        Load all the transformer parameters from the checkpoint file (sd).
        In addition to the parameter names, we require two
        more parameters to help read the data correctly
        from the checkpoint and split the qkv heads in the
        right order:
1. `use_load_prefix` (Default: False): this specifies
whether we need to use the name of first abstraction
layer of the model for searching the parameter's name
in a checkpoint file. For more information of how this
is used please see
https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/load_checkpoint.py
2. `split_qkv` (Default: True): we use this flag when splitting
            the qkv parameter into heads. If it is False, it means the heads
            of q, k, and v are stored together and need to be split in the
            DeepSpeed-Inference API.
"""
raise NotImplementedError("A load_params() function must be defined in the model container \
when inheriting the MetaTensorContainer feature")
| 2,930 | 40.28169 | 109 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/features/split_qkv.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import abstractmethod
import torch
from .hybrid_engine import HybridEngineContainer
class HybridSplitQKVContainer(HybridEngineContainer):
def set_attention(self, qkvw, qkvb, dense_w, dense_b):
super().set_attention(qkvw, qkvb, dense_w, dense_b)
self.set_q_k_v()
@abstractmethod
def set_q_k_v(self):
"""
In `set_q_k_v`, it is necessary to populate the following variables (where appropriate)
for the given model:
self.qw: q weight
self.qb: q bias
self.kw: k weight
self.kb: k bias
self.vw: v weight
self.vb: v bias
"""
raise NotImplementedError("A set_q_k_v() function must be defined in the model container \
in order to set the unfused q, k, and v tensors.")
def attention_qkv_mp(self, mp_replace, reversed_dim=False):
        # Only need to alter behavior if we can't do the normal destructive copy
if self.module.attention.attn_qkvw is None:
params = [
(self.module.attention.attn_qw, self.qw),
(self.module.attention.attn_qb, self.qb),
(self.module.attention.attn_kw, self.kw),
(self.module.attention.attn_kb, self.kb),
(self.module.attention.attn_vw, self.vw),
(self.module.attention.attn_vb, self.vb),
]
for dst, src in params:
dst = mp_replace.copy(
dst[:self.qw.shape[0] // mp_replace.mp_size], src, int8=reversed_dim,
allocate_tensor=reversed_dim) if src is not None else None
else:
super().attention_qkv_mp(mp_replace)
def release_qkv(self):
super().release_qkv()
split_qkv_params = [
(self.module.attention.attn_qw, self.qw),
(self.module.attention.attn_qb, self.qb),
(self.module.attention.attn_kw, self.kw),
(self.module.attention.attn_kb, self.kb),
(self.module.attention.attn_vw, self.vw),
(self.module.attention.attn_vb, self.vb),
]
self._release_params(split_qkv_params)
def reset_qkv(self):
self.qkvw.data[:self.qw.shape[0]] = self.qw.data
self.qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kw.data
self.qkvw.data[2 * self.qw.shape[0]:] = self.vw.data
qkv_data = [self.qw.data, self.kw.data, self.vw.data]
self.qw.data = self.qkvw.data[:self.qw.shape[0]]
self.kw.data = self.qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]]
self.vw.data = self.qkvw.data[2 * self.qw.shape[0]:]
if self.qkvb is not None:
self.qkvb.data[:self.qw.shape[0]] = self.qb.data
self.qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kb.data
self.qkvb.data[2 * self.qw.shape[0]:] = self.vb.data
qkv_data.extend([self.qb.data, self.kb.data, self.vb.data])
self.qb.data = self.qkvb.data[:self.qw.shape[0]]
self.kb.data = self.qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]]
self.vb.data = self.qkvb.data[2 * self.qw.shape[0]:]
for data in qkv_data:
del data
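    # Descriptive note: reset_qkv() copies the (possibly updated) q/k/v training views back
    # into the fused qkvw/qkvb buffers and then re-points qw/kw/vw (and the biases) at
    # slices of those buffers, so the training parameters and the fused inference view
    # share the same storage afterwards.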
def reset_qkv_experimental(self):
"""
WIP - experimental and likely to be changed/improved.
        Unused but kept for now.
"""
if self.module.attention.attn_qkvw is None:
self.module.attention.attn_qkvw = torch.empty(self.qw.shape[0] * 3,
self.qw.shape[0],
dtype=self.qw.dtype,
device=self.qw.device)
self.module.attention.attn_qkvb = torch.empty(self.qw.shape[0] * 3,
dtype=self.qw.dtype,
device=self.qw.device)
self.module.attention.attn_qkvw.data[:self.qw.shape[0]] = self.qw.data
self.module.attention.attn_qkvb.data[:self.qw.shape[0]] = self.qb.data
self.module.attention.attn_qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kw.data
self.module.attention.attn_qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kb.data
self.module.attention.attn_qkvw.data[2 * self.qw.shape[0]:] = self.vw.data
self.module.attention.attn_qkvb.data[2 * self.qw.shape[0]:] = self.vb.data
qkv_data = [self.qw.data, \
self.qb.data, \
self.kw.data, \
self.kb.data, \
self.vw.data, \
self.vb.data]
self.qw.data = self.module.attention.attn_qkvw.data[:self.qw.shape[0]]
self.qb.data = self.module.attention.attn_qkvb.data[:self.qw.shape[0]]
self.kw.data = self.module.attention.attn_qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]]
self.kb.data = self.module.attention.attn_qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]]
self.vw.data = self.module.attention.attn_qkvw.data[2 * self.qw.shape[0]:]
self.vb.data = self.module.attention.attn_qkvb.data[2 * self.qw.shape[0]:]
for data in qkv_data:
del data
def set_attn_params_wo_copy(self, Z3_enabled=False):
self.module.attention.attn_ow = self.dense_w
self.module.attention.attn_ob = self.dense_b
if not Z3_enabled:
# In initialize_tensors, we create a fused qkvw with the appropriate shape
# and copy the qw, qb, kw, kb, vw, vb into it
self.module.attention.attn_qkvw = self.qkvw
self.module.attention.attn_qkvb = self.qkvb
# We reset the data for qw (which is the original model parameter) to point
# to the fused weight matrix we have created here
self.qw.data = self.qkvw[:self.qw.shape[0], :]
self.kw.data = self.qkvw[self.qw.shape[0]:2 * self.qw.shape[0], :]
self.vw.data = self.qkvw[self.qw.shape[0] * 2:, :]
# Assume if one of the biases is not None, then all of them are not None
if self.qb is not None:
self.qb.data = self.qkvb[:self.qw.shape[0]]
self.kb.data = self.qkvb[self.qw.shape[0]:2 * self.qw.shape[0]]
self.vb.data = self.qkvb[self.qw.shape[0] * 2:]
else:
# In ZeRO-3 this will be managed by ZeRO and handled separately in the
# forward of ds_attention
self.module.attention.attn_qw = self.qw
self.module.attention.attn_qb = self.qb
self.module.attention.attn_kw = self.kw
self.module.attention.attn_kb = self.kb
self.module.attention.attn_vw = self.vw
self.module.attention.attn_vb = self.vb
def get_attn_params(self):
params = super().get_attn_params()
params.extend([self.qw, self.qb, self.kw, self.kb, self.vw, self.vb])
return params
| 7,136 | 43.60625 | 98 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/features/megatron.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from abc import ABC
class MegatronContainer(ABC):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.megatron_v2 = self.policy.is_megatron_v2
def _align_qkv_transposed(self, x):
attention_head_size = x.shape[-1] // self.num_attention_heads
new_x_shape = x.size()[:-1] + (self.num_attention_heads, attention_head_size)
x_1 = x.view(*new_x_shape)
(q, k, v) = torch.split(x_1, (x_1.shape[-1] // 3), dim=(x_1.dim() - 1))
if len(q.shape) > 2:
return torch.cat((q.reshape(q.shape[0], -1), k.reshape(q.shape[0], -1), v.reshape(q.shape[0], -1)),
dim=-1).reshape(x.shape)
else:
return torch.cat((q.reshape(-1), k.reshape(-1), v.reshape(-1)), dim=-1).reshape(x.shape)
def transpose(self):
super().transpose()
if self.megatron_v2:
self.qkvw = torch.nn.parameter.Parameter(self._align_qkv_transposed(self.qkvw).contiguous())
self.qkvb = torch.nn.parameter.Parameter(self._align_qkv_transposed(self.qkvb).contiguous())
| 1,200 | 36.53125 | 111 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/features/gated_mlp.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import abstractmethod
from .hybrid_engine import HybridEngineContainer
class HybridGatedMLPContainer(HybridEngineContainer):
"""
The HybridGatedMLPContainer supports models for which the first MLP layer
is represented with two separate weights, one for the activation function
and one for the gating function.
"""
def set_mlp(self, _h4h_w, _h4h_b, _4hh_w, _4hh_b):
super().set_mlp(_h4h_w, _h4h_b, _4hh_w, _4hh_b)
self.set_mlp_gate()
@abstractmethod
def set_mlp_gate(self):
"""
In `set_mlp_gate`, it is necessary to populate the following variables (where appropriate)
for the given model:
self.inter_up_w: inter up weight
self.inter_up_b: inter up bias
self.inter_gate_w: inter gate weight
self.inter_gate_b: inter gate bias
If the parameter does not exist in the original model, set the attribute to None.
"""
raise NotImplementedError("A set_mlp_gate() function must be defined in the model container \
in order to set the unfused inter up and gate tensors.")
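    # Descriptive note: the "gated" first MLP layer means the up projection and the gate
    # projection are kept as separate weights in the client model (as in, e.g., LLaMA-style
    # gated activations) and are fused into _h4h_w/_h4h_b inside the container;
    # mlp_inter_mp() below therefore uses a strided copy with num_splits=2 when the fused
    # tensor is available.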
def mlp_inter_mp(self, mp_replace, reversed_dim=False):
# Only need to alter behavior if we can't do the normal destructive copy
if self.module.mlp.inter_w is None:
params = [
(self.module.mlp.inter_up_w, self.inter_up_w),
(self.module.mlp.inter_up_b, self.inter_up_b),
(self.module.mlp.inter_gate_w, self.inter_gate_w),
(self.module.mlp.inter_gate_b, self.inter_gate_b),
]
for dst, src in params:
dst = mp_replace.copy(dst[:self.inter_up_w.shape[0] // mp_replace.mp_size],
src,
int8=reversed_dim,
allocate_tensor=reversed_dim) if src is not None else None
else:
self.module.mlp.inter_w = mp_replace.strided_copy(self.module.mlp.inter_w,
self._h4h_w,
num_splits=2,
int8=reversed_dim)
self.module.mlp.inter_b = mp_replace.strided_copy(self.module.mlp.inter_b,
self._h4h_b,
num_splits=2,
int8=reversed_dim)
def release_mlp(self):
super().release_mlp()
gated_mlp_params = [
(self.module.mlp.inter_up_w, self.inter_up_w),
(self.module.mlp.inter_up_b, self.inter_up_b),
(self.module.mlp.inter_gate_w, self.inter_gate_w),
(self.module.mlp.inter_gate_b, self.inter_gate_b),
]
self._release_params(gated_mlp_params)
def reset_mlp(self):
self._h4h_w.data[:self.inter_up_w.shape[0]] = self.inter_up_w.data
self._h4h_w.data[self.inter_up_w.shape[0]:] = self.inter_gate_w.data
if self.inter_up_b is not None:
self._h4h_b.data[:self.inter_up_b.shape[0]] = self.inter_up_b.data
self._h4h_b.data[self.inter_up_b.shape[0]:] = self.inter_gate_b.data
inter_data = [self.inter_up_w.data, self.inter_gate_w.data]
if self.inter_up_b is not None:
inter_data.extend([self.inter_up_b.data, self.inter_gate_b.data])
self.inter_up_w.data = self._h4h_w.data[:self.inter_up_w.shape[0]]
self.inter_gate_w.data = self._h4h_w.data[self.inter_up_w.shape[0]:]
if self.inter_up_b is not None:
self.inter_up_b.data = self._h4h_b.data[:self.inter_up_b.shape[0]]
self.inter_gate_b.data = self._h4h_b.data[self.inter_up_b.shape[0]:]
for data in inter_data:
del data
def set_mlp_params_wo_copy(self, Z3_enabled=False):
self.module.mlp.output_w = self._4hh_w
self.module.mlp.output_b = self._4hh_b
if not Z3_enabled:
# In initialize_tensors, we create a fused inter projection with the appropriate shape
# and copy the up projection and gate projection into it
self.module.mlp.inter_w = self._h4h_w
self.module.mlp.inter_b = self._h4h_b
self.inter_up_w.data = self._h4h_w[:self.inter_up_w.shape[0], :]
self.inter_gate_w.data = self._h4h_w[self.inter_up_w.shape[0]:, :]
if self.inter_up_b is not None:
self.inter_up_b.data = self._h4h_b[:self.inter_up_w.shape[0]] if self._h4h_b is not None else None
self.inter_gate_b.data = self._h4h_b[self.inter_up_w.shape[0]:] if self._h4h_b is not None else None
else:
self.module.mlp.inter_up_w = self.inter_up_w
self.module.mlp.inter_up_b = self.inter_up_b
self.module.mlp.inter_gate_w = self.inter_gate_w
self.module.mlp.inter_gate_b = self.inter_gate_b
def get_mlp_params(self):
params = super().get_mlp_params()
params.extend([self.inter_up_w, self.inter_up_b, self.inter_gate_w, self.inter_gate_b])
return params
| 5,397 | 44.361345 | 116 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/features/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .gated_mlp import HybridGatedMLPContainer
from .megatron import MegatronContainer
from .meta_tensor import MetaTensorContainer
from .split_qkv import HybridSplitQKVContainer
| 275 | 26.6 | 46 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/module_inject/containers/features/hybrid_engine.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import ABC, abstractmethod
from typing import List, Tuple
import torch
class HybridEngineContainer(ABC):
"""
This container identifies which methods need to be overridden in addition to
the base container to enable use in the RLHF pipeline. These methods are not
necessary for inference alone.
NOTE: If you are using this feature with a container that
also inherits from `MetaTensorContainer`, ensure that `MetaTensorContainer`
is inherited before `HybridEngineContainer` in the class definition.
"""
def initialize_tensors(self, enable_training=False):
"""
        Same purpose as the base container, but also grabs the hooks for any LoRA
parameters. If it's necessary to override specific sub-components of the model,
it's best to augment the specific `set_[component]` itself rather than modifying
the `initialize_tensors` method. See the `HybridSplitQKVContainer` for an example.
"""
super().initialize_tensors(enable_training=enable_training)
self.set_lora_params()
def transform_for_training(self):
"""
If the views on certain parameters are largely incompatible, it may be necessary to do
more substantial transformations to the parameters. This method should be overridden to
transform the inference format to what is necessary for training.
"""
pass
def transform_for_inference(self):
"""
If the views on certain parameters are largely incompatible, it may be necessary to do
more substantial transformations to the parameters. This method should be overridden to
transform the training format to what is necessary for inference.
"""
pass
@abstractmethod
def set_lora_params(self):
"""
If available, set the LoRA parameters for the module. An implementation
for this would iterate over all parameters of the model and use the `maybe_get_lora` helper
method to check if the parameter does in fact have any LoRA params.
"""
raise NotImplementedError("A set_lora_params() function must be defined for the relevant parameters.")
@abstractmethod
def get_lora_matched_pair(self):
"""Get the pair of lora params and its matched model parameters."""
raise NotImplementedError("get_lora_matched_pair() must be defined for the relevant parameters.")
def fuse_lora(self):
"""Fuse the LoRA parameters for the inference mode."""
for maybe_lora_param, param in self.get_lora_matched_pair():
if len(maybe_lora_param) == 3:
lora_right_weight, \
lora_left_weight, \
lora_scaling = maybe_lora_param
param.data += lora_scaling * torch.matmul(lora_left_weight.t(), lora_right_weight.t())
def unfuse_lora(self):
"""Unfuse the LoRA parameters for the training mode."""
for maybe_lora_param, param in self.get_lora_matched_pair():
if len(maybe_lora_param) == 3:
lora_right_weight, \
lora_left_weight, \
lora_scaling = maybe_lora_param
param.data -= lora_scaling * torch.matmul(lora_left_weight.t(), lora_right_weight.t())
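    # LoRA sketch: with maybe_lora_param == (lora_right_weight, lora_left_weight, lora_scaling),
    # fuse_lora() applies  W += scaling * (lora_left_weight^T @ lora_right_weight^T)  so
    # inference runs on a single fused matrix, and unfuse_lora() subtracts the same term to
    # restore the separate base + LoRA parameterization for training.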
def apply_tensor_parallelism(self, mp_replace, reversed_dim=False):
"""
Add support for reversed dim in tensor parallelism. If necessary, override
the called methods to handle partitioned weights (i.e. if qkv is split, override
the `attention_qkv_mp` method). If the model component is not split, it should
be safe to use the default implementation.
"""
# Setup the new Attention module
self.attention_qkv_mp(mp_replace, reversed_dim=reversed_dim)
self.attention_o_mp(mp_replace, reversed_dim=reversed_dim)
# Setup the new MLP module
self.mlp_inter_mp(mp_replace, reversed_dim=reversed_dim)
self.mlp_output_mp(mp_replace, reversed_dim=reversed_dim)
# Apply weight quantization
# TODO(cmikeh2): Re-enable this once verified
#self.apply_weight_quantization()
def _release_params(self, param_pairs: List[Tuple[torch.Tensor, torch.Tensor]]):
"""
Helper for `release_[component]` methods. Accepts a list of tuples where the first
element is the module param that needs to be deleted, and the second is the reassignment
from the container.
"""
for module_param, container_param in param_pairs:
if module_param is not None:
del module_param
module_param = container_param
def release_memory(self):
"""
Delete module parameters if they exist and point them back to the container. The primary
purpose of this is for TP-inference with ZeRO-3. In this scenario, we need to delete the
parameters we've created for inference to free their memory.
"""
general_params = [
(self.module.attention.attn_ow, self.dense_w),
(self.module.attention.attn_ob, self.dense_b),
(self.module.mlp.attn_nw, self.attn_nw),
(self.module.mlp.attn_nb, self.attn_nb),
(self.module.norm_w, self.input_nw),
(self.module.norm_b, self.input_nb),
]
self._release_params(general_params)
self.release_qkv()
self.release_mlp()
def release_qkv(self):
"""
Release for QKV parameters (as well as any aliases).
"""
qkv_params = [
(self.module.attention.attn_qkvw, self.qkvw),
(self.module.attention.attn_qkvb, self.qkvb),
]
self._release_params(qkv_params)
def release_mlp(self):
"""
Release for MLP parameters (as well as any aliases).
"""
mlp_params = [
(self.module.mlp.inter_w, self._h4h_w),
(self.module.mlp.inter_b, self._h4h_b),
(self.module.mlp.output_w, self._4hh_w),
(self.module.mlp.output_b, self._4hh_b),
]
self._release_params(mlp_params)
def reset_params(self):
"""
        The purpose of reset_params is to get the weights from the FP16 training
        copy of the model and copy them into the contiguous inference view. This only needs
to be performed when the container parameters cannot be used directly for inference.
"""
self.reset_qkv()
self.reset_mlp()
def reset_qkv(self):
"""
Perform any necessary resets of the model parameters for the QKV components.
"""
pass
def reset_mlp(self):
"""
Perform any necessary resets of the model parameters for the MLP components.
"""
pass
def get_lora_params(self):
"""
Return a list of all parameters that would have LoRA for the module.
"""
if not hasattr(self, "lora_params"):
self.set_lora_params()
return self.lora_params
def set_params_wo_copy(self, Z3_enabled=False):
"""
Rather than copying into, set the parameters directly. This is necessary to provide
an inexpensive (low-memory-overhead) view onto the FP16 forward weights.
"""
self.module.mlp.attn_nw = self.attn_nw
self.module.mlp.attn_nb = self.attn_nb
self.module.norm_w = self.input_nw
self.module.norm_b = self.input_nb
self.set_attn_params_wo_copy(Z3_enabled=Z3_enabled)
self.set_mlp_params_wo_copy(Z3_enabled=Z3_enabled)
def set_attn_params_wo_copy(self, **kwargs):
"""
Narrower sub-method for finer grained overriding.
"""
self.module.attention.attn_ow = self.dense_w
self.module.attention.attn_ob = self.dense_b
self.module.attention.attn_qkvw = self.qkvw
self.module.attention.attn_qkvb = self.qkvb
def set_mlp_params_wo_copy(self, **kwargs):
"""
Narrower sub-method for finer grained overriding.
"""
self.module.mlp.inter_w = self._h4h_w
self.module.mlp.inter_b = self._h4h_b
self.module.mlp.output_w = self._4hh_w
self.module.mlp.output_b = self._4hh_b
| 8,374 | 38.319249 | 110 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/comm/ccl.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''
Copyright 2021 The Microsoft DeepSpeed Team
'''
import torch
from deepspeed.accelerator import get_accelerator
from .reduce_op import ReduceOp
from .torch import TorchBackend
def build_ccl_op():
builder = get_accelerator().create_op_builder("CCLCommBuilder")
if builder is None:
return None
ccl_cpp_module = builder.load()
print(f'DeepSpeed {builder.absolute_name()} built successfully')
return ccl_cpp_module
class CCLBackend(TorchBackend):
def __init__(self, name='ccl', rank=-1, world_size=-1, mpu=None, timeout=None, init_method=None):
self.ccl_comm_op = build_ccl_op()
if self.ccl_comm_op is None:
# set CCLBackend to uninitialized state if CCLCommBuilder cannot be loaded
self.initialized = False
return
super(CCLBackend, self).__init__(backend='ccl',
name='torch',
rank=rank,
world_size=world_size,
timeout=timeout,
init_method=init_method)
self.name = 'ccl'
size = self.get_world_size()
rank = self.get_rank()
main_kvs = self.ccl_comm_op.get_kvs_addr(rank)
main_kvs = torch.tensor(main_kvs).to(torch.uint8)
super(CCLBackend, self).broadcast(main_kvs, 0)
self.ccl_comm_op.initialize(size, rank, main_kvs)
self.initialized = True
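        # Descriptive note: every rank queries get_kvs_addr(), but the broadcast from rank 0
        # (done through the already-initialized torch backend via super().broadcast) makes
        # rank 0's key-value-store address authoritative; each rank then initializes its CCL
        # communicator from that shared address.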
def is_initialized(self):
return self.initialized
def broadcast(self, tensor, src, group=None, async_op=False):
self.ccl_comm_op.broadcast(tensor, src, group, async_op)
def all_reduce(self, tensor, op=ReduceOp.SUM, group=None, async_op=False):
use_caching = False
if use_caching:
match_id = f"{tensor.size()}-{op}"
self.ccl_comm_op.all_reduce_caching(tensor, op, match_id, group, async_op)
else:
self.ccl_comm_op.all_reduce(tensor, op, group, async_op)
def barrier(self, group=None, async_op=False):
self.ccl_comm_op.barrier(group, async_op)
| 2,248 | 34.698413 | 101 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/comm/torch.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed import utils
from .utils import *
from .backend import *
from .comm import *
import os
DS_COMM_ALL_GATHER_OFF = False
DS_COMM_REDUCE_SCATTER_OFF = False
DS_COMM_BROADCAST_OFF = False
DS_COMM_ALL_REDUCE_OFF = False
DS_COMM_REDUCE_OFF = False
def is_torch_two():
TORCH_MAJOR = int(torch.__version__.split('.')[0])
if TORCH_MAJOR >= 2:
return True
else:
return False
def torch_ver_ge_1_13():
if is_torch_two():
return True
else:
TORCH_MAJOR = int(torch.__version__.split('.')[0])
assert TORCH_MAJOR == 1
TORCH_MINOR = int(torch.__version__.split('.')[1])
return TORCH_MINOR >= 13
def has_coalescing_manager():
has_c10d = hasattr(torch.distributed, 'distributed_c10d')
return has_c10d and hasattr(torch.distributed.distributed_c10d, '_coalescing_manager')
def has_all_reduce_coalesced():
return hasattr(torch.distributed, "all_reduce_coalesced") and torch_ver_ge_1_13()
def get_coalescing_manager(group, device, reqs):
if is_torch_two():
return torch.distributed.distributed_c10d._coalescing_manager(group, device=device, reqs=reqs)
else:
return torch.distributed.distributed_c10d._coalescing_manager(group, reqs)
##Utilities to turn comm off
##TODO: move to base comm (wrapper)
def all_gather_comm_off(flag=False):
global DS_COMM_ALL_GATHER_OFF
DS_COMM_ALL_GATHER_OFF = flag
def reduce_scatter_comm_off(flag=False):
global DS_COMM_REDUCE_SCATTER_OFF
DS_COMM_REDUCE_SCATTER_OFF = flag
def broadcast_comm_off(flag=False):
global DS_COMM_BROADCAST_OFF
DS_COMM_BROADCAST_OFF = flag
def all_reduce_comm_off(flag=False):
global DS_COMM_ALL_REDUCE_OFF
DS_COMM_ALL_REDUCE_OFF = flag
def reduce_comm_off(flag=False):
global DS_COMM_REDUCE_OFF
DS_COMM_REDUCE_OFF = flag
# Assumption: all_gather and reduce_scatter are the collectives
# that matter for the backward pass
def backward_comm_off(flag=False):
all_gather_comm_off(flag)
reduce_scatter_comm_off(flag)
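# Illustrative sketch (not part of the original file): temporarily disabling the
# backward-pass collectives around a step being debugged, then restoring them.
def _example_toggle_backward_comm():
    backward_comm_off(True)   # all_gather / reduce_scatter calls return Noop()
    try:
        pass  # run the step being debugged here
    finally:
        backward_comm_off(False)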
class Noop:
def wait(self):
return None
class TorchBackend(Backend):
"""
A light-weight wrapper class for torch.distributed API.
Only a subset of functions are wrapped. Once the init_process_group
is initialized, standard torch.distributed.* can be used directly
so no need to wrap all the functions. We can keep adding wrappers as
needed.
"""
def __init__(self, backend, timeout, init_method, rank=-1, world_size=-1, name='torch'):
super(TorchBackend, self).__init__()
self.has_all_reduce_coalesced = has_all_reduce_coalesced()
self.has_coalescing_manager = has_coalescing_manager()
self.all_gather_function = self.get_all_gather_function()
self.reduce_scatter_function = self.get_reduce_scatter_function()
self.initialized = True
self.name = name
# Future functionality to support ds.initialize() on a single GPU
# The idea is to fake that dist backend is initialized even when
# it is not so we can run on a single GPU without doing any init_process_group
self.single_gpu_mode = True
self.init_process_group(backend, timeout, init_method, rank, world_size)
@classmethod
def get_all_gather_function(self):
if hasattr(torch.distributed, "all_gather_into_tensor"):
return torch.distributed.all_gather_into_tensor
elif hasattr(torch.distributed, "_all_gather_base"):
return torch.distributed._all_gather_base
return None
@classmethod
def get_reduce_scatter_function(self):
if hasattr(torch.distributed, "reduce_scatter_tensor"):
return torch.distributed.reduce_scatter_tensor
elif hasattr(torch.distributed, "_reduce_scatter_base"):
return torch.distributed._reduce_scatter_base
return None
def has_all_gather_into_tensor(self):
return self.all_gather_function is not None
def has_reduce_scatter_tensor(self):
return self.reduce_scatter_function is not None
def init_process_group(self, backend, timeout, init_method, rank, world_size):
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend,
timeout=timeout,
init_method=init_method,
rank=rank,
world_size=world_size)
self.using_mpi = torch.distributed.get_backend() == 'mpi'
def all_reduce(self, tensor, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False):
op = self._reduce_op(op)
return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=async_op)
def all_reduce_coalesced(self, tensors, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False):
""" proxy func to torch.distributed.all_reduce_coalesced,
which is included in PyTorch 1.13 and above
"""
if not self.has_all_reduce_coalesced:
raise RuntimeError(f"Current torch version does not have all_reduce_coalesced "
f"api (torch.__version__: {torch.__version__})")
op = self._reduce_op(op)
return torch.distributed.all_reduce_coalesced(tensors=tensors, op=op, group=group, async_op=async_op)
def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False):
if DS_COMM_REDUCE_OFF:
if int(os.getenv('RANK', '0')) == 0:
utils.logger.warning("REDUCE is OFF")
return Noop()
return torch.distributed.reduce(tensor=tensor, dst=dst, op=self._reduce_op(op), group=group, async_op=async_op)
def reduce_scatter(self, output, input_list, op=ReduceOp.SUM, group=None, async_op=False):
if DS_COMM_REDUCE_SCATTER_OFF:
if int(os.getenv('RANK', '0')) == 0:
utils.logger.warning("REDUCE SCATTER is OFF")
return Noop()
else:
return torch.distributed.reduce_scatter(output=output,
input_list=input_list,
op=self._reduce_op(op),
group=group,
async_op=async_op)
def broadcast(self, tensor, src, group=None, async_op=False):
if DS_COMM_BROADCAST_OFF:
if int(os.getenv('RANK', '0')) == 0:
utils.logger.warning("BROADCAST is OFF")
return Noop()
else:
return torch.distributed.broadcast(tensor=tensor, src=src, group=group, async_op=async_op)
def all_gather(self, tensor_list, tensor, group=None, async_op=False):
if DS_COMM_ALL_GATHER_OFF:
if int(os.getenv('RANK', '0')) == 0:
utils.logger.warning("All Gather is OFF")
return Noop()
else:
return torch.distributed.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op)
def all_gather_into_tensor(self, output_tensor, input_tensor, group=None, async_op=False):
if self.has_all_gather_into_tensor():
return self.all_gather_function(output_tensor=output_tensor,
input_tensor=input_tensor,
group=group,
async_op=async_op)
def all_gather_base(self, output_tensor, input_tensor, group=None, async_op=False):
if DS_COMM_ALL_GATHER_OFF:
if int(os.getenv('RANK', '0')) == 0:
utils.logger.warning("All Gather is OFF")
return Noop()
else:
            if hasattr(torch.distributed.distributed_c10d, "_all_gather_base"):
return torch.distributed.distributed_c10d._all_gather_base(output_tensor=output_tensor,
input_tensor=input_tensor,
group=group,
async_op=async_op)
else:
utils.logger.warning("unable to find torch.distributed._all_gather_base. will fall back to "
"torch.distributed.reduce_scatter which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
pass
def all_gather_coalesced(self, output_tensors, input_tensors, group=None, async_op=False):
""""""
assert len(output_tensors) == len(input_tensors), ""
if hasattr(torch.distributed.distributed_c10d, '_all_gather_base_coalesced'):
# customized PyTorch
return torch.distributed.distributed_c10d._all_gather_base_coalesced(output_tensors,
input_tensors,
group=group,
async_op=async_op)
elif has_coalescing_manager():
reqs = []
with get_coalescing_manager(group, input_tensors[0].device, reqs):
for output, input in zip(output_tensors, input_tensors):
handle = torch.distributed.distributed_c10d.all_gather_into_tensor(output,
input,
group=group,
async_op=True)
reqs.append(handle)
if async_op:
return reqs[-1]
else:
reqs[-1].wait()
def reduce_scatter_tensor(self, output_tensor, input_tensor, op=ReduceOp.SUM, group=None, async_op=False):
if self.has_reduce_scatter_tensor():
return self.reduce_scatter_function(output_tensor,
input_tensor,
op=self._reduce_op(op),
group=group,
async_op=async_op)
else:
utils.logger.warning("unable to find torch.distributed.reduce_scatter_tensor. will fall back to "
"torch.distributed.reduce_scatter which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
pass
def all_to_all_single(self,
output,
input,
output_split_sizes=None,
input_split_sizes=None,
group=None,
async_op=False):
return torch.distributed.all_to_all_single(output=output,
input=input,
output_split_sizes=output_split_sizes,
input_split_sizes=input_split_sizes,
group=group,
async_op=async_op)
def send(self, tensor, dst, group=None, tag=0):
return torch.distributed.send(tensor=tensor, dst=dst, group=group, tag=tag)
def recv(self, tensor, src=None, group=None, tag=0):
return torch.distributed.recv(tensor=tensor, src=src, group=group, tag=tag)
def isend(self, tensor, dst, group=None, tag=0):
return torch.distributed.isend(tensor=tensor, dst=dst, group=group, tag=tag)
def irecv(self, tensor, src=None, group=None, tag=0):
return torch.distributed.irecv(tensor=tensor, src=src, group=group, tag=tag)
def gather(self, tensor, gather_list=None, dst=0, group=None, async_op=False):
return torch.distributed.gather(tensor=tensor,
gather_list=gather_list,
dst=dst,
group=group,
async_op=async_op)
def scatter(self, tensor, scatter_list=None, src=0, group=None, async_op=False):
return torch.distributed.scatter(tensor=tensor,
scatter_list=scatter_list,
src=src,
group=group,
async_op=async_op)
def barrier(self, group=torch.distributed.GroupMember.WORLD, async_op=False, device_ids=None):
if group is None:
group = torch.distributed.GroupMember.WORLD
return torch.distributed.barrier(group=group, async_op=async_op, device_ids=device_ids)
def monitored_barrier(self, group=torch.distributed.GroupMember.WORLD, timeout=None, wait_all_ranks=False):
if group is None:
group = torch.distributed.GroupMember.WORLD
return torch.distributed.monitored_barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks)
def get_rank(self, group=None):
return torch.distributed.get_rank(group=group)
def get_world_size(self, group=None):
return torch.distributed.get_world_size(group=group)
def is_initialized(self):
return torch.distributed.is_initialized()
def get_backend(self, group=None):
return torch.distributed.get_backend(group=group)
def new_group(self, ranks):
return torch.distributed.new_group(ranks)
def get_global_rank(self, group, group_rank):
if hasattr(torch.distributed.distributed_c10d, "get_global_rank"):
from torch.distributed.distributed_c10d import get_global_rank as _get_global_rank
else:
from torch.distributed.distributed_c10d import _get_global_rank
return _get_global_rank(group, group_rank)
def get_world_group(self):
return torch.distributed.group.WORLD
def destroy_process_group(self, group=None):
return torch.distributed.destroy_process_group(group=group)
def _reduce_op(self, op):
'''
Helper function. If the op provided is not a torch.dist.ReduceOp, convert it and return
'''
if not isinstance(op, torch.distributed.ReduceOp):
if op == ReduceOp.SUM:
op = torch.distributed.ReduceOp.SUM
elif op == ReduceOp.PRODUCT:
op = torch.distributed.ReduceOp.PRODUCT
elif op == ReduceOp.AVG:
op = torch.distributed.ReduceOp.AVG
elif op == ReduceOp.MIN:
op = torch.distributed.ReduceOp.MIN
elif op == ReduceOp.MAX:
op = torch.distributed.ReduceOp.MAX
elif op == ReduceOp.BAND:
op = torch.distributed.ReduceOp.BAND
elif op == ReduceOp.BOR:
op = torch.distributed.ReduceOp.BOR
elif op == ReduceOp.BXOR:
op = torch.distributed.ReduceOp.BXOR
return op
# This will become a light-weight wrapper around torch.distributed functions
# TODO: create some example to show how this wrapper can help profile communication
# TODO: make sure there is no performance regression with this approach
# TODO: explore monkey-patching if this does not work
| 15,854 | 42.557692 | 119 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/comm/constants.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
NCCL_BACKEND = 'nccl'
CCL_BACKEND = 'ccl'
MPI_BACKEND = 'mpi'
GLOO_BACKEND = 'gloo'
SCCL_BACKEND = 'sccl'
DEFAULT_AML_MASTER_PORT = "54965"
DEFAULT_AML_NCCL_SOCKET_IFNAME = "^docker0,lo"
#########################################
# Comms Logger
#########################################
# Comms Logger. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
COMMS_LOGGER_FORMAT = '''
The Comms Logger can be specified as:
"comms_logger": {
"enabled": true,
"verbose": false,
"prof_all": true,
"debug": false,
"prof_ops": ["all_reduce", "custom_all_reduce_name"]
}
'''
COMMS_LOGGER = "comms_logger"
# Comms logger enable signal
COMMS_LOGGER_ENABLED = "enabled"
COMMS_LOGGER_ENABLED_DEFAULT = False
# Comms logger verbose signal
COMMS_LOGGER_VERBOSE = "verbose"
COMMS_LOGGER_VERBOSE_DEFAULT = False
# comms logger profile all ops signal
COMMS_LOGGER_PROF_ALL = "prof_all"
COMMS_LOGGER_PROF_ALL_DEFAULT = True
# comms logger show all ops signal
COMMS_LOGGER_DEBUG = "debug"
COMMS_LOGGER_DEBUG_DEFAULT = False
# comms logger profile specific ops in list
COMMS_LOGGER_PROF_OPS = "prof_ops"
COMMS_LOGGER_PROF_OPS_DEFAULT = []
| 1,276 | 24.039216 | 57 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/comm/comm.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
DeepSpeed Communication Package: deepspeed.comm
deepspeed.comm
-- import and use deepspeed.ops.comm
-- use torch.distributed directly if both this package and torch.distributed use the same NCCL version
-- use custom collectives
-- can either use torch.dist or ds.ops.comm?
Note: the old 1-bit compressed allreduce variants that resided in deepspeed.runtime.comm will be moved here as well.
deepspeed.comm API
    -- must be kept fully compatible (same signatures) with the torch.dist API to ensure backward/cross-framework compatibility.
    -- e.g. if client code uses
from deepspeed import comm as dist
instead of
import torch.distributed as dist
The code should work without breaking any of the public torch.distributed functionality
Future:
-- deepspeed groups API should be brought into ds.comm
"""
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from .reduce_op import *
from deepspeed.accelerator import get_accelerator
from deepspeed.comm.ccl import CCLBackend
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
# Current deepspeed.comm backend (cdb) global object for simple access by client code
cdb = None
# Create global timer for ops
timers = timer.SynchronizedWallClockTimer()
timer_summary = {}
comms_logger = CommsLogger()
# Maintain objects of all initialized ds backends and assign them using the API functions in this file
nccl_backend = None
mpi_backend = None
ccl_backend = None
# This should be set here so all rank/size information from the launcher can be propagated
from deepspeed.comm.utils import *
class ProcessGroup():
def __init__(self, comm_id, ranks=[]):
self.ranks = ranks
self.comm_id = comm_id
self.size = len(ranks)
def _configure_using_config_file(config):
if config.comms_logger_enabled:
comms_logger.configure(config)
def configure(
deepspeed_config=None,
enabled=None,
prof_all=None,
prof_ops=None,
verbose=None,
debug=None,
):
if deepspeed_config is not None:
_configure_using_config_file(deepspeed_config.comms_config)
if enabled is not None:
comms_logger.enabled = enabled
if prof_all is not None:
comms_logger.prof_all = prof_all
if prof_ops is not None:
comms_logger.prof_ops = prof_ops
if verbose is not None:
comms_logger.verbose = verbose
if debug is not None:
comms_logger.debug = debug
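# Illustrative sketch (not part of the original file): enabling profiling for a
# couple of collectives at runtime. The op names listed are assumptions.
def _example_enable_comm_profiling():
    configure(enabled=True, prof_all=False, prof_ops=['all_reduce', 'all_gather'], verbose=False)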
# Logging wrapper for timing ops
def timed_op(func):
def log_wrapper(*args, **kwargs):
        # Check the enabled flag first so that the overhead added to each comm op is at most two if-conditions
if comms_logger.enabled:
if ('prof' in kwargs
and kwargs['prof']) or comms_logger.prof_all or ('log_name' in kwargs
and kwargs['log_name'] in comms_logger.prof_ops):
# Need func args for their defaults
func_args = get_default_args(func)
func_args.update(kwargs)
msg_size = get_msg_size_from_args(func, *args, **kwargs)
log_name = get_debug_log_name(func_args, comms_logger.debug)
timers(log_name).start()
# Return the op, then stop the op's timer
try:
return func(*args, **kwargs)
finally:
if comms_logger.enabled:
# Need to make op blocking for accurate logging
get_accelerator().synchronize()
# If we're using MPI, we can't simply sync the stream
if cdb.using_mpi:
cdb.barrier()
if ('prof' in kwargs and kwargs['prof']) or comms_logger.prof_all or (
'log_name' in kwargs and kwargs['log_name'] in comms_logger.prof_ops):
log_name = get_debug_log_name(func_args, comms_logger.debug)
raw_name = func.__name__
timers(log_name).stop()
# need temp var since 'elapsed' resets events
time_elapsed = timers(log_name).elapsed(reset=False)
comms_logger.append(raw_name, log_name, time_elapsed, msg_size)
return log_wrapper
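# Illustrative sketch (not part of the original file): how an additional collective
# wrapper would opt into the comms logger. The op name 'my_coll' and the no-op body
# are assumptions for the example; the real ops below dispatch to the active backend (cdb).
@timed_op
def _example_logged_noop(tensor=None, prof=False, log_name='my_coll', debug=get_caller_func()):
    # a real op would call into cdb here; this no-op only exercises the timing path
    return None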
# For compatibility with torch distributed's init_process_group, we shall retain the signature from PyTorch code.
# DeepSpeed NCCL/MPI backend may not need all these params as we will have our own implementation.
# Please read full torch.distributed API docs from https://pytorch.org/docs/stable/distributed.html
# UNUSED: Future helper function to initialize DS backends
def init_deepspeed_backend(ds_backend, timeout, init_method):
global cdb
global nccl_backend
global mpi_backend
global ccl_backend
rank = int(os.getenv('RANK', '-1'))
size = int(os.getenv('WORLD_SIZE', '-1'))
if ds_backend == NCCL_BACKEND:
utils.logger.warn("NCCL backend in DeepSpeed not yet implemented")
elif ds_backend == MPI_BACKEND:
utils.logger.warn("MPI backend in DeepSpeed not yet implemented")
elif ds_backend == GLOO_BACKEND:
utils.logger.warn("Gloo backend in DeepSpeed not yet implemented")
elif ds_backend == CCL_BACKEND:
ccl_backend = CCLBackend(rank=rank, world_size=size, timeout=timeout, init_method=init_method)
utils.logger.info(f"Initialize {ds_backend} backend")
else:
utils.logger.warn(f"DeepSpeed does not support {ds_backend} backend")
def is_initialized():
#assert cdb is not None, 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb is None:
return False
else:
return cdb.is_initialized()
def destroy_process_group(group=None):
global cdb
return cdb.destroy_process_group(group=group)
def new_group(ranks):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.new_group(ranks)
def is_available() -> bool:
# Returns ``True`` if the deepspeed comm package is available.
# TODO: load other ops. Clients including deepspeed itself should use deepspeed.comm to import
# any communication related primitives from this package.
# use hasattr(deepspeed.csrc.ops, "_comm") or something
return True
def set_backend():
global cdb
global nccl_backend
global mpi_backend
global ccl_backend
backend_name = get_accelerator().communication_backend_name()
if backend_name == NCCL_BACKEND:
if nccl_backend is not None and nccl_backend.is_initialized():
cdb = nccl_backend
elif backend_name == MPI_BACKEND:
if mpi_backend is not None and mpi_backend.is_initialized():
cdb = mpi_backend
elif backend_name == CCL_BACKEND:
if ccl_backend is not None and ccl_backend.is_initialized():
cdb = ccl_backend
@timed_op
def broadcast(tensor, src, group=None, async_op=False, prof=False, log_name='broadcast', debug=get_caller_func()):
global cdb
return cdb.broadcast(tensor=tensor, src=src, group=group, async_op=async_op)
@timed_op
def all_gather(tensor_list,
tensor,
group=None,
async_op=False,
prof=False,
log_name='all_gather',
debug=get_caller_func()):
global cdb
return cdb.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op)
def has_reduce_scatter_tensor():
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.has_reduce_scatter_tensor()
def reduce_scatter_fn(output_tensor,
tensor,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
debug=get_caller_func()):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb.has_reduce_scatter_tensor():
return reduce_scatter_tensor(output_tensor,
tensor,
op=op,
group=group,
async_op=async_op,
prof=prof,
debug=debug)
else:
if get_rank() == 0:
utils.logger.warning_once("unable to find torch.distributed.reduce_scatter_tensor. will fall back to "
"torch.distributed.reduce_scatter which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
input_tensor_lst = list(torch.chunk(tensor, cdb.get_world_size(group)))
return reduce_scatter(output_tensor,
input_tensor_lst,
op=op,
group=group,
async_op=async_op,
prof=prof,
debug=debug)
@timed_op
def reduce_scatter_tensor(output_tensor,
tensor,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='reduce_scatter_tensor',
debug=get_caller_func()):
global cdb
return cdb.reduce_scatter_tensor(output_tensor=output_tensor,
input_tensor=tensor,
op=op,
group=group,
async_op=async_op)
@timed_op
def all_gather_into_tensor(output_tensor,
tensor,
group=None,
async_op=False,
prof=False,
log_name='all_gather_into_tensor',
debug=get_caller_func()):
global cdb
return cdb.all_gather_into_tensor(output_tensor=output_tensor, input_tensor=tensor, group=group, async_op=async_op)
def has_all_gather_into_tensor():
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.has_all_gather_into_tensor()
def allgather_fn(output_tensor, input_tensor, group=None, async_op=False, debug=get_caller_func()):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb.has_all_gather_into_tensor():
return all_gather_into_tensor(output_tensor, input_tensor, group=group, async_op=async_op, debug=debug)
else:
if get_rank() == 0:
utils.logger.warning_once("unable to find torch.distributed.all_gather_into_tensor. will fall back to "
"torch.distributed.all_gather which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
output_tensors = list(torch.chunk(output_tensor, cdb.get_world_size(group)))
return all_gather(output_tensors, input_tensor, group=group, async_op=async_op, debug=debug)
@timed_op
def all_to_all_single(output,
tensor,
output_split_sizes=None,
input_split_sizes=None,
group=None,
async_op=False,
prof=False,
log_name='all_to_all_single',
debug=get_caller_func()):
global cdb
return cdb.all_to_all_single(output=output,
input=tensor,
output_split_sizes=output_split_sizes,
input_split_sizes=input_split_sizes,
group=group,
async_op=async_op)
@timed_op
def send(tensor, dst, group=None, tag=0, prof=False, log_name='send', debug=get_caller_func()):
global cdb
return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag)
@timed_op
def recv(tensor, src=None, group=None, tag=0, prof=False, log_name='recv', debug=get_caller_func()):
global cdb
return cdb.recv(tensor=tensor, src=src, group=group, tag=tag)
@timed_op
def isend(tensor, dst, group=None, tag=0, prof=False, log_name='isend', debug=get_caller_func()):
global cdb
return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag)
@timed_op
def irecv(tensor, src=None, group=None, tag=0, prof=False, log_name='irecv', debug=get_caller_func()):
global cdb
return cdb.recv(tensor=tensor, src=src, group=group, tag=tag)
@timed_op
def gather(tensor,
gather_list=None,
dst=0,
group=None,
async_op=False,
prof=False,
log_name='gather',
debug=get_caller_func()):
global cdb
return cdb.gather(tensor=tensor, gather_list=gather_list, dst=dst, group=group, async_op=async_op)
@timed_op
def scatter(tensor,
scatter_list=None,
src=0,
group=None,
async_op=False,
prof=False,
log_name='scatter',
debug=get_caller_func()):
global cdb
return cdb.scatter(tensor=tensor, scatter_list=scatter_list, src=src, group=group, async_op=async_op)
@timed_op
def barrier(group=None, async_op=False, device_ids=None, prof=False, log_name='barrier', debug=get_caller_func()):
global cdb
return cdb.barrier(group=group, async_op=async_op)
@timed_op
def monitored_barrier(group=None,
timeout=None,
wait_all_ranks=False,
prof=False,
log_name='monitored_barrier',
debug=get_caller_func()):
global cdb
return cdb.barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks)
def log_summary(show_straggler=False):
global cdb
barrier(log_name='log_summary_barrier')
if cdb.get_rank() == 0:
comms_logger.log_all(print_log=True, show_straggler=show_straggler)
else:
comms_logger.log_all(print_log=False, show_straggler=show_straggler)
barrier(log_name='log_summary_barrier')
@timed_op
def reduce(tensor,
dst,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='reduce',
debug=get_caller_func()):
global cdb
return cdb.reduce(tensor=tensor, dst=dst, op=op, group=group, async_op=async_op)
@timed_op
def reduce_scatter(output,
input_list,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='reduce_scatter',
debug=get_caller_func()):
global cdb
return cdb.reduce_scatter(output=output, input_list=input_list, op=op, group=group, async_op=async_op)
def has_all_reduce_coalesced():
""""""
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
assert cdb.has_all_reduce_coalesced is not None, 'has_all_reduce_coalesced is not yet defined'
return cdb.has_all_reduce_coalesced
def has_coalescing_manager():
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
assert cdb.has_coalescing_manager is not None, 'has_coalescing_manager is not yet defined'
return cdb.has_coalescing_manager
def all_gather_coalesced(output_tensors, input_tensors, group=None, async_op=False):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.all_gather_coalesced(output_tensors, input_tensors, group=group, async_op=async_op)
@timed_op
def all_reduce(tensor,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='all_reduce',
debug=get_caller_func()):
#if profile_comm:
# context of the timers?
# timers.start()
# TensorBoard logging for comm calls.?
global cdb
#print(f'op = {op}, cdb= {cdb.name}')
return cdb.all_reduce(tensor, op, group, async_op)
@timed_op
def all_reduce_coalesced(tensors,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='all_reduce',
debug=get_caller_func()):
global cdb
return cdb.all_reduce_coalesced(tensors, op, group, async_op)
def get_world_group():
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_world_group()
def get_world_size(group=None) -> int:
"""
Returns the number of processes in the current process group
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
Returns:
The world size of the process group
-1, if not part of the group
"""
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_world_size(group)
def get_rank(group=None):
"""
Returns the rank of the current process in the provided ``group`` or the
default group if none was provided.
Rank is a unique identifier assigned to each process within a distributed
process group. They are always consecutive integers ranging from 0 to
    ``world_size - 1``.
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
Returns:
The rank of the process group
-1, if not part of the group
"""
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_rank(group)
def get_local_rank():
"""
Helper function to get local rank after a backend has been set and initialized
Args:
None
Returns:
local rank (= GPU device ID)
"""
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return get_local_rank_from_launcher()
def get_global_rank(group=None, group_rank=0):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_global_rank(group, group_rank)
def get_all_ranks_from_group(group=None):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
rank = 0
group_ranks = []
try:
while True:
group_ranks.append(cdb.get_global_rank(group, rank))
rank += 1
except RuntimeError:
pass
return group_ranks
# Main DeepSpeed Comms. public API.
def init_distributed(dist_backend=None,
auto_mpi_discovery=True,
distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT,
verbose=True,
timeout=default_pg_timeout,
init_method=None,
dist_init_required=None,
config=None,
rank=-1,
world_size=-1):
''' Initialize dist backend, potentially performing MPI discovery if needed
Arguments:
dist_backend: Optional (str). torch distributed backend, e.g., nccl, mpi, gloo
        auto_mpi_discovery: Optional (bool). If distributed environment variables are not set, attempt to discover them from MPI
distributed_port: Optional (int). torch distributed backend port
verbose: Optional (bool). verbose logging
timeout: Optional (timedelta). Timeout for operations executed against the process group. Default value equals 30 minutes.
init_method: Optional (string). Torch distributed, URL specifying how to initialize the process group. Default is “env://” if no init_method or store is specified.
config: Optional (dict). DeepSpeed configuration for setting up comms options (e.g. Comms profiling)
rank: Optional (int). The current manually specified rank. Some init_method like “tcp://” need the rank and world_size as well (see: https://pytorch.org/docs/stable/distributed.html#tcp-initialization)
world_size: Optional (int). Desired world_size for the TCP or Shared file-system initialization.
'''
global cdb
configure(deepspeed_config=config)
if dist_init_required is None:
dist_init_required = cdb is None or not cdb.is_initialized()
if cdb is None:
if torch.distributed.is_initialized():
# The user initialized torch.dist themselves, create cdb and short-circuit
cdb = TorchBackend(dist_backend, timeout, init_method)
return
else:
init_deepspeed_backend(get_accelerator().communication_backend_name(), timeout, init_method)
set_backend()
utils.logger.info(f'cdb={cdb}')
if dist_init_required is False:
assert (
cdb is not None and cdb.is_initialized() is True
), "Distributed backend is not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()"
else:
# Initialize torch distributed if needed
required_env = ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
if auto_mpi_discovery and not all(map(lambda v: v in os.environ, required_env)):
if verbose:
utils.logger.info("Not using the DeepSpeed or dist launchers, attempting to detect MPI environment...")
if in_aml() and not in_dlts():
patch_aml_env_for_torch_nccl_backend(verbose=verbose)
elif in_aws_sm():
patch_aws_sm_env_for_torch_nccl_backend(verbose=verbose)
else:
mpi_discovery(distributed_port=distributed_port, verbose=verbose)
if cdb is not None and cdb.is_initialized():
if int(os.getenv('RANK', '0')) == 0:
utils.logger.info('Distributed backend already initialized')
else:
assert isinstance(timeout, timedelta)
if dist_backend is None:
dist_backend = get_accelerator().communication_backend_name()
if int(os.getenv('RANK', '0')) == 0:
utils.logger.info('Initializing TorchBackend in DeepSpeed with backend {}'.format(dist_backend))
# Create a torch backend object, initialize torch distributed, and assign to cdb
cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size)
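# Illustrative sketch (not part of the original file): the typical way a client
# brings up the default backend and queries its rank. The backend name 'nccl'
# and env:// rendezvous are assumptions for the example.
def _example_init_and_query():
    init_distributed(dist_backend='nccl')
    if get_rank() == 0:
        utils.logger.info(f'world size: {get_world_size()}')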
def mpi_discovery(distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT, verbose=True):
'''
Discovery MPI environment via mpi4py and map to relevant dist state
'''
from mpi4py import MPI
import subprocess
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
world_size = comm.Get_size()
master_addr = None
if rank == 0:
hostname_cmd = ["hostname -I"]
result = subprocess.check_output(hostname_cmd, shell=True)
master_addr = result.decode('utf-8').split()[0]
master_addr = comm.bcast(master_addr, root=0)
# Determine local rank by assuming hostnames are unique
proc_name = MPI.Get_processor_name()
all_procs = comm.allgather(proc_name)
local_rank = sum([i == proc_name for i in all_procs[:rank]])
os.environ['RANK'] = str(rank)
os.environ['WORLD_SIZE'] = str(world_size)
os.environ['LOCAL_RANK'] = str(local_rank)
os.environ['MASTER_ADDR'] = master_addr
os.environ['MASTER_PORT'] = str(distributed_port)
if verbose:
utils.logger.info(
"Discovered MPI settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}".
format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT']))
if cdb is not None and cdb.is_initialized():
assert cdb.get_rank() == rank, "MPI rank {} does not match torch rank {}".format(rank, cdb.get_rank())
assert cdb.get_world_size() == world_size, "MPI world size {} does not match torch world size {}".format(
world_size, cdb.get_world_size())
def in_aml():
# Are we running inside an Azure Machine Learning (AML) environment?
return 'AZUREML_EXPERIMENT_ID' in os.environ
def in_aws_sm():
# Are we running inside an AWS SageMaker environment?
return 'SM_TRAINING_ENV' in os.environ
def in_dlts():
# Are we running on a DLTS cluster?
return 'DLTS_JOB_ID' in os.environ
def patch_aml_env_for_torch_nccl_backend(master_port=6105, verbose=True):
"""Helper routine to get and set environment variables.
This is adapted from Azure ML's documentation available from:
https://azure.github.io/azureml-web/docs/cheatsheet/distributed-training/#environment-variables-from-openmpi
"""
os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
single_node = int(os.environ["OMPI_COMM_WORLD_LOCAL_SIZE"]) == int(os.environ["WORLD_SIZE"])
if not single_node:
master_node_params = os.environ["AZ_BATCH_MASTER_NODE"].split(":")
os.environ["MASTER_ADDR"] = master_node_params[0]
# Do not overwrite master port with that defined in AZ_BATCH_MASTER_NODE
if "MASTER_PORT" not in os.environ:
os.environ["MASTER_PORT"] = str(master_port)
else:
os.environ["MASTER_ADDR"] = os.environ["AZ_BATCHAI_MPI_MASTER_NODE"]
os.environ["MASTER_PORT"] = DEFAULT_AML_MASTER_PORT
if verbose:
utils.logger.info("NCCL_SOCKET_IFNAME original value = {}".format(os.environ["NCCL_SOCKET_IFNAME"]))
os.environ["NCCL_SOCKET_IFNAME"] = DEFAULT_AML_NCCL_SOCKET_IFNAME
os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]
if verbose:
utils.logger.info(
"Discovered AzureML settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
.format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT']))
def patch_aws_sm_env_for_torch_nccl_backend(verbose=True):
"""Helper routine to get and set environment variables when running inside an AWS SageMaker environment.
"""
os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]
os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
if verbose:
utils.logger.info(
"Discovered AWS SageMaker settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
.format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT']))
| 28,373 | 36.983936 | 209 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/comm/utils.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import inspect
from deepspeed.utils import get_caller_func
def get_local_rank_from_launcher():
# DeepSpeed launcher will set it so get from there
rank = os.environ.get('LOCAL_RANK')
if rank is None:
rank = os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK')
# Make it a single process job and set rank to 0
if rank is None:
rank = 0
return int(rank)
def get_world_rank_from_launcher():
# DeepSpeed launcher will set it so get from there
rank = os.environ.get('RANK')
if rank is None:
rank = os.environ.get('OMPI_COMM_WORLD_RANK')
# Make it a single process job and set rank to 0
if rank is None:
rank = 0
return int(rank)
def get_world_size_from_launcher():
# DeepSpeed launcher will set it so get from there
size = os.environ.get('WORLD_SIZE')
rank = os.environ.get('RANK')
if size is None:
size = os.environ.get('OMPI_COMM_WORLD_SIZE')
# Make it a single process job and set size to 1
if size is None:
size = 1
    if rank is None or int(rank) == 0:
print(f"set world size to {size}")
return int(size)
def get_default_args(func):
signature = inspect.signature(func)
return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
# We need this hacky function since torch doesn't consistently name or place the input tensor args
def get_tensor_position(func):
sig_params = inspect.signature(func).parameters
arg = None
# most colls
if 'tensor' in sig_params:
arg = 'tensor'
# all_reduce_coalesced coll
elif 'tensors' in sig_params:
arg = 'tensors'
# reduce scatter coll
elif 'input_list' in sig_params:
arg = 'input_list'
# all_to_all and torch multiGPU colls
elif 'input_tensor_list' in sig_params:
arg = 'input_tensor_list'
if arg is None:
return -1
else:
return list(sig_params).index(arg)
def get_tensor_kwarg(func, kwargs):
func_args = get_default_args(func)
func_args.update(kwargs)
arg = None
if 'tensor' in func_args:
arg = func_args['tensor']
elif 'tensors' in func_args:
arg = func_args['tensors']
elif 'input_list' in func_args:
arg = func_args['input_list']
elif 'input_tensor_list' in func_args:
arg = func_args['input_tensor_list']
return arg
def get_msg_size_from_args(func, *args, **kwargs):
# 3 cases:
# - tensor arg is in args
# - tensor arg is in kwargs
# - tensor arg is not present (e.g. barrier)
tensor_arg_position = -1
tensor_arg = None
# check if tensor arg is in args
if len(args) > 0:
tensor_arg_position = get_tensor_position(func)
if tensor_arg_position > -1:
tensor_arg = args[get_tensor_position(func)]
# check if tensor arg is in kwargs
if tensor_arg is None and len(kwargs) > 0:
tensor_arg = get_tensor_kwarg(func, kwargs)
# if tensor arg is not present, no data is being transmitted
if tensor_arg is None:
return 0
else:
# Sum of tensor sizes for list colls such as torch's all_to_all
# NOTE: msg_size for list colls will not be the actual size transmitted by a given MPI/NCCL call within the coll op. Instead, it's the total amount of data transmitted.
if type(tensor_arg) is list:
return sum(x.element_size() * x.nelement() for x in tensor_arg)
else:
return tensor_arg.element_size() * tensor_arg.nelement()
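# Illustrative sketch (not part of the original file): resolving the message size
# for a collective-style signature. The fake signature and tensor shape are
# assumptions for the example.
def _example_msg_size():
    import torch

    def fake_all_reduce(tensor=None, op=None, group=None, async_op=False):
        pass  # stands in for a comm op; only the signature matters here

    t = torch.zeros(1024, dtype=torch.float16)
    # 1024 elements * 2 bytes per fp16 element == 2048 bytes
    return get_msg_size_from_args(fake_all_reduce, tensor=t)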
def get_debug_log_name(func_args, debug):
if debug:
return func_args['log_name'] + ' | [Caller Func: ' + get_caller_func() + ']'
else:
return func_args['log_name']
| 3,842 | 27.679104 | 176 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/comm/reduce_op.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from enum import Enum
class ReduceOp(Enum):
SUM = 0
PRODUCT = 1
MIN = 2
MAX = 3
BAND = 4
BOR = 5
BXOR = 6
AVG = 7
UNUSED = 8
| 259 | 12.684211 | 38 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/comm/config.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from pydantic import BaseModel
from .constants import *
class CommsConfig(BaseModel):
class Config:
validate_all = True
validate_assignment = True
use_enum_values = True
extra = 'forbid'
class CommsLoggerConfig(CommsConfig):
enabled: bool = COMMS_LOGGER_ENABLED_DEFAULT
prof_all: bool = COMMS_LOGGER_PROF_ALL_DEFAULT
prof_ops: list = COMMS_LOGGER_PROF_OPS_DEFAULT
verbose: bool = COMMS_LOGGER_VERBOSE_DEFAULT
debug: bool = COMMS_LOGGER_DEBUG_DEFAULT
class DeepSpeedCommsConfig:
def __init__(self, ds_config):
self.comms_logger_enabled = 'comms_logger' in ds_config
if self.comms_logger_enabled:
self.comms_logger = CommsLoggerConfig(**ds_config['comms_logger'])
| 855 | 24.176471 | 78 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/comm/backend.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
DeepSpeed Communication Backend.
In the future, directly use NCCL/MPI/Gloo/etc without requiring torch.distributed. Simply wrap torch.distributed for now.
# Custom DS Backends -- Direct C/Ops
- NCCL -- [EXPERIMENTAL]
- MPI -- [EXPERIMENTAL]
- RCCL -- [EXPERIMENTAL]
- GLOO -- [EXPERIMENTAL]
# DS backend wrapper for torch.distributed [DEFAULT]
- T-NCCL -- [DEFAULT]
- T-GLOO
- T-MPI
# Backend is the base class
-- NcclBackend, MpiBackend, and TorchBackend are the main subclasses. TorchBackend is the only officially supported backend for now.
"""
class Backend(object):
def __init__(self, name='backend', rank=0, size=1):
self.name = name
# The world size and rank of the world process group
self.world_group = None
self.world_size = size
self.world_rank = rank
# Single process group (pg) implementation for now but keep a list for future
self.process_groups = []
self.initialized = False
def is_initialized(self):
return self.initialized
def new_group(self):
# create a new pg and add it to pg list
pass
def init_process_group(self):
# subclasses will initialize them fully
# - initialize a default world process group and add it to pg list
self.initialized = True
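# Illustrative sketch (not part of the original file): the minimal shape of a
# custom backend built on this base class. The name 'example' is an assumption,
# not a real DeepSpeed backend.
class _ExampleBackend(Backend):

    def __init__(self, rank=0, size=1):
        super().__init__(name='example', rank=rank, size=size)

    def init_process_group(self):
        # a real backend would create its communicator / process groups here
        self.initialized = True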
| 1,416 | 28.520833 | 137 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/comm/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .utils import *
from .comm import *
| 137 | 16.25 | 38 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/monitor/tensorboard.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .utils import check_tb_availability
from .monitor import Monitor
import os
import deepspeed.comm as dist
class TensorBoardMonitor(Monitor):
def __init__(self, tensorboard_config):
super().__init__(tensorboard_config)
check_tb_availability()
self.summary_writer = None
self.enabled = tensorboard_config.enabled
self.output_path = tensorboard_config.output_path
self.job_name = tensorboard_config.job_name
if self.enabled and dist.get_rank() == 0:
self.get_summary_writer()
def get_summary_writer(self, base=os.path.join(os.path.expanduser("~"), "tensorboard")):
if self.enabled and dist.get_rank() == 0:
from torch.utils.tensorboard import SummaryWriter
if self.output_path is not None:
log_dir = os.path.join(self.output_path, self.job_name)
# NOTE: This code path currently is never used since the default output_path is an empty string and not None. Saving it in case we want this functionality in the future.
else:
if "DLWS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLWS_JOB_ID"]
elif "DLTS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLTS_JOB_ID"]
else:
infra_job_id = "unknown-job-id"
summary_writer_dir_name = os.path.join(infra_job_id, "logs")
log_dir = os.path.join(base, summary_writer_dir_name, self.output_path)
os.makedirs(log_dir, exist_ok=True)
self.summary_writer = SummaryWriter(log_dir=log_dir)
return self.summary_writer
def write_events(self, event_list, flush=True):
if self.enabled and self.summary_writer is not None and dist.get_rank() == 0:
for event in event_list:
self.summary_writer.add_scalar(*event)
if flush:
self.summary_writer.flush()
def flush(self):
if self.enabled and self.summary_writer is not None and dist.get_rank() == 0:
self.summary_writer.flush()
| 2,227 | 38.087719 | 181 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/monitor/utils.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
def check_tb_availability():
try:
# torch.utils.tensorboard will fail if `tensorboard` is not available,
# see their docs for more details: https://pytorch.org/docs/1.8.0/tensorboard.html
import tensorboard # noqa: F401
except ImportError:
print('If you want to use tensorboard logging, please `pip install tensorboard`')
raise
def check_wandb_availability():
try:
import wandb # noqa: F401
except ImportError:
print(
'If you want to use wandb logging, please `pip install wandb` and follow the instructions at https://docs.wandb.ai/quickstart'
)
raise
| 754 | 29.2 | 138 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/monitor/csv_monitor.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .monitor import Monitor
import os
import deepspeed.comm as dist
class csvMonitor(Monitor):
def __init__(self, csv_config):
super().__init__(csv_config)
self.filenames = []
self.enabled = csv_config.enabled
self.output_path = csv_config.output_path
self.job_name = csv_config.job_name
self.log_dir = self.setup_log_dir()
def setup_log_dir(self, base=os.path.join(os.path.expanduser("~"), "csv_monitor")):
if self.enabled and dist.get_rank() == 0:
if self.output_path is not None:
log_dir = os.path.join(self.output_path, self.job_name)
                # NOTE: This code path currently is never used since the default output_path is an empty string and not None. Saving it in case we want this functionality in the future.
else:
if "DLWS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLWS_JOB_ID"]
elif "DLTS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLTS_JOB_ID"]
else:
infra_job_id = "unknown-job-id"
csv_monitor_dir_name = os.path.join(infra_job_id, "logs")
log_dir = os.path.join(base, csv_monitor_dir_name, self.job_name)
os.makedirs(log_dir, exist_ok=True)
return log_dir
def write_events(self, event_list):
if self.enabled and dist.get_rank() == 0:
import csv
# We assume each event_list element is a tensorboard-style tuple in the format: (log_name: String, value, step: Int)
for event in event_list:
log_name = event[0]
value = event[1]
step = event[2]
# Set the header to the log_name
# Need this check because the deepspeed engine currently formats log strings to separate with '/'
if '/' in log_name:
record_splits = log_name.split('/')
header = record_splits[len(record_splits) - 1]
else:
header = log_name
# sanitize common naming conventions into filename
filename = log_name.replace('/', '_').replace(' ', '_')
fname = self.log_dir + '/' + filename + '.csv'
# Open file and record event. Insert header if this is the first time writing
with open(fname, 'a+') as csv_monitor_file:
csv_monitor_writer = csv.writer(csv_monitor_file)
if filename not in self.filenames:
self.filenames.append(filename)
csv_monitor_writer.writerow(['step', header])
csv_monitor_writer.writerow([step, value])
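# Illustrative sketch (not part of the original file): the tensorboard-style
# (log_name, value, step) tuples this monitor consumes. The metric names are
# assumptions for the example.
_EXAMPLE_EVENTS = [
    ("Train/Samples/loss", 2.31, 10),
    ("Train/Samples/lr", 1e-4, 10),
]
# csvMonitor(csv_config).write_events(_EXAMPLE_EVENTS) would append one row per
# metric to Train_Samples_loss.csv and Train_Samples_lr.csv under log_dir.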
| 2,907 | 41.764706 | 193 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/monitor/config.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from pydantic import root_validator
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
def get_monitor_config(param_dict):
monitor_dict = {key: param_dict.get(key, {}) for key in ("tensorboard", "wandb", "csv_monitor")}
return DeepSpeedMonitorConfig(**monitor_dict)
class TensorBoardConfig(DeepSpeedConfigModel):
"""Sets parameters for TensorBoard monitor."""
enabled: bool = False
""" Whether logging to Tensorboard is enabled. Requires `tensorboard` package is installed. """
output_path: str = ""
"""
Path to where the Tensorboard logs will be written. If not provided, the
output path is set under the training script’s launching path.
"""
job_name: str = "DeepSpeedJobName"
""" Name for the current job. This will become a new directory inside `output_path`. """
class WandbConfig(DeepSpeedConfigModel):
"""Sets parameters for WandB monitor."""
enabled: bool = False
""" Whether logging to WandB is enabled. Requires `wandb` package is installed. """
group: str = None
""" Name for the WandB group. This can be used to group together runs. """
team: str = None
""" Name for the WandB team. """
project: str = "deepspeed"
""" Name for the WandB project. """
class CSVConfig(DeepSpeedConfigModel):
"""Sets parameters for CSV monitor."""
enabled: bool = False
""" Whether logging to local CSV files is enabled. """
output_path: str = ""
"""
Path to where the csv files will be written. If not provided, the output
path is set under the training script’s launching path.
"""
job_name: str = "DeepSpeedJobName"
""" Name for the current job. This will become a new directory inside `output_path`. """
class DeepSpeedMonitorConfig(DeepSpeedConfigModel):
"""Sets parameters for various monitoring methods."""
tensorboard: TensorBoardConfig = {}
""" TensorBoard monitor, requires `tensorboard` package is installed. """
wandb: WandbConfig = {}
""" WandB monitor, requires `wandb` package is installed. """
csv_monitor: CSVConfig = {}
""" Local CSV output of monitoring data. """
@root_validator
def check_enabled(cls, values):
values["enabled"] = values.get("tensorboard").enabled or values.get("wandb").enabled or values.get(
"csv_monitor").enabled
return values
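# Illustrative sketch (not part of the original file): the monitor section of a
# ds_config dict that get_monitor_config() would parse. The paths and job name
# are assumptions for the example.
_EXAMPLE_MONITOR_SECTION = {
    "tensorboard": {"enabled": True, "output_path": "logs/", "job_name": "run1"},
    "wandb": {"enabled": False},
    "csv_monitor": {"enabled": True, "output_path": "logs/", "job_name": "run1"},
}
# get_monitor_config(_EXAMPLE_MONITOR_SECTION).enabled would evaluate to True here.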
| 2,481 | 30.025 | 107 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/monitor/monitor.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Support different forms of monitoring such as wandb and tensorboard
"""
from abc import ABC, abstractmethod
import deepspeed.comm as dist
class Monitor(ABC):
@abstractmethod
def __init__(self, monitor_config):
self.monitor_config = monitor_config
@abstractmethod
def write_events(self, event_list):
pass
from .wandb import WandbMonitor
from .tensorboard import TensorBoardMonitor
from .csv_monitor import csvMonitor
class MonitorMaster(Monitor):
def __init__(self, monitor_config):
super().__init__(monitor_config)
self.tb_monitor = None
self.wandb_monitor = None
self.csv_monitor = None
self.enabled = monitor_config.enabled
if dist.get_rank() == 0:
if monitor_config.tensorboard.enabled:
self.tb_monitor = TensorBoardMonitor(monitor_config.tensorboard)
if monitor_config.wandb.enabled:
self.wandb_monitor = WandbMonitor(monitor_config.wandb)
if monitor_config.csv_monitor.enabled:
self.csv_monitor = csvMonitor(monitor_config.csv_monitor)
def write_events(self, event_list):
if dist.get_rank() == 0:
if self.tb_monitor is not None:
self.tb_monitor.write_events(event_list)
if self.wandb_monitor is not None:
self.wandb_monitor.write_events(event_list)
if self.csv_monitor is not None:
self.csv_monitor.write_events(event_list)
| 1,604 | 28.722222 | 80 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/monitor/wandb.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .utils import check_wandb_availability
from .monitor import Monitor
import deepspeed.comm as dist
class WandbMonitor(Monitor):
def __init__(self, wandb_config):
super().__init__(wandb_config)
check_wandb_availability()
import wandb
self.enabled = wandb_config.enabled
self.group = wandb_config.group
self.team = wandb_config.team
self.project = wandb_config.project
if self.enabled and dist.get_rank() == 0:
wandb.init(project=self.project, group=self.group, entity=self.team)
def log(self, data, step=None, commit=None, sync=None):
if self.enabled and dist.get_rank() == 0:
import wandb
return wandb.log(data, step=step, commit=commit, sync=sync)
def write_events(self, event_list):
if self.enabled and dist.get_rank() == 0:
for event in event_list:
label = event[0]
value = event[1]
step = event[2]
self.log({label: value}, step=step)
| 1,150 | 28.512821 | 80 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/monitor/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
| 140 | 22.5 | 44 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/elasticity/elastic_agent.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent
from typing import Any, Dict, Optional, Tuple
from datetime import datetime
from torch.distributed.elastic.agent.server.api import log, _get_socket_with_port
from torch.distributed.elastic.metrics import put_metric
from torch.distributed.elastic.agent.server.api import (
RunResult,
WorkerGroup,
WorkerSpec,
WorkerState,
)
from torch.distributed import Store
import time
import os
from torch.distributed.elastic.multiprocessing import start_processes
from torch.distributed.elastic.utils import macros
import shutil
import copy
from contextlib import closing
import subprocess
class DSElasticAgent(LocalElasticAgent):
def __init__(
self,
spec: WorkerSpec,
env: Dict,
start_method="spawn",
exit_barrier_timeout: float = 300,
log_dir: Optional[str] = None,
):
super().__init__(spec, start_method, exit_barrier_timeout, log_dir)
self.ds_env = env
@staticmethod
def _set_master_addr_port(store: Store, master_addr: Optional[str], master_port: Optional[int]):
if master_port is None:
sock = _get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
if master_addr is None:
# master_addr = _get_fq_hostname()
result = subprocess.check_output("hostname -I", shell=True)
master_addr = result.decode('utf-8').split()[0]
store.set("MASTER_ADDR", master_addr.encode(encoding="UTF-8"))
store.set("MASTER_PORT", str(master_port).encode(encoding="UTF-8"))
def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
spec = worker_group.spec
store = worker_group.store
assert store is not None
master_addr, master_port = super()._get_master_addr_port(store)
restart_count = spec.max_restarts - self._remaining_restarts
use_agent_store = spec.rdzv_handler.get_backend() == "static"
args: Dict[int, Tuple] = {}
envs: Dict[int, Dict[str, str]] = {}
for worker in worker_group.workers:
local_rank = worker.local_rank
worker_env_ds = copy.deepcopy(self.ds_env)
worker_env_elastic = {
"LOCAL_RANK": str(local_rank),
"RANK": str(worker.global_rank),
"GROUP_RANK": str(worker_group.group_rank),
"ROLE_RANK": str(worker.role_rank),
"ROLE_NAME": spec.role,
"LOCAL_WORLD_SIZE": str(spec.local_world_size),
"WORLD_SIZE": str(worker.world_size),
"GROUP_WORLD_SIZE": str(worker_group.group_world_size),
"ROLE_WORLD_SIZE": str(worker.role_world_size),
"MASTER_ADDR": master_addr,
"MASTER_PORT": str(master_port),
"TORCHELASTIC_RESTART_COUNT": str(restart_count),
"TORCHELASTIC_MAX_RESTARTS": str(spec.max_restarts),
"TORCHELASTIC_RUN_ID": spec.rdzv_handler.get_run_id(),
"TORCHELASTIC_USE_AGENT_STORE": str(use_agent_store),
"NCCL_ASYNC_ERROR_HANDLING": os.getenv("NCCL_ASYNC_ERROR_HANDLING", str(1)),
}
worker_env_ds.update(worker_env_elastic)
if "OMP_NUM_THREADS" in os.environ:
worker_env_ds["OMP_NUM_THREADS"] = os.environ["OMP_NUM_THREADS"]
envs[local_rank] = worker_env_ds
worker_args = list(spec.args)
worker_args = macros.substitute(worker_args, str(local_rank))
args[local_rank] = tuple(worker_args)
# scaling events do not count towards restarts (gets same attempt #)
# remove existing log dir if this restart is due to a scaling event
attempt_log_dir = os.path.join(self._log_dir, f"attempt_{restart_count}")
shutil.rmtree(attempt_log_dir, ignore_errors=True)
os.makedirs(attempt_log_dir)
assert spec.entrypoint is not None
self._pcontext = start_processes(
name=spec.role,
entrypoint=spec.entrypoint,
args=args,
envs=envs,
log_dir=attempt_log_dir,
start_method=self._start_method,
redirects=spec.redirects,
tee=spec.tee,
)
return self._pcontext.pids()
def _invoke_run(self, role: str = "default") -> RunResult:
# NOTE: currently only works for a single role
spec = self._worker_group.spec
role = spec.role
log.info(f"[{role}] starting workers for entrypoint: {spec.get_entrypoint_name()}")
self._initialize_workers(self._worker_group)
monitor_interval = spec.monitor_interval
rdzv_handler = spec.rdzv_handler
participants = rdzv_handler._state_holder.state.participants
while True:
assert self._worker_group.state != WorkerState.INIT
time.sleep(monitor_interval)
run_result = self._monitor_workers(self._worker_group)
state = run_result.state
self._worker_group.state = state
expire_time = datetime.utcnow() - (rdzv_handler._settings.keep_alive_interval *
rdzv_handler._settings.keep_alive_max_attempt)
_dead_nodes = [
node for node, last_heartbeat in rdzv_handler._state_holder.state.last_heartbeats.items()
if last_heartbeat < expire_time
]
put_metric(f"workers.{role}.remaining_restarts", self._remaining_restarts)
put_metric(f"workers.{role}.{state.name.lower()}", 1)
if state == WorkerState.SUCCEEDED:
log.info(f"[{role}] worker group successfully finished."
f" Waiting {self._exit_barrier_timeout} seconds for other agents to finish.")
self._exit_barrier()
return run_result
elif state in {WorkerState.UNHEALTHY, WorkerState.FAILED
} or len(participants) > len(rdzv_handler._state_holder.state.participants):
if self._remaining_restarts > 0:
log.info(f"[{role}] Worker group {state.name}. "
f"{self._remaining_restarts}/{spec.max_restarts} attempts left;"
f" will restart worker group")
self._remaining_restarts -= 1
# rdzv_handler._state_holder.state.restart = False
self._restart_workers(self._worker_group)
participants = rdzv_handler._state_holder.state.participants
else:
self._stop_workers(self._worker_group)
self._worker_group.state = WorkerState.FAILED
self._exit_barrier()
return run_result
elif state == WorkerState.HEALTHY:
# membership changes do not count as retries
num_nodes_waiting = rdzv_handler.num_nodes_waiting()
group_rank = self._worker_group.group_rank
if num_nodes_waiting > 0:
log.info(f"[{role}] Detected {num_nodes_waiting} "
f"new nodes from group_rank={group_rank}; "
f"will restart worker group")
self._restart_workers(self._worker_group)
participants = rdzv_handler._state_holder.state.participants
else:
raise Exception(f"[{role}] Worker group in {state.name} state")
| 7,762 | 41.653846 | 105 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/elasticity/constants.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#########################################
# Elasticity
#########################################
''' Elasticity Utility in DeepSpeed can be used to create highly elastic jobs compatible
with a large number of GPUs. For elastic jobs, DeepSpeed will provide a batch size that
can support a large number of GPUs based on the user-specified parameters
'''
FORMAT = '''
Elasticity should be enabled as:
"elasticity": {
"enabled": true,
"max_train_batch_size": 2000,
"micro_batch_sizes": [2,4,6],
"min_gpus": 1,
"max_gpus" : 10000,
"min_time": 20,
"prefer_larger_batch": true,
"ignore_non_elastic_batch_info": false,
"version": 0.1
}
'''
ELASTICITY = 'elasticity'
# Current elasticity version
LATEST_ELASTICITY_VERSION = 0.2
ENABLED = 'enabled'
ENABLED_DEFAULT = False
# Max acceptable train_batch_size
MAX_ACCEPTABLE_BATCH_SIZE = 'max_train_batch_size'
MAX_ACCEPTABLE_BATCH_SIZE_DEFAULT = 2000
# Acceptable micro batch sizes, same as train_micro_batch_size_per_gpu
MICRO_BATCHES = 'micro_batch_sizes'
MICRO_BATCHES_DEFAULT = [2, 4, 6]
# Min/max of GPUs to search over
MIN_GPUS = 'min_gpus'
MIN_GPUS_DEFAULT = 1
MAX_GPUS = 'max_gpus'
MAX_GPUS_DEFAULT = 10000
NUM_GPUS_PER_NODE = 'num_gpus_per_node'
NUM_GPUS_PER_NODE_DEFAULT = 1
MODEL_PARALLEL_SIZE = "model_parallel_size"
MODEL_PARALLEL_SIZE_DEFAULT = 1
# Minimum running time (minutes) before the scheduler will scale us, 0 implies it's unknown
MIN_TIME = "min_time"
MIN_TIME_DEFAULT = 0
# When finding a suitable batch size, attempt to find one that is closest
# to the max train batch size given.
PREFER_LARGER_BATCH = 'prefer_larger_batch'
PREFER_LARGER_BATCH_DEFAULT = True
# In order to reduce confusion, if elastic mode is enabled we
# require (via assert) that no batch info is set outside of the
# elastic config. You can turn off this assert via this config
# but keep in mind that all batch info defined outside the
# elastic mode *will be ignored*.
IGNORE_NON_ELASTIC_BATCH_INFO = 'ignore_non_elastic_batch_info'
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT = False
# Version of elastic logic to use
VERSION = "version"
VERSION_DEFAULT = LATEST_ELASTICITY_VERSION
# Minimum deepspeed version to use elasticity
MINIMUM_DEEPSPEED_VERSION = "0.3.8"
# Environment variable storing elastic config from resource scheduler
DEEPSPEED_ELASTICITY_CONFIG = "DEEPSPEED_ELASTICITY_CONFIG"
| 2,454 | 28.939024 | 91 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/elasticity/utils.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
def is_torch_elastic_compatible():
'''
        Helper to look up the torch version. Elastic training is
introduced in 1.11.x
'''
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
    # Elastic training requires torch >= 1.11; newer major releases (2.x) also qualify.
    return TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 11)
| 459 | 22 | 59 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/elasticity/config.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import json
from .constants import *
class ElasticityError(Exception):
"""
Base exception for all elasticity related errors
"""
class ElasticityConfigError(ElasticityError):
"""
Elasticity configuration error
"""
class ElasticityIncompatibleWorldSize(ElasticityError):
"""
Attempting to run a world size that is incompatible with a given elastic config
"""
class ElasticityConfig:
"""
Elastic config object, constructed from a param dictionary that only contains elastic
config parameters, example below:
If elasticity is enabled, user must specify (at least) max_train_batch_size
and micro_batch_sizes.
{
"enabled": true,
"max_train_batch_size": 2000,
"micro_batch_sizes": [2,4,6],
"min_gpus": 1,
"max_gpus" : 10000
"min_time": 20
"ignore_non_elastic_batch_info": false
"version": 0.1
}
"""
def __init__(self, param_dict):
self.enabled = param_dict.get(ENABLED, ENABLED_DEFAULT)
if self.enabled:
if MAX_ACCEPTABLE_BATCH_SIZE in param_dict:
self.max_acceptable_batch_size = param_dict[MAX_ACCEPTABLE_BATCH_SIZE]
else:
raise ElasticityConfigError(f"Elasticity config missing {MAX_ACCEPTABLE_BATCH_SIZE}")
if MICRO_BATCHES in param_dict:
self.micro_batches = param_dict[MICRO_BATCHES]
else:
raise ElasticityConfigError(f"Elasticity config missing {MICRO_BATCHES}")
else:
self.max_acceptable_batch_size = param_dict.get(MAX_ACCEPTABLE_BATCH_SIZE,
MAX_ACCEPTABLE_BATCH_SIZE_DEFAULT)
self.micro_batches = param_dict.get(MICRO_BATCHES, MICRO_BATCHES_DEFAULT)
if not isinstance(self.micro_batches, list):
raise ElasticityConfigError(
f"Elasticity expected value of {MICRO_BATCHES} to be a "
f"list of micro batches, instead is: {type(self.micro_batches)}, containing: {self.micro_batches}")
if not all(map(lambda m: isinstance(m, int), self.micro_batches)):
raise ElasticityConfigError(f"Elasticity expected {MICRO_BATCHES} to only contain a list of integers, "
f"instead contains: f{self.micro_batches}")
if not all(map(lambda m: m > 0, self.micro_batches)):
raise ElasticityConfigError(f"Elasticity expected {MICRO_BATCHES} to only contain positive integers, "
f"instead contains: f{self.micro_batches}")
self.min_gpus = param_dict.get(MIN_GPUS, MIN_GPUS_DEFAULT)
self.max_gpus = param_dict.get(MAX_GPUS, MAX_GPUS_DEFAULT)
if self.min_gpus < 1 or self.max_gpus < 1:
raise ElasticityConfigError("Elasticity min/max gpus must be > 0, "
f"given min_gpus: {self.min_gpus}, max_gpus: {self.max_gpus}")
if self.max_gpus < self.min_gpus:
raise ElasticityConfigError("Elasticity min_gpus cannot be greater than max_gpus, "
f"given min_gpus: {self.min_gpus}, max_gpus: {self.max_gpus}")
self.model_parallel_size = param_dict.get(MODEL_PARALLEL_SIZE, MODEL_PARALLEL_SIZE_DEFAULT)
if self.model_parallel_size < 1:
raise ElasticityConfigError("Model-Parallel size cannot be less than 1, "
f"given model-parallel size: {self.model_parallel_size}")
self.num_gpus_per_node = param_dict.get(NUM_GPUS_PER_NODE, NUM_GPUS_PER_NODE_DEFAULT)
if self.num_gpus_per_node < 1:
raise ElasticityConfigError("Number of GPUs per node cannot be less than 1, "
f"given number of GPUs per node: {self.num_gpus_per_node}")
self.min_time = param_dict.get(MIN_TIME, MIN_TIME_DEFAULT)
if self.min_time < 0:
raise ElasticityConfigError(f"Elasticity min time needs to be >= 0: given {self.min_time}")
self.version = param_dict.get(VERSION, VERSION_DEFAULT)
self.prefer_larger_batch_size = param_dict.get(PREFER_LARGER_BATCH, PREFER_LARGER_BATCH_DEFAULT)
self.ignore_non_elastic_batch_info = param_dict.get(IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT)
def repr(self):
return self.__dict__
def __repr__(self):
return json.dumps(self.__dict__, sort_keys=True, indent=4)
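# --- Illustrative sketch (not part of the original module, never called): constructing an
# ElasticityConfig directly from a parameter dictionary like the one in the class docstring
# above. The values are made up for illustration only.
def _example_elasticity_config():
    cfg = ElasticityConfig({
        "enabled": True,
        "max_train_batch_size": 2000,
        "micro_batch_sizes": [2, 4, 6],
        "min_gpus": 1,
        "max_gpus": 10000,
    })
    # version falls back to VERSION_DEFAULT when not given explicitly
    return cfg.max_acceptable_batch_size, cfg.micro_batches, cfg.version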
| 4,703 | 41.378378 | 115 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/elasticity/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .elasticity import compute_elastic_config, elasticity_enabled, ensure_immutable_elastic_config
from .utils import is_torch_elastic_compatible
from .constants import ENABLED, ENABLED_DEFAULT, ELASTICITY
if is_torch_elastic_compatible():
from .elastic_agent import DSElasticAgent
| 383 | 33.909091 | 99 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/elasticity/elasticity.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import json
import numpy as np
import math
from packaging import version as pkg_version
from .config import ElasticityConfig, ElasticityConfigError, ElasticityError, \
ElasticityIncompatibleWorldSize
from .constants import ELASTICITY, ENABLED, ENABLED_DEFAULT, LATEST_ELASTICITY_VERSION, \
MINIMUM_DEEPSPEED_VERSION, DEEPSPEED_ELASTICITY_CONFIG
from ..git_version_info import version as __version__
from ..utils import logger
# Thirty eight smallest highly composite numbers. The list should
# be enough to support up to 720K batch size.
HCN_LIST = [
1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240, 360, 720, 840, 1260, 1680, 2520, 5040, 7560, 10080, 15120, 20160,
25200, 27720, 45360, 50400, 55440, 83160, 110880, 166320, 221760, 277200, 332640, 498960, 554400, 665280, 720720
]
def get_candidate_batch_sizes(base_list, max_acceptable_batch_size):
candidate_batch_size = []
for base in base_list:
if base >= max_acceptable_batch_size:
candidate_batch_size.append(base)
else:
value = max_acceptable_batch_size // base
index = np.argmax(np.asarray(HCN_LIST) > value)
candidate_batch_size.append(HCN_LIST[index - 1] * base)
candidate_batch_size = list(set(candidate_batch_size))
logger.info(f"Candidate batch size: {candidate_batch_size}")
return candidate_batch_size
def get_valid_gpus(batch_size, micro_batches, min_valid_gpus, max_valid_gpus):
valid_gpus = []
for micro_batch in micro_batches:
if batch_size % micro_batch == 0:
max_gpus = batch_size // micro_batch
if max_gpus >= min_valid_gpus and max_gpus <= max_valid_gpus:
valid_gpus.append(max_gpus)
# find all factors less than max_gpus / 2
for i in range(1, max_gpus // 2 + 1):
if i > max_valid_gpus:
break
if i < min_valid_gpus:
continue
if max_gpus % i == 0:
valid_gpus.append(i)
valid_gpus = set(valid_gpus)
valid_gpus = sorted(list(valid_gpus))
return valid_gpus
def get_best_candidates(candidate_batch_sizes, micro_batches, min_gpus, max_gpus, prefer_larger):
max_valid_gpus = 0
valid_gpus = None
final_batch_size = int(min(micro_batches))
for batch_size in candidate_batch_sizes:
current_valid_gpus = get_valid_gpus(batch_size, micro_batches, min_gpus, max_gpus)
if (len(current_valid_gpus) > max_valid_gpus or (len(current_valid_gpus) == max_valid_gpus and
((prefer_larger and batch_size > final_batch_size) or
(not prefer_larger and batch_size < final_batch_size)))):
max_valid_gpus = len(current_valid_gpus)
valid_gpus = current_valid_gpus
final_batch_size = batch_size
return final_batch_size, valid_gpus
def _get_compatible_gpus_v01(micro_batches,
max_acceptable_batch_size,
min_gpus=None,
max_gpus=None,
prefer_larger=True):
'''We use two heuristics to compute the batch size
1. We use the Lowest Common Multiple of the micro-batches
as the base batch size and scale it by a HCN such that the result is
the largest batch size less than the max_acceptable batch size
2. We use each of the micro batches as a base and scale it
by a HCN such that the result is the largest batch size less than the
max_acceptable batch size.
    We then use brute force to count the number of compatible GPU counts for
    each of the aforementioned cases, and return the batch size with the largest number of
    compatible GPU counts in the min-max GPU range if provided; otherwise
    we return the batch size with the largest number of total compatible GPU counts.
Returns:
final_batch_size
valid_gpus
'''
min_gpus = min_gpus or 1
max_gpus = max_gpus or max_acceptable_batch_size // min(micro_batches)
if not all(mb <= max_acceptable_batch_size for mb in micro_batches):
raise ValueError(f"All micro batches must be less than \
or equal to max_acceptable_batch_size: {max_acceptable_batch_size}")
lcm = np.lcm.reduce(micro_batches)
base_list = []
base_list.extend(micro_batches)
base_list.append(lcm)
candidate_batch_sizes = get_candidate_batch_sizes(base_list, max_acceptable_batch_size)
final_batch_size, valid_gpus = get_best_candidates(candidate_batch_sizes, micro_batches, min_gpus, max_gpus,
prefer_larger)
return final_batch_size, valid_gpus
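# --- Illustrative sketch (not part of the original module, never called): walks the v0.1
# heuristic above on a hypothetical config. The base list is the micro batches plus their
# LCM; each base is scaled by a highly composite number to stay under the cap, and the
# candidate with the most compatible GPU counts wins.
def _example_v01_selection():
    micro_batches = [2, 4, 6]
    max_acceptable_batch_size = 2000
    base_list = micro_batches + [int(np.lcm.reduce(micro_batches))]  # [2, 4, 6, 12]
    candidates = get_candidate_batch_sizes(base_list, max_acceptable_batch_size)
    final_batch_size, valid_gpus = _get_compatible_gpus_v01(micro_batches, max_acceptable_batch_size)
    return candidates, final_batch_size, valid_gpus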
def _get_compatible_gpus_v02(micro_batches,
max_acceptable_batch_size,
current_num_gpus,
min_gpus=None,
max_gpus=None,
prefer_larger=True,
num_gpus_per_node=1,
model_parallel_size=1):
'''
Returns:
final_batch_size
valid_gpus
micro-batch size
'''
if num_gpus_per_node % model_parallel_size != 0:
raise ElasticityError(
f"In Elasticity v0.2, number of GPUs per node:" \
f"{num_gpus_per_node} should be divisible by " \
f"model parallel size {model_parallel_size}")
def get_microbatch(final_batch_size):
candidate_microbatch = None
for micro_batch in micro_batches:
if final_batch_size // current_num_gpus % micro_batch == 0:
if candidate_microbatch is None:
candidate_microbatch = micro_batch
if prefer_larger and candidate_microbatch < micro_batch:
candidate_microbatch = micro_batch
return candidate_microbatch
dp_size_per_node = num_gpus_per_node // model_parallel_size
final_batch_size, valid_world_size = _get_compatible_gpus_v01(
micro_batches,
int(max_acceptable_batch_size / dp_size_per_node),
int(min_gpus / num_gpus_per_node),
int(max_gpus / num_gpus_per_node), # Passing number of max nodes as Elasticity v2 works at node level
prefer_larger=prefer_larger)
final_batch_size = int(final_batch_size) * dp_size_per_node
valid_dp_world_size = [i * dp_size_per_node for i in valid_world_size]
if current_num_gpus // model_parallel_size in valid_dp_world_size:
candidate_microbatch = get_microbatch(final_batch_size)
return final_batch_size, valid_dp_world_size, candidate_microbatch
current_dp_size = (current_num_gpus / num_gpus_per_node) * dp_size_per_node
candidate_batch_sizes = []
for micro_batch in micro_batches:
min_batch_size = micro_batch * current_dp_size
factor = math.floor(max_acceptable_batch_size / float(min_batch_size))
candidate_batch_sizes.append(factor * min_batch_size)
used_microbatch = None
if prefer_larger:
candidate_batch_size = max(candidate_batch_sizes)
else:
candidate_batch_size = min(candidate_batch_sizes)
candidate_microbatch = get_microbatch(candidate_batch_size)
return candidate_batch_size, [int(current_dp_size)], candidate_microbatch
def _compatible_ds_version_check(target_deepspeed_version: str):
min_version = pkg_version.parse(MINIMUM_DEEPSPEED_VERSION)
target_version = pkg_version.parse(target_deepspeed_version)
err_str = f"Target deepspeed version of {target_deepspeed_version} is not compatible " \
f"with minimum version {MINIMUM_DEEPSPEED_VERSION} supporting elasticity."
if target_version < min_version:
raise ElasticityError(err_str)
return True
def elasticity_enabled(ds_config: dict):
if ELASTICITY not in ds_config:
return False
return ds_config[ELASTICITY].get(ENABLED, ENABLED_DEFAULT)
def ensure_immutable_elastic_config(runtime_elastic_config_dict: dict):
"""
Ensure the resource scheduler saw the same elastic config we are using at runtime
"""
if DEEPSPEED_ELASTICITY_CONFIG in os.environ:
scheduler_elastic_config_dict = json.loads(os.environ[DEEPSPEED_ELASTICITY_CONFIG])
scheduler_elastic_config = ElasticityConfig(scheduler_elastic_config_dict)
runtime_elastic_config = ElasticityConfig(runtime_elastic_config_dict)
err_str = "Elastic config '{}={}' seen by resource scheduler does not match config passed to runtime {}={}"
if runtime_elastic_config.max_acceptable_batch_size != scheduler_elastic_config.max_acceptable_batch_size:
raise ElasticityConfigError(
err_str.format('max_acceptable_batch_size', scheduler_elastic_config.max_acceptable_batch_size,
'max_acceptable_batch_size', runtime_elastic_config.max_acceptable_batch_size))
if runtime_elastic_config.micro_batches != scheduler_elastic_config.micro_batches:
raise ElasticityConfigError(
err_str.format('micro_batches', scheduler_elastic_config.micro_batches, 'micro_batches',
runtime_elastic_config.micro_batches))
if runtime_elastic_config.version != scheduler_elastic_config.version:
raise ElasticityConfigError(
err_str.format('version', scheduler_elastic_config.version, 'version', runtime_elastic_config.version))
else:
logger.warning("Unable to find DEEPSPEED_ELASTICITY_CONFIG environment variable, cannot " \
"guarantee resource scheduler will scale this job using compatible GPU counts.")
def compute_elastic_config(ds_config: dict, target_deepspeed_version: str, world_size=0, return_microbatch=False):
"""Core deepspeed elasticity API. Given an elastic config (similar to the example below)
DeepSpeed will compute a total train batch size corresponding valid GPU count list that
provides a high level of elasticity. Elasticity in this case means we are safe to scale
the training job up/down across the GPU count list *without* any negative impacts on
training convergence. This is achievable primarily due to DeepSpeed's gradient accumulation
feature which allows us to decompose a global training batch size into:
micro-batch-size * gradient-accumulation-steps * world-size.
"elasticity": {
"enabled": true,
"max_train_batch_size": 2000,
"micro_batch_sizes": [2,4,6],
"min_gpus": 1,
"max_gpus" : 10000
"min_time": 20
"version": 0.1
}
Intended to be called both by scheduling infrastructure and deepspeed runtime.
For the same `ds_config` we should return deterministic results.
Args:
ds_config (dict): DeepSpeed config dictionary/json
target_deepspeed_version (str): When called from scheduling
infrastructure we want to ensure that the target deepspeed version is
compatible with the elasticity version used in the backend.
world_size (int, optional): Intended/current DP world size, will do some sanity
checks to ensure world size is actually valid with the config.
return_microbatch (bool, optional): whether to return micro batch size or not.
Raises:
ElasticityConfigError: Missing required elasticity config or elasticity disabled
ElasticityError: If target deepspeed version is not compatible with current version
Returns:
final_batch_size (int): total batch size used for training
valid_gpus (list(int)): list of valid GPU counts with this config
micro_batch_size (int, optional): if world_size is provided will return
specific micro batch size
"""
if not isinstance(ds_config, dict):
raise ValueError("Expected ds_config to be a dictionary but received " \
f"a {type(ds_config)}, containing: {ds_config}")
if ELASTICITY not in ds_config:
raise ElasticityConfigError(f"'{ELASTICITY}' is missing from config json," \
" please add it if running an elastic training job.")
elastic_config_dict = ds_config[ELASTICITY]
if not elastic_config_dict.get(ENABLED, ENABLED_DEFAULT):
raise ElasticityConfigError("Elasticity is disabled, please enable it " \
"('enabled':true) if running an elastic training job.")
elastic_config = ElasticityConfig(elastic_config_dict)
model_parallel_size = elastic_config.model_parallel_size
num_gpus_per_node = elastic_config.num_gpus_per_node
if model_parallel_size > 1 and float(elastic_config.version) != 0.2:
raise ElasticityConfigError(f"Elasticity V{elastic_config.version} " \
f"does not support model-parallel training. Given model-parallel size: " \
f"{model_parallel_size}")
if float(elastic_config.version) > LATEST_ELASTICITY_VERSION:
raise ElasticityConfigError("Attempting to run elasticity version " \
f"{elastic_config.version} but runtime only supports up " \
f"to {LATEST_ELASTICITY_VERSION}")
# Ensure target deepspeed version works with intended elasticity version
if not _compatible_ds_version_check(target_deepspeed_version):
raise ElasticityError("Unable to run elasticity on target deepspeed version of" \
f" {target_deepspeed_version}, currently {__version__}")
if float(elastic_config.version) == 0.1:
final_batch_size, valid_gpus = _get_compatible_gpus_v01(
micro_batches=elastic_config.micro_batches,
max_acceptable_batch_size=elastic_config.max_acceptable_batch_size,
min_gpus=elastic_config.min_gpus,
max_gpus=elastic_config.max_gpus,
prefer_larger=elastic_config.prefer_larger_batch_size)
# ensure batch size is int dtype
final_batch_size = int(final_batch_size)
elif float(elastic_config.version) == 0.2:
if world_size != 0:
current_num_gpus = world_size
else:
if "WORLD_SIZE" in os.environ and \
os.getenv('WORLD_SIZE').isnumeric():
current_num_gpus = int(os.getenv('WORLD_SIZE'))
else:
WORLD_SIZE = os.getenv('WORLD_SIZE')
raise ElasticityConfigError(
'Elasticity V 0.2 needs WORLD_SIZE '\
'to compute valid batch size. '\
'Either give it as argument to function compute_elastic_config '\
'or set it as an environment variable. '\
f'Value of WORLD_SIZE as environment variable is {WORLD_SIZE}')
final_batch_size, valid_gpus, candidate_microbatch_size = _get_compatible_gpus_v02(
micro_batches=elastic_config.micro_batches,
max_acceptable_batch_size=elastic_config.max_acceptable_batch_size,
current_num_gpus=current_num_gpus,
min_gpus=elastic_config.min_gpus,
max_gpus=elastic_config.max_gpus,
prefer_larger=elastic_config.prefer_larger_batch_size,
num_gpus_per_node=num_gpus_per_node,
model_parallel_size=model_parallel_size)
# ensure batch size is int dtype
final_batch_size = int(final_batch_size)
else:
raise NotImplementedError(f"Unable to find elastic logic for version: {elastic_config.version}")
logger.info(f"Valid World Size (GPUs / Model Parallel Size): {valid_gpus}")
if world_size > 0:
if world_size not in valid_gpus:
raise ElasticityIncompatibleWorldSize(f"World size ({world_size}) is not valid " \
f"with the current list of valid GPU counts: {valid_gpus}")
# Pick largest valid micro batch size
micro_batch_size = None
for mbsz in sorted(list(set(elastic_config.micro_batches)), reverse=True):
if final_batch_size // world_size % mbsz == 0:
micro_batch_size = mbsz
break
assert micro_batch_size is not None, "Unable to find divisible micro batch size" \
f" world_size={world_size}, final_batch_size={final_batch_size}, and " \
f" micro_batches={elastic_config.micro_batches}."
return final_batch_size, valid_gpus, micro_batch_size
if return_microbatch:
# Pick a valid micro batch size
if float(elastic_config.version) == 0.2:
return final_batch_size, valid_gpus, candidate_microbatch_size
else:
micro_batch_size = None
for mbsz in sorted(list(set(elastic_config.micro_batches)), reverse=True):
if final_batch_size // world_size % mbsz == 0:
micro_batch_size = mbsz
break
assert micro_batch_size is not None, "Unable to find divisible micro batch size" \
f" world_size={world_size}, final_batch_size={final_batch_size}, and " \
f" micro_batches={elastic_config.micro_batches}."
return final_batch_size, valid_gpus, micro_batch_size
return final_batch_size, valid_gpus
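# --- Illustrative sketch (not part of the original module): a minimal end-to-end call of
# compute_elastic_config with a hypothetical v0.1 elastic config. The config values are
# made up for illustration; in a real job they come from the DeepSpeed config json.
if __name__ == "__main__":
    _example_ds_config = {
        "elasticity": {
            "enabled": True,
            "max_train_batch_size": 2000,
            "micro_batch_sizes": [2, 4, 6],
            "min_gpus": 1,
            "max_gpus": 10000,
            "version": 0.1
        }
    }
    _final_batch_size, _valid_gpus = compute_elastic_config(_example_ds_config,
                                                            target_deepspeed_version=__version__)
    logger.info(f"example elastic config -> batch size {_final_batch_size}, valid GPU counts {_valid_gpus}")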
| 17,374 | 44.844327 | 119 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/utils/timer.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import time
from numpy import mean
from deepspeed.utils.logging import log_dist
from deepspeed.accelerator import get_accelerator
from deepspeed import comm as dist
try:
import psutil
PSUTILS_INSTALLED = True
except ImportError:
PSUTILS_INSTALLED = False
pass
class CudaEventTimer(object):
def __init__(self, start_event: get_accelerator().Event, end_event: get_accelerator().Event):
self.start_event = start_event
self.end_event = end_event
def get_elapsed_msec(self):
get_accelerator().current_stream().wait_event(self.end_event)
self.end_event.synchronize()
return self.start_event.elapsed_time(self.end_event)
class SynchronizedWallClockTimer:
"""Group of timers. Borrowed from Nvidia Megatron code"""
class Timer:
"""Timer."""
def __init__(self, name):
self.name_ = name
self.started_ = False
self.event_timers = []
self.use_host_timer = get_accelerator().is_synchronized_device()
self.start_event = None
self.elapsed_records = None
self.start_time = 0.0
self.end_time = 0.0
def start(self):
"""Start the timer."""
assert not self.started_, f"{self.name_} timer has already been started"
if self.use_host_timer:
self.start_time = time.time()
else:
event_class = get_accelerator().Event
self.start_event = event_class(enable_timing=True)
self.start_event.record()
self.started_ = True
def stop(self, reset=False, record=False):
"""Stop the timer."""
assert self.started_, "timer is not started"
event_class = get_accelerator().Event
if self.use_host_timer:
self.end_time = time.time()
self.event_timers.append(self.end_time - self.start_time)
else:
event_class = get_accelerator().Event
end_event = event_class(enable_timing=True)
end_event.record()
self.event_timers.append(CudaEventTimer(self.start_event, end_event))
self.start_event = None
self.started_ = False
def _get_elapsed_msec(self):
if self.use_host_timer:
self.elapsed_records = [et * 1000.0 for et in self.event_timers]
else:
self.elapsed_records = [et.get_elapsed_msec() for et in self.event_timers]
self.event_timers.clear()
return sum(self.elapsed_records)
def reset(self):
"""Reset timer."""
self.started_ = False
self.start_event = None
self.elapsed_records = None
self.event_timers.clear()
def elapsed(self, reset=True):
"""Calculate the elapsed time."""
started_ = self.started_
# If the timing in progress, end it first.
if self.started_:
self.stop()
# Get the elapsed time.
elapsed_ = self._get_elapsed_msec()
# Reset the elapsed time
if reset:
self.reset()
# If timing was in progress, set it back.
if started_:
self.start()
return elapsed_
def mean(self):
self.elapsed(reset=False)
return trim_mean(self.elapsed_records, 0.1)
def __init__(self):
self.timers = {}
def get_timers(self):
return self.timers
def __call__(self, name):
if name not in self.timers:
self.timers[name] = self.Timer(name)
return self.timers[name]
@staticmethod
def memory_usage():
alloc = "mem_allocated: {:.4f} GB".format(get_accelerator().memory_allocated() / (1024 * 1024 * 1024))
max_alloc = "max_mem_allocated: {:.4f} GB".format(get_accelerator().max_memory_allocated() /
(1024 * 1024 * 1024))
cache = "cache_allocated: {:.4f} GB".format(get_accelerator().memory_cached() / (1024 * 1024 * 1024))
max_cache = "max_cache_allocated: {:.4f} GB".format(get_accelerator().max_memory_cached() /
(1024 * 1024 * 1024))
return " | {} | {} | {} | {}".format(alloc, max_alloc, cache, max_cache)
def log(self, names, normalizer=1.0, reset=True, memory_breakdown=False, ranks=None):
"""Log a group of timers."""
assert normalizer > 0.0
string = f"rank={dist.get_rank()} time (ms)"
for name in names:
if name in self.timers:
elapsed_time = (self.timers[name].elapsed(reset=reset) / normalizer)
string += " | {}: {:.2f}".format(name, elapsed_time)
log_dist(string, ranks=ranks or [0])
def get_mean(self, names, normalizer=1.0, reset=True):
"""Get the mean of a group of timers."""
assert normalizer > 0.0
means = {}
for name in names:
if name in self.timers:
elapsed_time = (self.timers[name].mean() * 1000.0 / normalizer)
means[name] = elapsed_time
return means
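# --- Illustrative sketch (not part of the original module, never called): timers are
# created lazily by name; start/stop accumulate event timings and elapsed() drains them.
# In a distributed run, timers.log(["forward"]) would additionally print the group on rank 0.
def _example_timer_usage():
    timers = SynchronizedWallClockTimer()
    timers("forward").start()
    # ... the work being timed goes here ...
    timers("forward").stop()
    return timers("forward").elapsed(reset=True)  # elapsed time in milliseconds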
class ThroughputTimer:
def __init__(
self,
batch_size,
start_step=2,
steps_per_output=50,
monitor_memory=False,
logging_fn=None,
):
from deepspeed.utils import logger
self.start_time = 0
self.end_time = 0
self.started = False
self.batch_size = 1 if batch_size is None else batch_size
self.start_step = start_step
self.epoch_count = 0
self.micro_step_count = 0
self.global_step_count = 0
self.total_elapsed_time = 0
self.step_elapsed_time = 0
self.steps_per_output = steps_per_output
self.monitor_memory = monitor_memory
self.logging = logging_fn
if self.logging is None:
self.logging = logger.info
self.initialized = False
if self.monitor_memory and not PSUTILS_INSTALLED:
            raise ImportError("Unable to import 'psutil', please install the psutil package")
def update_epoch_count(self):
self.epoch_count += 1
self.micro_step_count = 0
def _init_timer(self):
self.initialized = True
def start(self):
self._init_timer()
self.started = True
if self.global_step_count >= self.start_step:
get_accelerator().synchronize()
self.start_time = time.time()
def stop(self, global_step=False, report_speed=True):
if not self.started:
return
self.started = False
self.micro_step_count += 1
if global_step:
self.global_step_count += 1
if self.start_time > 0:
get_accelerator().synchronize()
self.end_time = time.time()
duration = self.end_time - self.start_time
self.total_elapsed_time += duration
self.step_elapsed_time += duration
if global_step:
if report_speed and self.global_step_count % self.steps_per_output == 0:
self.logging(
"epoch={}/micro_step={}/global_step={}, RunningAvgSamplesPerSec={}, CurrSamplesPerSec={}, "
"MemAllocated={}GB, MaxMemAllocated={}GB".format(
self.epoch_count,
self.micro_step_count,
self.global_step_count,
self.avg_samples_per_sec(),
self.batch_size / self.step_elapsed_time,
round(get_accelerator().memory_allocated() / 1024**3, 2),
round(get_accelerator().max_memory_allocated() / 1024**3, 2),
))
if self.monitor_memory:
virt_mem = psutil.virtual_memory()
swap = psutil.swap_memory()
self.logging("epoch={}/micro_step={}/global_step={}, vm %: {}, swap %: {}".format(
self.epoch_count,
self.micro_step_count,
self.global_step_count,
virt_mem.percent,
swap.percent,
))
self.step_elapsed_time = 0
def avg_samples_per_sec(self):
if self.global_step_count > 0:
total_step_offset = self.global_step_count - self.start_step
avg_time_per_step = self.total_elapsed_time / total_step_offset
# training samples per second
return self.batch_size / avg_time_per_step
return float("-inf")
def trim_mean(data, trim_percent):
"""Compute the trimmed mean of a list of numbers.
Args:
data (list): List of numbers.
trim_percent (float): Percentage of data to trim.
Returns:
float: Trimmed mean.
"""
assert trim_percent >= 0.0 and trim_percent <= 1.0
n = len(data)
# Account for edge case of empty list
if len(data) == 0:
return 0
data.sort()
k = int(round(n * (trim_percent)))
return mean(data[k:n - k])
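# --- Illustrative sketch (not part of the original module, never called): a quick numeric
# check of trim_mean. With ten samples and trim_percent=0.1, one value is dropped from each
# end (k = round(10 * 0.1) = 1), so the outlier 100 does not skew the result.
def _example_trim_mean():
    data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 100]
    return trim_mean(data, 0.1)  # mean([2, ..., 9]) == 5.5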
| 9,477 | 34.766038 | 115 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/utils/comms_logging.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
from deepspeed.utils import log_dist
def get_caller_func(frame=3):
import sys
return sys._getframe(frame).f_code.co_name
def print_rank_0(message):
import deepspeed.comm as dist
if dist.get_rank() == 0:
print(message)
# Helper function to pretty-print message sizes
def convert_size(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
# Helper function to calculate algbw and busbw.
# See https://gist.github.com/jeffra/b5e80466b4c86be00ea3b6f130fb7a36 and https://github.com/NVIDIA/nccl-tests/blob/master/doc/PERFORMANCE.md
def calc_bw_log(comm_op, size, duration):
import deepspeed.comm as dist
n = dist.get_world_size()
tput = 0
busbw = 0
if comm_op == "all_to_all_single":
tput = (size / duration)
busbw = (size / duration) * ((n - 1) / n)
elif comm_op == "all_gather" or comm_op == "all_gather_into_tensor" or comm_op == "reduce_scatter" or comm_op == "reduce_scatter_tensor":
size *= n
tput = (size / duration)
busbw = (size / duration) * ((n - 1) / n)
elif comm_op == "all_reduce" or comm_op == "all_reduce_coalesced":
tput = (size * 2 / duration)
busbw = (size / duration) * (2 * (n - 1) / n)
elif comm_op == "send" or comm_op == "recv" or comm_op == "isend" or comm_op == "irecv" or comm_op == "broadcast" or comm_op == "reduce" or comm_op == "gather" or comm_op == "scatter" or comm_op == "barrier":
tput = (size / duration)
busbw = tput
else:
print_rank_0("wrong comm_op specified") # noqa: F821
exit(0)
# convert to Gbps
tput *= 8
busbw *= 8
tput /= 1e6
busbw /= 1e6
return tput, busbw
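# --- Illustrative sketch (hypothetical helper, not part of the original module): the same
# all_reduce bandwidth math as calc_bw_log above, but with the world size passed in so it
# can be checked without an initialized deepspeed.comm. With n=4 ranks, size_bytes=1e9 and
# duration_ms=250 this gives algbw=64 Gbps and busbw=48 Gbps.
def _example_allreduce_bw(size_bytes, duration_ms, n):
    algbw = (size_bytes * 2 / duration_ms) * 8 / 1e6
    busbw = (size_bytes / duration_ms) * (2 * (n - 1) / n) * 8 / 1e6
    return algbw, busbw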
class CommsLogger:
def __init__(self):
from deepspeed.comm.constants import COMMS_LOGGER_VERBOSE_DEFAULT, COMMS_LOGGER_DEBUG_DEFAULT, COMMS_LOGGER_PROF_OPS_DEFAULT, COMMS_LOGGER_PROF_ALL_DEFAULT, COMMS_LOGGER_ENABLED_DEFAULT
self.comms_dict = {}
self.verbose = COMMS_LOGGER_VERBOSE_DEFAULT
self.debug = COMMS_LOGGER_DEBUG_DEFAULT
self.prof_ops = COMMS_LOGGER_PROF_OPS_DEFAULT
self.prof_all = COMMS_LOGGER_PROF_ALL_DEFAULT
self.enabled = COMMS_LOGGER_ENABLED_DEFAULT
def configure(self, comms_config):
self.enabled = comms_config.comms_logger_enabled
if self.enabled:
self.verbose = comms_config.comms_logger.verbose
self.debug = comms_config.comms_logger.debug
self.prof_ops = comms_config.comms_logger.prof_ops
self.prof_all = comms_config.comms_logger.prof_all
# There are three settings for the op profiler:
# - Global profiling (profile all comms)
# - Op-type profiling (e.g. profile all all_reduce comms)
# - Op profiling (e.g. profile a specific all_reduce op)
def start_profiling_comms(self):
self.prof_all = True
    def stop_profiling_comms(self):
        self.prof_all = False
# E.g. start_profiling_op('all_reduce')
def start_profiling_op(self, op_name_list):
self.prof_ops = list(set(self.prof_ops) | set(op_name_list))
def stop_profiling_op(self, op_name_list):
self.prof_ops = [op for op in self.prof_ops if op not in op_name_list]
# Add log entry
def append(self, raw_name, record_name, latency, msg_size):
import deepspeed.comm as dist
algbw, busbw = calc_bw_log(raw_name, msg_size, latency)
if record_name in self.comms_dict.keys():
# If this comm_op has already been logged with this message size, just add to existing record
if msg_size in self.comms_dict[record_name].keys():
self.comms_dict[record_name][msg_size][0] += 1
self.comms_dict[record_name][msg_size][1].append(latency)
self.comms_dict[record_name][msg_size][2].append(algbw)
self.comms_dict[record_name][msg_size][3].append(busbw)
# If this is a new message size for this comm_op, add new record under existing comm_op
else:
self.comms_dict[record_name][msg_size] = [1, [latency], [algbw], [busbw]]
else:
# Create entirely new record
self.comms_dict[record_name] = {msg_size: [1, [latency], [algbw], [busbw]]}
# If verbose, print every comm op
# TODO: Add to tensorboard
if self.verbose:
n = dist.get_world_size()
log_str = f"rank={dist.get_rank()} | comm op: " + record_name + " | time (ms): {:.2f}".format(latency)
log_str += " | msg size: " + convert_size(msg_size)
log_str += " | algbw (Gbps): {:.2f} ".format(algbw)
log_str += " | busbw (Gbps): {:.2f} ".format(busbw)
log_dist(log_str, [0])
# Print summary at end of iteration, epoch, or training
def log_all(self, print_log=True, show_straggler=False):
import torch
from deepspeed.utils.timer import trim_mean
import deepspeed.comm as dist
from deepspeed.comm.reduce_op import ReduceOp
if print_log:
print(
f"{'Comm. Op': <20}{'Message Size': <20}{'Count': <20}{'Total Latency(ms)': <20}{'Avg Latency(ms)': <20}{'tput_avg (Gbps)': <20}{'busbw_avg (Gbps)': <20}"
)
for record_name in self.comms_dict.keys():
if print_log:
print(record_name)
for msg_size, vals in sorted(self.comms_dict[record_name].items()):
# vals[0] is the count for each msg size
count = vals[0]
# vals[1] is a list of latency records for each msg size
total_lat = sum(vals[1])
# vals[2] and vals[3] are the lists of algbw and busbw, respectively
# Get rid of outliers when we print
avg_lat = trim_mean(vals[1], 0.1)
avg_algbw = trim_mean(vals[2], 0.1)
avg_busbw = trim_mean(vals[3], 0.1)
if print_log:
print(
f"{' ': <20}{convert_size(msg_size): <20}{count: <20}{total_lat: <20.2f}{avg_lat: <20.2f}{avg_algbw: <20.2f}{avg_busbw: <20.2f}"
)
if show_straggler:
if print_log:
print("_______________________________")
print("Breakdown with straggler effect")
print("-------------------------------")
print(
f"{'Comm. Op': <20}{'Message Size': <20}{'Count': <20}{'Total comm lat(ms)': <20}{'Total straggler(ms)': <20}{'Avg comm lat(ms)': <20}{'Avg straggler(ms)': <20}"
)
for record_name in self.comms_dict.keys():
if print_log:
print(record_name)
for msg_size, vals in sorted(self.comms_dict[record_name].items()):
# vals[0] is the count for each msg size
count = vals[0]
# vals[1] is a list of latency records for each msg size
lats = torch.tensor(vals[1])
min_lats = torch.tensor(vals[1])
dist.all_reduce(min_lats, op=ReduceOp.MIN)
total_lat = min_lats.sum().item()
total_straggler = (lats - min_lats).sum().item()
avg_lat = trim_mean(min_lats.tolist(), 0.1)
avg_straggler = trim_mean((lats - min_lats).tolist(), 0.1)
if print_log:
print(
f"{' ': <20}{convert_size(msg_size): <20}{count: <20}{total_lat: <20.2f}{total_straggler: <20.2f}{avg_lat: <20.2f}{avg_straggler: <20.2f}"
)
| 8,024 | 42.61413 | 212 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/utils/exceptions.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
class DeprecatedException(Exception):
pass
| 144 | 15.111111 | 38 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/utils/nvtx.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.accelerator import get_accelerator
def instrument_w_nvtx(func):
"""decorator that causes an NVTX range to be recorded for the duration of the
function call."""
def wrapped_fn(*args, **kwargs):
get_accelerator().range_push(func.__qualname__)
ret_val = func(*args, **kwargs)
get_accelerator().range_pop()
return ret_val
return wrapped_fn
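# --- Illustrative sketch (not part of the original module): decorating a function so that
# every call is wrapped in an accelerator range (an NVTX range on CUDA), which makes it
# visible as a named span in profiler timelines such as Nsight Systems.
@instrument_w_nvtx
def _example_scaled_add(x, y, alpha=1.0):
    return x + alpha * y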
| 499 | 24 | 81 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/utils/logging.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import functools
import logging
import sys
import os
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
class LoggerFactory:
@staticmethod
def create_logger(name=None, level=logging.INFO):
"""create a logger
Args:
name (str): name of the logger
level: level of logger
Raises:
            ValueError if name is None
"""
if name is None:
raise ValueError("name for logger cannot be None")
formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] "
"[%(filename)s:%(lineno)d:%(funcName)s] %(message)s")
logger_ = logging.getLogger(name)
logger_.setLevel(level)
logger_.propagate = False
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(level)
ch.setFormatter(formatter)
logger_.addHandler(ch)
return logger_
logger = LoggerFactory.create_logger(name="DeepSpeed", level=logging.INFO)
@functools.lru_cache(None)
def warning_once(*args, **kwargs):
"""
This method is identical to `logger.warning()`, but will emit the warning with the same message only once
Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache.
    The assumption here is that all warning messages are unique across the code. If they aren't, then we need to switch to
    another type of cache that includes the caller frame information in the hashing function.
"""
logger.warning(*args, **kwargs)
logger.warning_once = warning_once
def print_configuration(args, name):
logger.info("{}:".format(name))
for arg in sorted(vars(args)):
dots = "." * (29 - len(arg))
logger.info(" {} {} {}".format(arg, dots, getattr(args, arg)))
def log_dist(message, ranks=None, level=logging.INFO):
from deepspeed import comm as dist
"""Log message when one of following condition meets
+ not dist.is_initialized()
+ dist.get_rank() in ranks if ranks is not None or ranks = [-1]
Args:
message (str)
ranks (list)
level (int)
"""
should_log = not dist.is_initialized()
ranks = ranks or []
my_rank = dist.get_rank() if dist.is_initialized() else -1
if ranks and not should_log:
should_log = ranks[0] == -1
should_log = should_log or (my_rank in set(ranks))
if should_log:
final_message = "[Rank {}] {}".format(my_rank, message)
logger.log(level, final_message)
def print_json_dist(message, ranks=None, path=None):
from deepspeed import comm as dist
"""Print message when one of following condition meets
+ not dist.is_initialized()
+ dist.get_rank() in ranks if ranks is not None or ranks = [-1]
Args:
message (str)
ranks (list)
path (str)
"""
should_log = not dist.is_initialized()
ranks = ranks or []
my_rank = dist.get_rank() if dist.is_initialized() else -1
if ranks and not should_log:
should_log = ranks[0] == -1
should_log = should_log or (my_rank in set(ranks))
if should_log:
message['rank'] = my_rank
import json
with open(path, 'w') as outfile:
json.dump(message, outfile)
os.fsync(outfile)
def get_current_level():
"""
Return logger's current log level
"""
return logger.getEffectiveLevel()
def should_log_le(max_log_level_str):
"""
Args:
max_log_level_str: maximum log level as a string
    Returns ``True`` if the current log_level is less than or equal to the specified log level. Otherwise ``False``.
Example:
``should_log_le("info")`` will return ``True`` if the current log level is either ``logging.INFO`` or ``logging.DEBUG``
"""
if not isinstance(max_log_level_str, str):
raise ValueError(f"{max_log_level_str} is not a string")
max_log_level_str = max_log_level_str.lower()
if max_log_level_str not in log_levels:
raise ValueError(f"{max_log_level_str} is not one of the `logging` levels")
return get_current_level() <= log_levels[max_log_level_str]
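# --- Illustrative sketch (not part of the original module, never called): typical use of
# the helpers above. log_dist prints only on the listed ranks (or on every rank for
# ranks=[-1]), and should_log_le gates work that is only worth doing at debug level.
def _example_logging_usage():
    log_dist("loss = 0.123", ranks=[0], level=logging.INFO)
    if should_log_le("debug"):
        logger.debug("expensive per-parameter statistics would be computed here")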
| 4,375 | 27.601307 | 127 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/utils/mixed_precision_linkage.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import types
from deepspeed.utils import get_full_hp_param, get_full_hp_grad, get_hp_fragment_mapping
def link_hp_params(lp_param_list, flat_hp_partition, gradient_dict, offload_gradient_dict, use_offload,
param_group_index, partition_start, partition_size, partition_optimizer_state, dp_group):
local_lp_param_and_offset = _init_lp_to_hp_mapping(lp_param_list, partition_start, partition_size, dp_group)
for lp_param, lp_start in local_lp_param_and_offset:
lp_param._hp_mapping = get_hp_fragment_mapping(lp_param, lp_start, flat_hp_partition, gradient_dict,
offload_gradient_dict, use_offload, param_group_index,
partition_start, partition_size, partition_optimizer_state)
def _init_lp_to_hp_mapping(lp_param_list, partition_start, partition_size, dp_group):
current_offset = 0
param_and_offset_list = []
partition_end = partition_start + partition_size
index_in_param_group = 0
for i, lp_param in enumerate(lp_param_list):
lp_param._hp_mapping = None
lp_param._dp_group = dp_group
lp_param.get_full_hp_param = types.MethodType(get_full_hp_param, lp_param)
lp_param.get_full_hp_grad = types.MethodType(get_full_hp_grad, lp_param)
# lp_param overlaps with partition if both are true
# 1) current_offset < partition_end,
# 2) current_offset + lp_param.numel() >= partition_start
lp_param_end = current_offset + lp_param.numel()
if current_offset < partition_end and lp_param_end > partition_start:
param_and_offset_list.append((lp_param, current_offset))
lp_param._index_in_param_group = index_in_param_group
# Indices for params in this partition/GPU
index_in_param_group += 1
current_offset += lp_param.numel()
return param_and_offset_list
| 2,041 | 46.488372 | 114 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/utils/groups.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# The file has been adapted from https://github.com/NVIDIA/Megatron-LM and retains the following license from the original file
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Support different forms of parallelism in DeepSpeed using multiple process groups.
Given that there are multiple scenarios and use-cases, this file is going to be updated
frequently. For now, the group creation needed for the training scenario is being implemented.
For inference and other new scenarios, the code will be either reused or added to this file.
"""
from deepspeed import comm as dist
from deepspeed.utils import log_dist
from deepspeed.utils.exceptions import DeprecatedException
from deepspeed.accelerator import get_accelerator
# Expert parallel group that the current rank belongs to.
_EXPERT_PARALLEL_GROUP = {}
# Expert data parallel group that the current rank belongs to.
_EXPERT_DATA_PARALLEL_GROUP = {}
# dist world group needs to be cloned for some cases
_WORLD_GROUP = None
# ZeRO parameter partitioning group that the current rank belongs to.
_ZERO_PARAM_INTRA_PARALLEL_GROUP = None
# global object to maintain mpu object if passed by a Megatron client
mpu = None
# global object that stores tensor parallel world size for experts
expert_tensor_parallel_world_size = 1
# All to All quantized graident communication groups
_ALL_TO_ALL_GROUP = {}
_DATA_PARALLEL_GROUP = None
# Deprecated groups initialize function.
def initialize(ep_size=1, mpu=None):
""" Deprecated function. Retained to inform the users."""
raise DeprecatedException(
"Please do not use the groups.initialize() API as it is deprecated. Instead, pass the desired ep_size to deepspeed.moe.layer.MoE(..,ep_size,..)"
)
def _ensure_divisibility(numerator, denominator):
"""Ensure that numerator is divisible by the denominator."""
assert numerator % denominator == 0, '{} is not divisible by {}'.format(numerator, denominator)
# Not currently used. Helper function to create a model (tensor) parallel group.
def _create_model_parallel(model_parallel_size_):
"""
Initialize model data parallel groups.
Arguments:
model_parallel_size: number of GPUs used to parallelize model.
Returns:
Tuple of data parallel group and model parallel group
Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
use 2 GPUs to parallelize the model. The present function will
create 4 model parallel groups and 2 data parallel groups as:
4 model parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7]
2 data parallel groups:
[g0, g2, g4, g6], [g1, g3, g5, g7]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
log_dist(f'Creating model parallel group with size {model_parallel_size_}', ranks=[0])
# Get world size and rank. Ensure some consistencies.
assert dist.is_initialized()
world_size = dist.get_world_size()
model_parallel_size = min(model_parallel_size_, world_size)
_ensure_divisibility(world_size, model_parallel_size)
rank = dist.get_rank()
_DATA_PARALLEL_GROUP = None
_MODEL_PARALLEL_GROUP = None
# Build the data parallel groups.
for i in range(model_parallel_size):
ranks = range(i, world_size, model_parallel_size)
group = dist.new_group(ranks)
if i == (rank % model_parallel_size):
_DATA_PARALLEL_GROUP = group
# Build the model parallel groups.
for i in range(world_size // model_parallel_size):
ranks = range(i * model_parallel_size, (i + 1) * model_parallel_size)
group = dist.new_group(ranks)
if i == (rank // model_parallel_size):
_MODEL_PARALLEL_GROUP = group
return _DATA_PARALLEL_GROUP, _MODEL_PARALLEL_GROUP
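# --- Illustrative sketch (hypothetical helper, not part of the original module): the same
# rank layout computed by _create_model_parallel above, reproduced as pure arithmetic so it
# can be inspected without an initialized process group. For world_size=8 and
# model_parallel_size=2 it yields the groups listed in the docstring.
def _example_model_parallel_rank_layout(world_size=8, model_parallel_size=2):
    data_parallel = [list(range(i, world_size, model_parallel_size)) for i in range(model_parallel_size)]
    model_parallel = [
        list(range(i * model_parallel_size, (i + 1) * model_parallel_size))
        for i in range(world_size // model_parallel_size)
    ]
    return data_parallel, model_parallel  # ([[0,2,4,6],[1,3,5,7]], [[0,1],[2,3],[4,5],[6,7]])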
def _create_expert_and_data_parallel(expert_parallel_size_):
"""
Create expert and data parallel groups.
Note: Caller of this function is responsible to check if the groups already exist.
Example - E + D parallel
world_size = 16
expert_parallel_size = 2 # number of experts in same group
expert_data_parallel_group = [0,2,4,6,8,10,12,14], [1,3,5,7,9,11,13,15] - all reduce is only on MoE params
expert_parallel_group = [0, 1], [2,3], [4,5], [6,7], [8,9] - no all reduce, but all to all
data_parallel_group = [0,1,...,15] - all reduce is only on non-MoE
"""
assert dist.is_initialized()
log_dist(f'Creating expert and data parallel groups with size {expert_parallel_size_}', ranks=[0])
world_size = dist.get_world_size()
rank = dist.get_rank()
_ensure_divisibility(world_size, expert_parallel_size_)
group_name = f"ep_size_{expert_parallel_size_}"
# Build the expert data parallel groups.
global _EXPERT_DATA_PARALLEL_GROUP
# Only create group if it does not already exist
if group_name not in _EXPERT_DATA_PARALLEL_GROUP:
for i in range(expert_parallel_size_):
ranks = range(i, world_size, expert_parallel_size_)
group = dist.new_group(ranks)
log_dist(f'Creating expert data parallel process group named {group_name} with ranks: {list(ranks)}', [0])
if i == (rank % expert_parallel_size_):
_EXPERT_DATA_PARALLEL_GROUP[group_name] = group
# Build the expert parallel groups.
global _EXPERT_PARALLEL_GROUP
# Only create group if it does not already exist
if group_name not in _EXPERT_PARALLEL_GROUP:
for i in range(world_size // expert_parallel_size_):
ranks = range(i * expert_parallel_size_, (i + 1) * expert_parallel_size_)
group = dist.new_group(ranks)
log_dist(f'creating expert parallel process group named {group_name} with ranks: {list(ranks)}', [0])
if i == (rank // expert_parallel_size_):
_EXPERT_PARALLEL_GROUP[group_name] = group
def _get_expert_parallel_ranks(world_size, model_parallel_size_, expert_parallel_size_):
"""Generate expert parallel and expert data parallel group ranks list.
Example - E + M + D parallel
world_size = 16
model_degree = 2
expert_degree = 4 # number of experts in same group
mp_group = [0, 1], [2,3], [4,5] ...
data_parallel_group =[0,2,4,6,8,10, 12,14], [1,3,5,7,9,11,13,15]
expert_parallel_group = [0,2,4,6], [8,10,12,14] [1,3,5,7], [9,11,13,15]
expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15]
Args:
world_size (int): Distributed world size.
model_parallel_size_ (int): Model parallel group size.
expert_parallel_size_ (int): Expert parallel group size.
Returns:
Expert parallel group ranks and Expert data parallel group ranks list.
"""
_ensure_divisibility(world_size, model_parallel_size_)
dp_world_size = world_size // model_parallel_size_
_ensure_divisibility(dp_world_size, expert_parallel_size_)
# Generate data parallel groups
data_parallel_groups = []
dp_group_size = model_parallel_size_
for i in range(dp_group_size):
data_parallel_groups.append(list(range(i, world_size, dp_group_size)))
expert_parallel_groups = []
expert_data_parallel_groups = []
for dp_ranks in data_parallel_groups:
# partition of expert parallel groups, e.g. [0,2,4,6], [8,10,12,14]
part_ep_groups = []
for i in range(0, dp_world_size, expert_parallel_size_):
part_ep_groups.append(dp_ranks[i:i + expert_parallel_size_])
expert_parallel_groups.extend(part_ep_groups)
# zip part_ep_groups get expert data parallel ranks, e.g [0,8],[2,10],[4,12],[6,14]
for expert_dp_ranks in zip(*part_ep_groups):
expert_data_parallel_groups.append(list(expert_dp_ranks))
return expert_parallel_groups, expert_data_parallel_groups
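# --- Illustrative sketch (not part of the original module, never called): reproduces the
# E + M + D example from the docstring above. This is pure rank arithmetic, so it runs
# without an initialized process group.
def _example_expert_parallel_ranks():
    ep_groups, ep_dp_groups = _get_expert_parallel_ranks(world_size=16,
                                                         model_parallel_size_=2,
                                                         expert_parallel_size_=4)
    # ep_groups    == [[0, 2, 4, 6], [8, 10, 12, 14], [1, 3, 5, 7], [9, 11, 13, 15]]
    # ep_dp_groups == [[0, 8], [2, 10], [4, 12], [6, 14], [1, 9], [3, 11], [5, 13], [7, 15]]
    return ep_groups, ep_dp_groups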
def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu):
"""
Create expert and data parallel groups based on MPU (model parallel) group.
Note: Caller of this function is responsible to check if the groups already exist.
Example - E + M + D parallel
world_size = 16
model_degree = 2
expert_degree = 4 # number of experts in same group
mp_group = [0, 1], [2,3], [4,5] ...
data_parallel_group =[0,2,4,6,8,10, 12,14], [1,3,5,7,9,11,13,15]
expert_parallel_group = [0,2,4,6], [8,10,12,14] [1,3,5,7], [9,11,13,15]
expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15]
"""
assert dist.is_initialized(), "dist is not initialized"
model_parallel_size_ = mpu.get_model_parallel_world_size()
global expert_tensor_parallel_world_size
expert_tensor_parallel_world_size = model_parallel_size_
world_size = dist.get_world_size()
rank = dist.get_rank()
dp_world_size = mpu.get_data_parallel_world_size()
dp_rank = mpu.get_data_parallel_rank()
_ensure_divisibility(world_size, model_parallel_size_)
_ensure_divisibility(dp_world_size, expert_parallel_size_)
log_dist(
f"Creating deepspeed groups with model parallel size {model_parallel_size_}, expert parallel size {expert_parallel_size_}, world size {world_size}, dp world size {dp_world_size}",
[0])
global _EXPERT_PARALLEL_GROUP, _EXPERT_DATA_PARALLEL_GROUP
# Get world size and rank. Ensure some consistencies.
_DATA_PARALLEL_GROUP = mpu.get_data_parallel_group()
_MODEL_PARALLEL_GROUP = mpu.get_model_parallel_group()
group_name = f"ep_size_{expert_parallel_size_}"
# Only create groups if they don't already exist
# Need to check conditions outside the group creation loop because of the way torch.dist group creation works
if group_name not in _EXPERT_DATA_PARALLEL_GROUP and group_name not in _EXPERT_PARALLEL_GROUP:
expert_parallel_groups, expert_data_parallel_groups = _get_expert_parallel_ranks(
world_size, model_parallel_size_, expert_parallel_size_)
for ranks in expert_parallel_groups:
group = dist.new_group(ranks)
if rank in list(ranks):
_EXPERT_PARALLEL_GROUP[group_name] = group
for ranks in expert_data_parallel_groups:
group = dist.new_group(ranks)
if rank in list(ranks):
_EXPERT_DATA_PARALLEL_GROUP[group_name] = group
def _get_max_expert_size():
"""Get the maximum ep_size from all the created groups."""
assert _EXPERT_PARALLEL_GROUP is not None, "Warning! Process group not initialized"
keylist = []
for key in _EXPERT_PARALLEL_GROUP.keys():
# index 2 is ep_size in the group name: ep_size_<ep_size>
index = 2
keylist.append(int(key.split('_')[index]))
return max(keylist) if len(keylist) > 0 else None
def _get_max_expert_size_name():
"""Get the name of the group with max. ep_size"""
return f'ep_size_{_get_max_expert_size()}'
def _get_max_expert_parallel_group():
"""Get the max expert parallel size."""
return _get_expert_parallel_group(_get_max_expert_size_name())
def _get_expert_parallel_group(group_name):
"""Get the expert parallel group the caller rank belongs to."""
assert group_name in _EXPERT_PARALLEL_GROUP, \
'expert parallel group is not initialized'
return _EXPERT_PARALLEL_GROUP[group_name]
def _get_expert_parallel_group_dict():
"""Get the expert parallel group dict."""
return _EXPERT_PARALLEL_GROUP
def _get_expert_data_parallel_group(group_name):
"""Get the expert data parallel group the caller rank belongs to."""
assert group_name in _EXPERT_DATA_PARALLEL_GROUP, \
'expert data parallel group is not initialized'
return _EXPERT_DATA_PARALLEL_GROUP[group_name]
def _get_expert_data_parallel_group_dict():
"""Get the expert data parallel group dict."""
return _EXPERT_DATA_PARALLEL_GROUP
def _clone_world_group():
"""Create a clone of the world group
Note: We need to clone the dist world group because we
use dist.get_global_rank() utility function in DeepSpeed at many places.
As that function does not work on dist.group.WORLD, we
need to keep a clone of it.
"""
assert dist.is_initialized(), "dist is not initialized"
global _WORLD_GROUP
if _WORLD_GROUP is None:
# If not cloned already, clone the world group
_WORLD_GROUP = dist.new_group(ranks=range(dist.get_world_size()))
return _WORLD_GROUP
def _get_local_all_to_all_group():
assert dist.is_initialized(), 'dist is not initialized'
global _ALL_TO_ALL_GROUP
device_per_node = get_accelerator().device_count()
num_local = dist.get_world_size() // device_per_node
if num_local == 0 and dist.get_world_size() > 0:
assert dist.get_world_size() >= 1, 'num_gpus must >=1, cannot initialize All-To-All'
cur_rank = []
for i in range(dist.get_world_size()):
cur_rank.append(i)
_ALL_TO_ALL_GROUP['local_0'] = dist.new_group(ranks=cur_rank)
elif num_local == 1:
assert dist.get_world_size(
) == device_per_node, 'num_gpus not equal to device per node, cannot initialize All-To-All'
_ALL_TO_ALL_GROUP['local_0'] = dist.new_group(ranks=[i for i in range(device_per_node)])
else:
assert dist.get_world_size() > device_per_node, 'num_nodes<2 cannot initialize All-To-All'
for i in range(num_local):
local_rank = [j + device_per_node * i for j in range(device_per_node)]
_ALL_TO_ALL_GROUP[f"local_{i}"] = dist.new_group(ranks=local_rank)
for i in range(device_per_node):
cur_rank = []
for j in range(num_local):
cur_rank.append(i + j * device_per_node)
_ALL_TO_ALL_GROUP[f"global_{i}"] = dist.new_group(ranks=cur_rank)
return _ALL_TO_ALL_GROUP
def _get_data_parallel_group():
"""Get the data parallel group the caller rank belongs to."""
assert dist.is_initialized(), 'dist is not initialized'
global mpu
if mpu is not None:
return mpu.get_data_parallel_group()
# Return the clone of dist world group
return _clone_world_group()
def _get_broadcast_src_rank():
return dist.get_global_rank(_get_data_parallel_group(), 0)
def _get_expert_broadcast_src_rank(group_name):
return dist.get_global_rank(_get_expert_data_parallel_group(group_name), 0)
def _get_expert_parallel_world_size(group_name):
"""Return world size for the expert parallel group."""
return dist.get_world_size(group=_get_expert_parallel_group(group_name))
def _get_expert_data_parallel_world_size(group_name):
"""Return world size for the expert data parallel group."""
return dist.get_world_size(group=_get_expert_data_parallel_group(group_name))
def _get_expert_parallel_rank(group_name):
"""Return my rank for the expert parallel group."""
return dist.get_rank(group=_get_expert_parallel_group(group_name))
def _get_expert_parallel_src_rank(group_name):
"""Calculate the global rank corresponding to a local rank zero
in the expert parallel group."""
global_rank = dist.get_rank()
local_world_size = _get_expert_parallel_world_size(group_name)
return (global_rank // local_world_size) * local_world_size
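# Worked example (illustrative only) of the arithmetic above, assuming an expert parallel world
# size of 4 with contiguous groups: a caller with global rank 5 maps to src rank
# (5 // 4) * 4 == 4, i.e. the first rank of its expert parallel group [4, 5, 6, 7].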
def _get_expert_data_parallel_rank(group_name):
"""Return my rank for the expert data parallel group."""
return dist.get_rank(group=_get_expert_data_parallel_group(group_name))
def _get_data_parallel_world_size():
"""Return world size for the data parallel group."""
global mpu
if mpu is not None:
return mpu.get_data_parallel_world_size()
return dist.get_world_size(group=_get_data_parallel_group())
def _get_model_parallel_world_size():
"""Return world size for the model parallel group."""
global mpu
if mpu is not None:
return mpu.get_model_parallel_world_size()
return 1
def _get_data_parallel_rank():
"""Return my rank for the data parallel group."""
global mpu
if mpu is not None:
return mpu.get_data_parallel_rank()
return dist.get_rank(group=_get_data_parallel_group())
def _get_expert_model_parallel_world_size():
global expert_tensor_parallel_world_size
return expert_tensor_parallel_world_size
def _create_zero_param_parallel_group(group_size):
"""
Create parameter partitioning group within ZeRO data parallel groups.
Example - ZP + D parallel
world_size = 16
    zero_hpz_partition_size = 2  # number of ranks with replicated params (dual partitioning)
    zero_param_intra_parallel_group = [0, 1], [2, 3], ..., [14, 15] - segmented (subgroup) with replicated partitions
    data_parallel_group = [0, 1, ..., 15] - gradient all-reduce spans the full ZeRO data parallel group
"""
assert dist.is_initialized()
global _ZERO_PARAM_INTRA_PARALLEL_GROUP
# Only create group if it does not already exist
assert _ZERO_PARAM_INTRA_PARALLEL_GROUP is None, \
'ZeRO parameter intra parallel group is already initialized'
world_size = dist.get_world_size()
rank = dist.get_rank()
zero_param_parallel_size_ = min(group_size, world_size)
_ensure_divisibility(world_size, zero_param_parallel_size_)
# Build the ZeRO param intra parallel groups.
for i in range(world_size // zero_param_parallel_size_):
ranks = range(i * zero_param_parallel_size_, (i + 1) * zero_param_parallel_size_)
group = dist.new_group(ranks)
if i == (rank // zero_param_parallel_size_):
_ZERO_PARAM_INTRA_PARALLEL_GROUP = group
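# A minimal sketch (pure arithmetic, no dist calls) of the grouping produced above for the
# docstring's example of world_size=16 and group_size=2:
#     groups = [list(range(i * 2, (i + 1) * 2)) for i in range(16 // 2)]
#     # -> [[0, 1], [2, 3], ..., [14, 15]]; rank r is placed in groups[r // 2]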
def _get_zero_param_intra_parallel_group():
"""Get the ZeRO parameter partitioning intra parallel group the caller rank belongs to."""
#assert _ZERO_PARAM_INTRA_PARALLEL_GROUP is not None, \
# 'ZeRO parameter partitioning group is not initialized'
#TODO: Add warning
return _ZERO_PARAM_INTRA_PARALLEL_GROUP
def _zero_param_parallel_is_initialized():
"""Check if ZeRO data parallel with parameter partititioning groups are initialized."""
###TODO: assert that MPU is not set
    if _ZERO_PARAM_INTRA_PARALLEL_GROUP is None and _DATA_PARALLEL_GROUP is None:
        return False
    return True
def _get_zero_param_intra_parallel_rank_in_mygroup():
"""Return my rank for the ZeRO parameter inter parallel group."""
return dist.get_rank(group=_get_zero_param_intra_parallel_group())
def _get_zero_param_intra_parallel_group_world_size():
"""Return world size for the ZeRO parameter parallel group."""
return dist.get_world_size(group=_get_zero_param_intra_parallel_group())
def _get_zero_param_intra_parallel_group_ranks():
"""Return all ranks for the ZeRO parameter intra parallel group."""
return dist.get_all_ranks_from_group(group=_get_zero_param_intra_parallel_group())
| 19,762 | 39.664609 | 187 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/utils/types.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from enum import IntEnum
class ActivationFuncType(IntEnum):
UNKNOWN = 0
GELU = 1
ReLU = 2
GATED_GELU = 3
GATED_SILU = 4
GATED_ACTIVATION_TYPES = [
ActivationFuncType.GATED_GELU,
ActivationFuncType.GATED_SILU,
]
class NormType(IntEnum):
UNKNOWN = 0
LayerNorm = 1
GroupNorm = 2
RMSNorm = 3
| 434 | 14.535714 | 38 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/utils/init_on_device.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from typing import Callable
from torch import Tensor
from packaging import version as pkg_version
class OnDevice(object):
"""
    Create modules/tensors with specific devices and dtypes. Examples:
    Create MyModule which consists of many different sub-modules and parameters. In this case we can create
    MyModule as a collection of 'meta' tensors by passing `device='meta'` or we can create the module _directly_
    on a CUDA device by passing `device=f'cuda:{local_rank}'` (where `local_rank` is the local GPU id).
with OnDevice(dtype=torch.float16, device='meta'):
model = MyModel()
with OnDevice(dtype=torch.float16, device=f'cuda:{local_rank}'):
model = MyModel()
"""
_orig_torch_empty = torch.empty
_orig_torch_zeros = torch.zeros
_orig_torch_ones = torch.ones
_orig_torch_full = torch.full
def __init__(self, dtype, device="meta", enabled=True):
self.dtype = dtype
self.enabled = enabled
self.device = device
if device == "meta":
if pkg_version.parse('1.10') > pkg_version.parse(torch.__version__):
raise NotImplementedError("Meta tensor support is not available, please upgrade to torch 1.10+")
def fp_tensor_constructor(self, fn: Callable, target_fp_dtype: torch.dtype) -> Callable:
def wrapped_fn(*args, **kwargs) -> Tensor:
if kwargs.get("device", None) is None:
kwargs['device'] = self.device
tensor: Tensor = fn(*args, **kwargs)
if tensor.is_floating_point():
tensor = tensor.to(target_fp_dtype)
return tensor
return wrapped_fn
def get_new_tensor_fn_for_dtype(self, dtype: torch.dtype) -> Callable:
def new_tensor(cls, *args) -> Tensor:
tensor = OnDevice._orig_torch_empty(0, device=self.device).new_empty(*args)
if tensor.is_floating_point():
tensor = tensor.to(dtype)
return tensor
return new_tensor
def __enter__(self):
if not self.enabled:
return
torch.Tensor.__old_new__ = torch.Tensor.__new__
torch.Tensor.__new__ = self.get_new_tensor_fn_for_dtype(self.dtype)
torch.empty = self.fp_tensor_constructor(self._orig_torch_empty, self.dtype)
torch.zeros = self.fp_tensor_constructor(self._orig_torch_zeros, self.dtype)
torch.ones = self.fp_tensor_constructor(self._orig_torch_ones, self.dtype)
torch.full = self.fp_tensor_constructor(self._orig_torch_full, self.dtype)
def __exit__(self, exc_type, exc_value, traceback):
if not self.enabled:
return
torch.Tensor.__new__ = torch.Tensor.__old_new__
torch.empty = self._orig_torch_empty
torch.zeros = self._orig_torch_zeros
torch.ones = self._orig_torch_ones
torch.full = self._orig_torch_full
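# A minimal usage sketch (assuming torch >= 1.10 for meta-tensor support) of the context manager
# above; inside the block the patched tensor constructors land on the target device and dtype:
#     with OnDevice(dtype=torch.float16, device='meta'):
#         t = torch.empty(4, 4)
#     # t.device == torch.device('meta') and t.dtype == torch.float16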
| 3,004 | 35.646341 | 112 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/utils/numa.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import distutils.spawn
import os
import psutil
import subprocess
# return a list of lists mapping cores to numa nodes
# [
#     [ cores for numa 0 ]
#     [ cores belonging to numa 1 ]
#     ...
# ]
def get_numa_cores():
    ret = []
    num_numas = 0
output = subprocess.check_output(['numactl', '--hardware']).decode("utf-8")
lines = output.split('\n')
for line in lines:
if line.startswith('available:'):
num_numas = int(line.split(' ')[1])
break
for numa in range(num_numas):
for line in lines:
if line.startswith(f'node {numa} cpus:'):
cores = line.split(' ')[3:]
ret.append([int(core) for core in cores])
return ret
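# Example of the parsing above (hedged: exact `numactl --hardware` output varies by system).
# Given output containing
#     available: 2 nodes (0-1)
#     node 0 cpus: 0 1 2 3
#     node 1 cpus: 4 5 6 7
# get_numa_cores() would return [[0, 1, 2, 3], [4, 5, 6, 7]].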
def check_for_numactl_pkg():
libs = dict(
dpkg=["-l", "numactl", "apt"],
pacman=["-Q", "numactl", "pacman"],
rpm=["-q", "numactl", "yum"],
)
found = False
for pkgmgr, data in libs.items():
flag, lib, tool = data
path = distutils.spawn.find_executable(pkgmgr)
if path is not None:
cmd = f"{pkgmgr} {flag} {lib}"
result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
if result.wait() == 0:
found = True
else:
print(f"please install the {lib} package with {tool}")
break
return found
def parse_range(rng):
try:
value = int(rng)
return range(value, value + 1)
except ValueError:
# value is not a single number
parts = rng.split('-')
if len(parts) != 2:
raise ValueError("Bad range: '%s', range must be either a number or two number separated by dash" %
(rng, ))
start = int(parts[0])
end = int(parts[1])
if start > end:
raise ValueError("Bad range: '%s', range end must larger than or equal to start" % (rng, ))
return range(start, end + 1)
# parse comma and dash separated range list into list
# i.e. "0,2-4,6" --> [0, 2, 3, 4, 6]
# rules:
# 1. Items in the range list must be comma separated; each item is either a single number
#    or a range marked by two numbers (both numbers are included in the range)
# 2. Sub-ranges must be in ascending order and must not overlap with each other
# 3. No space in the range expression
def parse_range_list(range_str):
number_list = []
last = -1
range_list = range_str.split(',')
for sub_range in range_list:
sub_number_list = parse_range(sub_range)
if sub_number_list[0] <= last:
raise ValueError(
"Bad range: '%s', sub ranges must not overlap with each other and should be in ascend order" %
(range_str, ))
last = sub_number_list[-1]
number_list.extend(sub_number_list)
return number_list
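# Usage example (illustrative only), matching the rules above:
#     parse_range_list("0,2-4,6")   # -> [0, 2, 3, 4, 6]
#     parse_range_list("2-4,0")     # raises ValueError (sub ranges out of order)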
def get_numactl_cmd(bind_core_list, num_local_procs, local_rank):
numactl_cmd = []
check_for_numactl_pkg()
if 'KMP_AFFINITY' in os.environ.keys():
raise ValueError("Environment variable KMP_AFFINITY conflicts with numactl "
"because it interfere with how many CPU cores numactl can set. "
"Unset KMP_AFFINITY before launching deepspeed.\n\n"
"\t$ unset KMP_AFFINITY\n"
"\t$ deepspeed <deepspeed command parameters>")
if bind_core_list is not None:
core_list = parse_range_list(bind_core_list)
total_cores = len(core_list)
else:
total_cores = psutil.cpu_count(logical=False)
core_list = range(total_cores)
cores_per_rank = total_cores // num_local_procs
assert cores_per_rank >= 1, "At least one core needs to be assigned to each rank"
core_list_for_rank = core_list[cores_per_rank * local_rank:cores_per_rank * (local_rank + 1)]
numactl_cmd.append("numactl")
# check if all cores belong to same numa, if true, bind process to that numa domain with -m parameter
numa_cores = get_numa_cores()
num_numas = len(numa_cores)
for i in range(num_numas):
if set(core_list_for_rank) <= set(numa_cores[i]):
numactl_cmd.append("-m")
numactl_cmd.append(f"{i}")
break
numactl_cmd.append("-C")
last_core = core_list_for_rank[0]
first_core = last_core
core_list_str = f"{last_core}"
for core_id in core_list_for_rank[1:]:
if core_id == last_core + 1:
last_core = core_id
continue
else:
if first_core == last_core:
core_list_str = f"{core_list_str},{core_id}"
else:
core_list_str = f"{core_list_str}-{last_core},{core_id}"
first_core = core_id
last_core = core_id
if first_core != last_core:
core_list_str = f"{core_list_str}-{last_core}"
numactl_cmd.append(f"{core_list_str}")
return cores_per_rank, numactl_cmd
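# A hedged usage sketch of the helper above, assuming KMP_AFFINITY is unset, bind_core_list="0-7",
# num_local_procs=2 and local_rank=1: the rank is pinned to cores 4-7 and, if those cores all
# live on a single NUMA node (system dependent), memory is bound to that node as well:
#     cores_per_rank, cmd = get_numactl_cmd("0-7", 2, 1)
#     # cores_per_rank == 4, cmd == ["numactl", "-m", "0", "-C", "4-7"]
#     # (the "-m 0" portion depends on the machine's NUMA topology)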
| 5,162 | 33.651007 | 111 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/utils/tensor_fragment.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from dataclasses import dataclass
from deepspeed import comm as dist
from typing import Dict
@dataclass
class fragment_address:
numel: int
start: int
@dataclass
class tensor_fragment:
lp_fragment: torch.Tensor
lp_fragment_address: fragment_address
hp_fragment: torch.Tensor
hp_fragment_address: fragment_address
optim_fragment: Dict
gradient_dict: Dict
offload_gradient_dict: Dict
use_offload: bool
param_group_index: int
def update_hp(self):
self.hp_fragment.data.copy_(self.lp_fragment.data)
def update_lp(self):
self.lp_fragment.data.copy_(self.hp_fragment.data)
def get_optim_state_fragment(self, key):
if key in self.optim_fragment:
return self.optim_fragment[key]
else:
raise ValueError(f'{key} not found in optimizer state fragment')
def get_hp_fragment_address(self):
return self.hp_fragment_address
def get_optim_state_keys(self):
return list(self.optim_fragment.keys())
def get_full_hp_param(self, optim_state_key=None):
reduce_buffer = torch.zeros_like(self, dtype=torch.float32).flatten()
if self._hp_mapping is not None:
lp_frag_address = self._hp_mapping.lp_fragment_address
reduce_fragment = torch.narrow(reduce_buffer, 0, lp_frag_address.start, lp_frag_address.numel)
if optim_state_key is None:
hp_fragment = self._hp_mapping.hp_fragment
else:
hp_fragment = self._hp_mapping.get_optim_state_fragment(optim_state_key)
reduce_fragment.data.copy_(hp_fragment.data)
dist.all_reduce(reduce_buffer, group=self._dp_group)
return reduce_buffer.reshape_as(self)
def get_full_hp_grad(self):
reduce_buffer = torch.zeros_like(self, dtype=torch.float32).flatten()
if self._hp_mapping is not None:
hp_mapping = self._hp_mapping
if hp_mapping.use_offload:
gradient_dict = hp_mapping.offload_gradient_dict
else:
gradient_dict = hp_mapping.gradient_dict
if hp_mapping.param_group_index not in gradient_dict or gradient_dict[hp_mapping.param_group_index] is None:
raise ValueError("Gradients are only available immediately after backward and before engine step")
lp_grad_fragment = gradient_dict[hp_mapping.param_group_index][self._index_in_param_group]
hp_grad_fragment = lp_grad_fragment.to(torch.float32).flatten()
lp_frag_address = self._hp_mapping.lp_fragment_address
reduce_fragment = torch.narrow(reduce_buffer, 0, lp_frag_address.start, lp_frag_address.numel)
if self.view(-1).shape == hp_grad_fragment.shape:
reduce_buffer.data.copy_(hp_grad_fragment.data)
else:
reduce_fragment.data.copy_(hp_grad_fragment.data)
dist.all_reduce(reduce_buffer, group=self._dp_group)
return reduce_buffer.reshape_as(self)
def safe_get_full_fp32_param(param):
"""Assemble and return the fp32 parameter of a low-precision (e.g., fp16) parameter.
Args:
param (``torch.nn.Parameter``): A model parameter
"""
# ZeRO stage 3 param
if hasattr(param, 'ds_id'):
return param._z3_optimizer.get_full_hp_param(param)
# ZeRO stage 1, 2, and bf16_optimizer params
if hasattr(param, '_hp_mapping'):
return param.get_full_hp_param()
return None
def safe_get_full_optimizer_state(param, optim_state_key):
"""Assemble and return the fp32 optimizer state of a low-precision (e.g., fp16) parameter.
Args:
param (``torch.nn.Parameter``): A model parameter
"""
# ZeRO stage 3 param
if hasattr(param, 'ds_id'):
return param._z3_optimizer.get_full_hp_param(param, optim_state_key)
# ZeRO stage 1, 2, and bf16_optimizer params
if hasattr(param, '_hp_mapping'):
return param.get_full_hp_param(optim_state_key)
return None
# TODO: Figure out the correct return dtype
def safe_get_full_grad(param):
"""Assemble and return the fp32 gradient of a low-precision (e.g., fp16) parameter.
Args:
param (``torch.nn.Parameter``): A model parameter
"""
if param.grad is not None:
return param.grad
# ZeRO stage 3 param
if hasattr(param, 'ds_id'):
return param._z3_optimizer.get_fp32_grad_for_param(param)
# ZeRO stage 1, 2, and bf16_optimizer params
if hasattr(param, '_hp_mapping'):
return param.get_full_hp_grad()
return None
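# A hedged usage sketch of the three safe_* helpers above; `model` is assumed to come from a
# standard deepspeed.initialize() setup, the optimizer is assumed to be Adam-like (for the
# "exp_avg" state key), and gradients are only available between backward() and step():
#     from deepspeed.utils import safe_get_full_fp32_param, safe_get_full_grad, safe_get_full_optimizer_state
#     for name, param in model.named_parameters():
#         hp_param = safe_get_full_fp32_param(param)
#         hp_grad = safe_get_full_grad(param)
#         exp_avg = safe_get_full_optimizer_state(param, "exp_avg")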
def get_hp_fragment_mapping(lp_param, lp_start, flat_hp_partition, gradient_dict, offload_gradient_dict, use_offload,
param_group_index, partition_start, partition_size, optimizer_state_dict):
lp_end = lp_param.numel() + lp_start
hp_start = partition_start
hp_end = partition_start + partition_size
fragment_start = max(lp_start, hp_start)
fragment_end = min(lp_end, hp_end)
assert fragment_start < fragment_end, \
f'fragment start {fragment_start} should be < fragment_end {fragment_end}'
fragment_numel = fragment_end - fragment_start
hp_frag_address = fragment_address(start=fragment_start - hp_start, numel=fragment_numel)
hp_fragment_tensor = flat_hp_partition.narrow(0, hp_frag_address.start, hp_frag_address.numel)
optim_fragment = {
key: value.narrow(0, hp_frag_address.start, hp_frag_address.numel)
for key, value in optimizer_state_dict.items()
if torch.is_tensor(value) and value.shape == flat_hp_partition.shape
}
lp_frag_address = fragment_address(start=fragment_start - lp_start, numel=fragment_numel)
lp_fragment_tensor = lp_param.flatten().narrow(0, lp_frag_address.start, lp_frag_address.numel)
return tensor_fragment(lp_fragment=lp_fragment_tensor,
lp_fragment_address=lp_frag_address,
hp_fragment=hp_fragment_tensor,
hp_fragment_address=hp_frag_address,
optim_fragment=optim_fragment,
gradient_dict=gradient_dict,
offload_gradient_dict=offload_gradient_dict,
use_offload=use_offload,
param_group_index=param_group_index)
'''
Logic for lp_param to hp_param mapping
lp lp0 lp1 lp2 lp3 lp4 <------- indices/names
lp [ ][ ][ ][ ][ ] <-------- tensors
flat_lp [ ] <-------- flat lp params
flat_hp [ ] <------------------ flat hp partition on current rank
full_hp [ ] <------- full flat hp params
lp2
full numel = 16
lp_frag
numel = 12
frag_start = 3
frag_end = 15
hp_frag
numel = 12
frag_start = 0
frag_end = 11
hp_frag.copy_(lp_frag)
lp3:
full numel = 4
lp_frag
numel = 4
start = 0
end = 3
hp_frag
numel = 4
start = 12
end = 15
lp4:
full numel = 12
lp_frag
numel = 4
start = 0
end = 3
hp_frag
numel = 4
start = 16
end = 19
Visual depiction of above
lp { }
flat_lp [ ]
flat_hp ( )
flat_lp [ { ( } ) ]
lx hx ly hy
ly-hx
lp { }
flat_lp [ ]
flat_hp ( )
flat_lp [ ( { ) } ]
hx lx hy ly
hy-lx
lp { }
flat_lp [ ]
flat_hp ( )
flat_lp [ ( { } ) ]
hx lx ly hy
ly-lx
lp -> (lx, hy)
flat_hp -> (hx, hy)
'''
| 8,027 | 29.876923 | 117 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/utils/zero_to_fp32.py
|
#!/usr/bin/env python
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example: python zero_to_fp32.py . pytorch_model.bin
import argparse
import torch
import glob
import math
import os
import re
from collections import OrderedDict
from dataclasses import dataclass
# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
@dataclass
class zero_model_state:
    buffers: dict
    param_shapes: dict
    shared_params: list
    ds_version: int
    frozen_param_shapes: dict
    frozen_param_fragments: dict
debug = 0
# load to cpu
device = torch.device('cpu')
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [atoi(c) for c in re.split(r'(\d+)', text)]
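# Example (illustrative only) of the natural ordering this key produces:
#     sorted(["rank_10_optim_states.pt", "rank_2_optim_states.pt"], key=natural_keys)
#     # -> ["rank_2_optim_states.pt", "rank_10_optim_states.pt"]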
def get_model_state_file(checkpoint_dir, zero_stage):
if not os.path.isdir(checkpoint_dir):
raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
# there should be only one file
if zero_stage <= 2:
file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
elif zero_stage == 3:
file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
if not os.path.exists(file):
raise FileNotFoundError(f"can't find model states file at '{file}'")
return file
def get_checkpoint_files(checkpoint_dir, glob_pattern):
# XXX: need to test that this simple glob rule works for multi-node setup too
ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
if len(ckpt_files) == 0:
raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
return ckpt_files
def get_optim_files(checkpoint_dir):
return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
def get_model_state_files(checkpoint_dir):
return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
def parse_model_states(files):
zero_model_states = []
for file in files:
state_dict = torch.load(file, map_location=device)
if BUFFER_NAMES not in state_dict:
raise ValueError(f"{file} is not a model state checkpoint")
buffer_names = state_dict[BUFFER_NAMES]
if debug:
print("Found buffers:", buffer_names)
# recover just the buffers while restoring them to fp32 if they were saved in fp16
buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
param_shapes = state_dict[PARAM_SHAPES]
# collect parameters that are included in param_shapes
param_names = []
for s in param_shapes:
for name in s.keys():
param_names.append(name)
# update with frozen parameters
frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
if frozen_param_shapes is not None:
if debug:
print(f"Found frozen_param_shapes: {frozen_param_shapes}")
param_names += list(frozen_param_shapes.keys())
# handle shared params
shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
ds_version = state_dict.get(DS_VERSION, None)
frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
z_model_state = zero_model_state(buffers=buffers,
param_shapes=param_shapes,
shared_params=shared_params,
ds_version=ds_version,
frozen_param_shapes=frozen_param_shapes,
frozen_param_fragments=frozen_param_fragments)
zero_model_states.append(z_model_state)
return zero_model_states
def parse_optim_states(files, ds_checkpoint_dir):
total_files = len(files)
state_dicts = []
for f in files:
state_dicts.append(torch.load(f, map_location=device))
    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
raise ValueError(f"{files[0]} is not a zero checkpoint")
zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
# For ZeRO-2 each param group can have different partition_count as data parallelism for expert
# parameters can be different from data parallelism for non-expert parameters. So we can just
# use the max of the partition_count to get the dp world_size.
if type(world_size) is list:
world_size = max(world_size)
if world_size != total_files:
raise ValueError(
f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
"Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
)
# the groups are named differently in each stage
if zero_stage <= 2:
fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
elif zero_stage == 3:
fp32_groups_key = FP32_FLAT_GROUPS
else:
raise ValueError(f"unknown zero stage {zero_stage}")
if zero_stage <= 2:
fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
elif zero_stage == 3:
# if there is more than one param group, there will be multiple flattened tensors - one
# flattened tensor per group - for simplicity merge them into a single tensor
#
# XXX: could make the script more memory efficient for when there are multiple groups - it
# will require matching the sub-lists of param_shapes for each param group flattened tensor
fp32_flat_groups = [
torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
]
return zero_stage, world_size, fp32_flat_groups
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
"""
Returns fp32 state_dict reconstructed from ds checkpoint
Args:
- ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
"""
print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
optim_files = get_optim_files(ds_checkpoint_dir)
zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
model_files = get_model_state_files(ds_checkpoint_dir)
zero_model_states = parse_model_states(model_files)
print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
if zero_stage <= 2:
return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
elif zero_stage == 3:
return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
def _zero2_merge_frozen_params(state_dict, zero_model_states):
if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
return
frozen_param_shapes = zero_model_states[0].frozen_param_shapes
frozen_param_fragments = zero_model_states[0].frozen_param_fragments
if debug:
num_elem = sum(s.numel() for s in frozen_param_shapes.values())
print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
wanted_params = len(frozen_param_shapes)
wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
print(f'Frozen params: Have {avail_numel} numels to process.')
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
total_params = 0
total_numel = 0
for name, shape in frozen_param_shapes.items():
total_params += 1
unpartitioned_numel = shape.numel()
total_numel += unpartitioned_numel
state_dict[name] = frozen_param_fragments[name]
if debug:
print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
param_shapes = zero_model_states[0].param_shapes
# Reconstruction protocol:
#
# XXX: document this
if debug:
for i in range(world_size):
for j in range(len(fp32_flat_groups[0])):
print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
# XXX: memory usage doubles here (zero2)
num_param_groups = len(fp32_flat_groups[0])
merged_single_partition_of_fp32_groups = []
for i in range(num_param_groups):
merged_partitions = [sd[i] for sd in fp32_flat_groups]
full_single_fp32_vector = torch.cat(merged_partitions, 0)
merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
avail_numel = sum(
[full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
if debug:
wanted_params = sum([len(shapes) for shapes in param_shapes])
wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
# not asserting if there is a mismatch due to possible padding
print(f"Have {avail_numel} numels to process.")
print(f"Need {wanted_numel} numels in {wanted_params} params.")
# params
# XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
# out-of-core computing solution
total_numel = 0
total_params = 0
for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
offset = 0
avail_numel = full_single_fp32_vector.numel()
for name, shape in shapes.items():
unpartitioned_numel = shape.numel()
total_numel += unpartitioned_numel
total_params += 1
if debug:
print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
offset += unpartitioned_numel
# Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
# avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
# paddings performed in the code it's almost impossible to predict the exact numbers w/o the
# live optimizer object, so we are checking that the numbers are within the right range
align_to = 2 * world_size
def zero2_align(x):
return align_to * math.ceil(x / align_to)
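    # Worked example (illustrative only): with world_size=4, align_to == 8, so
    # zero2_align(13) == 8 * math.ceil(13 / 8) == 16; offset and avail_numel are only
    # compared after being rounded up to this boundary.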
if debug:
print(f"original offset={offset}, avail_numel={avail_numel}")
offset = zero2_align(offset)
avail_numel = zero2_align(avail_numel)
if debug:
print(f"aligned offset={offset}, avail_numel={avail_numel}")
# Sanity check
if offset != avail_numel:
raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
state_dict = OrderedDict()
# buffers
buffers = zero_model_states[0].buffers
state_dict.update(buffers)
if debug:
print(f"added {len(buffers)} buffers")
_zero2_merge_frozen_params(state_dict, zero_model_states)
_zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
# recover shared parameters
for pair in zero_model_states[0].shared_params:
if pair[1] in state_dict:
state_dict[pair[0]] = state_dict[pair[1]]
return state_dict
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
remainder = unpartitioned_numel % world_size
padding_numel = (world_size - remainder) if remainder else 0
partitioned_numel = math.ceil(unpartitioned_numel / world_size)
return partitioned_numel, padding_numel
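# Worked example (illustrative only) of the partition math above: for a parameter with
# unpartitioned_numel=10 and world_size=4, remainder == 2, so padding_numel == 2 and
# partitioned_numel == math.ceil(10 / 4) == 3; the 4 shards of 3 elements cover the 10 real
# elements plus 2 elements of padding.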
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
return
if debug:
for i in range(world_size):
num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
frozen_param_shapes = zero_model_states[0].frozen_param_shapes
wanted_params = len(frozen_param_shapes)
wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
print(f'Frozen params: Have {avail_numel} numels to process.')
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
total_params = 0
total_numel = 0
for name, shape in zero_model_states[0].frozen_param_shapes.items():
total_params += 1
unpartitioned_numel = shape.numel()
total_numel += unpartitioned_numel
param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
if debug:
print(
f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
)
print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
param_shapes = zero_model_states[0].param_shapes
avail_numel = fp32_flat_groups[0].numel() * world_size
# Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
# param, re-consolidating each param, while dealing with padding if any
# merge list of dicts, preserving order
param_shapes = {k: v for d in param_shapes for k, v in d.items()}
if debug:
for i in range(world_size):
print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
wanted_params = len(param_shapes)
wanted_numel = sum(shape.numel() for shape in param_shapes.values())
# not asserting if there is a mismatch due to possible padding
avail_numel = fp32_flat_groups[0].numel() * world_size
print(f"Trainable params: Have {avail_numel} numels to process.")
print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
# params
# XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
# out-of-core computing solution
offset = 0
total_numel = 0
total_params = 0
for name, shape in param_shapes.items():
unpartitioned_numel = shape.numel()
total_numel += unpartitioned_numel
total_params += 1
partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
if debug:
print(
f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
)
# XXX: memory usage doubles here
state_dict[name] = torch.cat(
tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
0).narrow(0, 0, unpartitioned_numel).view(shape)
offset += partitioned_numel
offset *= world_size
# Sanity check
if offset != avail_numel:
raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
state_dict = OrderedDict()
# buffers
buffers = zero_model_states[0].buffers
state_dict.update(buffers)
if debug:
print(f"added {len(buffers)} buffers")
_zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
_zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
# recover shared parameters
for pair in zero_model_states[0].shared_params:
if pair[1] in state_dict:
state_dict[pair[0]] = state_dict[pair[1]]
return state_dict
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
"""
Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
via a model hub.
Args:
- ``checkpoint_dir``: path to the desired checkpoint folder
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
Returns:
- pytorch ``state_dict``
Note: this approach may not work if your application doesn't have sufficient free CPU memory and
you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
the checkpoint.
A typical usage might be ::
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
# do the training and checkpoint saving
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
model = model.cpu() # move to cpu
model.load_state_dict(state_dict)
# submit to model hub or save the model to share with others
In this example the ``model`` will no longer be usable in the deepspeed context of the same
application. i.e. you will need to re-initialize the deepspeed engine, since
``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
"""
if tag is None:
latest_path = os.path.join(checkpoint_dir, 'latest')
if os.path.isfile(latest_path):
with open(latest_path, 'r') as fd:
tag = fd.read().strip()
else:
raise ValueError(f"Unable to find 'latest' file at {latest_path}")
ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
if not os.path.isdir(ds_checkpoint_dir):
raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
"""
Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
Args:
- ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
- ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
"""
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
print(f"Saving fp32 state dict to {output_file}")
torch.save(state_dict, output_file)
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
"""
1. Put the provided model to cpu
2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
3. Load it into the provided model
Args:
- ``model``: the model object to update
- ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
Returns:
        - ``model``: modified model
Make sure you have plenty of CPU memory available before you call this function. If you don't
have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
conveniently placed for you in the checkpoint folder.
A typical usage might be ::
from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
# submit to model hub or save the model to share with others
    Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
of the same application. i.e. you will need to re-initialize the deepspeed engine, since
``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
"""
logger.info(f"Extracting fp32 weights")
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
logger.info(f"Overwriting model with fp32 weights")
model = model.cpu()
model.load_state_dict(state_dict, strict=False)
return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("checkpoint_dir",
type=str,
help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
parser.add_argument(
"output_file",
type=str,
help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
args = parser.parse_args()
debug = args.debug
convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
| 23,610 | 39.778929 | 197 |
py
|