repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (1 class: py)
---|---|---|---|---|---|---|
DeepSpeed | DeepSpeed-master/deepspeed/utils/__init__.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .logging import logger, log_dist
from .comms_logging import get_caller_func
#from .distributed import init_distributed
from .init_on_device import OnDevice
from .groups import *
from .nvtx import instrument_w_nvtx
# TODO: Move tensor fragment and mixed precision to zero utils
from .tensor_fragment import tensor_fragment, get_full_hp_param, get_hp_fragment_mapping, fragment_address, get_full_hp_grad
from .tensor_fragment import safe_get_full_fp32_param, safe_get_full_grad, safe_get_full_optimizer_state
from .mixed_precision_linkage import link_hp_params
from deepspeed.runtime.dataloader import RepeatingLoader
from .numa import get_numactl_cmd
| 751 | 40.777778 | 124 | py |
DeepSpeed | DeepSpeed-master/deepspeed/utils/debug.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# For lazy import with printflock()
fcntl = None
# for debug purposes map module and param objects to their fully qualified names
module_names = {}
param_names = {}
def debug_extract_module_and_param_names(model):
# extract the fully qualified names as soon as the model is acquired
global module_names
global param_names
# XXX: can probably make a map of param2module and vice-versa
module_names = {module: name for name, module in model.named_modules()}
param_names = {param: name for name, param in model.named_parameters()}
def debug_module2name(module):
if module in module_names:
return module_names[module]
else:
return "unknown"
def debug_module2name_id(module):
return f"name={debug_module2name(module)} id={module.id}"
def debug_module2name_class(module):
return f"name={debug_module2name(module)} {module.__class__.__name__}"
def debug_param2name(param):
if param in param_names:
return param_names[param]
else:
return "unknown"
def debug_param2name_id(param):
return f"name={debug_param2name(param)} id={param.ds_id}"
def debug_param2name_id_shape(param):
return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape}"
def debug_param2name_id_shape_device(param):
return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} device={param.device}"
def debug_param2name_id_numel(param):
return f"name={debug_param2name(param)} id={param.ds_id} numel={param.numel()}"
def debug_param2name_id_shape_status(param):
return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} status={param.ds_status}"
def printflock(*msgs):
"""
For printing messages from all concurrent GPUs without getting interleaved text.
This is useful when debugging issues where multiple GPUs don't sync.
1. Enable the force debug in say partitioning and zero3 files
2. Override the usual versions with ::
def print_rank_0(message, debug=False, force=False):
rank = deepspeed.comm.get_rank()
printflock(f"[{rank}] {message}")
3. run the program and you get both logs non-interleaved
But this makes it very difficult to make sense of the output, so the ``log_rank_file`` helper
function might be more useful, as it's easier to send each log stream into a separate file and
then compare those.
"""
global fcntl
if fcntl is None:
import fcntl
with open(__file__, "r") as fh:
fcntl.flock(fh, fcntl.LOCK_EX)
try:
print(*msgs)
finally:
fcntl.flock(fh, fcntl.LOCK_UN)
fh = None
def log_rank_file(rank, *msgs):
"""
Print to a log file of the given rank.
This is useful for debugging hangs in processes that are supposed to be in sync. Here is a possible workflow:
1. Enable the force debug in say partitioning and zero3 files
2. Override the usual versions of print_rank_0 in those files with ::
def print_rank_0(message, debug=False, force=False):
rank = deepspeed.comm.get_rank()
log_rank_file(rank, message)
3. run the program
4. fix up the expected differences, e.g. different cuda numbers ::
perl -pi -e 's|cuda:1|cuda:0|' log_rank_*
5. now diff and see where names and ids diverge - you will find where the gpus don't do the same
work (e.g. when some layers get conditionally skipped on one gpu but not all)
diff -u log_rank_0.txt log_rank_1.txt | less
"""
global fh
if fh is None:
fh = open(f"log_rank_{rank}.txt", "w")
for m in msgs:
fh.write(f"{m}\n")
fh.flush()
def print_backward_tensors(tensor):
def _print_bwd_tensors(grad_fn):
print(f"Backward tensors in {grad_fn}")
for funcs in grad_fn.next_functions:
if funcs[0]:
try:
tensor = getattr(funcs[0], 'variable')
print(funcs[0])
print(f"Tensor - id: {id(tensor)}, shape: {tensor.shape}, data: {tensor}, grad: {tensor.grad}")
except AttributeError as e:
_print_bwd_tensors(funcs[0])
if hasattr(tensor, 'grad_fn'):
_print_bwd_tensors(tensor.grad_fn)
| 4,370 | 28.734694 | 115 | py |
DeepSpeed | DeepSpeed-master/deepspeed/inference/engine.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import time
import os
from deepspeed import comm as dist
from deepspeed.utils.logging import log_dist
from torch.nn.modules import Module
from packaging import version as pkg_version
from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine
from deepspeed.utils.timer import SynchronizedWallClockTimer
from ..runtime.state_dict_factory import SDLoaderFactory
from ..runtime.weight_quantizer import WeightQuantization
from ..module_inject import replace_transformer_layer, generic_injection
from ..comm.comm import init_distributed
from ..pipe import PipelineModule
from ..moe.utils import has_moe_layers
from ..module_inject import LinearAllreduce, LinearLayer, Normalize, ReplaceWithTensorSlicing
from deepspeed.accelerator import get_accelerator
from ..module_inject.policy import TransformerPolicy
from ..module_inject.auto_tp import AutoTP
from ..module_inject.replace_policy import generic_policies
DS_INFERENCE_ENABLED = False
from torch import nn
INFERENCE_MODEL_TIMER = "model-forward-inference"
def build_bloom_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
"""
Link to paper: https://arxiv.org/abs/2108.12409 . The Alibi tensor is not causal as the original paper suggests; it
relies on the translation invariance of softmax for a quick implementation: with l being a tensor and a being a fixed value,
`softmax(l+a) = softmax(l)`. Based on
https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742
TODO @thomasw21 this doesn't work as nicely due to the masking strategy, and so masking varies slightly.
Args:
attention_mask (`torch.Tensor`):
Token-wise attention mask, this should be of shape (batch_size, max_seq_len).
num_heads (`int`, *required*):
number of heads
dtype (`torch.dtype`, *optional*, default=`torch.bfloat16`):
dtype of the output tensor
Returns:
A tensor shaped (batch_size * num_heads, 1, max_seq_len)
"""
import math
batch_size, seq_length = attention_mask.shape
closest_power_of_2 = 2**math.floor(math.log2(num_heads))
base = torch.tensor(2**(-(2**-(math.log2(closest_power_of_2) - 3))),
device=attention_mask.device,
dtype=torch.float32)
powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32)
slopes = torch.pow(base, powers)
if closest_power_of_2 != num_heads:
extra_base = torch.tensor(2**(-(2**-(math.log2(2 * closest_power_of_2) - 3))),
device=attention_mask.device,
dtype=torch.float32)
num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32)
slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
# Note: alibi will be added to the attention bias that will be applied to the query, key product of attention
# => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length)
# => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length)
# => the query_length dimension will then be broadcasted correctly
# This is more or less identical to T5's relative position bias:
# https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527
arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
alibi = slopes[..., None] * arange_tensor
if dist.is_initialized():
num_heads_per_rank = int(num_heads / dist.get_world_size())
offset = dist.get_rank() * num_heads_per_rank
alibi = alibi.view(batch_size, num_heads, 1, seq_length)
alibi = alibi[:, offset:num_heads_per_rank + offset, :, :]
return alibi.reshape(batch_size * num_heads_per_rank, 1, seq_length).to(dtype)
else:
return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype)
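# Illustrative sketch (hypothetical helper, assuming torch.distributed is not initialized so the
# non-sharded branch above is taken): a quick shape check for build_bloom_alibi_tensor.
def _example_alibi_shape_check():
    mask = torch.ones(2, 5)  # (batch_size=2, max_seq_len=5)
    alibi = build_bloom_alibi_tensor(mask, num_heads=8, dtype=torch.float16)
    assert alibi.shape == (2 * 8, 1, 5)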
class InferenceEngine(Module):
inference_mp_group = None
inference_ep_group = None
expert_mp_group = None
def __init__(self, model, config):
"""
Args:
model: torch.nn.Module
config: DeepSpeedInferenceConfig
"""
global DS_INFERENCE_ENABLED
DS_INFERENCE_ENABLED = True
super().__init__()
self.module = model
self._config = config
self._get_model_config_generate(config) # keep for weird backward compatibility
# patch model generate with ours if model uses it
if hasattr(self.module, "generate"):
self.generate = self._generate
if hasattr(self.module, "config"):
TransformerPolicy.hf_model_config = self.module.config
# todo: keep this self.injection_dict because we don't want to change the config.injection_policy API for now
# todo: this will get changed when Molly's PR on auto injection dict is merged
self.injection_dict = config.injection_policy
# todo: refactor the mp_group and mp_size related in the next refactor
self.mp_group = config.tensor_parallel.tp_group
self.mpu = config.tensor_parallel.mpu
#self._validate_args(self.mpu, config.replace_with_kernel_inject)
self.quantize_merge_count = 1
self.quantization_scales = None
# these are not needed in the config as we are creating them ourselves in the inference engine
self.ep_group = None # config.moe.ep_group
self.expert_mp_group = None # config.moe.ep_mp_group
self.cuda_graph_created = False
self.checkpoint_engine = TorchCheckpointEngine()
quantization_setting = None
self._init_quantization_setting(
quantization_setting) # todo: update with the new quant config for weight quant
self.model_profile_enabled = False
self._model_times = []
if not self.injection_dict and config.replace_with_kernel_inject:
# This is a hack to remove the prepare_mask function on HF side for BLOOM architecture
self.remove_mask_prepare_for_bloom()
if self.injection_dict or not config.replace_with_kernel_inject:
# This is a hack to redefine the alibi func due to TP
if config.tensor_parallel.tp_size > 1:
self.build_alibi_tensor()
if get_accelerator().device_name() == 'cuda' and config.enable_cuda_graph:
assert pkg_version.parse(torch.__version__) >= pkg_version.parse("1.10"), \
"If you want to use cuda graph, please upgrade torch to at least v1.10"
# Check if model passed to engine is loaded w/ meta tensors, in which case
# kernel injection must be enabled.
# NOTE: This check assumes a Hugging Face hierarchy for the device type i.e. module.device.type
self.model_meta_device = self.module.device.type == 'meta' if hasattr(self.module, "device") else False
# convert model to intended dtype
if config.dtype:
self._convert_to_dtype(config)
if self.mpu:
config.tensor_parallel.tp_size = dist.get_world_size(group=self.mpu.get_model_parallel_group())
self.mp_group = self.mpu.get_model_parallel_group()
elif config.tensor_parallel.tp_size > 1:
self._create_model_parallel_group(config)
config.tensor_parallel.tp_group = self.mp_group
if isinstance(self.module, torch.nn.Module):
moe, _ = has_moe_layers(self.module)
else:
moe = False
if moe and dist.get_world_size() > 1:
self._create_ep_parallel_group(config.moe.moe_experts)
# We only support three modes: 1) user specified policy for tensor-parallelism, 2) kernel injection (replace_with_kernel_inject), and 3) automatic tensor parallelism if tp_size > 1.
if self.injection_dict:
# 1. User specified Tensor Parallelism
assert not config.replace_with_kernel_inject, "Cannot use both user specified injection policy and kernel injection"
for client_module, injection_policy in self.injection_dict.items():
# construct the tuple and pass that instead of a string or dict.
if isinstance(injection_policy, str):
config.injection_policy_tuple = (injection_policy, )
else:
config.injection_policy_tuple = injection_policy
self._apply_injection_policy(config, client_module)
else:
if config.replace_with_kernel_inject:
# 2. DeepSpeed Kernel Injection
self._apply_injection_policy(config)
elif config.tensor_parallel.tp_size > 1:
# 3. Automatic Tensor Parallelism
parser_dict = AutoTP.tp_parser(model)
print("AutoTP: ", parser_dict)
for client_module, injection_policy in parser_dict:
if isinstance(injection_policy, str):
config.injection_policy_tuple = (injection_policy, )
else:
config.injection_policy_tuple = injection_policy
self._apply_injection_policy(config, client_module)
device = get_accelerator().current_device_name()
self.module.to(device)
if config.tensor_parallel.tp_size > 1:
_rng_state = get_accelerator().get_rng_state().to(get_accelerator().current_device_name())
dist.broadcast(_rng_state, 0)
get_accelerator().set_rng_state(_rng_state.cpu())
if config.tensor_parallel.tp_size > 1:
assert not config.enable_cuda_graph, "Cuda graph is not supported for model parallelism"
# Check if local CUDA graphs can be created in replacement modules
self.local_cuda_graph = self._local_cuda_graph_used(self.module)
def profile_model_time(self, use_cuda_events=True):
if not self.model_profile_enabled and not self._config.enable_cuda_graph:
self.module.register_forward_pre_hook(self._pre_forward_hook)
self.module.register_forward_hook(self._post_forward_hook)
self.model_profile_enabled = True
self.use_cuda_events = use_cuda_events
if self.use_cuda_events:
self.timers = SynchronizedWallClockTimer()
# todo: remove this once all the config dicts are centralized from top level pydantic config
def _get_model_config_generate(self, config):
# this is being passed to replace_transformer_layer(config=self.user_model_config_dict)
self.config = getattr(self.module, 'config', None) if config.config is None else config.config
def remove_mask_prepare_for_bloom(self):
if hasattr(self.module, 'transformer'):
if hasattr(self.module.transformer, '_prepare_attn_mask'):
self.module.transformer._prepare_attn_mask = lambda attention_mask, *args, **kwargs: attention_mask
def build_alibi_tensor(self):
if hasattr(self.module, 'transformer'):
if hasattr(self.module.transformer, 'build_alibi_tensor'):
self.module.transformer.build_alibi_tensor = build_bloom_alibi_tensor
def _pre_forward_hook(self, module, *inputs, **kwargs):
if self.use_cuda_events:
self.timers(INFERENCE_MODEL_TIMER).start()
else:
get_accelerator().synchronize()
self._start = time.time()
def _post_forward_hook(self, module, input, output):
if self.use_cuda_events:
self.timers(INFERENCE_MODEL_TIMER).stop()
elapsed_time = self.timers(INFERENCE_MODEL_TIMER).elapsed(reset=True)
else:
get_accelerator().synchronize()
self._end = time.time()
elapsed_time = (self._end - self._start) * 1e3 # convert seconds to ms
self._model_times.append(elapsed_time)
def _create_model_parallel_group(self, config):
# Call the init process
if InferenceEngine.inference_mp_group is None:
init_distributed()
local_rank = int(os.getenv('LOCAL_RANK', '0'))
get_accelerator().set_device(local_rank)
ranks = [i for i in range(config.tensor_parallel.tp_size)]
self.mp_group = dist.new_group(ranks)
InferenceEngine.inference_mp_group = self.mp_group
else:
self.mp_group = InferenceEngine.inference_mp_group
def _create_ep_parallel_group(self, moe_experts):
# Call the init process
self.ep_group = {}
self.expert_mp_group = {}
moe_experts = moe_experts if type(moe_experts) is list else [moe_experts]
for e in moe_experts:
self.ep_group.update({e: None})
self.expert_mp_group.update({e: None})
for moe_ep_size in self.ep_group.keys():
num_ep_groups = dist.get_world_size() // moe_ep_size
for i in range(num_ep_groups):
ep_cnt = i * moe_ep_size
size = dist.get_world_size() if moe_ep_size > dist.get_world_size() else moe_ep_size
ranks = list(range(ep_cnt, ep_cnt + size))
_ep_group = dist.new_group(ranks)
if dist.get_rank() in ranks:
self.ep_group.update({moe_ep_size: _ep_group})
if dist.get_world_size() > moe_ep_size:
num_expert_mp_groups = dist.get_world_size() // num_ep_groups
expert_mp_size = dist.get_world_size() // moe_ep_size
for i in range(num_expert_mp_groups):
expert_mp_comm_ranks = [i + nr * moe_ep_size for nr in range(expert_mp_size)]
_expert_mp_group = dist.new_group(expert_mp_comm_ranks)
if dist.get_rank() in expert_mp_comm_ranks:
self.expert_mp_group.update({moe_ep_size: _expert_mp_group})
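# Worked example (illustrative numbers): with world_size=8 and moe_ep_size=4, the loop above
# builds two expert-parallel groups, ranks [0-3] and [4-7], and four expert model-parallel
# groups, ranks [0,4], [1,5], [2,6] and [3,7].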
def _init_quantization_setting(self, quantization_setting):
self.quantize_bits = 8
self.mlp_extra_grouping = False
self.quantize_groups = 1
if type(quantization_setting) is tuple:
self.mlp_extra_grouping, \
self.quantize_groups = quantization_setting
elif quantization_setting is not None:
self.quantize_groups = quantization_setting
log_dist(
f"quantize_bits = {self.quantize_bits} "
f"mlp_extra_grouping = {self.mlp_extra_grouping}, "
f"quantize_groups = {self.quantize_groups}", [0])
# TODO: remove this function and add this functionality to pydantic config checking
def _validate_args(self, mpu, replace_with_kernel_inject):
# TODO: to support SD pipeline we need to avoid this check for now
if replace_with_kernel_inject and not isinstance(self.module, Module):
raise ValueError(f"model must be a torch.nn.Module, got {type(self.module)}")
if not isinstance(self._config.tensor_parallel.tp_size, int) or self._config.tensor_parallel.tp_size < 1:
raise ValueError(f"mp_size must be an int >= 1, got {self._config.tensor_parallel.tp_size}")
if mpu:
methods = ["get_model_parallel_group", "get_data_parallel_group"]
for method in methods:
if not hasattr(mpu, method):
raise ValueError(f"mpu is missing {method}")
if self._config.checkpoint is not None and not isinstance(self._config.checkpoint, (str, dict)):
raise ValueError(f"checkpoint must be None, str or dict, got {type(self._config.checkpoint)}")
supported_dtypes = [None, torch.half, torch.int8, torch.float]
if self._config.dtype not in supported_dtypes:
raise ValueError(f"{self._config.dtype} not supported, valid dtype: {supported_dtypes}")
if self.injection_dict is not None and not isinstance(self.injection_dict, dict):
raise ValueError(f"injection_dict must be None or a dict, got: {self.injection_dict}")
def load_model_with_checkpoint(self, r_module):
self.mp_replace = ReplaceWithTensorSlicing(
mp_group=self.mp_group, mp_size=self._config.tensor_parallel.tp_size) #, out_dim=0, in_dim=1)
error_msgs = []
def load(module, state_dict, prefix):
args = (state_dict, prefix, {}, True, [], [], error_msgs)
if hasattr(module, 'weight'):
if module.weight.data.is_meta:
# meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.weight = torch.nn.parameter.Parameter(data=torch.empty_like(module.weight.data,
device="cpu"),
requires_grad=module.weight.data.requires_grad)
if 'query_key_value' in prefix:
module.weight = self.mp_replace.strided_copy(module.weight.data,
state_dict[prefix + 'weight'],
num_splits=3)
else:
module.weight = self.mp_replace.copy(module.weight.data, state_dict[prefix + 'weight'])
else:
if module.norm.weight.data.is_meta:
# meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.norm.weight = torch.nn.parameter.Parameter(
data=torch.empty_like(module.norm.weight.data, device="cpu"),
requires_grad=module.norm.weight.data.requires_grad)
module.norm.weight = self.mp_replace.copy(module.norm.weight.data, state_dict[prefix + 'weight'])
if prefix + 'bias' in self.key_list:
if hasattr(module, 'norm'):
if module.norm.bias.data.is_meta:
# meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.norm.bias = torch.nn.parameter.Parameter(
data=torch.empty_like(module.norm.bias.data, device="cpu"),
requires_grad=module.norm.bias.data.requires_grad)
module.norm.bias = self.mp_replace.copy(module.norm.bias, state_dict[prefix + 'bias'])
else:
if module.bias.data.is_meta:
# meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.bias.data,
device="cpu"),
requires_grad=module.bias.data.requires_grad)
data = state_dict[prefix + 'bias']
data = data.to(get_accelerator().current_device_name())
module.bias = self.mp_replace.copy(module.bias, data)
layer_policies = {
nn.Linear: load,
nn.Embedding: load,
nn.LayerNorm: load,
LinearLayer: load,
LinearAllreduce: load
}
def load_module_recursive(module, prefix='', level=0):
for name, child in module.named_children():
if child.__class__ in layer_policies:
checking_key = prefix + name + '.'
if not any(checking_key in item for item in self.key_list):
continue
if len(list(child.parameters())) > 0 and list(child.parameters())[0].numel() == 0:
if len(child.weight.ds_shape) == 1:
child = Normalize(dim=child.weight.ds_shape[-1], dtype=child.weight.dtype, eps=child.eps)
setattr(module, name, child)
load(child, self.sd, prefix + name + '.')
else:
load_module_recursive(child, prefix if level == 0 else prefix + name + '.', level + 1)
load_module_recursive(r_module)
embedding_weight = None
for n, p in r_module.named_parameters():
if "word_embeddings." in n or "embed_tokens." in n or "wte." in n:
embedding_weight = p
if embedding_weight is not None and hasattr(r_module, "lm_head") and hasattr(
r_module.lm_head, "weight") and r_module.lm_head.weight.is_meta:
r_module.lm_head.weight = embedding_weight
def _apply_injection_policy(self, config, client_module=None):
# client_module is only passed when using the injection_dict method.
checkpoint_dir = config.checkpoint
checkpoint = SDLoaderFactory.get_sd_loader_json(checkpoint_dir,
self.checkpoint_engine) if checkpoint_dir is not None else None
generic_injection(self.module,
fp16=(config.dtype == torch.half) or (config.dtype == torch.int8),
bf16=(config.dtype == torch.bfloat16),
enable_cuda_graph=config.enable_cuda_graph)
if isinstance(self.module, torch.nn.Module):
# config is our DeepSpeedInferenceConfig and self.config is the HF model config
replace_transformer_layer(client_module, self.module, checkpoint, config, self.config)
def _get_all_ckpt_names(self, checkpoints_path, tag):
ckpt_file_pattern = self._get_ckpt_name(checkpoints_path, tag, mp_placeholder="*")
import glob
ckpt_files = glob.glob(ckpt_file_pattern)
ckpt_files.sort()
return ckpt_files
def _get_ckpt_name(self, checkpoints_path, tag, mp_placeholder=None):
if mp_placeholder is not None:
mp_rank_str = mp_placeholder
else:
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
mp_rank_str = "{:02d}".format(mp_rank)
ckpt_name = os.path.join(
checkpoints_path,
"mp_rank_" + mp_rank_str + "_model_states.pt",
)
return ckpt_name
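# Example (illustrative, hypothetical path): with checkpoints_path="/ckpts" and mp_rank 0 this
# returns "/ckpts/mp_rank_00_model_states.pt"; passing mp_placeholder="*" yields the glob
# pattern "/ckpts/mp_rank_*_model_states.pt" used by _get_all_ckpt_names above.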
def _load_checkpoint(self, load_dir, load_module_strict=True, tag=None):
is_pipe_parallel = isinstance(self.module, PipelineModule)
if is_pipe_parallel:
raise RuntimeError('pipeline parallelism is currently not supported in inference.')
if not isinstance(load_dir, dict) and os.path.isdir(load_dir):
if tag is None:
latest_path = os.path.join(load_dir, "latest")
if os.path.isfile(latest_path):
with open(latest_path, "r") as fd:
tag = fd.read().strip()
ckpt_list = self._get_all_ckpt_names(load_dir, tag)
sd_loader = SDLoaderFactory.get_sd_loader(ckpt_list, self.checkpoint_engine)
else:
sd_loader = SDLoaderFactory.get_sd_loader_json(load_dir, self.checkpoint_engine)
checkpoint = sd_loader['checkpoints']
if type(checkpoint) is list:
self.sd = torch.load(checkpoint[0], map_location='cpu')
self.key_list = list(self.sd.keys())
self.load_model_with_checkpoint(self.module)
for i in range(1, len(checkpoint)):
if not dist.is_initialized() or dist.get_rank() == 0:
print(f"loading checkpoint ({i})")
self.sd = torch.load(checkpoint[i], map_location=get_accelerator().device_name())
self.key_list = list(self.sd.keys())
self.load_model_with_checkpoint(self.module)
else:
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
load_path, checkpoint, quantize_config = sd_loader.load(self._config.tensor_parallel.tp_size,
mp_rank,
is_pipe_parallel=is_pipe_parallel,
quantize=(self._config.dtype is torch.int8),
quantize_groups=self.quantize_groups,
mlp_extra_grouping=self.mlp_extra_grouping)
self.quantization_scales, self.quantize_merge_count = quantize_config
moe, _ = has_moe_layers(self.module)
if moe:
from deepspeed.runtime.engine import DeepSpeedEngine
old_moe_load = False
if not isinstance(checkpoint['num_experts'], list):
old_moe_load = True
DeepSpeedEngine.load_moe_state_dict(load_dir,
tag,
state_dict=checkpoint[self._choose_module_key(checkpoint)],
old_moe_load=old_moe_load,
model=self.module,
mpu=self.mpu,
checkpoint_engine=self.checkpoint_engine)
self.module.load_state_dict(state_dict=checkpoint[self._choose_module_key(checkpoint)],
strict=load_module_strict)
def _choose_module_key(self, sd):
assert not ('module' in sd
and 'model' in sd), "checkpoint has both 'model' and 'module' keys, not sure how to proceed"
assert 'module' in sd or 'model' in sd, "checkpoint contains neither 'model' nor 'module' keys, not sure how to proceed"
if 'module' in sd:
return 'module'
elif 'model' in sd:
return 'model'
def _convert_to_dtype(self, config):
if not isinstance(self.module, torch.nn.Module):
return
if False: #config.dtype is torch.int8 and self.quantization_scales is None:
quantizer = WeightQuantization(mlp_extra_grouping=self.mlp_extra_grouping)
model, self.quantization_scales = quantizer.model_quantize(self.module, self.injection_dict,
self.quantize_bits, self.quantize_groups)
elif config.dtype == torch.half:
self.module.half()
elif config.dtype == torch.bfloat16:
self.module.bfloat16()
elif config.dtype == torch.float:
self.module.float()
def _create_cuda_graph(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = get_accelerator().Stream()
cuda_stream.wait_stream(get_accelerator().current_stream())
with get_accelerator().stream(cuda_stream):
for i in range(3):
ret = self.module(*inputs, **kwargs)
get_accelerator().current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._cuda_graphs = torch.cuda.CUDAGraph()
self.static_inputs = inputs
self.static_kwargs = kwargs
with torch.cuda.graph(self._cuda_graphs):
self.static_output = self.module(*self.static_inputs, **self.static_kwargs)
self.cuda_graph_created = True
def _graph_replay(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_kwargs[k].copy_(kwargs[k])
self._cuda_graphs.replay()
return self.static_output
def model_times(self):
assert self.model_profile_enabled, "model profiling is not enabled"
model_times = self._model_times
if self._config.enable_cuda_graph and len(self._model_times) == 0:
raise ValueError("Model times are empty and cuda graph is enabled. If "
"this is a GPT-style model this combo is not supported. If this is a "
"BERT-style model this is a bug, please report it. "
f"Model type is: {type(self.module)}")
self._model_times = []
return model_times
def _module_match(self, module):
for policy in generic_policies:
policy = policy()
if policy.match_replaced(module):
return True
return False
def _local_cuda_graph_used(self, module):
if isinstance(module, torch.nn.Module):
return False
else:
sub_module_cuda_graph = False
for name in module.__dict__.keys():
sub_module = getattr(module, name)
if self._module_match(sub_module) and hasattr(sub_module, "enable_cuda_graph"):
sub_module_cuda_graph = True
return sub_module_cuda_graph
def forward(self, *inputs, **kwargs):
"""Execute forward propagation
Arguments:
*inputs: Variable length input list
**kwargs: variable length keyword arguments
"""
start = None
if self.model_profile_enabled and get_accelerator().device_name() == 'cuda' and self._config.enable_cuda_graph:
get_accelerator().synchronize()
start = time.time()
if get_accelerator().device_name() == 'cuda' and self._config.enable_cuda_graph and not self.local_cuda_graph:
if self.cuda_graph_created:
outputs = self._graph_replay(*inputs, **kwargs)
else:
self._create_cuda_graph(*inputs, **kwargs)
outputs = self._graph_replay(*inputs, **kwargs)
else:
outputs = self.module(*inputs, **kwargs)
if self.model_profile_enabled and self._config.enable_cuda_graph:
get_accelerator().synchronize()
duration = (time.time() - start) * 1e3 # convert seconds to ms
self._model_times.append(duration)
return outputs
def _generate(self, *inputs, **kwargs):
# Reset KV-cache at the beginning of generate
if hasattr(self.module, 'reset_cache'):
self.module.reset_cache()
num_beams = 1
if "generation_config" in kwargs:
gen_config = kwargs["generation_config"]
num_beams = getattr(gen_config, "num_beams", 1)
if "num_beams" in kwargs:
num_beams = kwargs["num_beams"]
if num_beams > 1:
raise NotImplementedError("DeepSpeed does not support `num_beams` > 1, if this is important to you please "
"add your request to: https://github.com/microsoft/DeepSpeed/issues/2506")
return self.module.generate(*inputs, **kwargs)
| 31,553 | 48.535322 | 189 | py |
DeepSpeed | DeepSpeed-master/deepspeed/inference/config.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
from deepspeed.runtime.zero.config import DeepSpeedZeroConfig
from pydantic import Field
from pydantic import validator
from typing import Dict, Union
from enum import Enum
class DtypeEnum(Enum):
# The torch dtype must always be the first value (so we return torch.dtype)
fp16 = torch.float16, "torch.float16", "fp16", "float16", "half"
fp32 = torch.float32, "torch.float32", "fp32", "float32", "float"
bf16 = torch.bfloat16, "torch.bfloat16", "bf16", "bfloat16", "bfloat"
int8 = torch.int8, "torch.int8", "int8"
# Copied from https://stackoverflow.com/a/43210118
# Allows us to use multiple values for each Enum index and returns first
# listed value when Enum is called
def __new__(cls, *values):
obj = object.__new__(cls)
# first value is canonical value
obj._value_ = values[0]
for other_value in values[1:]:
cls._value2member_map_[other_value] = obj
obj._all_values = values
return obj
def __repr__(self):
return "<%s.%s: %s>" % (
self.__class__.__name__,
self._name_,
", ".join([repr(v) for v in self._all_values]),
)
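# Illustrative sketch (hypothetical helper): every alias listed in a member resolves to the same
# Enum member, and the canonical value is the torch dtype.
def _example_dtype_enum_aliases():
    assert DtypeEnum("fp16") is DtypeEnum("half") is DtypeEnum(torch.float16)
    assert DtypeEnum("fp16").value is torch.float16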
class MoETypeEnum(str, Enum):
residual = "residual"
standard = "standard"
class DeepSpeedTPConfig(DeepSpeedConfigModel):
""" Configure tensor parallelism settings """
enabled: bool = True
""" Turn tensor parallelism on/off. """
tp_size: int = 1
""" Number of devices to split the model across using tensor parallelism. """
mpu: object = None
"""
A model parallelism unit object that implements
``get_{model,data}_parallel_{rank,group,world_size}()``.
"""
tp_group: object = None
class DeepSpeedMoEConfig(DeepSpeedConfigModel):
""" Sets parameters for MoE """
enabled: bool = True
ep_size: int = 1
"""
The expert-parallelism size which is used for partitioning the experts
across the GPUs in the expert-parallel group.
"""
moe_experts: list = Field([1], alias="num_experts")
""" The global number of experts used in an MoE layer. """
type: MoETypeEnum = MoETypeEnum.standard
"""
Specify the type of MoE layer. We have two types of MoE layer: 'Standard'
and 'Residual'.
"""
ep_mp_group: object = None
ep_group: object = Field(None, alias="expert_group")
class QuantTypeEnum(str, Enum):
asym = "asymmetric"
sym = "symmetric"
class BaseQuantConfig(DeepSpeedConfigModel):
enabled = True
num_bits = 8
q_type: QuantTypeEnum = QuantTypeEnum.sym
q_groups: int = 1
class WeightQuantConfig(BaseQuantConfig):
enabled = True
class ActivationQuantConfig(BaseQuantConfig):
enabled = True
class QKVQuantConfig(DeepSpeedConfigModel):
enabled = True
class QuantizationConfig(DeepSpeedConfigModel):
enabled: bool = True
activation: ActivationQuantConfig = ActivationQuantConfig()
weight: WeightQuantConfig = WeightQuantConfig()
qkv: QKVQuantConfig = QKVQuantConfig()
# todo: brainstorm on how to do ckpt loading for DS inference
class InferenceCheckpointConfig(DeepSpeedConfigModel):
checkpoint_dir: str = None
save_mp_checkpoint_path: str = None
base_dir: str = None
class DeepSpeedInferenceConfig(DeepSpeedConfigModel):
""" Sets parameters for DeepSpeed Inference Engine. """
replace_with_kernel_inject: bool = Field(False, alias="kernel_inject")
"""
Set to true to inject inference kernels for models such as Bert, GPT2,
GPT-Neo and GPT-J. Otherwise, the injection_dict provides the names of two
linear layers as a tuple:
`(attention_output projection, transformer output projection)`
"""
dtype: DtypeEnum = torch.float16
"""
Desired model data type, will convert model to this type.
Supported target types: `torch.half`, `torch.int8`, `torch.float`
"""
tensor_parallel: DeepSpeedTPConfig = Field({}, alias="tp")
"""
Configuration for tensor parallelism used to split the model across several
GPUs. Expects a dictionary containing values for :any:`DeepSpeedTPConfig`.
"""
enable_cuda_graph: bool = False
"""
Use this flag for capturing the CUDA-Graph of the inference ops, so that it
can run faster using the graph replay method.
"""
use_triton: bool = False
"""
Use this flag to use triton kernels for inference ops.
"""
triton_autotune: bool = False
"""
Use this flag to enable triton autotuning.
Turning it on is better for performance but increases the first run time due to
autotuning.
"""
zero: DeepSpeedZeroConfig = {}
"""
ZeRO configuration to use with the Inference Engine. Expects a dictionary
containing values for :any:`DeepSpeedZeroConfig`.
"""
triangular_masking: bool = Field(True, alias="tm")
"""
Controls the type of masking for attention scores in the transformer layer.
Note that the masking is application specific.
"""
moe: Union[bool, DeepSpeedMoEConfig] = {}
"""
Specify if the type of Transformer is MoE. Expects a dictionary containing
values for :any:`DeepSpeedMoEConfig`.
"""
quant: QuantizationConfig = {}
"""
NOTE: only works for int8 dtype.
Quantization settings used for quantizing your model using MoQ. The
setting can be a single element or a tuple. If a single value is passed, it
is treated as the number of groups used for quantization. A tuple is passed
when there is extra grouping for the MLP part of a Transformer layer
(e.g. (True, 8) means the model is quantized using 8 groups for the whole
network except the MLP part, which uses extra grouping). Expects a
dictionary containing values for
:any:`QuantizationConfig`.
"""
#todo: refactor the following 3 into the new checkpoint_config
checkpoint: Union[str, Dict] = None
"""
Path to deepspeed compatible checkpoint or path to JSON with load policy.
"""
base_dir: str = None
"""
This shows the root directory under which all the checkpoint files exist.
This can be passed through the json config too.
"""
set_empty_params: bool = False
"""
Specifies whether the inference module is created with empty or real tensors.
"""
save_mp_checkpoint_path: str = None
"""
The path to which we want to save the loaded model as a checkpoint. This
feature is used for adjusting the parallelism degree to help alleviate the
model loading overhead. It does not save any new checkpoint if no path is
passed.
"""
checkpoint_config: InferenceCheckpointConfig = Field({}, alias="ckpt_config")
"""
TODO: Add docs. Expects a dictionary containing values for
:any:`InferenceCheckpointConfig`.
"""
return_tuple: bool = True
"""
Specify whether or not the transformer layers need to return a tuple or a
Tensor.
"""
training_mp_size: int = 1
"""
If loading a checkpoint, this is the mp size it was trained with; it
may be different from the mp size you want to use during
inference.
"""
replace_method: str = Field(
"auto",
deprecated=True,
deprecated_msg="This parameter is no longer needed, please remove from your call to DeepSpeed-inference")
injection_policy: Dict = Field(None, alias="injection_dict")
"""
Dictionary mapping a client nn.Module to its corresponding injection
policy. e.g., `{BertLayer : deepspeed.inference.HFBertLayerPolicy}`
"""
injection_policy_tuple: tuple = None
""" TODO: Add docs """
config: Dict = Field(None, alias="args") # todo: really no need for this field if we can refactor
max_out_tokens: int = Field(1024, alias="max_tokens")
"""
This argument sets the maximum number of tokens the inference engine can work
with, including the input and output tokens. Please consider increasing it
to the token length required for your use case.
"""
min_out_tokens: int = Field(1, alias="min_tokens")
"""
This argument communicates to the runtime the minimum number of tokens you
expect you will need to generate. This will cause the runtime to error
if it is unable to provide this, and to report the memory pressure
rather than seg-faulting or producing corrupted output.
"""
transposed_mode: bool = Field(False, alias="transposed_mode")
mp_size: int = Field(1, deprecated=True, new_param="tensor_parallel.tp_size")
"""
Desired model parallel size, default is 1 meaning no model parallelism.
Deprecated, please use the ``tensor_parallel`` config to control model
parallelism.
"""
mpu: object = Field(None, deprecated=True, new_param="tensor_parallel.mpu")
ep_size: int = Field(1, deprecated=True, new_param="moe.ep_size")
ep_group: object = Field(None, alias="expert_group", deprecated=True, new_param="moe.ep_group")
ep_mp_group: object = Field(None, alias="expert_mp_group", deprecated=True, new_param="moe.ep_mp_group")
moe_experts: list = Field([1], deprecated=True, new_param="moe.moe_experts")
moe_type: MoETypeEnum = Field(MoETypeEnum.standard, deprecated=True, new_param="moe.type")
@validator("moe")
def moe_backward_compat(cls, field_value, values):
if isinstance(field_value, bool):
return DeepSpeedMoEConfig(moe=field_value)
return field_value
@validator("use_triton")
def has_triton(cls, field_value, values):
if field_value and not deepspeed.HAS_TRITON:
raise ValueError('Triton needs to be installed to use deepspeed with triton kernels')
return field_value
class Config:
# Get the str representation of the datatype for serialization
json_encoders = {torch.dtype: lambda x: str(x)}
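# Illustrative sketch (hypothetical values): a minimal dictionary of the kind this config model
# accepts, written with the field aliases defined above; actual accepted keys depend on the
# pydantic population rules of DeepSpeedConfigModel.
def _example_inference_config_dict():
    return DeepSpeedInferenceConfig(**{
        "kernel_inject": True,  # alias of replace_with_kernel_inject
        "dtype": "fp16",  # resolves through the DtypeEnum aliases
        "tp": {"tp_size": 2},  # alias of tensor_parallel
        "max_tokens": 2048,  # alias of max_out_tokens
    })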
| 10,068 | 32.121711 | 113 | py |
DeepSpeed | DeepSpeed-master/deepspeed/inference/__init__.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .engine import InferenceEngine
| 132 | 18 | 38 | py |
DeepSpeed | DeepSpeed-master/deepspeed/pipe/__init__.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from ..runtime.pipe import PipelineModule, LayerSpec, TiedLayerSpec
| 164 | 22.571429 | 67 | py |
DeepSpeed | DeepSpeed-master/deepspeed/autotuning/autotuner.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import copy
import json
import shutil
import subprocess
import time
import datetime
import math
import hjson
from ..runtime.config_utils import dict_raise_error_on_duplicate_keys
from ..runtime.constants import *
from ..runtime.zero.config import ZERO_OPTIMIZATION, ZeroStageEnum
from ..utils import logger
from .config import DeepSpeedAutotuningConfig
from .constants import *
from .scheduler import ResourceManager
from .tuner import GridSearchTuner, RandomTuner, ModelBasedTuner
from .utils import *
from deepspeed.accelerator import get_accelerator
try:
from tabulate import tabulate
except ImportError:
tabulate = None
try:
import mlflow
has_mlflow = True
except Exception as e:
has_mlflow = False
ZERO_OPTIMIZATION_STAGE = "stage"
OFFLOAD_OPTIMIZER = "offload_optimizer"
OFFLOAD_PARAM = "offload_param"
ZERO_OPTIMIZATION_STAGE_DEFAULT = ZeroStageEnum.disabled
class Autotuner:
"""The DeepSpeed Autotuner automatically discovers the optimal DeepSpeed configuration that delivers good training speed. The Autotuner uses model information, system information, and heuristics to efficiently tune system knobs that affect compute and memory efficiencies, such as ZeRO optimization stages, micro-batch sizes, and many other ZeRO optimization configurations. It not only reduces the time and resources user spend on tuning, but also can discover configurations better than hand-tuned methods.
Autotuning with DeepSpeed requires no code change from DeepSpeed users. Please refer to the README for usage details.
"""
def __init__(self, args, active_resources):
self.args = args
self.selected_exp_dir = None
assert tabulate is not None, "Missing required package `tabulate`, please install with `pip install deepspeed[autotuning]`."
logger.debug(f"autotuning args={args}")
self.user_config = self._get_user_config(args.user_args)
assert self.user_config is not None, "DeepSpeed configuration is not provided"
self.autotuning_config = DeepSpeedAutotuningConfig(self.user_config)
if self.user_config[AUTOTUNING]:
if AUTOTUNING_EXPS_DIR in self.user_config[AUTOTUNING].keys():
del self.user_config[AUTOTUNING][AUTOTUNING_EXPS_DIR]
if AUTOTUNING_RESULTS_DIR in self.user_config[AUTOTUNING].keys():
del self.user_config[AUTOTUNING][AUTOTUNING_RESULTS_DIR]
self.exps_dir = self.autotuning_config.exps_dir
if self.autotuning_config.overwrite and os.path.exists(self.exps_dir):
shutil.rmtree(self.exps_dir, ignore_errors=True)
if not os.path.exists(self.exps_dir):
try:
os.makedirs(self.exps_dir, exist_ok=True)
logger.info(f"Created autotuning experiments directory: {self.exps_dir}")
except:
logger.error(
f"Failed to create {self.exps_dir}, please check `exps_dir` in the autotuning config file is accessible by all the nodes in the job."
)
exit(-1)
self.results_dir = self.autotuning_config.results_dir
if self.autotuning_config.overwrite and os.path.exists(self.results_dir):
shutil.rmtree(self.results_dir, ignore_errors=True)
if not os.path.exists(self.results_dir):
try:
os.makedirs(self.results_dir, exist_ok=True)
logger.info(f"Created autotuning results directory: {self.exps_dir}")
except:
logger.error(
f"Failed to create {self.results_dir}, please check `results_dir` in the autotuning config file is accessible by all the nodes in the job."
)
exit(-1)
# set the active resource for the autotuner resource manager
self.rm = self._get_resource_manager(active_resources)
# get resource requirement for each autotuning experiment
self.exp_num_nodes, self.exp_num_gpus = self._get_exp_resources(args)
assert self.exp_num_gpus <= self.rm.num_gpus_per_node, "num_gpus requested for each autotuning experiment must not exceed the number of GPUs available per node"
assert self.exp_num_nodes <= len(
self.rm.nodes
), "num_nodes requested for each autotuning experiment must not exceed the number of nodes available for the job"
self.records = {}
self.optimal_cmd = None
self.optimal_ds_config = None
self.mlflow_parent_id = None
def print_tuning_results(self):
"""Print the autotuning results in tabular format.
"""
best_space_records = self.get_best_space_records()
tab = []
if best_space_records:
for key, val in best_space_records.items():
if not val:
continue
row = []
row.append(key)
num_exps = 0
if key == GLOBAL_TUNING_SPACE:
cnt = 0
for k, v in best_space_records.items():
if k != GLOBAL_TUNING_SPACE:
cnt += v[2]
num_exps = cnt
else:
num_exps = val[2]
row.append(num_exps)
row.append(val[1])
row.append(val[0]['name'])
tab.append(row)
summary = tabulate(tab,
headers=["tuning_space", "num_experiments", "best_metric_val", "best_exp_name"],
tablefmt="pipe")
print(summary)
with open(os.path.join(self.results_dir, 'summary.txt'), 'w', buffering=BUFSIZE) as fd:
fd.write(summary)
fd.flush()
os.fsync(fd)
if GLOBAL_TUNING_SPACE in best_space_records:
best_exp, best_metric_val, total_num_exps = best_space_records[GLOBAL_TUNING_SPACE]
if best_exp:
logger.info(
f"{best_exp['name']} is the optimal setup after tuning. The exp result is at {best_exp['result_dir']}."
)
else:
logger.info(f"No optimal setup is found. Please check that experiments were run successfully.")
tuning_duration = datetime.timedelta(seconds=(time.time() - self.start_time))
logger.info(f"Tuning completed in {tuning_duration}")
with open(os.path.join(self.results_dir, 'summary.txt'), 'a') as f:
f.write(
f"\n\nTuning completed in {tuning_duration}. Total number of experiments: {self.rm.experiment_count - 1}."
)
f.flush()
def _get_user_config(self, user_args):
"""Get DeepSpeed configuration from the user arguments passed to the launcher.
Args:
user_args ([list]): user arguments passed to the DeepSpeed launcher
Returns:
[dict]: DeepSpeed configuration dictionary
"""
user_config_file = None
if "--deepspeed_config" in user_args:
idx = user_args.index("--deepspeed_config")
assert ".json" in user_args[
idx + 1], "DeepSpeed --deepspeed_config requires a json file to specify the configuration"
user_config_file = user_args[idx + 1]
elif "--deepspeed" in user_args:
idx = user_args.index("--deepspeed")
if ".json" in user_args[idx + 1]:
user_config_file = user_args[idx + 1]
logger.debug(f"user_config_file = {user_config_file}")
if user_config_file is not None:
assert os.path.isfile(user_config_file), "DeepSpeed configuration file: {} is not an existing file".format(
user_config_file)
if os.path.exists(user_config_file):
return json.load(open(user_config_file, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
return None
def _get_resource_manager(self, active_resources):
"""Initialize and return a resource manager
Args:
active_resources ([dict]): A dictionary of hostname and its slots (GPUs), e.g. {"worker-0": "0,1,2,3,4,5,6,7,8"}
Raises:
RuntimeError: raises the error if no GPU is available
Returns:
[ResourceManager]: A resource manager that schedules and runs autotuning experiments.
"""
logger.info(f"active_resources = {active_resources}")
hosts = []
ngpus_per_node = 100
for hostname, slots in active_resources.items():
hosts.append(hostname)
ngpus_per_node = min(len(slots), ngpus_per_node)
assert ngpus_per_node > 0, "no gpu is available"
return ResourceManager(args=self.args,
hosts=hosts,
num_gpus_per_node=ngpus_per_node,
results_dir=self.results_dir,
exps_dir=self.exps_dir,
arg_mappings=self.autotuning_config.arg_mappings)
def _get_exp_resources(self, args):
"""Get resource requirement for each autotuning experiment
Args:
args (dict): user args
Returns:
num_nodes, num_gpus: the number of gpus and number of nodes used in the autotuning experiments
"""
if args.num_nodes > 0:
num_nodes = args.num_nodes
else:
num_nodes = len(self.rm.nodes)
if args.num_gpus > 0:
num_gpus = args.num_gpus
else:
num_gpus = self.rm.num_gpus_per_node
return num_nodes, num_gpus
def metric(self):
return self.autotuning_config.metric
def fast_enabled(self):
return self.autotuning_config.fast
def max_train_batch_size(self):
return self.autotuning_config.max_train_batch_size
def mp_size(self):
return self.autotuning_config.mp_size
def max_train_micro_batch_size_per_gpu(self):
if self.max_train_batch_size(
) and self.max_train_batch_size() > 0: # if the user specifies a max_train_batch_size
max_train_micro_batch_size = self.max_train_batch_size() * self.mp_size() // (
self.exp_num_gpus * self.exp_num_nodes) # gradient accumulation steps >=1
return min(self.autotuning_config.max_train_micro_batch_size_per_gpu, max_train_micro_batch_size)
else:
return self.autotuning_config.max_train_micro_batch_size_per_gpu
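# Worked example (illustrative numbers): with max_train_batch_size=256, mp_size=1, and
# 8 GPUs on 1 node, the derived cap above is 256 * 1 // (8 * 1) = 32 micro batch size per GPU,
# which is then clipped by the configured max_train_micro_batch_size_per_gpu.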
def min_train_micro_batch_size_per_gpu(self):
return self.autotuning_config.min_train_micro_batch_size_per_gpu
def num_tuning_micro_batch_sizes(self):
return self.autotuning_config.num_tuning_micro_batch_sizes
def fp16_enabled(self):
if FP16 in self.user_config.keys():
return self.user_config[FP16].get(FP16_ENABLED, FP16_ENABLED_DEFAULT)
else:
return False
def get_gpu_memory_info(self):
return get_accelerator().total_memory()
def get_activation_memory_per_gpu(self):
if self.model_info and "activation_mem_per_gpu" in self.model_info:
return self.model_info["activation_mem_per_gpu"]
def get_instantiation_memory_required_per_gpu(self, zero_stage):
num_params = self.get_model_num_params()
total_gpus = self.exp_num_nodes * self.exp_num_gpus
fp16_enabled = self.fp16_enabled()
if not num_params:
return 0
# assume the model uses Adam optimizer
# ZeroStageEnum.disabled:
params_mem = num_params * (2 if fp16_enabled else 4)
gradients_mem = num_params * (2 if fp16_enabled else 4)
optimizer_mem = num_params * (16 if fp16_enabled else 8)
if zero_stage >= ZeroStageEnum.optimizer_states:
optimizer_mem = optimizer_mem / total_gpus
if zero_stage >= ZeroStageEnum.gradients:
gradients_mem = gradients_mem / total_gpus
if zero_stage >= ZeroStageEnum.weights:
params_mem = params_mem / total_gpus
mem_per_gpu = (params_mem + gradients_mem + optimizer_mem) / self.mp_size()
return mem_per_gpu
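# Worked example (illustrative numbers): 1.3e9 parameters, fp16 enabled, 16 total GPUs,
# mp_size=1. Per the Adam-based estimate above:
#   params_mem    = 1.3e9 * 2  = 2.6 GB
#   gradients_mem = 1.3e9 * 2  = 2.6 GB
#   optimizer_mem = 1.3e9 * 16 = 20.8 GB
# ZeRO stage 1 shards only the optimizer states: 2.6 + 2.6 + 20.8 / 16 = 6.5 GB per GPU.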
def _generate_experiments(self, tuning_space, max_train_batch_size_per_gpu):
"""Generates a list of autotuning experiments given a tuning_space.
The corresponding parameter values are replaced by user-defined values in the DeepSpeed configuration file.
Args:
tuning_space ([dict]): A DeepSpeed configuration dictionary where a value can be a list (called a tuning parameter). For example,
{
"zero_optimization": {
"stage": 1,
"reduce_bucket_size": [5e7,
5e8,
1e9],
"allgather_bucket_size": [5e7,
5e8,
1e9],
}
}
reduce_bucket_size and allgather_bucket_size are the tuning parameters in this tuning space.
Returns:
[list]: a list of experiments generated by taking combinations of values of the tuning space. The above tuning space generates 3*3 = 9 experiments if the user DeepSpeed configuration file does not overwrite the two tuning parameters or define more tuning parameters.
"""
exps = []
# each zero stage uses a different template configuration file
config_zero = tuning_space.get(ZERO_OPTIMIZATION, {})
stage = config_zero.get(ZERO_OPTIMIZATION_STAGE, ZERO_OPTIMIZATION_STAGE_DEFAULT)
template_config = {}
if stage == 0:
template_path = DEFAULT_TEMPLATE_PATH_ZERO_0
template_config = hjson.load(open(template_path, 'r'))
prefix = "z0_"
elif stage == 1:
template_path = DEFAULT_TEMPLATE_PATH_ZERO_1
template_config = hjson.load(open(template_path, 'r'))
prefix = "z1_"
elif stage == 2:
template_path = DEFAULT_TEMPLATE_PATH_ZERO_2
template_config = hjson.load(open(template_path, 'r'))
prefix = "z2_"
elif stage == 3:
template_path = DEFAULT_TEMPLATE_PATH_ZERO_3
template_config = hjson.load(open(template_path, 'r'))
model_info = self.model_info
if model_info and "hidden_size" in model_info:
hs = model_info["hidden_size"]
template_config[ZERO_OPTIMIZATION]['reduce_bucket_size'] = hs * hs
template_config[ZERO_OPTIMIZATION]['stage3_prefetch_bucket_size'] = 0.9 * hs * hs
template_config[ZERO_OPTIMIZATION]['stage3_param_persistence_threshold'] = 10 * hs
prefix = "z3_"
else:
return exps
# replace the corresponding parameter values if the user specifies them in the DeepSpeed configuration file
replace_dict(tuning_space, self.user_config, [ZERO_OPTIMIZATION, TRAIN_MICRO_BATCH_SIZE_PER_GPU])
logger.debug(f"tuning_space = {json.dumps(tuning_space)}")
all_configs = get_all_configs(tuning_space, ignore_keys=["optimizer"])
tuning_keys = get_tuning_keys(tuning_space)
logger.debug(f"tuning_keys = {tuning_keys}")
logger.debug(f"before pruning total configs = {len(all_configs)}")
pruned_list = prune_configs(all_configs)
logger.debug(f"after pruning total configs = {len(pruned_list)}")
for config in pruned_list:
exp_config = copy.deepcopy(template_config)
# fill the template with the expr config
replace_dict(exp_config, config)
# if the config does not use offloading, remove the offloading section
config_zero = config.get(ZERO_OPTIMIZATION, None)
if config_zero:
if OFFLOAD_OPTIMIZER not in config_zero and OFFLOAD_OPTIMIZER in exp_config[ZERO_OPTIMIZATION]:
del exp_config[ZERO_OPTIMIZATION][OFFLOAD_OPTIMIZER]
if OFFLOAD_PARAM not in config_zero and OFFLOAD_PARAM in exp_config[ZERO_OPTIMIZATION]:
del exp_config[ZERO_OPTIMIZATION][OFFLOAD_PARAM]
# set gradient accumulation steps according to max_train_batch_size_per_gpu
mbs = exp_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU]
gas = max_train_batch_size_per_gpu // mbs
exp_config[GRADIENT_ACCUMULATION_STEPS] = gas
exp_config[TRAIN_BATCH_SIZE] = mbs * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp = {}
# generate the expr name
exp_name = canonical_name(exp_config, tuning_keys, prefix)
exp['name'] = exp_name
exp[DS_CONFIG] = exp_config
exp['num_gpus'] = self.exp_num_gpus
exp['num_nodes'] = self.exp_num_nodes
exps.append(exp)
return exps
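# Worked example (illustrative numbers): with max_train_batch_size_per_gpu=32 and a candidate
# micro batch size of 8, gas = 32 // 8 = 4; on 8 GPUs across 1 node with mp_size=1 the
# experiment's train_batch_size becomes 8 * 4 * 8 * 1 // 1 = 256.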
def tune(self):
""" Tunes Zero stages, micro batch size per GPU, and other Zero configurations. Performance metrics of different tuning spaces are recorded in self.records.
"""
if has_mlflow:
self.mlflow_parent_id = os.environ['MLFLOW_RUN_ID']
mlflow.start_run(run_id=self.mlflow_parent_id)
self.start_time = time.time()
if self.fast_enabled():
logger.info(f"Fast mode is enabled. Tuning micro batch size only.")
# model info profile run with DEFAULT_MIN_MEM_CONFIG
model_info = self.model_info_profile_run()
if model_info:
self.model_info = model_info
else:
return
logger.info(f"The model has {number_to_string(self.get_model_num_params())} parameters.")
self.gpu_mem = self.get_gpu_memory_info()
logger.info(f"Memory per GPU in the system is {memory_to_string(self.gpu_mem, postfix='B')}.")
self.activation_mem = self.get_activation_memory_per_gpu()
logger.info(
f"The model requires at least {memory_to_string(self.activation_mem, postfix='B')} activation memory for micro batch size 1."
)
stage = self.user_config.get(ZERO_OPTIMIZATION, {}).get(ZERO_OPTIMIZATION_STAGE, 0)
user_zero_stages = [stage] if not isinstance(stage, list) else stage
logger.info(f"User-defined zero stages are {stage}.")
mbs = 0
max_mbs = 0
metric_val = 0
required_gpu_mem = self.get_instantiation_memory_required_per_gpu(ZeroStageEnum.disabled) + self.activation_mem
if self.gpu_mem > required_gpu_mem:
if "all" in user_zero_stages or ZeroStageEnum.disabled in user_zero_stages:
logger.info(
f"The model might be runable with ZERO 0 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1), adding DEFAULT_TUNING_SPACE_ZERO_0 to the global tuning space"
)
next_max_mbs, next_mbs, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_0)
if next_mbs > mbs:
mbs = next_mbs
max_mbs = next_max_mbs
metric_val = next_metric_val
if has_mlflow:
mlflow.log_metric(f"z0{self.metric()}", next_metric_val)
else:
logger.info(
f"The model is not runable with ZERO stage {ZeroStageEnum.disabled} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)"
)
required_gpu_mem = self.get_instantiation_memory_required_per_gpu(
ZeroStageEnum.optimizer_states) + self.activation_mem
if self.gpu_mem > required_gpu_mem:
if "all" in user_zero_stages or ZeroStageEnum.optimizer_states in user_zero_stages:
logger.info(
f"The model might be runable with ZERO 1 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_1 to the global tuning space"
)
next_max_mbs, next_mbs, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_1,
prev_max_mbs=max_mbs,
prev_best_mbs=mbs,
prev_best_metric_val=metric_val)
if next_mbs > mbs:
mbs = next_mbs
max_mbs = next_max_mbs
metric_val = next_metric_val
if has_mlflow:
mlflow.log_metric(f"z1{self.metric()}", next_metric_val)
else:
logger.info(
f"The model is not runable with ZERO stage {ZeroStageEnum.optimizer_states} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)"
)
required_gpu_mem = self.get_instantiation_memory_required_per_gpu(
ZeroStageEnum.gradients) + self.activation_mem
if self.gpu_mem > required_gpu_mem:
if "all" in user_zero_stages or ZeroStageEnum.gradients in user_zero_stages:
logger.info(
f"The model might be runable with ZERO 2 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_2 to the global tuning space"
)
next_max_mbs, next_mbs, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_2,
prev_max_mbs=max_mbs,
prev_best_mbs=mbs,
prev_best_metric_val=metric_val)
if next_mbs > mbs:
mbs = next_mbs
max_mbs = next_max_mbs
metric_val = next_metric_val
if has_mlflow:
mlflow.log_metric(f"z2{self.metric()}", next_metric_val)
else:
logger.info(
f"The model is not runable with ZERO stage {ZeroStageEnum.gradients} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)"
)
required_gpu_mem = self.get_instantiation_memory_required_per_gpu(ZeroStageEnum.weights) + self.activation_mem
if self.gpu_mem > required_gpu_mem:
if "all" in user_zero_stages or ZeroStageEnum.weights in user_zero_stages:
logger.info(
f"The model might be runable with ZERO 3 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_3 to the global tuning space"
)
_, _, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_3,
prev_max_mbs=max_mbs,
prev_best_mbs=mbs,
prev_best_metric_val=metric_val)
if has_mlflow:
mlflow.log_metric(f"z3{self.metric()}", next_metric_val)
else:
logger.info(
f"The model has {self.get_model_num_params()} parameters and requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory per GPU with DeepSpeed Zero stage {ZeroStageEnum.weights} optimization. Memory per GPU in system is {memory_to_string(self.gpu_mem)}. No tuning is performed."
)
return
if has_mlflow:
mlflow.end_run()
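    # Rough sketch of the stage-escalation flow above: each ZeRO stage (0-3) is explored only when
    # the user's "stage" setting allows it and the estimated instantiation memory for that stage plus
    # the activation memory at micro batch size 1 fits in per-GPU memory; the best micro batch size
    # and metric found for a cheaper stage seed the search of the next stage via prev_max_mbs,
    # prev_best_mbs and prev_best_metric_val.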
def tune_space(self, tuning_space, prev_max_mbs=0, prev_best_mbs=0, prev_best_metric_val=0):
config_zero = tuning_space.get(ZERO_OPTIMIZATION, {})
stage = config_zero.get(ZERO_OPTIMIZATION_STAGE, None)
tuning_space_name = TUNING_MICRO_BATCH_SIZE_PREFIX + str(stage)
tuning_micro_batch_sizes = []
max_train_batch_size_per_gpu = 0
tuning_micro_batch_sizes_overwritten = False
# calculate max micro batch size using gpu memory, model instantiation memory and activation memory
# calculated_max_micro_batch_size = (memory_per_gpu - instantiation_memory) // activation_memory_micro_batch_size_1
calculated_max_micro_batch_size = int(
self.gpu_mem - self.get_instantiation_memory_required_per_gpu(stage)) // self.activation_mem
logger.info(
f"Start tuning for space {tuning_space_name}, calculated_max_micro_batch_size = {calculated_max_micro_batch_size}"
)
if calculated_max_micro_batch_size < prev_max_mbs:
logger.info(f"No need to tune Zero stage {stage}. End tuning for space {tuning_space_name}")
return 0, 0, 0
if TRAIN_MICRO_BATCH_SIZE_PER_GPU in self.user_config and isinstance(
self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU], list):
# user-specified micro batch size per gpu is a list which overwrites the default tuning behavior
tuning_micro_batch_sizes = [
s for s in self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] if isinstance(s, int)
]
gas = self.get_gas_from_user_config()
min_micro_batch_size = min(tuning_micro_batch_sizes)
max_micro_batch_size = max(tuning_micro_batch_sizes)
max_train_batch_size_per_gpu = max_micro_batch_size * gas
tuning_micro_batch_sizes_overwritten = True
else:
# auto-detects the list of micro batch sizes to tune
min_micro_batch_size, max_micro_batch_size = self.get_min_max_micro_batch_size(
stage, prev_max_mbs, calculated_max_micro_batch_size)
if max_micro_batch_size < prev_max_mbs:
logger.info(f"No need to tune Zero stage {stage}. End tuning for space {tuning_space_name}")
return 0, 0, 0
tuning_micro_batch_sizes, max_train_batch_size_per_gpu = self.get_tuning_micro_batch_size_list(
min_micro_batch_size,
max_micro_batch_size,
num_tuning_micro_batch_sizes=self.num_tuning_micro_batch_sizes())
logger.info(
f"tuning_micro_batch_sizes = {tuning_micro_batch_sizes}, max_train_batch_size_per_gpu = {max_train_batch_size_per_gpu}"
)
# return if the tuning_micro_batch_sizes list is empty
if not tuning_micro_batch_sizes:
logger.info(f"End tuning for space {tuning_space_name}")
return 0, 0, 0
# tune micro batch sizes and gradient accumulation steps given max_train_batch_size_per_gpu
tuning_micro_batch_sizes = self.run_tuning_micro_batch_sizes(tuning_micro_batch_sizes,
max_train_batch_size_per_gpu,
min_micro_batch_size, stage,
tuning_micro_batch_sizes_overwritten)
fast_best_record = self.get_best_space_record(tuning_space_name)
fast_best_metric_val = fast_best_record[1] if fast_best_record else 0
fast_best_mbs = fast_best_record[0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] if fast_best_record else 0
logger.info(f"fast_best_mbs = {fast_best_mbs}, name = {fast_best_record[0]['name']}")
if self.fast_enabled() or stage == 0:
logger.info(f"End tuning for space: {tuning_space_name}")
return max_micro_batch_size, fast_best_mbs, fast_best_metric_val
        # if, after tuning the micro batch size, the best metric or its micro batch size in the current ZeRO stage is no better than the corresponding value in the previous ZeRO stage, return early and skip tuning the other ZeRO configuration parameters
if stage > 0:
if fast_best_mbs <= prev_best_mbs or fast_best_metric_val < prev_best_metric_val:
logger.info(
f"End tuning for space: {tuning_space_name}. No need to tune other Zero configuration parameters.")
return max_micro_batch_size, fast_best_mbs, fast_best_metric_val
tuning_space[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = tuning_micro_batch_sizes
tuning_space_name = canonical_name(tuning_space,
tuning_keys=get_tuning_keys(tuning_space),
prefix="z" + str(stage) + "_",
omit_val=True)
logger.info(f'Tuning space is {tuning_space}')
logger.info(f'Tuning space name is {tuning_space_name}')
exps = self._generate_experiments(tuning_space, max_train_batch_size_per_gpu)
logger.info(f'Tuner type is {self.autotuning_config.tuner_type}')
if self.autotuning_config.tuner_type == AUTOTUNING_TUNER_MODELBASED:
t = ModelBasedTuner(exps, self.rm, self.metric(), tuning_space)
elif self.autotuning_config.tuner_type == AUTOTUNING_TUNER_RANDOM:
t = RandomTuner(exps, self.rm, self.metric())
else:
t = GridSearchTuner(exps, self.rm, self.metric())
sample_size = len(self.rm.nodes) * self.rm.num_gpus_per_node // (self.exp_num_gpus * self.exp_num_nodes)
num_exps = t.tune(sample_size=sample_size,
n_trials=self.autotuning_config.tuner_num_trials,
early_stopping=self.autotuning_config.tuner_early_stopping)
exp = t.best_exp
metric_val = t.best_metric_val
if exp:
self.update_records(tuning_space_name, exp, metric_val, num_exps)
full_best_record = self.get_best_space_record(tuning_space_name)
full_best_metric_val = full_best_record[1] if full_best_record else -1
if full_best_metric_val > fast_best_metric_val:
best_metric_val = full_best_metric_val
best_mbs = full_best_record[0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] if full_best_record else -1
else:
best_metric_val = fast_best_metric_val
best_mbs = fast_best_mbs
logger.info(f"End tuning for space: {tuning_space_name}")
return max_micro_batch_size, best_mbs, best_metric_val
def get_plateau_mbs(self, tuning_space_name):
if tuning_space_name not in self.records:
return 0
space_records = self.records[tuning_space_name]
sorted_space_records = sorted(space_records, key=lambda x: x[0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU])
prev_metric_val = None
prev_micro_batch_size = 0
for (exp, metric_val, _) in sorted_space_records:
if prev_metric_val:
if metric_val < prev_metric_val:
break
if (metric_val >= prev_metric_val
and (metric_val - prev_metric_val) / prev_metric_val < METRIC_PERCENT_DIFF_CONST):
break
prev_metric_val = metric_val
prev_micro_batch_size = exp[DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU]
plateau_mbs = prev_micro_batch_size
return plateau_mbs
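    # Illustrative sketch with hypothetical throughputs (not from a real run): for sorted records
    # (mbs=4, 100), (mbs=8, 109), (mbs=16, 112) and METRIC_PERCENT_DIFF_CONST = 0.05, mbs=8 improves
    # on mbs=4 by 9% so the scan continues, while mbs=16 improves on mbs=8 by under 5%, so the loop
    # breaks and get_plateau_mbs returns 8.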
def get_model_num_params(self):
if self.model_info and "num_params" in self.model_info:
return self.model_info["num_params"]
def model_info_profile_run(self):
"""Does a model information profiling experiment that collects the number of model parameters and activation memory.\
The experiment produces a "profile_model_info" folder under self.results_dir.
Returns:
[dict]: a model information dictionary, e.g., {"num_params": 335144976, "trainable_num_params": 335144976, "activation_mem_per_gpu": 324358144, "rank": 0}
"""
logger.info("Starting model info profile run.")
model_info = self.autotuning_config.model_info
if model_info and MODEL_INFO_NUM_PARAMS in model_info:
return model_info
ds_config = copy.deepcopy(self.user_config)
replace_dict(ds_config, DEFAULT_MIN_MEM_CONFIG)
model_info_path = os.path.join(self.results_dir, "profile_model_info", "model_info.json")
ds_config[AUTOTUNING] = {"enabled": True, "model_info_path": model_info_path, "model_info": {"profile": True}}
exp_config = {}
exp_name = "profile_model_info"
exp_config['name'] = exp_name
exp_config[DS_CONFIG] = ds_config
exp_config['num_gpus'] = self.exp_num_gpus
exp_config['num_nodes'] = self.exp_num_nodes
exp_path = os.path.join(self.exps_dir, f'{exp_name}.json')
with open(exp_path, 'w', buffering=BUFSIZE) as fd:
json.dump(exp_config, fd)
fd.flush()
os.fsync(fd)
self.rm.schedule_experiments([exp_path])
self.rm.run()
for exp_id, (exp_json, err) in self.rm.finished_experiments.items():
self.rm.clear()
if err:
logger.error(f"The model is not runnable with DeepSpeed with error = {err}")
return None
if os.path.exists(model_info_path):
with open(model_info_path, 'r') as f:
model_info = hjson.load(f)
return model_info
def update_records(self, space_name, exp, metric_val, num_exps):
if space_name not in self.records:
self.records[space_name] = [(exp, metric_val, num_exps)]
else:
self.records[space_name].append((exp, metric_val, num_exps))
def get_best_space_record(self, space_name):
if space_name not in self.records:
return None
space_records = self.records[space_name]
best_space_record = None
space_num_exps = 0
for (exp, metric_val, num_exps) in space_records:
space_num_exps += num_exps
if best_space_record is None or metric_val > best_space_record[1]:
best_space_record = (exp, metric_val)
if best_space_record:
best_space_record = best_space_record + (space_num_exps, )
return best_space_record
def get_best_space_records(self):
best_space_records = {}
global_best_record = None
for space_name, space_records in self.records.items():
best_space_record = self.get_best_space_record(space_name)
if best_space_record:
best_space_records[space_name] = best_space_record
if not global_best_record or best_space_record[1] > global_best_record[1]:
global_best_record = best_space_record
if global_best_record:
best_space_records[GLOBAL_TUNING_SPACE] = global_best_record
return best_space_records
def run_tuning_micro_batch_sizes(self, tuning_micro_batch_sizes, max_train_batch_size_per_gpu,
min_micro_batch_size, stage, tuning_micro_batch_sizes_overwritten):
assert tuning_micro_batch_sizes, "the tuning micro batch size list is empty"
tuning_micro_batch_sizes.sort()
max_micro_batch_size = tuning_micro_batch_sizes[-1]
max_micro_batch_size_metric_val = 0
ds_config = get_first_config(self.user_config)
ds_config[ZERO_OPTIMIZATION] = {ZERO_OPTIMIZATION_STAGE: stage}
tuning_space_name = TUNING_MICRO_BATCH_SIZE_PREFIX + str(stage)
exp_paths = []
for mbs in tuning_micro_batch_sizes:
ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
gas = max_train_batch_size_per_gpu // mbs
ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
exp_config = {}
exp_config['name'] = exp_name
exp_config[DS_CONFIG] = ds_config
exp_config['num_gpus'] = self.exp_num_gpus
exp_config['num_nodes'] = self.exp_num_nodes
exp_path = os.path.join(self.exps_dir, f'{exp_name}.json')
with open(exp_path, 'w', buffering=BUFSIZE) as fd:
json.dump(exp_config, fd)
fd.flush()
os.fsync(fd)
exp_paths.append(exp_path)
self.rm.schedule_experiments(exp_paths)
self.rm.run()
for exp_id, (exp, err) in self.rm.finished_experiments.items():
if exp:
metric_file = exp[DS_CONFIG][AUTOTUNING][AUTOTUNING_METRIC_PATH]
if os.path.exists(metric_file):
with open(metric_file, 'r') as f:
results = hjson.load(f)
metric_val = results[self.metric()]
self.update_records(tuning_space_name, exp, metric_val, 1)
if max_micro_batch_size == exp[DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU]:
max_micro_batch_size_metric_val = metric_val
if has_mlflow:
os.environ.pop('MLFLOW_RUN_ID')
mlflow.start_run(nested=True, run_name=exp['name'])
for metric in results:
mlflow.log_metric(metric, results[metric])
mlflow.end_run()
os.environ['MLFLOW_RUN_ID'] = self.mlflow_parent_id
else:
self.update_records(tuning_space_name, exp, 0, 1)
else:
mbs = exp[DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU]
logger.info(f"micro batch size = {mbs} was not run successfully")
self.rm.clear()
if tuning_micro_batch_sizes_overwritten:
return tuning_micro_batch_sizes
        # in an auto-detected tuning_micro_batch_sizes list, max_micro_batch_size might not be performant because its memory consumption is close to the limit;
        # try smaller values while gas stays the same,
        # and if a more performant mbs value is found, use it to replace max_micro_batch_size in the list
min_micro_batch_size_with_same_gas = (tuning_micro_batch_sizes[-2] +
1) if len(tuning_micro_batch_sizes) > 1 else min_micro_batch_size
prev_best_metric_val = max_micro_batch_size_metric_val
prev_best_mbs = max_micro_batch_size
stride = (max_micro_batch_size - min_micro_batch_size_with_same_gas) // 3
if stride == 0:
stride = 1
for mbs in reversed(range(min_micro_batch_size_with_same_gas, max_micro_batch_size, stride)):
ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
gas = max_train_batch_size_per_gpu // mbs
ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
exp, metric_val = self.run_ds_config(ds_config, exp_name)
            if metric_val:
                # look up this experiment's own metric file (run_ds_config only returns the target metric value)
                metric_file = exp[DS_CONFIG][AUTOTUNING][AUTOTUNING_METRIC_PATH]
                with open(metric_file, 'r') as f:
results = hjson.load(f)
metric_val = results[self.metric()]
if has_mlflow:
os.environ.pop('MLFLOW_RUN_ID')
mlflow.start_run(nested=True, run_name=exp_name)
for metric in results:
mlflow.log_metric(metric, results[metric])
mlflow.end_run()
os.environ['MLFLOW_RUN_ID'] = self.mlflow_parent_id
self.update_records(tuning_space_name, exp, metric_val, 1)
if metric_val > prev_best_metric_val * (1 + METRIC_PERCENT_DIFF_CONST):
prev_best_metric_val = metric_val
prev_best_mbs = mbs
else:
break
else:
self.update_records(tuning_space_name, exp, 0, 1)
break
if prev_best_mbs != max_micro_batch_size:
tuning_micro_batch_sizes[-1] = prev_best_mbs
return tuning_micro_batch_sizes
def get_min_max_micro_batch_size(self, stage, min_micro_batch_size, calculated_max_micro_batch_size):
# get min and max micro batch size with gradient accumulation steps = 1
if min_micro_batch_size > calculated_max_micro_batch_size:
return -1, -1
used_micro_batch_sizes = []
tuning_space_name = TUNING_MICRO_BATCH_SIZE_PREFIX + str(stage)
ds_config = get_first_config(self.user_config)
ds_config[ZERO_OPTIMIZATION] = {ZERO_OPTIMIZATION_STAGE: stage}
gas = self.get_gas_from_user_config()
ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
# search for the min micro batch size
if min_micro_batch_size < 1:
if TRAIN_MICRO_BATCH_SIZE_PER_GPU in self.user_config and isinstance(
self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU], int):
# user specifies train_micro_batch_size_per_gpu as an int
mbs = int(self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU])
else:
# user does not specify train_micro_batch_size_per_gpu or sets it to "auto" when using Hugging Face
val = self.get_val_from_user_args(TRAIN_MICRO_BATCH_SIZE_PER_GPU)
if val:
mbs = int(val)
else:
mbs = 1
assert mbs > 0, "The micro batch size per GPU must be greater than 0."
ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
exp, metric_val = self.run_ds_config(ds_config, exp_name)
if metric_val:
self.update_records(tuning_space_name, exp, metric_val, 1)
used_micro_batch_sizes.append(mbs)
min_micro_batch_size = mbs
else:
self.update_records(tuning_space_name, exp, 0, 1)
logger.info(f"User-specified micro batch size per GPU {mbs} does not run")
if self.min_train_micro_batch_size_per_gpu() == mbs:
return -1, -1
mbs = self.min_train_micro_batch_size_per_gpu()
ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
exp, metric_val = self.run_ds_config(ds_config, exp_name)
if not metric_val:
self.update_records(tuning_space_name, exp, 0, 1)
logger.info(f"min_train_micro_batch_size_per_gpu {mbs} is not runnable.")
return -1, -1
self.update_records(tuning_space_name, exp, metric_val, 1)
min_micro_batch_size = mbs
used_micro_batch_sizes.append(mbs)
else:
ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = min_micro_batch_size
ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
ds_config[TRAIN_BATCH_SIZE] = min_micro_batch_size * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(min_micro_batch_size)
exp, metric_val = self.run_ds_config(ds_config, exp_name)
if metric_val:
self.update_records(tuning_space_name, exp, metric_val, 1)
used_micro_batch_sizes.append(min_micro_batch_size)
else:
self.update_records(tuning_space_name, exp, 0, 1)
return -1, -1
# search for the max micro batch size
max_micro_batch_size = min(calculated_max_micro_batch_size, self.max_train_micro_batch_size_per_gpu())
for mbs in [math.ceil(1.05 * max_micro_batch_size), max_micro_batch_size, int(0.95 * max_micro_batch_size)]:
if mbs > self.max_train_micro_batch_size_per_gpu():
continue
if mbs in used_micro_batch_sizes:
return min_micro_batch_size, mbs
ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
exp, metric_val = self.run_ds_config(ds_config, exp_name)
if metric_val:
logger.info(f"mbs = {mbs} is found as max mbs")
self.update_records(tuning_space_name, exp, metric_val, 1)
used_micro_batch_sizes.append(mbs)
return min_micro_batch_size, mbs
else:
self.update_records(tuning_space_name, exp, 0, 1)
space_records = self.records[tuning_space_name] if tuning_space_name in self.records else []
if space_records:
prev_idx = min(range(len(space_records)),
key=lambda i: abs(space_records[i][0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] -
min_micro_batch_size))
prev_metric_val = space_records[prev_idx][1]
else:
prev_metric_val = None
low = min_micro_batch_size
high = max_micro_batch_size
# binary search until low is the smallest micro batch size that OOMs.
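        # Sketch of the loop below: a runnable mid raises low to mid + 1, a failing mid lowers high
        # to mid - 1, and the search stops early once throughput improves by less than
        # METRIC_PERCENT_DIFF_CONST, so low - 1 is used as max_micro_batch_size afterwards.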
while low <= high:
mid = int((low + high) // 2)
logger.debug(f"trying mbs = {mid}, low = {low}, high = {high}")
if mid not in used_micro_batch_sizes:
ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mid
ds_config[TRAIN_BATCH_SIZE] = mid * gas * \
self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mid)
exp, metric_val = self.run_ds_config(ds_config, exp_name)
if metric_val:
low = mid + 1
self.update_records(tuning_space_name, exp, metric_val, 1)
used_micro_batch_sizes.append(mid)
if prev_metric_val and (
(metric_val - prev_metric_val) / prev_metric_val) < METRIC_PERCENT_DIFF_CONST:
logger.info(f"performance plateaus at mbs = {low}")
break
prev_metric_val = metric_val
else:
self.update_records(tuning_space_name, exp, 0, 1)
high = mid - 1
else:
low = mid + 1
max_micro_batch_size = low - 1
logger.info(f"min_micro_batch_size = {min_micro_batch_size}, max_micro_batch_size = {max_micro_batch_size}.")
return min_micro_batch_size, max_micro_batch_size
def get_gas_from_user_config(self):
gas = 1
if GRADIENT_ACCUMULATION_STEPS in self.user_config:
gas_in_config = self.user_config[GRADIENT_ACCUMULATION_STEPS]
if isinstance(gas_in_config, int):
gas = gas_in_config
elif gas_in_config == "auto": # GRADIENT_ACCUMULATION_STEPS: "auto"
val = self.get_val_from_user_args(GRADIENT_ACCUMULATION_STEPS)
if val:
gas = int(val)
elif isinstance(gas_in_config, list):
logger.info(
f"Specifying a list of {GRADIENT_ACCUMULATION_STEPS} to tune is not supported. 1 would be used.")
assert gas > 0, "Gradient accumulation steps must be positive."
return gas
def get_val_from_user_args(self, ds_name):
arg_mappings = self.autotuning_config.arg_mappings
user_args = self.args.user_args
if arg_mappings and ds_name in arg_mappings:
arg_name = arg_mappings[ds_name]
if arg_name in user_args:
idx = user_args.index(arg_name)
if user_args[idx + 1].isnumeric():
return (user_args[idx + 1])
return None
def get_tuning_micro_batch_size_list(self, min_micro_batch_size, max_micro_batch_size,
num_tuning_micro_batch_sizes):
"""Get a list of micro batch sizes to tune based on min and max values, as well as the size of the list.
Args:
min_micro_batch_size ([int]): min micro batch size per GPU
max_micro_batch_size ([int]): max micro batch size per GPU
num_tuning_micro_batch_sizes (int): the number of items in the returned list
Returns:
[list]: a list of micro batch sizes to tune.
"""
if min_micro_batch_size <= 0 or max_micro_batch_size <= 0:
logger.info(
f"min_micro_batch_size = {min_micro_batch_size}, max_micro_batch_size = {max_micro_batch_size}")
return [], 0
# NUM_GPUS=$(( ${NUM_WORKERS} * ${NUM_GPUS_PER_WORKER} ))
# DP_SIZE=$(( ${NUM_GPUS} / (${PP_SIZE} * ${MP_SIZE}) ))
# GRAD_ACC_STEPS=$(( ${TARGET_GLOBAL_BATCH_SIZE} / (${BATCH_SIZE} * ${DP_SIZE}) ))
if self.max_train_batch_size(
) and self.max_train_batch_size() > 0: # if the user specifies a max_train_batch_size
max_train_batch_size_per_gpu = self.max_train_batch_size() * self.mp_size() // (self.exp_num_gpus *
self.exp_num_nodes)
else:
gas = self.get_gas_from_user_config()
max_train_batch_size_per_gpu = max_micro_batch_size * gas // self.mp_size()
logger.info(f"max_train_batch_size_per_gpu = {max_train_batch_size_per_gpu}")
if min_micro_batch_size < max_micro_batch_size // 2:
min_micro_batch_size = max_micro_batch_size // 2
# constant stride
stride = (max_micro_batch_size - min_micro_batch_size) // num_tuning_micro_batch_sizes
if stride == 0:
stride = 1
ls = []
min_gas = max_train_batch_size_per_gpu // max_micro_batch_size
# if gas is the same as min_gas, do not add mbs to the tuning list
for mbs in range(min_micro_batch_size, max_micro_batch_size, stride):
if max_train_batch_size_per_gpu // mbs != min_gas:
ls.append(mbs)
ls.append(max_micro_batch_size)
return ls, max_train_batch_size_per_gpu
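    # Illustrative sketch with hypothetical values (not from a real run): with
    # min_micro_batch_size = 2, max_micro_batch_size = 16, num_tuning_micro_batch_sizes = 3 and a
    # computed max_train_batch_size_per_gpu of 64 (e.g. gas = 4, mp_size = 1), the min is first
    # raised to 16 // 2 = 8 and the stride is (16 - 8) // 3 = 2; candidates 8, 10 and 12 are kept
    # (64 // mbs differs from min_gas = 4), 14 is skipped (64 // 14 == 4), and 16 is appended,
    # giving ([8, 10, 12, 16], 64).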
def run_ds_config(self, ds_config, exp_name):
exp_config = {}
exp_config['name'] = exp_name
exp_config[DS_CONFIG] = ds_config
exp_config['num_gpus'] = self.exp_num_gpus
exp_config['num_nodes'] = self.exp_num_nodes
exp_path = os.path.join(self.exps_dir, f'{exp_name}.json')
logger.debug(f'run_ds_config exp_name = {exp_name}')
with open(exp_path, 'w', buffering=BUFSIZE) as fd:
json.dump(exp_config, fd)
fd.flush()
os.fsync(fd)
self.rm.schedule_experiments([exp_path])
self.rm.run()
exp, metric_val = self.rm.parse_results(self.metric())
self.rm.clear()
return exp, metric_val
def write_optimal_config(self):
best_space_records = self.get_best_space_records()
if GLOBAL_TUNING_SPACE not in best_space_records:
return
best_exp, best_metric_val, _ = best_space_records[GLOBAL_TUNING_SPACE]
if best_exp:
exp_dir = best_exp["result_dir"]
cmd = None
with open(os.path.join(exp_dir, "cmd.txt"), "r") as f:
cmd = [str(i) for i in f.read().split()]
ds_config = hjson.load(open(os.path.join(exp_dir, "ds_config.json"), "r"))
ds_config.pop(AUTOTUNING)
ds_config_path = os.path.join(self.results_dir, "ds_config_optimal.json")
json.dump(ds_config, open(ds_config_path, "w"))
cmd_path = os.path.join(self.results_dir, "cmd_optimal.txt")
with open(cmd_path, "w") as fd:
fd.write(" ".join(cmd))
fd.write("\n")
fd.flush()
self.optimal_cmd = cmd
self.optimal_ds_config = ds_config
logger.info(
f"Wrote the optimal DeepSpeed configuration found by autotuning to {ds_config_path}, and the corresponding DeepSpeed command to {cmd_path}"
)
def run_after_tuning(self):
""" Launches the training with the optimal DeepSpeed configuration found through the autotuning process.
"ds_config_optimal.json" describing the optimal DeepSpeed configuration as well the command used to launch training "cmd_optimal.txt" are saved to self.results_dir.
"""
if self.optimal_cmd:
result = subprocess.Popen(self.optimal_cmd)
result.wait()
logger.info(f"Done running with the optimal DeepSpeed configuration using {self.optimal_cmd}")
else:
logger.info(f"No optimal DeepSpeed configuration found by autotuning.")
| 54,160 | 47.749775 | 512 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/autotuning/constants.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#########################################
# autotuner implementation constants
#########################################
import os
DEFAULT_TEMPLATE_PATH_ZERO_0 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
"template_zero0.json")
DEFAULT_TEMPLATE_PATH_ZERO_1 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
"template_zero1.json")
DEFAULT_TEMPLATE_PATH_ZERO_2 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
"template_zero2.json")
DEFAULT_TEMPLATE_PATH_ZERO_3 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
"template_zero3.json")
METRIC_PERCENT_DIFF_CONST = 0.05
DS_CONFIG = "ds_config"
BUFSIZE = 1 # line buffer size for writing files
#########################################
# autotuner configuration constants
#########################################
# Autotuner. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
AUTOTUNING_FORMAT = """
autotuner should be enabled as:
"session_params": {
"autotuning": {
"enabled": true,
"start_step": 5,
"end_step": 15
}
}
"""
AUTOTUNING = "autotuning"
AUTOTUNING_ENABLED = "enabled"
AUTOTUNING_ENABLED_DEFAULT = False
AUTOTUNING_FAST = "fast"
AUTOTUNING_FAST_DEFAULT = True
AUTOTUNING_RESULTS_DIR = "results_dir"
AUTOTUNING_RESULTS_DIR_DEFAULT = "autotuning_results"
AUTOTUNING_EXPS_DIR = "exps_dir"
AUTOTUNING_EXPS_DIR_DEFAULT = "autotuning_exps"
AUTOTUNING_OVERWRITE = "overwrite"
AUTOTUNING_OVERWRITE_DEFAULT = True
AUTOTUNING_START_PROFILE_STEP = "start_profile_step"
AUTOTUNING_START_PROFILE_STEP_DEFAULT = 3
AUTOTUNING_END_PROFILE_STEP = "end_profile_step"
AUTOTUNING_END_PROFILE_STEP_DEFAULT = 5
AUTOTUNING_METRIC_PATH = "metric_path"
AUTOTUNING_METRIC_PATH_DEFAULT = None
AUTOTUNING_TUNER_TYPE = "tuner_type"
AUTOTUNING_TUNER_GRIDSEARCH = "gridsearch"
AUTOTUNING_TUNER_RANDOM = "random"
AUTOTUNING_TUNER_MODELBASED = "model_based"
AUTOTUNING_TUNER_TYPE_DEFAULT = AUTOTUNING_TUNER_GRIDSEARCH
AUTOTUNING_TUNER_EARLY_STOPPING = "tuner_early_stopping"
AUTOTUNING_TUNER_EARLY_STOPPING_DEFAULT = 5
AUTOTUNING_TUNER_NUM_TRIALS = "tuner_num_trials"
AUTOTUNING_TUNER_NUM_TRIALS_DEFAULT = 50
AUTOTUNING_ARG_MAPPINGS = "arg_mappings"
AUTOTUNING_ARG_MAPPINGS_DEFAULT = None
AUTOTUNING_MAX_TRAIN_BATCH_SIZE = "max_train_batch_size"
AUTOTUNING_MAX_TRAIN_BATCH_SIZE_DEFAULT = None
AUTOTUNING_MIN_TRAIN_BATCH_SIZE = "min_train_batch_size"
AUTOTUNING_MIN_TRAIN_BATCH_SIZE_DEFAULT = 1
AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU = "max_train_micro_batch_size_per_gpu"
AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = 1024
AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU = "min_train_micro_batch_size_per_gpu"
AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = 1
AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES = "num_tuning_micro_batch_sizes"
AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES_DEFAULT = 3
AUTOTUNING_MP_SIZE = "mp_size"
AUTOTUNING_MP_SIZE_DEFAULT = 1
AUTOTUNING_METRIC = "metric"
AUTOTUNING_METRIC_LATENCY = "latency"
AUTOTUNING_METRIC_THROUGHPUT = "throughput"
AUTOTUNING_METRIC_FLOPS = "flops"
AUTOTUNING_METRIC_FORWARD = "forward"
AUTOTUNING_METRIC_BACKWRAD = "flops"
AUTOTUNING_METRIC_STEPS = "step"
AUTOTUNING_METRIC_DEFAULT = AUTOTUNING_METRIC_THROUGHPUT
#########################################
# MODEL INFO
#########################################
AUTOTUNING_MODEL_INFO_PATH = "model_info_path"
AUTOTUNING_MODEL_INFO_PATH_DEFAULT = None
MODEL_INFO_FORMAT = '''
"model_info": {
"num_params": 1000000000,
"hidden_size": 10,
"num_layers": 12,
}
'''
MODEL_INFO = "model_info"
MODEL_INFO_PROFILE = "profile"
MODEL_INFO_PROFILE_DEFAULT = False
MODEL_INFO_NUM_PARAMS = "num_params"
MODEL_INFO_NUM_PARAMS_DEFAULT = None
MODEL_INFO_HIDDEN_SIZE = "hidden_size"
MODEL_INFO_HIDDEN_SIZE_DEFAULT = None
MODEL_INFO_NUM_LAYERS = "num_layers"
MODEL_INFO_NUM_LAYERS_DEFAULT = None
MODEL_INFO_KEY_DEFAULT_DICT = {
MODEL_INFO_PROFILE: MODEL_INFO_PROFILE_DEFAULT,
MODEL_INFO_NUM_PARAMS: MODEL_INFO_NUM_PARAMS_DEFAULT,
MODEL_INFO_HIDDEN_SIZE: MODEL_INFO_HIDDEN_SIZE_DEFAULT,
MODEL_INFO_NUM_LAYERS: MODEL_INFO_NUM_LAYERS_DEFAULT
}
#########################################
# autotuner search space constants
#########################################
DEFAULT_HF_CONFIG = {
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"gradient_accumulation_steps": "auto",
}
DEFAULT_MIN_MEM_CONFIG = {
"train_micro_batch_size_per_gpu": 1,
"zero_optimization": {
"stage": 3
},
"memory_break_down": False
}
DEFAULT_TUNING_SPACE_ZERO_0 = {"zero_optimization": {"stage": 0}}
DEFAULT_TUNING_SPACE_ZERO_1 = {
"zero_optimization": {
"stage": 1,
"reduce_bucket_size": [5e7, 5e8, 1e9],
"allgather_bucket_size": [5e7, 5e8, 1e9],
}
}
DEFAULT_TUNING_SPACE_ZERO_2 = {
"zero_optimization": {
"stage": 2,
"overlap_comm": [True, False],
"reduce_scatter": [False, True],
"reduce_bucket_size": [5e7, 5e8, 1e9],
"allgather_bucket_size": [5e7, 5e8, 1e9],
"contiguous_gradients": [False, True]
},
}
DEFAULT_TUNING_SPACE_ZERO_3 = {
"zero_optimization": {
"stage": 3,
"overlap_comm": [True, False],
"reduce_scatter": [False, True],
"reduce_bucket_size": [5e7, 5e8, 1e9],
"allgather_partitions": [True, False],
"allgather_bucket_size": [5e7, 5e8, 1e9],
"contiguous_gradients": [False, True]
},
}
GLOBAL_TUNING_SPACE = 'global'
# TUNING_MICRO_BATCH_SIZE_PREFIX="tune_micro_batch_size_z"
TUNING_MICRO_BATCH_SIZE_PREFIX = "z"
| 5,943 | 30.956989 | 108 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/autotuning/utils.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import re
import collections.abc
import os
import json
from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, TRAIN_MICRO_BATCH_SIZE_PER_GPU
import itertools
import copy
from ..utils import logger
def search_error(filename):
if not os.path.exists(filename):
return "stderr.log does not exist"
with open(filename) as f:
for line in f:
for s in ["Error", "error", "ERROR"]:
idx = line.find(s)
if idx != -1:
return line[idx + len(s):].lstrip(": ")
return None
def was_interruptted(filename):
if not os.path.exists(filename):
return "stderr.log does not exist"
with open(filename) as f:
for line in f:
s = "KeyboardInterrupt"
idx = line.find(s)
if idx != -1:
return True
return False
def find_replace_str(value, replace_dict):
if not isinstance(value, str):
return str(value)
matches = re.findall(r"\$[A-Za-z0-9_]+", value)
for var in matches:
var_key = var.replace("$", "").lower()
if var_key == "nvme_path":
continue
assert var_key in replace_dict, f"unknown var key: {var_key}, in {replace_dict}"
if isinstance(replace_dict[var_key], str):
value = value.replace(var, replace_dict[var_key])
else:
assert len(matches) == 1, "unable to replace multiple non-string matches"
value = replace_dict[var_key]
return value
def find_replace(target, replace_dict):
if isinstance(target, dict):
for key, value in target.items():
if isinstance(value, str):
target[key] = find_replace_str(value, replace_dict)
if isinstance(value, list):
for i in range(len(value)):
value[i] = find_replace_str(value[i], replace_dict)
if isinstance(value, dict):
find_replace(value, replace_dict)
elif isinstance(target, list):
for i in range(len(target)):
target[i] = str(find_replace_str(target[i], replace_dict))
def get_list(val):
if not isinstance(val, list):
return [val]
else:
return val
def combine_dict(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = combine_dict(d.get(k, {}), v)
else:
if k not in d:
d[k] = v
else:
if not isinstance(d[k], list):
d[k] = [d[k]]
d[k].extend(i for i in get_list(v) if i not in d[k])
return d
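# Illustrative sketch with hypothetical dicts (not from a real config): combining
# d = {"zero_optimization": {"stage": 1}} with u = {"zero_optimization": {"stage": 2}, "fp16": True}
# merges the nested mappings and turns the clashing scalar values into a list, giving
# {"zero_optimization": {"stage": [1, 2]}, "fp16": True}.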
def del_if_exists(t, d):
"""Deletes a key from a dictionary if it exists.
Args:
t (string): target key to delete
d (dict): dictionary to delete from
"""
if t in d:
del d[t]
return
for k, v in d.items():
if isinstance(v, collections.abc.Mapping):
del_if_exists(t, v)
def replace_dict(d, u, ignored_keys=[]):
"""Replaces values in dict d with values in dict u.
Args:
d (dict): the target dict to overwrite
u (dict): the dict containing the values to overwrite the target dict
Returns:
dict d with values overwritten by the corresponding ones in dict u.
"""
if u is not None:
for k, v in u.items():
if k not in ignored_keys:
if v is None:
del_if_exists(k, d)
continue
if isinstance(v, collections.abc.Mapping):
d[k] = replace_dict(d.get(k, {}), v, ignored_keys)
else:
d[k] = v
return d
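# Illustrative sketch with hypothetical dicts: replace_dict({"train_batch_size": 8, "fp16": {"enabled": False}},
# {"train_batch_size": None, "fp16": {"enabled": True}}) deletes "train_batch_size" (a None value means
# delete) and overwrites the nested value, returning {"fp16": {"enabled": True}}.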
def get_val_by_key(d: dict, k):
if k in d:
return d[k]
for v in d.values():
if isinstance(v, dict):
return get_val_by_key(v, k)
return None
def set_val_by_key(d: dict, k, vv):
if k in d:
d[k] = vv
for v in d.values():
if isinstance(v, dict):
set_val_by_key(v, k, vv)
def fetch_hostfile(hostfile_path):
if not os.path.isfile(hostfile_path):
logger.warning("Unable to find hostfile, will proceed with training "
"with local resources only.")
return None
# e.g., worker-0 slots=16
with open(hostfile_path, 'r') as fd:
resource_pool = collections.OrderedDict()
for line in fd.readlines():
line = line.strip()
if line == '':
# skip empty lines
continue
try:
hostname, slots = line.split()
_, slot_count = slots.split("=")
slot_count = int(slot_count)
except ValueError as err:
logger.error("Hostfile is not formatted correctly, unable to "
"proceed with training.")
raise err
if hostname in resource_pool:
logger.error("Hostfile contains duplicate hosts, unable to "
"proceed with training.")
raise ValueError("host {} is already defined".format(hostname))
resource_pool[hostname] = slot_count
return resource_pool
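# Illustrative sketch with a hypothetical hostfile: a file containing the two lines
#   worker-0 slots=16
#   worker-1 slots=16
# yields OrderedDict([("worker-0", 16), ("worker-1", 16)]).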
def validate_ds_config(config: dict):
def is_False(config: dict, key):
if config is None:
return False
return bool(config.get(key))
config_zero = config.get("zero_optimization", {})
if not config_zero:
return True
stage = config_zero.get("stage")
offload = False
if stage == 1:
return True
elif stage == 2:
if is_False(config_zero, "cpu_offload") and is_False(config_zero, "cpu_offload_params"):
return False
elif stage == 3:
offload_devices = ["cpu", "nvme"]
if config_zero.get("offload_optimizer", {}).get("device") in offload_devices:
offload = True
if config_zero.get("offload_param", {}).get("device") in offload_devices:
offload = True
else:
return True
# HF requires that "ZeRO Offload can only work with DeepSpeed optimizers"
if offload and not config.get("optimizer"):
return False
return True
def remove_dupe_dicts(l):
""" Removes duplicate dictionaries from a list. Uses list comprehension and the json library to sort and stringify each dictionary and the set data type to ensure unique values. Works with nested data structures.
Args:
l (list): a list of (nested) data structures.
Returns:
A list of unique values.
"""
list_of_strings = [json.dumps(d, sort_keys=True) for d in l]
list_of_strings = set(list_of_strings)
return [json.loads(s) for s in list_of_strings]
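# Illustrative sketch with hypothetical inputs: remove_dupe_dicts([{"a": 1, "b": 2}, {"b": 2, "a": 1}, {"a": 1}])
# keeps one copy of the first two dicts (identical once serialized with sort_keys) plus {"a": 1};
# note that the order of the returned list is not preserved because it goes through a set.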
def prune_config(config, ignored_keys=[]):
""" Prunes the input configurations
Args:
configs (dict): A configuration dictionary.
ignored_keys (list, optional): the keys of the sections to delete. Defaults to [].
Returns:
A configuration dictionary.
"""
if ignored_keys:
for k in ignored_keys:
def find_del_key(d: dict, k: str):
if k in d:
del d[k]
else:
for dd in d.values():
if isinstance(dd, dict):
find_del_key(dd, k)
find_del_key(config, k)
def prune_configs(configs, ignored_keys=[]):
""" Prunes the input list of configurations
Args:
configs (list): A list of configuration dictionaries.
ignored_keys (list, optional): the keys of the sections to delete. Defaults to [].
Returns:
A list of valid and unique configuration dictionaries.
"""
pruned_list = []
for config in configs:
prune_config(config, ignored_keys)
pruned_list.append(config)
return remove_dupe_dicts(pruned_list)
def get_tuning_keys(tuning_space: dict):
"""Outputs the list of tunable parameters in the tuning space dict.
Args:
tuning_space (dict): a configuration dictionary containing tunable parameters as lists of values.
Returns:
A list of strings
"""
tuning_keys = []
for key, val in tuning_space.items():
if isinstance(val, dict):
tuning_keys.extend(get_tuning_keys(val))
if isinstance(val, list) and len(val) > 1:
tuning_keys.append(key)
return tuning_keys
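# Illustrative sketch with a hypothetical space: get_tuning_keys({"zero_optimization": {"stage": 2,
# "reduce_bucket_size": [5e7, 5e8]}}) returns ["reduce_bucket_size"], since only list values with
# more than one element are treated as tunable.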
def get_all_configs(tuning_space: dict, ignore_keys=None):
""" Splits the tuning space dictionary to result in all combinations of values.
Args:
tuning_space (dict): the tuning space where tunable parameters are lists of values.
"""
def gen_combinations(d: dict):
keys, values = d.keys(), d.values()
for v in values:
if not isinstance(v, list):
v = [v]
values_choices = (gen_combinations(v) if isinstance(v, dict) else get_list(v) for v in values)
for comb in itertools.product(*values_choices):
yield dict(zip(keys, comb))
all_configs = []
ignored_key_vals = {}
    for ik in (ignore_keys or []):  # ignore_keys may be None (the default)
ignored_key_vals[ik] = tuning_space.get(ik, {})
del_if_exists(ik, tuning_space)
for c in gen_combinations(tuning_space):
replace_dict(c, ignored_key_vals)
all_configs.append(c)
return all_configs
def canonical_name(config: dict, tuning_keys=None, prefix="", omit_val=False):
""" Generates a name from the acronyms of the tuning keys in the config dict. TRAIN_MICRO_BATCH_SIZE_PER_GPU is always included in the tuning keys.
Args:
config (dict): the config dict used to generate the name
tuning_keys (list, optional): the tuning keys used to generate the name. Defaults to None.
prefix (str, optional): a string added to the beginning of the name. Defaults to None.
"""
if TRAIN_MICRO_BATCH_SIZE_PER_GPU not in tuning_keys:
tuning_keys.append(TRAIN_MICRO_BATCH_SIZE_PER_GPU)
if GRADIENT_ACCUMULATION_STEPS not in tuning_keys:
tuning_keys.append(GRADIENT_ACCUMULATION_STEPS)
tuning_keys.sort()
def get_offload_name(offload_config):
cname = ""
if offload_config is None:
return "None_"
for key, val in offload_config.items():
key = "".join(map(lambda c: c[0], key.split('_')))
if (isinstance(val, int) or isinstance(val, float)) and val > 9000:
cname += key + '{:.1e}'.format(val) + "_"
else:
if isinstance(val, bool):
val = "T" if val else "F"
cname += f"{key}{val}_"
return cname
def get_name_by_keys(config: dict, tuning_keys=None, omit_val=False):
cname = ""
if not tuning_keys or config is None:
return cname
for key, val in config.items():
# skip the arg_mappings section when naming the exp file
if key == "arg_mappings":
continue
if key == "offload_param":
cname += "op_"
if not omit_val:
cname += get_offload_name(val)
continue
if key == "offload_optimizer":
cname += "oo_"
if not omit_val:
cname += get_offload_name(val)
continue
# recursively call the func to get name for the child dicts
if isinstance(val, dict):
n = get_name_by_keys(val, tuning_keys, omit_val=omit_val)
if n != "":
cname += n + "_"
if tuning_keys and key not in tuning_keys:
continue
key_str = "".join(map(lambda c: c[0], key.split('_')))
if not omit_val:
if (isinstance(val, int) or isinstance(val, float)) and val > 9000:
cname += key_str + '{:.1e}'.format(val) + "_"
else:
if isinstance(val, bool):
val = "T" if val else "F"
cname += f"{key_str}{val}_"
else:
cname += key_str + "_"
return cname[:-1]
name = get_name_by_keys(config, tuning_keys, omit_val=omit_val)
return prefix + (name if name != "" else "exp")
def get_first_config(config: dict):
if not config:
return None
cfg = copy.deepcopy(config)
for key, val in cfg.items():
if isinstance(val, dict):
if key == "optimizer": # use user defined optimizer which might have lists of values as params
cfg[key] = val
else:
cfg[key] = get_first_config(val)
if isinstance(val, list) and len(val) > 0:
cfg[key] = val[0]
return cfg
def write_experiments(exps: list, exps_dir: str):
exp_paths = []
for exp in exps:
exp_name = exp['name']
# write the expr config to a json file
exp_path = os.path.join(exps_dir, f'{exp_name}.json')
with open(exp_path, 'w') as fd:
json.dump(exp, fd)
exp_paths.append(exp_path)
return exp_paths
def memory_to_string(n, postfix="", units=None, precision=2):
if units is None:
if n // 10**12 > 0:
return str(round(n / 1024**4, precision)) + " T" + postfix
if n // 10**9 > 0:
return str(round(n / 1024**3, precision)) + " G" + postfix
elif n // 10**6 > 0:
return str(round(n / 1024**2, precision)) + " M" + postfix
elif n // 10**3 > 0:
return str(round(n / 1014, precision)) + " K" + postfix
else:
return str(n) + " "
else:
if units == "T":
return str(round(n / 1024**4, precision)) + " " + units
if units == "G" + postfix:
return str(round(n / 1024**3, precision)) + " " + units
elif units == "M" + postfix:
return str(round(n / 1024**2, precision)) + " " + units
elif units == "K" + postfix:
return str(round(n / 1024, precision)) + " " + units
else:
return str(n) + " "
def number_to_string(n, postfix="", units=None, precision=2):
if units is None:
if n // 10**9 > 0:
return str(round(n / 1000**3, precision)) + " B" + postfix
if n // 10**6 > 0:
return str(round(n / 1000**2, precision)) + " M" + postfix
elif n // 10**3 > 0:
return str(round(n / 1000**1, precision)) + " K" + postfix
else:
return str(n) + " "
else:
if units == "B" + postfix:
return str(round(n / 1000**3, precision)) + " " + units
elif units == "M" + postfix:
return str(round(n / 1000**2, precision)) + " " + units
elif units == "K" + postfix:
return str(round(n / 1000**1, precision)) + " " + units
else:
return str(n) + " "
| 15,053 | 31.726087 | 216 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/autotuning/scheduler.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import copy
from numpy import BUFSIZE
import json
import subprocess
import sys
import threading
import time
import base64
import os
import hjson
from tqdm import tqdm
from ..utils import logger
from .constants import AUTOTUNING, AUTOTUNING_METRIC_PATH
from .utils import get_val_by_key, search_error, was_interruptted
"""
thread-0: loop over the experiment queue, dispatching experiments as resources become available
thread-N: start each experiment in its own thread
"""
from deepspeed import comm as dist
TIMEOUT = 5
class ResourceManager:
def __init__(self, args, hosts, num_gpus_per_node, results_dir, exps_dir, arg_mappings):
self.results_dir = results_dir
self.exps_dir = exps_dir
self.nodes = []
self.num_gpus_per_node = num_gpus_per_node
for host in hosts:
self.nodes.append(Node(host, num_gpus_per_node))
self.experiment_queue = []
self.running_experiments = {}
self.finished_experiments = {}
self.experiment_count = 0
self.exp_paths = set()
self.args = args
self.arg_mappings = {}
if arg_mappings is not None:
for k, v in arg_mappings.items():
k = k.strip()
v = v.strip()
if k not in self.arg_mappings:
self.arg_mappings[k] = v
def schedule_experiments(self, exp_paths):
for exp_path in exp_paths:
if exp_path in self.exp_paths:
continue
else:
self.exp_paths.add(exp_path)
with open(exp_path, "r") as fd:
exp = hjson.load(fd)
exp["exp_id"] = self.experiment_count
self.experiment_count += 1
result_dir = exp["result_dir"] = os.path.join(self.results_dir, exp['name'])
if AUTOTUNING in exp["ds_config"]:
metric_file = os.path.join(result_dir, "metrics.json")
exp["ds_config"][AUTOTUNING][AUTOTUNING_METRIC_PATH] = metric_file
stderr_file = os.path.join(result_dir, "stderr.log")
model_info_file = os.path.join(result_dir, "model_info.json")
metric_file = os.path.join(result_dir, "metrics.json")
# skip existing experiments (except for the ones that were interrupted)
if os.path.exists(result_dir) and os.path.exists(stderr_file):
if not was_interruptted(stderr_file):
err = search_error(stderr_file)
exp_id = exp["exp_id"]
self.finished_experiments[exp_id] = (exp, err)
if err or os.path.exists(metric_file) or os.path.exists(model_info_file):
logger.info(f"Skipping exp {exp['name']} whose result already exists")
continue
self.experiment_queue.append(exp)
def run_job(self, exp: dict, reservations):
exp_id = exp["exp_id"]
exp["master_port"] = self.args.master_port + exp_id
exp["result_dir"] = os.path.join(self.results_dir, exp['name'])
user_script = self.args.user_script
user_args = self.args.user_args
# overwrite the user arg in the arg_mappings
for key, val in self.arg_mappings.items():
nval = get_val_by_key(exp, key)
if nval and str(nval) != "auto":
if val in user_args:
idx = user_args.index(val)
user_args[idx + 1] = str(nval)
else:
user_args.append(val)
user_args.append(str(nval))
t = threading.Thread(target=run_experiment, args=(exp, reservations, user_script, user_args))
t.start()
self.running_experiments[exp_id] = (t, exp, reservations, time.time())
def experiment_check(self, pbar):
finished_exps = []
for exp_id, exp_data in self.running_experiments.items():
thread, exp_json, reservations, start_time = exp_data
logger.debug(f"Checking exp_id = {exp_id}, alive = {thread.is_alive()}")
thread.join(timeout=TIMEOUT)
if not thread.is_alive():
exp_dir = exp_json["result_dir"]
stderr_file = os.path.join(exp_dir, "stderr.log")
err = search_error(stderr_file)
finished_exps.append((exp_id, reservations))
self.finished_experiments[exp_id] = (exp_json, err)
duration = time.time() - start_time
logger.debug(f"Finished exp_id = {exp_id}, duration={duration:.2f} sec")
pbar.update(len(finished_exps))
for exp_id, reservations in finished_exps:
for reservation in reservations:
reservation.restore_slots()
self.running_experiments.pop(exp_id)
time.sleep(TIMEOUT)
def resource_request(self, exp):
num_gpus, num_nodes = exp['num_gpus'], exp['num_nodes']
slot_request = num_gpus
reservations = []
for node in self.nodes:
if num_nodes == 0:
break
slots = node.reserve_slots(slot_request=slot_request)
if slots:
reservations.append(Reservation(node=node, slots=slots))
num_nodes -= 1
if num_nodes == 0:
# request satisfied
return reservations
else:
# request not satisfied
for reservation in reservations:
reservation.restore_slots()
def status(self):
status = ""
for node in self.nodes:
status += f"{node.host} ({len(node.idle_slots)} idle gpus), "
return status[:-1]
def run(self):
pbar = tqdm(total=len(self.experiment_queue))
while len(self.experiment_queue) > 0:
exp = self.experiment_queue.pop(0)
logger.debug(f'Popped exp_id = {exp["exp_id"]} from the queue')
logger.debug(f'Resource status: {self.status()}')
reservations = self.resource_request(exp)
if not reservations:
logger.debug(f'Unable to schedule exp_id = {exp["exp_id"]}')
self.experiment_queue.insert(0, exp)
logger.debug(f'Put exp_id = {exp["exp_id"]} back into the queue')
self.experiment_check(pbar)
else:
desc = ""
for reservation in reservations:
reservation.slots.sort()
slots = ",".join(map(str, reservation.slots))
desc += f"{reservation.node.host}:{slots}@"
desc = desc[:-1]
logger.debug(f'Running exp_id = {exp["exp_id"]} on {desc}')
self.run_job(exp, reservations)
# All pending experiments are scheduled, waiting for them to complete
while len(self.running_experiments) > 0:
self.experiment_check(pbar)
def save_exp_results_to_database(self, message, ranks=None, path=None):
"""Print message when one of following condition meets
+ not dist.is_initialized()
+ dist.get_rank() in ranks if ranks is not None or ranks = [-1]
Args:
message (str)
ranks (list)
path (str)
"""
should_log = not dist.is_initialized()
ranks = ranks or []
my_rank = dist.get_rank() if dist.is_initialized() else -1
if ranks and not should_log:
should_log = ranks[0] == -1
should_log = should_log or (my_rank in set(ranks))
logger.debug(f"*** Should log: {should_log}")
if should_log:
message['rank'] = my_rank
with open(path, 'a') as outfile:
json.dump(message, outfile)
outfile.write('\n')
def parse_results(self, metric):
""" Parses the metric file of the finished experiments to select the optimal DeepSpeed configuration.
Args:
            metric (str): the metric whose value is maximized when ranking the finished experiments.
        Returns:
            The description of the experiment with the best metric value, and that metric value.
"""
max_throughput = sys.float_info.min
best_exp_id = -1
for exp_id, (exp, err) in self.finished_experiments.items():
if err:
logger.info(
f"The experiment exp_id = {exp_id}, exp_name = {exp['name']}, did not run successfully with error = {err}, thus a metrics.txt does not exist for it. Check the stderr.log in {exp['result_dir']}"
)
continue
metric_file = exp["ds_config"][AUTOTUNING][AUTOTUNING_METRIC_PATH]
if os.path.exists(metric_file):
with open(metric_file, 'r') as f:
results = hjson.load(f)
curr_throughput = results[metric]
if curr_throughput > max_throughput:
max_throughput = curr_throughput
best_exp_id = exp_id
exp['results'] = results
if best_exp_id != -1:
best_exp, _ = self.finished_experiments[best_exp_id]
return best_exp, max_throughput
return exp, None
def clear(self):
"""Clear experiment queues, does not reset self.experiment_count
"""
self.experiment_queue = []
# clean up the running experiments
for exp_id, exp_data in self.running_experiments.items():
thread, exp_json, reservations, start_time = exp_data
clean_up(exp_json, reservations)
self.running_experiments = {}
self.finished_experiments = {}
self.exp_paths = set()
class Node:
def __init__(self, host, max_slots):
self.host = host
self.max_slots = max_slots
self.idle_slots = list(range(max_slots))
def reserve_slots(self, slot_request: int) -> list:
if len(self.idle_slots) >= slot_request:
return [self.idle_slots.pop(0) for _ in range(slot_request)]
def restore_slots(self, slots: list):
self.idle_slots += slots
class Reservation:
def __init__(self, node, slots):
self.node = node
self.slots = slots
def restore_slots(self):
self.node.restore_slots(self.slots)
def desc(self):
slots = ",".join(map(str, self.slots))
return f"{self.node.host}:{slots}@"
def get_job_id():
# Infrastructure-specific job-id
infra_job_id = None
if "DLWS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLWS_JOB_ID"]
elif "DLTS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLTS_JOB_ID"]
else:
infra_job_id = "unknown-job-id"
return infra_job_id
def get_user():
user = None
if "USER" in os.environ:
user = os.environ["USER"]
else:
user = "unknown-user"
return user
def run_experiment(exp: dict, reservations, user_script, user_args):
include_str = ""
for reservation in reservations:
reservation.slots.sort()
slots = ",".join(map(str, reservation.slots))
include_str += f"{reservation.node.host}:{slots}@"
include_str = include_str[:-1]
master_port = exp["master_port"]
exp["launcher_args"] = [
"--include",
f"{include_str}",
"--master_port",
str(master_port),
]
logger.debug(f'launcher args={exp["launcher_args"]}')
exp["user"] = get_user()
exp["job_id"] = get_job_id()
exp_dir = exp["result_dir"]
os.makedirs(exp_dir, exist_ok=True)
ds_config_path = os.path.join(exp_dir, "ds_config.json")
exp["ds_config_path"] = ds_config_path
ds_config = copy.deepcopy(exp["ds_config"])
ds_config_json = json.dumps(ds_config).encode('utf-8')
exp["ds_config_base64"] = base64.urlsafe_b64encode(ds_config_json).decode('utf-8')
with open(exp["ds_config_path"], "w", buffering=BUFSIZE) as fd:
json.dump(ds_config, fd)
fd.flush()
os.fsync(fd)
path = exp["ds_config_path"]
logger.info(f"Scheduler wrote ds_config to {path}, {os.path.abspath(path)}")
with open(os.path.join(exp_dir, "exp.json"), "w", buffering=BUFSIZE) as fd:
json.dump(exp, fd)
fd.flush()
os.fsync(fd)
path = os.path.join(exp_dir, "exp.json")
logger.info(f"Scheduler wrote exp to {path}, {os.path.abspath(path)}")
# remove "--deepspeed_config ds_config.json" from user_args
if user_args:
if "--deepspeed_config" in user_args:
idx = user_args.index("--deepspeed_config")
# "--deepspeed_config" is omitted in HF
elif "--deepspeed" in user_args:
idx = user_args.index("--deepspeed")
assert idx < len(user_args), "there is no ds_config file specified after --deepspeed_config or --deepspeed"
# user_args[idx + 1] = exp["ds_config_path"]
# pass base64 serialized ds_config to launcher
user_args[idx + 1] = exp["ds_config_base64"]
exp["user_script"] = user_script
exp["user_args"] = user_args
cmd = ["deepspeed"] + exp["launcher_args"] + [user_script] + user_args
assert len(exp["launcher_args"]) > 0, "must provide launcher args"
with open(os.path.join(exp_dir, "cmd.txt"), "w", buffering=BUFSIZE) as fd:
fd.write(" ".join(cmd))
fd.write("\n")
fd.flush()
os.fsync(fd)
logger.info(
f"Launching exp_id = {exp['exp_id']}, exp_name = {exp['name']}, with resource = {include_str}, and ds_config = {os.path.abspath(ds_config_path)}"
)
with open(os.path.join(exp_dir, "stdout.log"), "wb") as out, open(os.path.join(exp_dir, "stderr.log"),
"wb") as err:
result = subprocess.Popen(cmd, stdout=out, stderr=err)
result.wait()
out.flush()
err.flush()
os.fsync(out)
os.fsync(err)
clean_up(exp, reservations)
logger.info(f"Done running exp_id = {exp['exp_id']}, exp_name = {exp['name']}, with resource = {include_str}")
PDSH_MAX_FAN_OUT = 1024
def clean_up(exp: dict, reservations):
env = os.environ.copy()
env['PDSH_RCMD_TYPE'] = 'ssh'
nodes_str = ""
for reservation in reservations:
nodes_str += f"{reservation.node.host},"
nodes_str = nodes_str[:-1]
logger.debug(f"Cleaning up exp_id = {exp['exp_id']} on the following workers: {nodes_str}")
# PDSH flags for max node fan out and specific hosts to launch on
# See https://linux.die.net/man/1/pdsh for flag details
pdsh_cmd = ['pdsh', '-f', str(PDSH_MAX_FAN_OUT), '-w', nodes_str]
kill_cmd = [
'pkill',
'-f',
exp['name'],
]
cmd = pdsh_cmd + kill_cmd
logger.debug("cmd = {}".format(' '.join(cmd)))
result = subprocess.Popen(cmd, env=env)
result.wait()
# In case of failure must propagate the error-condition back to the caller (usually shell). The
# actual error and traceback should have been printed in the subprocess, so in order to avoid
# unnecessary noise we just quietly exit here with the same code as the subprocess
if result.returncode > 0:
sys.exit(result.returncode)
logger.info(f"Done cleaning up exp_id = {exp['exp_id']} on the following workers: {nodes_str}")
| 15,644 | 35.299304 | 213 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/autotuning/config.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.runtime.config_utils import get_scalar_param, get_dict_param, DeepSpeedConfigObject
from deepspeed.autotuning.constants import *
class DeepSpeedAutotuningConfig(DeepSpeedConfigObject):
def __init__(self, param_dict):
super(DeepSpeedAutotuningConfig, self).__init__()
self.enabled = None
self.start_step = None
self.end_step = None
self.metric_path = None
self.arg_mappings = None
self.metric = None
self.model_info = None
self.results_dir = None
self.exps_dir = None
self.overwrite = None
if param_dict and AUTOTUNING in param_dict.keys():
autotuning_dict = param_dict[AUTOTUNING]
else:
autotuning_dict = {}
self._initialize(autotuning_dict)
def _initialize(self, autotuning_dict):
self.enabled = get_scalar_param(autotuning_dict, AUTOTUNING_ENABLED, AUTOTUNING_ENABLED_DEFAULT)
self.fast = get_scalar_param(autotuning_dict, AUTOTUNING_FAST, AUTOTUNING_FAST_DEFAULT)
self.results_dir = get_scalar_param(autotuning_dict, AUTOTUNING_RESULTS_DIR, AUTOTUNING_RESULTS_DIR_DEFAULT)
assert self.results_dir, "results_dir cannot be empty"
self.exps_dir = get_scalar_param(autotuning_dict, AUTOTUNING_EXPS_DIR, AUTOTUNING_EXPS_DIR_DEFAULT)
assert self.exps_dir, "exps_dir cannot be empty"
self.overwrite = get_scalar_param(autotuning_dict, AUTOTUNING_OVERWRITE, AUTOTUNING_OVERWRITE_DEFAULT)
self.start_profile_step = get_scalar_param(autotuning_dict, AUTOTUNING_START_PROFILE_STEP,
AUTOTUNING_START_PROFILE_STEP_DEFAULT)
self.end_profile_step = get_scalar_param(autotuning_dict, AUTOTUNING_END_PROFILE_STEP,
AUTOTUNING_END_PROFILE_STEP_DEFAULT)
self.metric = get_scalar_param(autotuning_dict, AUTOTUNING_METRIC, AUTOTUNING_METRIC_DEFAULT)
self.metric_path = get_scalar_param(autotuning_dict, AUTOTUNING_METRIC_PATH, AUTOTUNING_METRIC_PATH_DEFAULT)
self.tuner_type = get_scalar_param(autotuning_dict, AUTOTUNING_TUNER_TYPE, AUTOTUNING_TUNER_TYPE_DEFAULT)
self.tuner_early_stopping = get_scalar_param(autotuning_dict, AUTOTUNING_TUNER_EARLY_STOPPING,
AUTOTUNING_TUNER_EARLY_STOPPING_DEFAULT)
self.tuner_num_trials = get_scalar_param(autotuning_dict, AUTOTUNING_TUNER_NUM_TRIALS,
AUTOTUNING_TUNER_NUM_TRIALS_DEFAULT)
self.arg_mappings = get_dict_param(autotuning_dict, AUTOTUNING_ARG_MAPPINGS, AUTOTUNING_ARG_MAPPINGS_DEFAULT)
self.model_info = get_model_info_config(autotuning_dict)
self.model_info_path = get_scalar_param(autotuning_dict, AUTOTUNING_MODEL_INFO_PATH,
AUTOTUNING_MODEL_INFO_PATH_DEFAULT)
self.mp_size = get_scalar_param(autotuning_dict, AUTOTUNING_MP_SIZE, AUTOTUNING_MP_SIZE_DEFAULT)
self.max_train_batch_size = get_dict_param(autotuning_dict, AUTOTUNING_MAX_TRAIN_BATCH_SIZE,
AUTOTUNING_MAX_TRAIN_BATCH_SIZE_DEFAULT)
self.min_train_batch_size = get_dict_param(autotuning_dict, AUTOTUNING_MIN_TRAIN_BATCH_SIZE,
AUTOTUNING_MIN_TRAIN_BATCH_SIZE_DEFAULT)
self.max_train_micro_batch_size_per_gpu = get_dict_param(
autotuning_dict, AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU,
AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT)
self.min_train_micro_batch_size_per_gpu = get_dict_param(
autotuning_dict, AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU,
AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT)
self.num_tuning_micro_batch_sizes = get_dict_param(autotuning_dict, AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES,
AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES_DEFAULT)
def get_model_info_config(param_dict):
if MODEL_INFO in param_dict and param_dict[MODEL_INFO] is not None:
model_info_config = {}
for key, default_value in MODEL_INFO_KEY_DEFAULT_DICT.items():
model_info_config[key] = get_scalar_param(param_dict[MODEL_INFO], key, default_value)
return model_info_config
return None
def get_default_model_info_config():
return MODEL_INFO_KEY_DEFAULT_DICT
| 4,633 | 45.808081 | 117 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/autotuning/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .autotuner import Autotuner
| 129 | 17.571429 | 38 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/autotuning/tuner/utils.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import numpy as np
import itertools
from ..utils import *
import collections.abc
def index_to_feature(p, dims):
"""convert index form (single integer) to feature form (vector)"""
feature = []
for dim in dims:
feature.append(p % dim)
p //= dim
return feature
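# Example (sketch): with dims = [2, 3], index_to_feature(5, dims) == [1, 2]
# (5 % 2 = 1, then 2 % 3 = 2), and feature_to_index([1, 2], dims) == 5,
# so the two functions are inverses for a mixed-radix encoding of the tuning space.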
def feature_to_index(feature, dims):
"""convert feature form (vector) to index form (single integer)"""
p = 0
for j, k in enumerate(feature):
print("j:", "k:", k, "dims", dims[:j])
p += int(np.prod(dims[:j])) * k
return p
def dict_to_dims(tuning_space):
dims = []
for key, val in tuning_space.items():
if isinstance(val, dict):
dims.extend(dict_to_dims(val))
elif isinstance(val, list):
dims.append(len(val))
else:
dims.append(1)
return dims
def gen_combinations(d: dict):
keys, values = d.keys(), d.values()
    # dict values recurse into nested combinations; other values are normalized to lists by get_list
    values_choices = (gen_combinations(v) if isinstance(v, dict) else get_list(v) for v in values)
for comb in itertools.product(*values_choices):
yield dict(zip(keys, comb))
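# Example (sketch, assuming get_list wraps scalars and returns lists unchanged):
# gen_combinations({"a": [1, 2], "b": {"c": [3, 4]}}) yields
#   {"a": 1, "b": {"c": 3}}, {"a": 1, "b": {"c": 4}}, {"a": 2, "b": {"c": 3}}, {"a": 2, "b": {"c": 4}}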
def flatten(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
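# Example (sketch): flatten({"zero_optimization": {"stage": 2}}) returns
# {"zero_optimization_stage": 2}, which is how nested ds_configs are turned into
# flat feature dictionaries for the cost model.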
def dict_to_feature(feature_dict, keys, max_value=None):
"""Extract values from dict"""
feature = []
for key, val in feature_dict.items(): # First level
if key not in keys:
continue
if val is None or val == "auto" or key == "autotuning" or val == "":
continue
if isinstance(val, dict):
            feature.append(dict_to_feature(val, keys, max_value))
else:
feature.append(float(val))
# normalization, should not matter in tree models
if max_value is not None:
norm_feature = []
for f, mv in zip(feature, max_value):
norm_feature.append(f / mv)
feature = norm_feature
return feature
| 2,329 | 25.781609 | 98 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/autotuning/tuner/base_tuner.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import sys
from deepspeed.autotuning.constants import *
from deepspeed.autotuning.utils import write_experiments
from deepspeed.utils import logger
class BaseTuner:
def __init__(self, exps, resource_manager, metric):
self.all_exps = exps
self.rm = resource_manager
self.best_iter = 0
self.best_exp = None
self.best_metric_val = None
self.metric = metric if metric else AUTOTUNING_METRIC_DEFAULT
logger.info(f"total number of exps = {len(self.all_exps)}")
def has_next(self):
"""Whether there exists more configurations for evaluation"""
if len(self.all_exps) > 0:
return True
else:
return False
def next_batch(self, sample_size):
"""Select the next batch of configurations for evaluation"""
raise NotImplementedError
def update(self):
""""Update the tuner with what configurations have been evaluated and their performance results"""
def tune(self, sample_size=1, n_trials=1000, early_stopping=None):
i = 0
try:
while i < n_trials and self.has_next():
# Select the next batch of configuration for evaluation
sampled_exps = self.next_batch(sample_size)
# Generate experiments for measurement of performance
exp_paths = write_experiments(sampled_exps, self.rm.exps_dir)
self.rm.schedule_experiments(exp_paths)
self.rm.run()
exp, metric_val = self.rm.parse_results(self.metric)
if self.best_exp is None or self.best_metric_val is None or (metric_val
and metric_val > self.best_metric_val):
# logger.info(f"tuner finds better = {exp}")
self.best_exp = exp
self.best_metric_val = metric_val
self.best_iter = i
i += len(sampled_exps)
# Update the tuner with evaluated performance results
self.update()
self.rm.clear()
# Early stop if no more promising configurations are likely to be found
if early_stopping and i >= self.best_iter + early_stopping:
logger.info(
f"Tuner early stopped at iteration {i}. Best iteration is {self.best_iter}. Early stopping threshold is {early_stopping}"
)
break
return i
        except Exception:
            logger.info(f"Tuner Error: {sys.exc_info()[0]}")
return i
| 2,754 | 36.739726 | 145 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/autotuning/tuner/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .index_based_tuner import RandomTuner, GridSearchTuner
# from .ga_tuner import GATuner
from .model_based_tuner import ModelBasedTuner
| 235 | 25.222222 | 59 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/autotuning/tuner/cost_model.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .utils import *
try:
import xgboost as xgb
except ImportError:
xgb = None
class XGBoostCostModel():
def __init__(self, loss_type, num_threads=None, log_interval=25, upper_model=None):
assert xgb is not None, "missing requirements, please install deepspeed w. 'autotuning_ml' extra."
self.loss_type = loss_type
if loss_type == "reg":
self.xgb_params = {
"max_depth": 3,
"gamma": 0.0001,
"min_child_weight": 1,
"subsample": 1.0,
"eta": 0.3,
"lambda": 1.0,
"alpha": 0,
"objective": "reg:linear",
}
elif loss_type == "rank":
self.xgb_params = {
"max_depth": 3,
"gamma": 0.0001,
"min_child_weight": 1,
"subsample": 1.0,
"eta": 0.3,
"lambda": 1.0,
"alpha": 0,
"objective": "rank:pairwise",
}
else:
raise RuntimeError("Invalid loss type: " + loss_type)
self.xgb_params["verbosity"] = 0
if num_threads:
self.xgb_params["nthread"] = num_threads
def fit(self, xs, ys):
x_train = np.array(xs, dtype=np.float32)
y_train = np.array(ys, dtype=np.float32)
y_max = np.max(y_train)
y_train = y_train / max(y_max, 1e-9)
index = np.random.permutation(len(x_train))
dtrain = xgb.DMatrix(x_train[index], y_train[index])
self.bst = xgb.train(self.xgb_params, dtrain)
def predict(self, xs):
features = xgb.DMatrix(xs)
return self.bst.predict(features)
| 1,820 | 26.179104 | 106 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/autotuning/tuner/index_based_tuner.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import random
from .base_tuner import BaseTuner
class RandomTuner(BaseTuner):
"""Explore the search space in random order"""
def __init__(self, exps: list, resource_manager, metric):
super().__init__(exps, resource_manager, metric)
def next_batch(self, sample_size=1):
if sample_size > len(self.all_exps):
sample_size = len(self.all_exps)
sampled_batch = random.sample(self.all_exps, sample_size)
self.all_exps = [x for x in self.all_exps if x not in sampled_batch]
return sampled_batch
class GridSearchTuner(BaseTuner):
"""Explore the search space in sequential order"""
def __init__(self, exps: list, resource_manager, metric):
super().__init__(exps, resource_manager, metric)
def next_batch(self, sample_size=1):
if sample_size > len(self.all_exps):
sample_size = len(self.all_exps)
sampled_batch = self.all_exps[0:sample_size]
self.all_exps = [x for x in self.all_exps if x not in sampled_batch]
return sampled_batch
| 1,158 | 27.268293 | 76 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/autotuning/tuner/model_based_tuner.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import hjson
from ..constants import AUTOTUNING, AUTOTUNING_METRIC_PATH
from .base_tuner import BaseTuner
from .cost_model import XGBoostCostModel
from .utils import *
from ..utils import *
import numbers
from ..constants import AUTOTUNING_METRIC_LATENCY
INIT_NUM = 2
class ModelBasedTuner(BaseTuner):
"""Exploring the search space with a cost model"""
def __init__(self, exps: list, resource_manager, metric, tuning_space):
super().__init__(exps, resource_manager, metric)
self.tuning_space = tuning_space
self.best_iter = 0
self.all_configs = [e['ds_config'] for e in exps]
self.num_all_configs = len(self.all_configs)
self.dims = dict_to_dims(self.tuning_space)
logger.info(f"Create config dim: {self.dims}, all configs: {self.num_all_configs}")
self.visited = set([])
self.trials = []
self.trial_pt = 0
init_num = min(INIT_NUM, self.num_all_configs)
for _ in range(init_num):
            # always seed the first trial with config 0, then fill with random unvisited configs
            exp_feature = 0
while exp_feature in self.visited:
exp_feature = np.random.randint(self.num_all_configs)
self.trials.append(exp_feature)
self.visited.add(exp_feature)
self.cost_model = XGBoostCostModel("rank")
self.evaluated_configs = []
self.evaluated_perf = []
self.train_ct = 0
self.random_exploration_ratio = 0.2 # do random exploration
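        # Overall loop (see update()/next_batch() below): fit the cost model on the configs
        # evaluated so far, rank all remaining configs with it, then evaluate the top-ranked
        # ones, occasionally replacing a pick with a random unvisited config.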
def find_estimated_top_configs(self):
"""Use the cost model to predict the estimated performance of configurations and find the top ones for the next round of evaluation"""
configs = []
for c in self.all_configs:
flattened_ds_config = flatten(c)
feature_val = []
for k, v in flattened_ds_config.items():
if isinstance(v, numbers.Number):
feature_val.append(v)
configs.append(feature_val)
# print(configs)
# TODO the current implementation requires that all configs have the same shape.
configs = np.array(configs, dtype=np.float32)
estimates = self.cost_model.predict(configs)
n = len(estimates)
top_idx = np.argsort(estimates)
top_idx_ret = top_idx if self.metric == AUTOTUNING_METRIC_LATENCY else top_idx[::-1][:n]
# top_configs = [self.all_configs[i] for i in top_idx]
return top_idx_ret
def next_batch(self, sample_size):
sampled_batch = []
counter = 0
while counter < sample_size:
if len(self.visited) >= self.num_all_configs:
break
while self.trial_pt < len(self.trials):
logger.debug(f"trials: {self.trials}")
# Select top promising trials
index = self.trials[self.trial_pt]
if index not in self.visited:
break
self.trial_pt += 1
            # `index` now points at the top-ranked unvisited trial. With probability
            # random_exploration_ratio, explore a random unvisited config instead, to
            # avoid over-exploiting the cost model's ranking.
            rand = np.random.rand()
            if rand < self.random_exploration_ratio:
                index = np.random.randint(self.num_all_configs)
                while index in self.visited:
                    index = np.random.randint(self.num_all_configs)
# Need to track both the sampled configs and indices
sampled_batch.append(self.all_exps[index])
self.visited.add(index)
counter += 1
return sampled_batch
def has_next(self):
return len(self.visited) < self.num_all_configs
def update(self):
for exp_id, (exp, err) in self.rm.finished_experiments.items():
feature_val = []
if err:
logger.info(
f"Skipping exp_id = {exp_id}, exp_name = {exp['name']}, the experiment did not run successfully with error = {err}, thus a metrics.txt does not exist for it. Please check the stderr.log in {exp['result_dir']}"
)
ds_config = exp["ds_config"]
flattened_ds_config = flatten(ds_config)
for k, v in flattened_ds_config.items():
if isinstance(v, numbers.Number):
feature_val.append(v)
self.evaluated_configs.append(feature_val)
self.evaluated_perf.append(0.0)
continue
p = exp["ds_config"][AUTOTUNING][AUTOTUNING_METRIC_PATH]
with open(p, 'r') as f:
results = hjson.load(f)
curr_iter = results[self.metric]
logger.debug(f"parsing the results for {exp_id}, Result is {curr_iter}")
ds_config = exp["ds_config"]
flattened_ds_config = flatten(ds_config)
for k, v in flattened_ds_config.items():
if isinstance(v, numbers.Number):
feature_val.append(v)
self.evaluated_configs.append(feature_val)
self.evaluated_perf.append(curr_iter)
logger.debug(f"**Evaluated configs: {len(self.evaluated_configs)}, evaluated perf: {self.evaluated_perf}")
self.cost_model.fit(self.evaluated_configs, self.evaluated_perf)
estimated_top_configs = self.find_estimated_top_configs()
self.trials = estimated_top_configs
self.trial_pt = 0
self.train_ct += 1
| 5,612 | 34.525316 | 229 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from . import adam
from . import adagrad
from . import lamb
#from ..git_version_info_installed import installed_ops as __installed_ops__
#if __installed_ops__['sparse_attn']:
from . import sparse_attention
from . import transformer
from .transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
from ..git_version_info import compatible_ops as __compatible_ops__
| 477 | 27.117647 | 78 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/sparse_attention/softmax.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
# https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py
import torch
import triton
import triton.language as tl
def next_power_of_2(n):
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n += 1
return n
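# Bit-smearing trick: propagating the highest set bit of n-1 rightward and adding one
# rounds up to a power of two, e.g. next_power_of_2(5) == 8 and next_power_of_2(8) == 8.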
def num_warps(n):
if n < 512:
return 4
if n < 2048:
return 8
return 16
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[6] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[6] * meta['BLOCK'])})
@triton.jit
def _forward(X, scale, LUT, RPE, KP_M, ATTN_M, sizemax, stride_zx, stride_zrpe, stride_hrpe, stride_srpe, stride_zkpm,
stride_zattnm, **meta):
TN = meta['TN']
BLOCK = meta['BLOCK']
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
# create index ranges
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
# extract information from LUT
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
# block id and column id
blockid = tl.load(LUT + offset + rbmn * 4 + 0)
columnid = tl.load(LUT + offset + rbmn * 4 + 1)
rowid = tl.load(LUT + offset + rbmn * 4 + 2)
headid = tl.load(LUT + offset + rbmn * 4 + 3)
# pointers to X
px = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
x = tl.load(px, mask=check, other=-float('inf'))
x = x.to(tl.float32)
# apply scale
if meta['APPLY_SCALE']:
x = x * scale
# apply RPE
if meta['APPLY_RPE']:
prpe = RPE + pidz * stride_zrpe + headid * stride_hrpe + columnid * BLOCK + rowid * BLOCK * stride_srpe + rxm * stride_srpe + rxn
rpe = tl.load(prpe, mask=check, other=0)
x = x + rpe
# apply key-padding mask
if meta['APPLY_KP_MASK']:
pkp_m = KP_M + pidz * stride_zkpm + columnid * BLOCK + rxn
kp_m = tl.load(pkp_m, mask=check, other=-float('inf'))
if meta['KP_MASK_MUL']:
kp_m = tl.where(kp_m == 0, -float('inf'), 0.)
x = x + kp_m
# apply attention mask
if meta['APPLY_ATTN_MASK']:
pattn_m = ATTN_M + columnid * BLOCK + rowid * BLOCK * stride_zattnm + rxm * stride_zattnm + rxn
attn_m = tl.load(pattn_m, mask=check, other=-float('inf'))
if meta['ATTN_MASK_MUL']:
attn_m = tl.where(attn_m == 0, -float('inf'), 0.)
x = x + attn_m
# computation
x = tl.softmax(x)
tl.store(px, x, mask=check)
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[4] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[4]) * meta['BLOCK']})
@triton.jit
def _backward(X, scale, DX, LUT, sizemax, stride_zx, stride_zdx, **meta):
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
TN = meta['TN']
BLOCK = meta['BLOCK']
# create index ranges
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
# extract information from look-up table
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
# bounds checking on lut
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
# initialize pointers to block-sparse input
blockid = tl.load(LUT + offset + rbmn * 4)
X = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
DX = DX + pidz * stride_zdx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
# compute fused softmax backward
x = tl.load(X, mask=check, other=0)
dx = tl.load(DX, mask=check, other=0)
x = x.to(tl.float32)
dx = dx.to(tl.float32)
y = x * (dx - tl.sum(x * dx, 0)) * scale
tl.store(DX, y, mask=check)
class _sparse_softmax(torch.autograd.Function):
bwd_kernels = dict()
@staticmethod
def make_lut(layout, block, device):
_empty = torch.tensor([], dtype=torch.int64, device=layout.device)
sizes = _empty.clone()
# sizes along rows
for h in range(layout.shape[0]):
sizes = torch.cat((sizes, layout[h, :, :].sum(-1)))
# offsets in block format
offsets = torch.zeros_like(sizes)
offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
# block indices
idx = torch.arange(layout.sum())
head = layout.nonzero()[:, 0]
rows = layout.nonzero()[:, 1]
columns = layout.nonzero()[:, 2]
core = torch.stack((idx, columns, rows, head), dim=1).view(-1)
# construct look-up table
offsets = offsets * 4 + 2 * sizes.numel()
header = torch.stack((sizes, offsets), dim=1).view(-1)
lut = torch.cat((header, core)).type(torch.int32).to(device)
return lut, int(sizes.max())
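    # LUT layout (as built above): a header of (size, offset) pairs, one per block-row,
    # followed by 4-int records (block id, column id, row id, head id) for every
    # non-zero block; the kernels index into it using the offsets from the header.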
@staticmethod
def forward(ctx, x, scale, rpe, key_padding_mask, attn_mask, kp_mask_mode, attn_mask_mode, spdims, block, lut,
num_blocks, maxlut, bench, time):
apply_scale = False if scale == 1.0 else True
# handle None rpe
if rpe is None:
apply_rpe = False
stride_zrpe, stride_hrpe, stride_srpe = 0, 0, 0
rpe = torch.empty(0, dtype=x.dtype, device=x.device)
else:
apply_rpe = True
stride_zrpe, stride_hrpe, stride_srpe = rpe.stride(0), rpe.stride(1), rpe.stride(2)
# handle None key_padding_mask
if key_padding_mask is None:
apply_kp_mask = False
stride_zkpm = 0
key_padding_mask = torch.empty(0, dtype=x.dtype, device=x.device)
else:
apply_kp_mask = True
stride_zkpm = key_padding_mask.stride(0)
# handle None attention_mask
if attn_mask is None:
apply_attn_mask = False
stride_zattnm = 0
attn_mask = torch.empty(0, dtype=x.dtype, device=x.device)
else:
apply_attn_mask = True
stride_zattnm = attn_mask.stride(0)
# run kernel
M = x.shape[0]
meta = {
'BLOCK': block,
'APPLY_SCALE': apply_scale,
'APPLY_RPE': apply_rpe,
'APPLY_KP_MASK': apply_kp_mask,
'APPLY_ATTN_MASK': apply_attn_mask,
'KP_MASK_MUL': kp_mask_mode == 'mul',
'ATTN_MASK_MUL': attn_mask_mode == 'mul',
}
grid = lambda opt: [spdims[0] * spdims[1] * block, M]
_forward[grid](x, scale, lut, rpe, key_padding_mask, attn_mask, maxlut, x.stride(0),\
stride_zrpe, stride_hrpe, stride_srpe, stride_zkpm, stride_zattnm, **meta)
# save to context
ctx.mark_dirty(x)
ctx.save_for_backward(x, lut)
ctx.spdims = spdims
ctx.block = block
ctx.maxlut = maxlut
ctx.scale = scale
ctx.apply_scale = apply_scale
ctx.apply_rpe = apply_rpe
ctx.apply_kp_mask = apply_kp_mask
ctx.apply_attn_mask = apply_attn_mask
ctx.kp_mask_mode = kp_mask_mode
ctx.attn_mask_mode = attn_mask_mode
return x
@staticmethod
def backward(ctx, dx):
# retrieve from context
x, lut = ctx.saved_tensors
# run kernel
M = x.shape[0]
grid = lambda opt: [ctx.spdims[0] * ctx.spdims[1] * ctx.block, M]
_backward[grid](x, ctx.scale, dx, lut, ctx.maxlut, x.stride(0), dx.stride(0), BLOCK=ctx.block)
return dx, None, None, None, None, None, None, None, None, None, None, None, None, None, None
class Softmax:
"""Block-Sparse Softmax class; this class computes softmax on a block sparse matrix. It is also able to apply either/all of the following masks:
- relative position embedding
- key padding mask
- attention mask
For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
"""
    @staticmethod
    def sparse_softmax(*args, **kwargs):
return _sparse_softmax.apply(*args, **kwargs)
def make_lut(self, device):
"""Generates the sparsity layout used in block-sparse softmax
"""
key = (device, )
if key not in self.lut_cache:
self.lut_cache[key] = _sparse_softmax.make_lut(self.layout, self.block, device)
return self.lut_cache[key]
def __init__(self, layout, block, bench=False):
"""Initialize the Block-Sparse Softmax class.
Arguments:
layout: required: sparsity layout tensor
block: required: an integer determining the block size.
bench: optional: set if you want to do benchmarking
"""
self.num_blocks = layout.sum().item()
self.spdims = layout.shape
self.layout = layout
self.block = block
self.bench = bench
self.lut_cache = dict()
def __call__(self,
x,
scale=1.,
rpe=None,
key_padding_mask=None,
attn_mask=None,
key_padding_mask_mode='add',
attn_mask_mode='add'):
"""Applies softmax on a Block-Sparse input tensor.
For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
Arguments:
x: required: a block-sparse tensor that softmax is applied on it; computation will be in place and result will be returned in the same tensor
scale: optional: a float value; x values will be multiplied by this value before normalization. Default value is 1.0.
rpe: optional: a tensor same dimension as x that is used as relative position embedding
key_padding_mask: optional: a mask tensor of size (BatchSize X SequenceLength)
attn_mask: optional: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported
            key_padding_mask_mode: optional: a string determining if key_padding_mask needs to be added ('add') or multiplied ('mul')
            attn_mask_mode: optional: a string determining if attn_mask needs to be added ('add') or multiplied ('mul')
Return:
x: a block-sparse tensor contains normalized input x using softmax; and masks applied if given
"""
time_y = [None]
if rpe is not None and rpe.dtype != x.dtype:
raise ValueError('relative position embedding must be %s' % x.dtype)
if attn_mask is not None and attn_mask.dtype != x.dtype:
raise ValueError('Attention mask must be %s' % x.dtype)
if key_padding_mask is not None and key_padding_mask.dtype != x.dtype:
raise ValueError('Key padding mask must be %s' % x.dtype)
lut, maxlut = self.make_lut(x.device)
x = Softmax.sparse_softmax(x, scale, rpe, key_padding_mask, attn_mask, key_padding_mask_mode, attn_mask_mode,
self.spdims, self.block, lut, self.num_blocks, maxlut, self.bench, time_y)
self.time_y = time_y[0]
return x
| 11,322 | 37.124579 | 154 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/sparse_attention/sparse_attention_utils.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from torch.nn import functional as F
from deepspeed.ops.sparse_attention import BertSparseSelfAttention, SparsityConfig
'''
This file contains few utility functions to handle adapting pretrained model with sparse self-attention module.
'''
class SparseAttentionUtils:
"""This class provides some utility functions that are use integrating sparse attention into transformer models.
Such utilities include extending position embeddings, replacing current self-attention layer with sparse attention, padding sequences to multiple of block size, etc.
"""
@staticmethod
def extend_position_embedding(model, max_position):
"""This function extends the position embedding weights of a model loaded from a checkpoint.
It assumes the new max position is bigger than the original max length.
Arguments:
model: required: a transformer model
max_position: required: an integer determining new position embedding size
Return:
model: updated model; in which position embedding weights have been extended based on new size
"""
if hasattr(model, 'bert'):
original_max_position = model.bert.embeddings.position_embeddings.weight.size(0)
assert max_position > original_max_position
extend_multiples = max(1, max_position // original_max_position)
model.bert.embeddings.position_embeddings.weight.data = model.bert.embeddings.position_embeddings.weight.repeat(
extend_multiples, 1)
elif hasattr(model, 'roberta'):
# RoBERTa has positions 0 & 1 reserved, so embedding size is max position + 2
original_max_position, embed_size = model.roberta.embeddings.position_embeddings.weight.shape
original_max_position -= 2
extend_multiples = max(1, max_position // original_max_position)
assert max_position > original_max_position
max_position += 2
extended_position_embedding = model.roberta.embeddings.position_embeddings.weight.new_empty(
max_position, embed_size)
k = 2
for i in range(extend_multiples):
extended_position_embedding[k:(
k + original_max_position)] = model.roberta.embeddings.position_embeddings.weight[2:]
k += original_max_position
model.roberta.embeddings.position_embeddings.weight.data = extended_position_embedding
else:
raise ValueError(
'Please extend \"extend_position_embedding\" function to support your model type. It currently only supports \"bert\" & \"roberta\"!'
)
model.config.max_position_embeddings = max_position
print(f'Extended position embeddings to {original_max_position * extend_multiples}')
return model
@staticmethod
def update_tokenizer_model_max_length(tokenizer, max_position):
"""This function updates the position embedding length of a tokenizer to a new max position.
Arguments:
tokenizer: required: a transformer tokenizer
max_position: required: an integer determining new position embedding size
Return:
tokenizer: updated tokenizer; in which model maximum length has been extended based on new size
"""
tokenizer.model_max_length = max_position
tokenizer.init_kwargs['model_max_length'] = max_position
        print(f'updated tokenizer model maximum length to {max_position}')
return tokenizer
@staticmethod
def replace_model_self_attention_with_sparse_self_attention(
model,
max_position,
# SparsityConfig parameters needs to be set accordingly
sparsity_config=SparsityConfig(num_heads=4)):
"""This function replaces the self attention layers in model encoder with sparse self attention.
It currently supports bert and roberta model and can be easily extended to any other models following similar steps here.
For sparsityConfig, refer to the config class.
Arguments:
model: required: a transformer model
max_position: required: an integer determining new position embedding size
sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class
Return:
model: updated model; in which self attention layer has been replaced with DeepSpeed Sparse Self Attention layer.
"""
if hasattr(model, 'bert'):
model.config.max_position_embeddings = max_position
model.replace_self_attention_layer_with_sparse_self_attention_layer(model.config, model.bert.encoder.layer,
sparsity_config)
elif hasattr(model, 'roberta'):
model.config.max_position_embeddings = max_position + 2
model.replace_self_attention_layer_with_sparse_self_attention_layer(model.config,
model.roberta.encoder.layer,
sparsity_config)
else:
raise ValueError(
'Please extend \"update_model_self_attention_to_sparse_self_attention\" function to support \
your model type. It currently only supports \"bert\" & \"roberta\"!')
return model
@staticmethod
def replace_self_attention_layer_with_sparse_self_attention_layer(
config,
layers,
# SparsityConfig parameters needs to be set accordingly
sparsity_config=SparsityConfig(num_heads=4)):
"""This function replaces the self attention layers in attention layer with sparse self attention.
For sparsityConfig, refer to the config class.
Arguments:
config: required: transformer model config
layers: required: transformer model attention layers
sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class
Return:
layers: updated attention layers; in which self attention layers have been replaced with DeepSpeed Sparse Self Attention layer.
"""
for layer in layers:
deepspeed_sparse_self_attn = BertSparseSelfAttention(config, sparsity_config)
deepspeed_sparse_self_attn.query = layer.attention.self.query
deepspeed_sparse_self_attn.key = layer.attention.self.key
deepspeed_sparse_self_attn.value = layer.attention.self.value
layer.attention.self = deepspeed_sparse_self_attn
return layers
@staticmethod
def pad_to_block_size(block_size, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds,
pad_token_id, model_embeddings):
"""This function pads input tokens and attention mask on sequence length dimension to be multiple of block size.
This is a requirement for Sparse Transformer in which the self attention layer works on sequences of length multiple of block size.
It needs to be called in your model, such as BertModel, right before you calculate the embedding outputs.
Note)
        1- instead of passing your embedding layer to this function, you can simply add this function to your model. It can be simplified further if the given attention_mask and/or token_type_ids are None.
2- you need to call unpad function before returning your model output to unpad the encoder sequence output.
Arguments:
block_size: required: an integer determining the block size of sparsity config.
pad_token_id: required: an integer determining the pad token from the model config; such as bert.config.pad_token_id.
input_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary
attention_mask: a torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences.
token_type_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
position_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the indices of positions of each input sequence tokens in the position embeddings.
inputs_embeds: an optional torch.FloatTensor of shape [batch_size, sequence_length, hidden_size] that contains embedded representation and can be passed instead of input_ids directly.
model_embeddings: an optional object. If inputs_embeds are not none, this will be your model embeddings such as BertEmbeddings from your model such as BertModel. You can move this function inside your model and use self.embeddings instead of passing this parameter.
Return:
pad_len: an integer determining how much inputs have been padded to transfer sequence length dimension to multiple of block size.
input_ids: if input_ids are not none padded input_ids otherwise none.
attention_mask: if attention_mask is not none padded attention_mask otherwise none.
token_type_ids: if token_type_ids are not none padded token_type_ids otherwise none.
position_ids: if position_ids are not none padded position_ids otherwise none.
inputs_embeds: if inputs_embeds are not none padded inputs_embeds otherwise none.
"""
batch_size, seq_len = input_ids.shape if input_ids is not None else inputs_embeds.shape[:-1]
pad_len = (block_size - seq_len % block_size) % block_size
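        # e.g. block_size=16 and seq_len=70 gives pad_len=10, so the padded length 80
        # is a multiple of the sparsity block size; pad_len is 0 when already aligned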
if pad_len > 0:
if inputs_embeds is not None:
pad_input_ids = inputs_embeds.new_full((batch_size, pad_len), pad_token_id, dtype=torch.long)
pad_inputs_embeds = model_embeddings(pad_input_ids)
inputs_embeds = torch.cat([inputs_embeds, pad_inputs_embeds], dim=-2)
# may not be needed as input_ids are not used if inputs_embeds are given
if input_ids is not None:
input_ids = F.pad(input_ids, (0, pad_len), value=pad_token_id)
if position_ids is not None:
# pad position_id with pad_token_id
position_ids = F.pad(position_ids, (0, pad_len), value=pad_token_id)
# pad attention mask without attention on the padding tokens
attention_mask = F.pad(attention_mask, (0, pad_len), value=False)
# pad token_type_ids with token_type_id = 0
token_type_ids = F.pad(token_type_ids, (0, pad_len), value=0)
return pad_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
@staticmethod
def unpad_sequence_output(pad_len, sequence_output):
"""This function unpads sequence output if inputs of the model were padded.
This is a requirement for Sparse Transformer in which the self attention layer works on sequences of length multiple of block size.
It needs to be called in your model, such as BertModel, right before you return the model outputs.
Arguments:
pad_len: required: an integer determining how much model inputs have been padded to transfer sequence length dimension to multiple of block size.
sequence_output: required: sequence output of the encoder layer.
Return:
            sequence_output: unpadded sequence output of the encoder layer.
"""
if (pad_len > 0):
sequence_output = sequence_output[:, :-pad_len]
return sequence_output
| 12,300 | 57.856459 | 335 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/sparse_attention/sparsity_config.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import random
class SparsityConfig:
"""Abstract Configuration class to store `sparsity configuration of a self attention layer`.
    It contains the properties shared by different block-sparse sparsity patterns. Each subclass extends it with the properties and functionality its specific pattern requires.
"""
def __init__(self, num_heads, block=16, different_layout_per_head=False):
"""Initialize the Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
"""
self.num_heads = num_heads
self.block = block
self.different_layout_per_head = different_layout_per_head
self.num_layout_heads = num_heads if different_layout_per_head else 1
def setup_layout(self, seq_len):
"""Create layout tensor for the given sequence length
Arguments:
            seq_len: required: an integer determining the sequence length for which the layout is built; must be divisible by the block size.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) for sparsity layout of all head; initialized with zero
"""
if (seq_len % self.block != 0):
            raise ValueError(f'Sequence Length, {seq_len}, needs to be divisible by Block size {self.block}!')
num_blocks = seq_len // self.block
# TODO Currently we allocate layout per head; needs to be updated if heads share a single layout.
layout = torch.zeros((self.num_heads, num_blocks, num_blocks), dtype=torch.int64)
return layout
def check_and_propagate_first_head_layout(self, layout):
"""If all heads require same sparsity layout, it propagate first head layout to all heads
Arguments:
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head
"""
if not self.different_layout_per_head:
layout[1:self.num_heads, :, :] = layout[0, :, :]
return layout
class DenseSparsityConfig(SparsityConfig):
"""Configuration class to store `Dense` configuration.
In reality, this is not sparse and all blocks are used. We keep it for the sake of comparison and comprehension.
"""
def __init__(self, num_heads, block=16, different_layout_per_head=False):
"""Initialize the Dense Sparsity Pattern Config.
In reality, this is not sparse and all blocks are used. We keep it for the sake of comparison and comprehension.
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
            block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: this is just for the sake of consistency with other sparsity formats; can ignore it for DenseSparsityConfig
"""
super().__init__(num_heads, block, different_layout_per_head)
def make_layout(self, seq_len):
"""Set 1 to all blocks of the layout meaning the pattern is dense; not sparse.
Arguments:
            seq_len: required: an integer determining the underlying sequence length; must be <= max sequence length
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; for dense everything is 1
"""
layout = self.setup_layout(seq_len)
layout[:, :, :] = 1
return layout
class FixedSparsityConfig(SparsityConfig):
"""Configuration class to store `Fixed` sparsity configuration.
For more details about this sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509; this has been customized.
This class extends parent class of `SparsityConfig` and customizes it for `Fixed` sparsity.
"""
def __init__(self,
num_heads,
block=16,
different_layout_per_head=False,
num_local_blocks=4,
num_global_blocks=1,
attention='bidirectional',
horizontal_global_attention=False,
num_different_global_patterns=1):
"""Initialize `Fixed` Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
num_local_blocks: optional: an integer determining the number of blocks in local attention window.
num_global_blocks: optional: an integer determining how many consecutive blocks in a local window is used as the representative of the window for global attention.
attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens appear before them in the context. Considering that, the upper triangular of attention matrix is empty as above figure. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is mirror of the lower triangular in the above figure.
horizontal_global_attention: optional: a boolean determining if blocks that are global representative of a local window, also attend to all other blocks. This is valid only if attention type is `bidirectional`. Looking at the attention matrix, that means global attention not only includes the vertical blocks, but also horizontal blocks.
            num_different_global_patterns: optional: an integer determining number of different global attentions layouts. While global attention can be fixed by which block/s are representative of any local window, since there are multi-heads, each head can use a different global representative. For example, with 4 blocks local window and global attention size of 1 block, we can have 4 different versions in which the first, second, third, or fourth block of each local window can be global representative of that window. This parameter determines how many of such patterns we want. Of course, there is a limitation based on num_local_blocks and num_global_blocks.
"""
super().__init__(num_heads, block, different_layout_per_head)
self.num_local_blocks = num_local_blocks
if (num_local_blocks % num_global_blocks != 0):
raise ValueError(
                f'Number of blocks in a local window, {num_local_blocks}, must be divisible by number of global blocks, {num_global_blocks}!'
)
self.num_global_blocks = num_global_blocks
if (attention != 'unidirectional' and attention != 'bidirectional'):
raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!')
self.attention = attention
if (attention != 'bidirectional' and horizontal_global_attention):
raise ValueError('only \"bi-directional\" attentions can support horizontal global attention!')
self.horizontal_global_attention = horizontal_global_attention
if (num_different_global_patterns > 1 and not different_layout_per_head):
raise ValueError(
f'Number of different layouts cannot be more than one when you have set a single layout for all heads! Set different_layout_per_head to True.'
)
if (num_different_global_patterns > (num_local_blocks // num_global_blocks)):
raise ValueError(
f'Number of layout versions (num_different_global_patterns), {num_different_global_patterns}, cannot be larger than number of local window blocks divided by number of global blocks, {num_local_blocks} / {num_global_blocks} = {num_local_blocks//num_global_blocks}!'
)
self.num_different_global_patterns = num_different_global_patterns
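        # Example (sketch): FixedSparsityConfig(num_heads=16, num_local_blocks=4,
        # num_global_blocks=1) gives every token block a 4-block local window plus one
        # global block per window that all other blocks attend to.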
def set_local_layout(self, h, layout):
"""Sets local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local layout is set
"""
num_blocks = layout.shape[1]
for i in range(0, num_blocks, self.num_local_blocks):
end = min(i + self.num_local_blocks, num_blocks)
for row in range(i, end):
for col in range(i, (row + 1 if self.attention == 'unidirectional' else end)):
layout[h, row, col] = 1
return layout
def set_global_layout(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Currently we set global blocks starting from the last block of a local window to the first one. That means if a local window consists of 4 blocks and global attention size is one block, we use block #4 in each local window as global. If we have different layout per head, then other heads will get #3, #2, and #1. And if we have more heads (and different layout has set) than num of global attentions, multiple head may have same global attentions.
Note) if horizontal_global_attention is set, global blocks will be set both horizontally and vertically.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
"""
num_blocks = layout.shape[1]
first_global_block_idx = self.num_local_blocks - (
1 + h % self.num_different_global_patterns) * self.num_global_blocks
        # set global blocks for all full local windows; a possibly short last window is handled below
end = num_blocks - (num_blocks % self.num_local_blocks)
for i in range(first_global_block_idx, end, self.num_local_blocks):
# vertical global attention
first_row = 0 if self.attention == 'bidirectional' else i
#(((i // self.num_local_blocks) + 1) * self.num_local_blocks)
#if (first_row < num_blocks):
layout[h, first_row:, i:i + self.num_global_blocks] = 1
# horizontal global attention; only in bidirectional attention
if (self.horizontal_global_attention):
layout[h, i:i + self.num_global_blocks, :] = 1
# set last global blocks; handle possible short last local window
if (end < num_blocks):
start = min(end + first_global_block_idx, num_blocks - self.num_global_blocks)
end = start + self.num_global_blocks
# vertical global attention
first_row = 0 if self.attention == 'bidirectional' else start
#(((start // self.num_local_blocks) + 1) * self.num_local_blocks)
#if (first_row < num_blocks):
layout[h, first_row:, start:end] = 1
# horizontal global attention
if (self.horizontal_global_attention):
layout[h, start:end, :] = 1
return layout
def make_layout(self, seq_len):
"""Generates `Fixed` sparsity layout used by each head in the sparse attention.
Arguments:
            seq_len: required: an integer determining the sequence length for which the layout is built; must be divisible by the block size.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `Fixed` sparsity layout of all head
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_local_layout(h, layout)
layout = self.set_global_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
class VariableSparsityConfig(SparsityConfig):
"""Configuration class to store `Variable` sparsity configuration.
This layout is an extension of FixedSparsityConfig in which:
- user can set random layout; default value is zero means no random block
- user can provide a list of local block sizes
- user can provide a list of global block indices.
For more details about `Fixed` sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509; this has been customized.
This class extends parent class of `SparsityConfig` and customizes it for `Fixed` sparsity.
"""
def __init__(self,
num_heads,
block=16,
different_layout_per_head=False,
num_random_blocks=0,
local_window_blocks=[4],
global_block_indices=[0],
global_block_end_indices=None,
attention='bidirectional',
horizontal_global_attention=False):
"""Initialize `Variable` Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability. Currently this sparsity config can only assign single layout to all heads; needs to be extended for different layout per head.
num_random_blocks: optional: an integer determining the number of random blocks in each block row.
local_window_blocks: optional: a list of integers determining the number of blocks in each local attention window. It assumes first number determines # of blocks in the first local window, second the second window, ..., and the last number determines the number of blocks in the remaining local windows.
global_block_indices: optional: a list of integers determining which blocks are considered as global attention. Given indices, determine the blocks that all other token blocks attend to and they attend to all other token blocks. Default value is only index 0. Notice that if global_block_end_indices parameter is set, this parameter is used as starting index of each global window.
global_block_end_indices: optional: a list of integers determining end indices of global window blocks. By default this is not used. But if it is set, it must have the same size of global_block_indices parameter, and combining this two parameters, for each index i, blocks from global_block_indices[i] to global_block_end_indices[i] (exclusive) are considered as global attention.
num_global_blocks: optional: an integer determining how many consecutive blocks in a local window is used as the representative of the window for global attention.
attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens appear before them in the context. Considering that, the upper triangular of attention matrix is empty as above figure. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is mirror of the lower triangular in the above figure.
horizontal_global_attention: optional: a boolean determining if blocks that are global representative of a local window, also attend to all other blocks. This is valid only if attention type is `bidirectional`. Looking at the attention matrix, that means global attention not only includes the vertical blocks, but also horizontal blocks.
"""
super().__init__(num_heads, block, different_layout_per_head)
self.num_random_blocks = num_random_blocks
self.local_window_blocks = local_window_blocks
self.global_block_indices = global_block_indices
if (global_block_end_indices is not None):
if (len(global_block_indices) != len(global_block_end_indices)):
raise ValueError(
f'Global block start indices length, {len(global_block_indices)}, must be same as global block end indices length, {len(global_block_end_indices)}!'
)
for _, (start_idx, end_idx) in enumerate(zip(global_block_indices, global_block_end_indices)):
if start_idx >= end_idx:
raise ValueError(
f'Global block start index, {start_idx}, must be smaller than global block end index, {end_idx}!'
)
self.global_block_end_indices = global_block_end_indices
if (attention != 'unidirectional' and attention != 'bidirectional'):
raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!')
self.attention = attention
if (attention != 'bidirectional' and horizontal_global_attention):
raise ValueError('only \"bi-directional\" attentions can support horizontal global attention!')
self.horizontal_global_attention = horizontal_global_attention
def set_random_layout(self, h, layout):
"""Sets random attention layout used by the given head in the sparse attention.
Note) By default, it assumes there will be a unique random block layout for all heads; unless `different_layout_per_head` parameter is set in which each head can have a different random layout.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which random layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_random_blocks):
raise ValueError(
f'Number of random blocks, {self.num_random_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
for row in range(0, num_blocks):
rnd_cols = random.sample(range(0, num_blocks), self.num_random_blocks)
layout[h, row, rnd_cols] = 1
return layout
def set_local_layout(self, h, layout):
"""Sets local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local layout is set
"""
num_blocks = layout.shape[1]
start_block_idx = 0
end_block_idx = 0
for block_size in self.local_window_blocks:
end_block_idx += block_size
end_block_idx = min(end_block_idx, num_blocks)
for row in range(start_block_idx, end_block_idx):
for col in range(start_block_idx, (row + 1 if self.attention == 'unidirectional' else end_block_idx)):
layout[h, row, col] = 1
start_block_idx += block_size
        # if any remaining part of the sequence is not yet covered, use the last local window block size for the remaining local windows
for i in range(start_block_idx, num_blocks, block_size):
end_block_idx = min(i + block_size, num_blocks)
for row in range(i, end_block_idx):
for col in range(i, (row + 1 if self.attention == 'unidirectional' else end_block_idx)):
layout[h, row, col] = 1
return layout
def set_global_layout(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
"""
num_blocks = layout.shape[1]
if (self.global_block_end_indices is None):
for idx in self.global_block_indices:
# if global block idx is in the range of the sequence blocks
if (idx < num_blocks):
#global rows
if (self.horizontal_global_attention):
layout[h, idx, :] = 1
#global columns
first_row = 0 if self.attention == 'bidirectional' else idx
layout[h, first_row:, idx] = 1
else:
for _, (start_idx, end_idx) in enumerate(zip(self.global_block_indices, self.global_block_end_indices)):
# if global block idx is in the range of the sequence blocks
if (start_idx < num_blocks):
end_idx = min(end_idx, num_blocks)
#global rows
if (self.horizontal_global_attention):
layout[h, start_idx:end_idx, :] = 1
#global columns
first_row = 0 if self.attention == 'bidirectional' else start_idx
layout[h, first_row:, start_idx:end_idx] = 1
return layout
def make_layout(self, seq_len):
"""Generates `Variable` sparsity layout used by each head in the sparse attention.
Arguments:
            seq_len: required: an integer determining the sequence length for which the layout is built; must be divisible by the block size.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `Variable` sparsity layout of all head
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_random_layout(h, layout)
layout = self.set_local_layout(h, layout)
layout = self.set_global_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
class BigBirdSparsityConfig(SparsityConfig):
"""Configuration class to store `BigBird` sparsity configuration.
For more details about this sparsity config, please see `Big Bird: Transformers for Longer Sequences`: https://arxiv.org/pdf/2007.14062.pdf
This class extends parent class of `SparsityConfig` and customizes it for `BigBird` sparsity.
"""
def __init__(self,
num_heads,
block=16,
different_layout_per_head=False,
num_random_blocks=1,
num_sliding_window_blocks=3,
num_global_blocks=1,
attention='bidirectional'):
"""Initialize the BigBird Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
num_random_blocks: optional: an integer determining the number of random blocks in each block row.
num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window.
num_global_blocks: optional: an integer determining how many consecutive blocks, starting from index 0, are considered as global attention. Global block tokens will be attended by all other block tokens and will attend to all other block tokens as well.
attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens appear before them in the context. Considering that, the upper triangular of attention matrix is empty as above figure. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is mirror of the lower triangular in the above figure.
"""
super().__init__(num_heads, block, different_layout_per_head)
self.num_random_blocks = num_random_blocks
self.num_sliding_window_blocks = num_sliding_window_blocks
self.num_global_blocks = num_global_blocks
if (attention != 'unidirectional' and attention != 'bidirectional'):
raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!')
self.attention = attention
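        # Example (sketch): BigBirdSparsityConfig(num_heads=12, num_random_blocks=1,
        # num_sliding_window_blocks=3, num_global_blocks=1) combines the random, sliding
        # window, and global components of the BigBird pattern built by make_layout().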
def set_random_layout(self, h, layout):
"""Sets random attention layout used by the given head in the sparse attention.
        Note) By default, it assumes there will be a unique random block layout for all heads; unless the `different_layout_per_head` parameter is set, in which case each head can have a different random layout.
Arguments:
h: required: an integer determining head index
             layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads; may not be completely set at this step
Return:
             layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads in which the random layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_random_blocks):
raise ValueError(
f'Number of random blocks, {self.num_random_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
for row in range(0, num_blocks):
sample_range = range(0, num_blocks) if self.attention == 'bidirectional' else range(0, row + 1)
rnd_cols = random.sample(sample_range, self.num_random_blocks)
layout[h, row, rnd_cols] = 1
return layout
def set_sliding_window_layout(self, h, layout):
"""Sets sliding local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
             layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads; may not be completely set at this step
Return:
             layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads in which the local sliding window layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_sliding_window_blocks):
raise ValueError(
f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
w = self.num_sliding_window_blocks // 2
for row in range(0, num_blocks):
start = max(0, row - w)
end = min(row + w + 1, num_blocks)
layout[h, row, start:end] = 1
return layout
def set_global_layout_itc(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
             layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads; may not be completely set at this step
Return:
             layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads in which the global layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_global_blocks):
raise ValueError(
f'Number of global blocks, {self.num_global_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
#global rows
layout[h, 0:self.num_global_blocks, :] = 1
#global columns
layout[h, :, 0:self.num_global_blocks] = 1
if self.attention == 'unidirectional':
# zero out anything attending to the future
layout = torch.tril(layout)
return layout
def make_layout(self, seq_len):
"""Generates `BigBird` sparsity layout used by each head in the sparse attention.
Arguments:
             seq_len: required: an integer determining the sequence length for which the sparsity layout is generated.
Return:
             layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the `BigBird` sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_random_layout(h, layout)
layout = self.set_sliding_window_layout(h, layout)
layout = self.set_global_layout_itc(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
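# Illustrative sketch (not part of the original file): building a BigBird layout. The values
# below are arbitrary assumptions chosen for demonstration only:
#   config = BigBirdSparsityConfig(num_heads=8, block=16, num_random_blocks=1,
#                                  num_sliding_window_blocks=3, num_global_blocks=1)
#   layout = config.make_layout(seq_len=512)   # (8, 32, 32) block mask combining random, window and global blocks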
class BSLongformerSparsityConfig(SparsityConfig):
"""Configuration class to store edited `Longformer` sparsity configuration.
    Note) this is a block-sparse version of the Longformer, which is slightly different from the original Longformer, whose sparsity is element-wise.
For more details about this sparsity config, please see `Longformer: The Long-Document Transformer`: https://arxiv.org/pdf/2004.05150.pdf
This class extends parent class of `SparsityConfig` and customizes it for `Longformer` sparsity.
"""
def __init__(self,
num_heads,
block=16,
different_layout_per_head=False,
num_sliding_window_blocks=3,
global_block_indices=[0],
global_block_end_indices=None,
attention='bidirectional'):
"""Initialize the edited `Longformer` Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window.
             global_block_indices: optional: a list of integers determining which blocks are considered as global attention. The given indices determine the blocks that all other token blocks attend to, and which in turn attend to all other token blocks. Default value is only index 0. Notice that if the global_block_end_indices parameter is set, this parameter is used as the starting index of each global window.
             global_block_end_indices: optional: a list of integers determining the end indices of global window blocks. By default this is not used. But if it is set, it must have the same size as the global_block_indices parameter, and combining these two parameters, for each index i, blocks from global_block_indices[i] to global_block_end_indices[i] (exclusive) are considered as global attention.
             attention: optional: a string determining the attention type. Attention can be `unidirectional`, as in autoregressive models, in which tokens attend only to tokens that appear before them in the context. Considering that, the upper triangular part of the attention matrix is empty, as in the figure above. Or it can be `bidirectional`, as in BERT, in which tokens can attend to any other tokens before or after them. In that case, the upper triangular part of the attention matrix mirrors the lower triangular part in the figure above.
"""
super().__init__(num_heads, block, different_layout_per_head)
self.num_sliding_window_blocks = num_sliding_window_blocks
self.global_block_indices = global_block_indices
self.attention = attention
if (global_block_end_indices is not None):
if (len(global_block_indices) != len(global_block_end_indices)):
raise ValueError(
f'Global block start indices length, {len(global_block_indices)}, must be same as global block end indices length, {len(global_block_end_indices)}!'
)
for _, (start_idx, end_idx) in enumerate(zip(global_block_indices, global_block_end_indices)):
if start_idx >= end_idx:
raise ValueError(
f'Global block start index, {start_idx}, must be smaller than global block end index, {end_idx}!'
)
self.global_block_end_indices = global_block_end_indices
def set_sliding_window_layout(self, h, layout):
"""Sets sliding local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
             layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads; may not be completely set at this step
Return:
             layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads in which the local sliding window layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_sliding_window_blocks):
raise ValueError(
f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
w = self.num_sliding_window_blocks // 2
for row in range(0, num_blocks):
start = max(0, row - w)
end = min(row + w + 1, num_blocks)
layout[h, row, start:end] = 1
return layout
def set_global_layout(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
             layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads; may not be completely set at this step
Return:
             layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads in which the global layout is set
"""
num_blocks = layout.shape[1]
if (self.global_block_end_indices is None):
for idx in self.global_block_indices:
# if global block idx is in the range of the sequence blocks
if (idx < num_blocks):
#global rows
layout[h, idx, :] = 1
#global columns
layout[h, :, idx] = 1
else:
for _, (start_idx, end_idx) in enumerate(zip(self.global_block_indices, self.global_block_end_indices)):
# if global block idx is in the range of the sequence blocks
if (start_idx < num_blocks):
end_idx = min(end_idx, num_blocks)
#global rows
layout[h, start_idx:end_idx, :] = 1
#global columns
layout[h, :, start_idx:end_idx] = 1
if self.attention == 'unidirectional':
layout = torch.tril(layout)
return layout
def make_layout(self, seq_len):
"""Generates edited `Longformer` sparsity layout used by each head in the sparse attention.
Arguments:
             seq_len: required: an integer determining the sequence length for which the sparsity layout is generated.
Return:
             layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the `BSLongformer` sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_sliding_window_layout(h, layout)
layout = self.set_global_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
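# Illustrative sketch (not part of the original file): a block-sparse Longformer layout with a
# single global block at index 0. The values below are arbitrary assumptions for demonstration:
#   config = BSLongformerSparsityConfig(num_heads=12, block=16,
#                                       num_sliding_window_blocks=3,
#                                       global_block_indices=[0])
#   layout = config.make_layout(seq_len=1024)  # (12, 64, 64); block row 0 and block column 0 are fully dense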
class LocalSlidingWindowSparsityConfig(SparsityConfig):
"""Configuration class to store `Local Sliding Window` sparsity configuration - a purely-local sliding window attention.
This class extends parent class of `SparsityConfig` and customizes it for `Local` sparsity.
"""
def __init__(self, num_heads, block=16, num_sliding_window_blocks=3, attention='unidirectional'):
"""Initialize the Local Sliding Window Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window.
             attention: optional: a string determining the attention type. Attention can be `unidirectional`, as in autoregressive models, in which tokens attend only to tokens that appear before them in the context. Considering that, the upper triangular part of the attention matrix is empty, as in the figure above. Or it can be `bidirectional`, as in BERT, in which tokens can attend to any other tokens before or after them. In that case, the upper triangular part of the attention matrix mirrors the lower triangular part in the figure above.
"""
super().__init__(num_heads, block)
self.num_sliding_window_blocks = num_sliding_window_blocks
self.attention = attention
def set_sliding_window_layout(self, h, layout):
"""Sets sliding local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
             layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads; may not be completely set at this step
Return:
             layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads in which the local sliding window layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_sliding_window_blocks):
raise ValueError(
f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
w = self.num_sliding_window_blocks // 2
for row in range(0, num_blocks):
start = max(0, row - w)
end = min(row + w + 1, num_blocks) if self.attention == "bidirectional" else row + 1
layout[h, row, start:end] = 1
return layout
def make_layout(self, seq_len):
"""Generates `Local Sliding Window` sparsity layout used by each head in the sparse attention.
Arguments:
             seq_len: required: an integer determining the sequence length for which the sparsity layout is generated.
Return:
             layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the `Local Sliding Window` sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_sliding_window_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
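if __name__ == "__main__":
    # Minimal smoke test (illustrative sketch, not part of the original module): builds a causal
    # sliding-window layout and reports how many blocks survive. The parameter values below are
    # arbitrary assumptions chosen for demonstration only.
    cfg = LocalSlidingWindowSparsityConfig(num_heads=4, block=16, num_sliding_window_blocks=3)
    layout = cfg.make_layout(seq_len=256)
    print(f"layout shape: {tuple(layout.shape)}, nonzero blocks in head 0: {int(layout[0].sum())} / {layout[0].numel()}")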
| 42,463 | 57.32967 | 668 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/sparse_attention/sparse_self_attention.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch.nn as nn
import torch
from torch import distributed as dist
from deepspeed.ops.sparse_attention import SparsityConfig
class SparseSelfAttention(nn.Module):
"""Implements an efficient Sparse Self Attention of Transformer layer based on `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
For more information please see, TODO DeepSpeed Sparse Transformer.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial.
"""
def __init__(
self,
# SparsityConfig parameters needs to be set accordingly
sparsity_config=SparsityConfig(num_heads=4),
key_padding_mask_mode='add',
attn_mask_mode='mul',
max_seq_length=2048):
"""Initialize the sparse self attention layer.
Arguments:
sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class.
key_padding_mask_mode: optional: a string determining if key padding mask needs to be added, `add`, or be multiplied, `mul`.
attn_mask_mode: optional: a string determining if attention mask needs to be added, `add`, or be multiplied, `mul`.
max_seq_length: optional: the maximum sequence length this sparse attention module will be applied to; it controls the size of the master_layout.
"""
super().__init__()
# sparsity information
self.sparsity_config = sparsity_config
# initialize sparse layout and register as buffer
master_layout = self.sparsity_config.make_layout(max_seq_length)
self.register_buffer("master_layout", master_layout)
self._need_layout_synchronization = True
# mask modes
self.key_padding_mask_mode = key_padding_mask_mode
self.attn_mask_mode = attn_mask_mode
ops = dict()
def get_layout(self, L):
# if layout is never synchronized across GPUs, broadcast the layout from global rank 0
if self._need_layout_synchronization and dist.is_initialized():
dist.broadcast(self.master_layout, src=0)
self._need_layout_synchronization = False
if (L % self.sparsity_config.block != 0):
raise ValueError(
                f'Sequence Length, {L}, needs to be divisible by Block size {self.sparsity_config.block}!')
num_blocks = L // self.sparsity_config.block
return self.master_layout[..., :num_blocks, :num_blocks].cpu() # layout needs to be a CPU tensor
# add to cache
def get_ops(self, H, L):
from deepspeed.ops.sparse_attention.matmul import MatMul
from deepspeed.ops.sparse_attention.softmax import Softmax
if L not in SparseSelfAttention.ops:
sparsity_layout = self.get_layout(L)
sparse_dot_sdd_nt = MatMul(sparsity_layout, self.sparsity_config.block, 'sdd', trans_a=False, trans_b=True)
sparse_dot_dsd_nn = MatMul(sparsity_layout,
self.sparsity_config.block,
'dsd',
trans_a=False,
trans_b=False)
sparse_softmax = Softmax(sparsity_layout, self.sparsity_config.block)
SparseSelfAttention.ops[L] = (sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax)
return SparseSelfAttention.ops[L]
def transpose_key_for_scores(self, x, L):
bsz, num_heads, seq_len, head_dim = x.size()
if seq_len != L:
return x.permute(0, 1, 3, 2)
return x
def transpose_mask_for_sparse(self, qtype, x, is_key_padding_mask=False):
x = x.type(qtype)
if is_key_padding_mask:
xdim = x.dim()
for d in range(xdim - 1, 0, -1):
x = x.squeeze(dim=d)
return x
return x.squeeze()
# forward pass
def forward(self, query, key, value, rpe=None, key_padding_mask=None, attn_mask=None):
"""Applies forward phase of sparse self attention
Arguments:
query: required: query tensor
key: required: key tensor
value: required: value tensor
rpe: optional: a tensor same dimension as x that is used as relative position embedding
key_padding_mask: optional: a mask tensor of size (BatchSize X SequenceLength)
attn_mask: optional: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported
            Note: key_padding_mask and attn_mask are combined with the attention scores according to the `key_padding_mask_mode` and `attn_mask_mode` set at construction time (either added or multiplied)
Return:
attn_output: a dense tensor containing attention context
"""
assert query.dtype == torch.half, "sparse attention only supports training in fp16 currently, please file a github issue if you need fp32 support"
bsz, num_heads, tgt_len, head_dim = query.size()
# transpose back key if it is already transposed
key = self.transpose_key_for_scores(key, tgt_len)
# check that operation is supported
if query.shape != key.shape or key.shape != value.shape:
raise NotImplementedError('only self-attention is supported for now')
# squeeze key_padding_mask if it is given
if key_padding_mask is not None:
key_padding_mask = self.transpose_mask_for_sparse(query.dtype, key_padding_mask, is_key_padding_mask=True)
# squeeze attn_mask if it is given
if attn_mask is not None:
attn_mask = self.transpose_mask_for_sparse(query.dtype, attn_mask)
# cache look-up table computations etc
sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax = self.get_ops(num_heads, tgt_len)
scaling = float(head_dim)**-0.5
# attention scores
attn_output_weights = sparse_dot_sdd_nt(query, key)
attn_output_weights = sparse_softmax(attn_output_weights,
scale=scaling,
rpe=rpe,
key_padding_mask=key_padding_mask,
attn_mask=attn_mask,
key_padding_mask_mode=self.key_padding_mask_mode,
attn_mask_mode=self.attn_mask_mode)
# outputs
attn_output = sparse_dot_dsd_nn(attn_output_weights, value)
return attn_output
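if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module). It assumes a CUDA device with
    # triton available; the shapes and config values below are arbitrary assumptions.
    from deepspeed.ops.sparse_attention import FixedSparsityConfig
    attn = SparseSelfAttention(sparsity_config=FixedSparsityConfig(num_heads=4), max_seq_length=512).cuda()
    # inputs are (batch, heads, seq_len, head_dim), must be fp16, and seq_len must be a multiple of the block size
    q = torch.rand(2, 4, 256, 64, dtype=torch.half, device='cuda')
    k, v = q.clone(), q.clone()
    context = attn(q, k, v)
    print(context.shape)  # dense context tensor with the same shape as q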
| 6,746 | 43.98 | 163 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/sparse_attention/matmul.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
# https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py
import importlib
import torch
import triton
import triton.language as tl
import triton._C.libtriton as libtriton
from deepspeed.accelerator import get_accelerator
@triton.jit
def _kernel(A, B, C, stride_za, stride_ha, stride_ma, stride_ka, stride_zb, stride_hb, stride_kb, stride_nb, stride_zc,
stride_hc, stride_mc, stride_nc, DS0, DS1, SDD_K, SDD_off_width, lut, locks, nlocks, **meta):
TM = meta['TM']
TN = meta['TN']
TK = meta['TK']
TZ = meta['TZ']
BLOCK = meta['BLOCK']
#------------#
#- Prologue -#
#------------#
pid0 = tl.program_id(0)
pid1 = tl.program_id(1)
pidz = tl.program_id(2)
if meta['SDD']:
pid1 = pid1 + SDD_off_width
blockidm = tl.arange(0, TM) // BLOCK
blockidn = tl.arange(0, TN) // BLOCK
offlutm = blockidm * (TN // BLOCK) * 4
offlutn = blockidn * 4
header = lut + pid1 * (TM // BLOCK) * (TN // BLOCK) * 4
z = tl.load(header + 0)
i = tl.load(header + 1 + offlutm)
j = tl.load(header + 2 + offlutn)
AS1 = SDD_K // TZ
lockid = tl.where(TZ > 1, 1, 0)
offka = pid0 * AS1
offkb = pid0 * AS1
offmc = 0
offnc = 0
offpa = 0
offpb = 0
maxid = TZ
offhc = 0
offha = z
offhb = z
ram = i * BLOCK + (tl.arange(0, TM) % BLOCK)
rbn = j * BLOCK + (tl.arange(0, TN) % BLOCK)
else:
header = lut + pid0 * 6
offset = tl.load(header + 0)
AS1 = tl.load(header + 1)
column = tl.load(header + 2)
depth = tl.load(header + 3)
lockid = tl.load(header + 4)
maxid = tl.load(header + 5)
pinc = lut + offset
offhc = depth
if meta['DSD']:
# output offset
offnc = pid1 * TN
offmc = column * TM
offpc = 0
# dense input offset
offnb = pid1 * TN
offkb = tl.load(pinc)
offkb = tl.multiple_of(offkb, 8) # compiler hint
offpb = 0
# sparse input offset
offma = 0
offka = 0
offpa = tl.load(pinc + 1)
offpa = tl.multiple_of(offpa, 8) # compiler hint
offpa = offpa * BLOCK * BLOCK
offha = 0
offhb = depth
else:
# output offset
offmc = pid1 * TM
offnc = column * TN
offpc = 0
# dense input offset
offma = pid1 * TM
offka = tl.load(pinc)
offka = tl.multiple_of(offka, 8) # compiler hint
offpa = 0
# sparse input offset
offnb = 0
offkb = 0
offpb = tl.load(pinc + 1)
offpb = tl.multiple_of(offpb, 8) # compiler hint
offpb = offpb * BLOCK * BLOCK
offha = depth
offhb = 0
ram = offma + tl.arange(0, TM)
rbn = offnb + tl.arange(0, TN)
# initialize a, b pointers
rka = offka + tl.arange(0, TK)
rkb = offkb + tl.arange(0, TK)
pa = A + pidz * stride_za + offha * stride_ha + offpa + ram[:, None] * stride_ma + rka[None, :] * stride_ka
pb = B + pidz * stride_zb + offhb * stride_hb + offpb + rbn[None, :] * stride_nb + rkb[:, None] * stride_kb
if meta['DDS']:
checkam = ram[:, None] < DS0
else:
checkam = AS1 > 0
if meta['DSD']:
checkbn = rbn[None, :] < DS0
else:
checkbn = AS1 > 0
a = tl.load(pa, mask=checkam, other=0.)
b = tl.load(pb, mask=checkbn, other=0.)
## ---------------- ##
## Inner Loop ##
## ---------------- ##
acc = tl.zeros((TM, TN), dtype=tl.float32)
for k in range(AS1, 0, -TK):
acc += tl.dot(a, b)
if meta['SDD']:
inc_a = TK * stride_ka
inc_b = TK * stride_kb
else:
pinc += 2
if meta['DSD']:
inc_b = tl.load(pinc)
inc_a = tl.load(pinc + 1)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = inc_b * stride_kb
if meta['DDS']:
inc_a = tl.load(pinc)
inc_b = tl.load(pinc + 1)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = inc_a * stride_ka
pa += inc_a
pb += inc_b
# pre-fetch
checkak = k > TK
checkbk = k > TK
checka = checkam & checkak
checkb = checkbn & checkbk
a = tl.load(pa, mask=checka)
b = tl.load(pb, mask=checkb)
c = acc.to(C.dtype.element_ty)
if meta['SDD']:
checkc = True
rr_blockidm = tl.arange(0, TM) // BLOCK
rr_blockidn = tl.arange(0, TN) // BLOCK
rr_offlutm = rr_blockidm * (TN // BLOCK) * 4
rr_offlutn = rr_blockidn * 4
off_bkid = 3 + rr_offlutm[:, None] + rr_offlutn[None, :]
bkid = tl.load(header + off_bkid)
offpc = bkid * BLOCK * BLOCK
rcm = tl.arange(0, TM) % BLOCK
rcn = tl.arange(0, TN) % BLOCK
else:
rcm = offmc + tl.arange(0, TM)
rcn = offnc + tl.arange(0, TN)
if meta['DSD']:
checkc = rcn[None, :] < DS0
if meta['DDS']:
checkc = rcm[:, None] < DS0
pc = C + offpc + offhc * stride_hc + pidz * stride_zc + rcm[:, None] * stride_mc + rcn[None, :] * stride_nc
# write-back directly
if lockid == 0:
tl.store(pc, c, mask=checkc)
# accumulate partial results using spin-locks
else:
plock = locks + tl.program_id(2) * nlocks * tl.num_programs(1) + tl.program_id(1) * nlocks + lockid - 1
pcount = plock + tl.num_programs(2) * tl.num_programs(1) * nlocks
while tl.atomic_cas(plock, 0, 1) == 1:
pass
count = tl.load(pcount)
if count == 0:
tl.store(pc, c, mask=checkc)
else:
d = tl.load(pc, mask=checkc)
tl.store(pc, d + c, mask=checkc)
tl.atomic_xchg(pcount, (count + 1) % maxid)
tl.atomic_xchg(plock, 0)
##############
# MAIN API #
##############
class _sparse_matmul(torch.autograd.Function):
sdd_cache = dict()
dsd_cache = dict()
dds_cache = dict()
locks = dict()
# Given an array sizes representing reduction size for each
# column of a block-mode matrix multiplication,
    # performs load-balancing by splitting large reductions
    # into multiple smaller segments
@staticmethod
def load_balance(sizes, block):
#global triton
#if triton is None:
# triton = importlib.import_module('triton')
# segment size
# heuristics taken from OpenAI blocksparse code
# https://github.com/openai/blocksparse/blob/master/blocksparse/matmul.py#L95
max_size = sizes.max()
min_size = sizes[sizes != 0].min()
#if max_size > min_size * 2.0:
# seg_max = max(triton.cdiv(max_size, 4), min_size*2)
#else:
# seg_max = max_size
seg_max = max_size
seg_min = max(triton.cdiv(seg_max, 4), 4)
# split reduction into segments
div = sizes // seg_max
rem = sizes % seg_max
packs = div + (sizes < seg_min).long() + (rem >= seg_min).long()
width = packs.sum()
segments = torch.empty(width, dtype=sizes.dtype)
column = torch.empty_like(segments)
lockid = torch.zeros_like(segments)
maxid = torch.zeros_like(segments)
nlocks = 0
current = 0
col_idx = 0
for i in range(len(sizes)):
d, r = div[i], rem[i]
isempty = sizes[i] < seg_min
last = current + d + (r >= seg_min) + isempty
# column id
column[current:last] = col_idx
# lock id
if d > 1 or (d == 1 and r >= seg_min):
nlocks += 1
lockid[current:last] = nlocks
maxid[current:last] = last - current
# segment size
segments[current:current + d] = seg_max
if r < seg_min and not isempty:
segments[current + d - 1] += r
if r >= seg_min or isempty:
segments[current + d] = r
current = last
col_idx += 1
offsets = torch.zeros_like(segments)
offsets[1:] = torch.cumsum(segments[:-1], dim=0)
return segments, column, lockid, maxid, offsets
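    # Illustrative sketch (not part of the original file): load_balance takes the per-column block
    # counts of a sparse layout and returns per-segment metadata (segment sizes, owning column,
    # lock/max ids and offsets) that the kernels use to split long reductions. The call below uses
    # an arbitrary example for demonstration only:
    #   sizes = torch.tensor([4, 0, 2, 7])            # nonzero blocks per column
    #   segs, cols, locks, maxid, offs = _sparse_matmul.load_balance(sizes, block=16)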
@staticmethod
def get_locks(size, dev):
if dev not in _sparse_matmul.locks or \
size > _sparse_matmul.locks[dev].size(0):
_sparse_matmul.locks[dev] = torch.zeros(size, dtype=torch.int32, device=dev)
return _sparse_matmul.locks[dev]
##########################
# SPARSE = DENSE x DENSE #
##########################
@staticmethod
def make_sdd_lut(layout, block, dtype, device):
#_sparse_matmul._load_utils()
#start_width = 64 // block
#segmented = _sparse_matmul.sdd_segment(layout.type(torch.int32), start_width)
start_width = (128 if block > 16 else 32) // block
layout = layout.type(torch.int32)
segmented = libtriton.superblock(layout.data_ptr(), layout.shape[0], layout.shape[1], layout.shape[2],
start_width)
luts, widths, packs = [], [], []
for size, nnz in segmented:
""" width = nnz.shape[0] // (size * size)
h = nnz[:, 0]
i = nnz[:, 1]
j = nnz[:, 2]
b = nnz[:, 3]
lut = torch.stack((h, i, j, b), dim=1).view(-1).contiguous()
luts.append(lut.type(torch.int32).to(device))
widths.append(width)
packs.append(size) """
nnz = nnz.reshape(-1, 4)
width = nnz.shape[0] // (size * size)
luts.append(torch.from_numpy(nnz).type(torch.int32).to(device))
widths.append(width)
packs.append(size)
# create locks
return luts, None, widths, packs
@staticmethod
def _sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, luts, num_locks, widths, packs, bench, time):
if trans_c:
a, b = b, a
trans_a, trans_b = not trans_b, not trans_a
AS0 = a.size(0)
# Shape check
a_dim = -2 if trans_a else -1
b_dim = -1 if trans_b else -2
a_inner, b_inner = a.shape[a_dim], b.shape[b_dim]
if a_inner != b_inner:
raise ValueError(f"Size of tensor A along the {a_dim} dim ({a_inner}) must match size "
f"of tensor B along the {b_dim} dim ({b_inner})")
if a_inner % 16 != 0:
raise ValueError('Reduction size for SDD must be a multiple of 16')
batch_size = a.size(0)
a_outer = a.size(3 if trans_a else 2)
dtype = a.dtype
is_16_multiple = a_inner % 16 == 0
is_32_multiple = a_inner % 32 == 0
is_64_multiple = a_inner % 64 == 0
if not is_16_multiple:
raise ValueError('Reduction size for SDD must be a multiple of 16')
device = a.device
# create kernel
total_width = sum([width * pack * pack for width, pack in zip(widths, packs)])
c = torch.empty((batch_size, total_width, block, block), dtype=dtype, device=a.device)
for lut, width, pack in zip(luts, widths, packs):
F32TK = [8, 16]
F16TK = [16]
F16TK += [32] if is_32_multiple else []
F16TK += [64] if is_64_multiple else []
TK = {torch.float32: F32TK, torch.float16: F16TK}[dtype]
num_lock = 1
meta = {
'TM': block * pack,
'TN': block * pack,
'BLOCK': block,
'TK': TK[0],
'TZ': 1,
'SDD': True,
'DSD': False,
'DDS': False
}
# create output
locks = _sparse_matmul.get_locks(2 * width * AS0 * num_lock, a.device)
# maximum grid size is 65535
# so operation might be decomposed into multiple
# kernel calls
max_width = 49152
total = 0 if bench else None
for off_width in range(0, width, max_width):
grid = lambda meta: [meta['TZ'], min(max_width, width - off_width), batch_size]
_kernel[grid](a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(0),
c.stride(2),
c.stride(3),
a_outer,
a_outer,
a_inner,
off_width,
lut,
locks,
num_lock,
num_warps=4,
**meta)
# save for backward pass
return c
##########################
# DENSE = DENSE x SPARSE #
##########################
# Given a binary layout of 0s and 1s,
# Construct look-up table for efficient execution on GPUs
@staticmethod
def make_dxx_lut(layout, block, step, trans, device, transform=lambda idx: idx):
# load-balancing
_empty = torch.tensor([], dtype=torch.int64, device=layout.device)
segments = _empty.clone()
column = _empty.clone()
depth = _empty.clone()
lockid = _empty.clone()
maxid = _empty.clone()
offsets = _empty.clone()
current_offset = 0
current_maxid = 0
for z in range(layout.size(0)):
if trans:
sizes = torch.sum(layout[z, :, :], 1)
else:
sizes = torch.sum(layout[z, :, :], 0)
z_segments, z_column, z_lockid, z_maxid, z_offsets = _sparse_matmul.load_balance(sizes, block)
z_depth = z * torch.ones_like(z_segments)
z_lockid[z_lockid > 0] += current_maxid
current_maxid = z_lockid.max()
# concatenate depth
segments = torch.cat((segments, z_segments))
column = torch.cat((column, z_column))
depth = torch.cat((depth, z_depth))
maxid = torch.cat((maxid, z_maxid))
offsets = torch.cat((offsets, current_offset + z_offsets))
lockid = torch.cat((lockid, z_lockid))
current_offset += layout[z, :, :].sum()
segments *= step
# pointer increments
if trans:
nnz = layout.nonzero()
else:
nnz = layout.transpose(1, 2).nonzero()
num_blocks = nnz.size(0)
offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets))
idx = transform(nnz[:, 2] * block)
xincs = idx.clone()
xincs[1:] -= idx[:-1]
# divide block into multiple steps
div = block // step
xincs = xincs.view(-1, 1).repeat(1, div)
xincs[:, 1:] = step
xincs[:, 0] -= (div - 1) * step
# first increment for each reduction is actually the offset
xincs[offsets[segments > 0], 0] = idx[offsets[segments > 0]]
xincs = xincs.view(-1)
# block-mode input increments
if trans:
widx = torch.arange(num_blocks)
else:
widx = _empty.clone()
current_offset = 0
for z in range(layout.size(0)):
layoutw = layout[z, :, :].clone()
msum = layoutw.sum()
layoutw[layoutw > 0] = 1 + torch.arange(msum)
widx = torch.cat((widx, current_offset + layoutw.T[layoutw.T > 0] - 1))
current_offset += msum
widx = widx
wincs = widx * block * block
wincs[1:] -= widx[:-1] * block * block
wincs = wincs.view(-1, 1).repeat(1, div)
if trans:
wincs[:, 1:] = step
wincs[:, 0] -= (div - 1) * step
else:
wincs[:, 1:] = step * block
wincs[:, 0] -= (div - 1) * step * block
wincs[offsets[segments > 0], 0] = widx[offsets[segments > 0]]
wincs = wincs.view(-1)
# adjust offset and segment size
offsets *= 2 * div
segments *= div
# create header
width = column.size(0)
offsets += 6 * width
header = torch.stack((offsets, segments, column, depth, lockid, maxid), dim=1).view(-1).contiguous()
incs = torch.stack((xincs, wincs), dim=1).view(-1).contiguous()
incs = torch.cat((incs, torch.zeros(2, device=incs.device, dtype=incs.dtype)))
# create lut
lut = torch.cat((header, incs))
lut = lut.type(torch.int32).to(device)
# create locks
num_locks = max(1, lockid.max())
return lut, num_locks, width, None
@staticmethod
def _dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs, bench, time):
global triton
if triton is None:
triton = importlib.import_module('triton')
# shapes / dtypes
AS0 = a.size(0)
AS1 = a.size(1)
AS2 = a.size(3 if trans_a else 2)
AS3 = a.size(2 if trans_a else 3)
BS0 = spdims[0]
BS1 = block * spdims[2 if trans_b else 1]
BS2 = block * spdims[1 if trans_b else 2]
dtype = a.dtype
# kernel
meta = {'TN': block, 'TM': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1, 'SDD': False, 'DSD': False, 'DDS': True}
# output
CS0 = AS0
CS1 = AS1
CS2 = BS2 if trans_c else AS2
CS3 = AS2 if trans_c else BS2
locks = _sparse_matmul.get_locks(2 * AS0 * AS2 // 32 * num_locks, a.device)
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
grid = lambda meta: [width, triton.cdiv(AS2, meta['TM']), AS0]
_kernel[grid](a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(1),
c.stride(3 if trans_c else 2),
c.stride(2 if trans_c else 3),
AS2,
BS2,
0,
0,
lut,
locks,
num_locks,
num_warps=4,
**meta)
return c
@staticmethod
def _dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs, bench, time):
global triton
if triton is None:
triton = importlib.import_module('triton')
# shapes / dtypes
AS0 = spdims[0]
AS1 = block * spdims[2 if trans_a else 1]
AS2 = block * spdims[1 if trans_a else 2]
BS0 = b.size(0)
BS1 = b.size(1)
BS2 = b.size(3 if trans_b else 2)
BS3 = b.size(2 if trans_b else 3)
dtype = a.dtype
# kernel
meta = {'TM': block, 'TN': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1, 'SDD': False, 'DSD': True, 'DDS': False}
# output
CS0 = BS0
CS1 = BS1
CS2 = BS3 if trans_c else AS1
CS3 = AS1 if trans_c else BS3
locks = _sparse_matmul.get_locks(2 * BS0 * BS3 // 32 * num_locks, a.device)
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
grid = lambda meta: [width, triton.cdiv(BS3, meta['TN']), BS0]
_kernel[grid](a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(1),
c.stride(2),
c.stride(3),
BS3,
AS1,
0,
0,
lut,
locks,
num_locks,
num_warps=4,
**meta)
return c
fn = {'sdd': _sdd_matmul.__get__(object), 'dsd': _dsd_matmul.__get__(object), 'dds': _dds_matmul.__get__(object)}
@staticmethod
def forward(ctx, a, b, trans_a, trans_b, trans_c, mode, spdims, block, c_lut, c_num_locks, c_width, c_packs,
c_bench, c_time, da_lut, da_num_locks, da_width, da_packs, da_bench, da_time, db_lut, db_num_locks,
db_width, db_packs, db_bench, db_time):
c = _sparse_matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_num_locks, c_width,
c_packs, c_bench, c_time)
# save for backward
ctx.save_for_backward(a, b)
ctx.da_num_locks = da_num_locks
ctx.da_lut = da_lut
ctx.da_width = da_width
ctx.da_packs = da_packs
ctx.da_bench = da_bench
ctx.da_time = da_time
ctx.db_lut = db_lut
ctx.db_num_locks = db_num_locks
ctx.db_width = db_width
ctx.db_bench = db_bench
ctx.db_packs = db_packs
ctx.db_time = db_time
ctx.mode = mode
ctx.spdims = spdims
ctx.block = block
ctx.trans_a = trans_a
ctx.trans_b = trans_b
return c
@staticmethod
def backward(ctx, dc):
# saved for backward
a, b = ctx.saved_tensors
mode = ctx.mode
# gradients w.r.t. a
if ctx.needs_input_grad[0]:
mode_da = mode[1] + mode[0] + mode[2]
da = _sparse_matmul.fn[mode_da](dc, b, False, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block,
ctx.da_lut, ctx.da_num_locks, ctx.da_width, ctx.da_packs, ctx.da_bench,
ctx.da_time)
# gradients w.r.t. b
if ctx.needs_input_grad[1]:
mode_db = mode[2] + mode[1] + mode[0]
db = _sparse_matmul.fn[mode_db](a, dc, not ctx.trans_a, False, ctx.trans_b, ctx.spdims, ctx.block,
ctx.db_lut, ctx.db_num_locks, ctx.db_width, ctx.db_packs, ctx.db_bench,
ctx.db_time)
return da, db, None, None, None,\
None, None, None, None,\
None, None, None, None, None, None,\
None, None, None, None, None, None,\
None, None, None, None, None, None
class MatMul:
"""Block-Sparse MatMul class; this class handles three types of matrix-multiplication:
- sparse = dense X dense
- dense = sparse X dense
- dense = dense X sparse
For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
"""
def make_lut(self, dtype, device):
"""Generates the sparsity layout/s used in block-sparse matmul
"""
key = (dtype, device)
if key in self.lut_cache:
return self.lut_cache[key]
# C look-up table
layout, block = self.layout, self.block
step = 16
if self.mode == 'sdd':
c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
elif self.mode == 'dsd':
c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_dxx_lut(layout, block, step, not self.trans_a,
device)
elif self.mode == 'dds':
c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_dxx_lut(layout, block, step, self.trans_b,
device)
# DA look-up table
if self.mode == 'sdd':
da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_dxx_lut(layout, block, step, True, device)
elif self.mode == 'dsd':
da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
elif self.mode == 'dds':
da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_dxx_lut(layout, block, step,
not self.trans_b, device)
# DB look-up table
if self.mode == 'sdd':
db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_dxx_lut(layout, block, step, False, device)
elif self.mode == 'dsd':
db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_dxx_lut(layout, block, step, self.trans_a,
device)
elif self.mode == 'dds':
db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
self.lut_cache[key] = (c_lut, c_num_locks, c_width, c_packs,\
da_lut, da_num_locks, da_width, da_packs,\
db_lut, db_num_locks, db_width, db_packs)
return self.lut_cache[key]
def __init__(self, layout, block, mode, trans_a=False, trans_b=False, bench=False):
"""Initialize the Block-Sparse MatMul class.
Arguments:
layout: required: sparsity layout tensor
block: required: an integer determining the block size.
mode: required: a string determining type of matmul; ('sdd') sparse = dense X dense, ('dsd') dense = sparse X dense, ('dds') dense = dense X sparse
trans_a: optional: a boolean determining if multiplication needs to be applied on transpose of input a; default is false
trans_b: optional: a boolean determining if multiplication needs to be applied on transpose of input b; default is false
bench: optional: set if you want to do benchmarking
"""
if mode not in ['sdd', 'dsd', 'dds']:
raise NotImplementedError('Supported modes are: sdd, dsd, dds')
# look-up table cache
self.lut_cache = dict()
# attributes
self.trans_a = trans_a
self.trans_b = trans_b
self.mode = mode
self.block = block
self.layout = layout
layout_dim = layout.ndim
assert layout_dim in (2, 3), "Layout should be a 2 or 3 dimensional tensor of 0s and 1s"
if not mode == 'sdd':
# Dims to be reduced on the 'inside' of the matmul, either -1 or -2
trans_dense, trans_sparse, sparse_inner = (trans_b, trans_a, -1) if mode == 'dsd' else (trans_a, trans_b,
-2)
self.dense_inner_dim = -((sparse_inner % 2) + 1) if not trans_dense else sparse_inner
sparse_inner = sparse_inner if not trans_sparse else -((sparse_inner % 2) + 1)
# Inner dim of the dense input should be equal to the inner dim of the sparse input
self.dense_inner_size = layout.shape[sparse_inner] * block
# Expected shape for sparse inputs
self.sparse_shape = (layout.sum().item(), block, block)
# Support using the same layout across attention heads etc.
if layout_dim == 2:
layout = layout.unsqueeze(0)
layout = layout.long() # Above code assumes the layout tensor is an integral type
self.spdims = layout.shape
# timings
self.bench = bench
self.time_c = None
self.time_da = None
self.time_db = None
# pad shapes of a tensor to make it
# compatible with kernel calls
@staticmethod
def _pad_shape(x, is_sparse):
max_dim = 3 if is_sparse else 4
for i in range(max_dim - x.dim()):
x = x.unsqueeze(0)
return x
def __call__(self, a, b):
"""Applies Block-Sparse MatMul.
For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
Arguments:
a: required: a dense/block-sparse tensor; first input of mat-mul
b: required: a dense/block-sparse tensor; second input of mat-mul
Return:
c: a dense/block-sparse tensor result of a X b
"""
c_lut, c_num_locks, c_width, c_packs,\
da_lut, da_num_locks, da_width, da_packs,\
db_lut, db_num_locks, db_width, db_packs = self.make_lut(a.dtype, a.device)
# timings
time_c = [None]
time_da = [None]
time_db = [None]
original_dims = max(a.ndim, b.ndim)
a, b = self._validate_inputs(a, b)
# pad shapes with ones
a = MatMul._pad_shape(a, self.mode == 'dsd')
b = MatMul._pad_shape(b, self.mode == 'dds')
# execute
c = _sparse_matmul.apply(a, b, self.trans_a, self.trans_b, False, self.mode, self.spdims, self.block, c_lut,
c_num_locks, c_width, c_packs, self.bench, time_c, da_lut, da_num_locks, da_width,
da_packs, self.bench, time_da, db_lut, db_num_locks, db_width, db_packs, self.bench,
time_db)
# This removes any leading singleton dimensions we may have added to the tensor that weren't in the input
dims_to_trim = c.ndim - original_dims
for _ in range(dims_to_trim):
c = c.squeeze(0)
self.time_c = time_c[0]
self.time_da = time_da[0]
self.time_db = time_db[0]
return c
def _validate_inputs(self, a, b):
if a.device != b.device:
raise ValueError(f"Inputs must be on the same device; got {a.device} for tensor A "
f"and {b.device} for tensor B")
if not get_accelerator().on_accelerator(a):
raise ValueError("Only GPU devices are supported for now")
# When autocast is enabled, torch.matmul autocasts to float16, so we do the same here
if torch.is_autocast_enabled():
a, b = a.half(), b.half()
elif a.dtype != b.dtype:
raise ValueError(f"Inputs must be the same dtype; got {a.dtype} for A and {b.dtype} for B")
mode, trans_a, trans_b = self.mode, self.trans_a, self.trans_b
if mode != 'sdd':
# One input is sparse
dense, dense_name, sparse, sparse_name = (a, 'A', b, 'B') if mode == 'dds' else (b, 'B', a, 'A')
dense_inner = dense.shape[self.dense_inner_dim]
if dense_inner != self.dense_inner_size:
raise ValueError(f"Expected tensor {dense_name} to have size {self.dense_inner_size} at dim "
f"{self.dense_inner_dim % dense.ndim}, got {dense_inner}.")
if sparse.shape[-len(self.sparse_shape):] != self.sparse_shape:
raise ValueError(f"Expected tensor with trailing dimensions of shape {self.sparse_shape} for argument "
f"{sparse_name}, got {sparse.shape}")
def add_extra_dims(x):
# Add extra leading singleton dimensions if needed
dims_needed = 4 - x.ndim
if dims_needed > 0:
singletons = [1] * dims_needed
x = x.view(*singletons, *x.shape)
elif dims_needed < 0:
raise ValueError("Tensors with more than 4 dimensions are not currently supported")
return x
# Pad shapes with leading singleton dimensions
a = add_extra_dims(a)
b = add_extra_dims(b)
return a, b
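if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module). Requires a CUDA device with
    # triton; the layout, block size and tensor shapes below are arbitrary assumptions.
    block = 16
    layout = torch.tril(torch.ones((1, 8, 8), dtype=torch.int64))  # causal block layout over 8x8 blocks
    sdd = MatMul(layout, block, 'sdd', trans_a=False, trans_b=True)
    q = torch.rand(2, 1, 8 * block, 64, dtype=torch.half, device='cuda')
    k = torch.rand(2, 1, 8 * block, 64, dtype=torch.half, device='cuda')
    scores = sdd(q, k)  # block-sparse scores laid out as (batch, nonzero blocks, block, block)
    print(scores.shape)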
| 32,948 | 39.181707 | 160 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/sparse_attention/bert_sparse_self_attention.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from torch import nn
from deepspeed.ops.sparse_attention import SparseSelfAttention, FixedSparsityConfig
class BertSparseSelfAttention(nn.Module):
"""Implements Sparse Self Attention layer of Bert model based on https://github.com/microsoft/DeepSpeedExamples/blob/master/bing_bert/nvidia/modelingpreln.py#L373
For more information please see, TODO DeepSpeed Sparse Transformer.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial.
"""
def __init__(
self,
config,
# SparsityConfig parameters needs to be set accordingly
sparsity_config=FixedSparsityConfig(num_heads=4)):
"""Initialize the bert sparse self attention layer.
Note) you can use any of the provided sparsity configs or simply add yours!
Arguments:
config: required: Bert model config
sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on FixedSparsityConfig class.
"""
super(BertSparseSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError("The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.sparse_self_attention = SparseSelfAttention(sparsity_config)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
"""Applies forward phase of bert sparse self attention
Arguments:
hidden_states: required: hidden_states tensor of the bert model
            attention_mask: required: a mask tensor of size (BatchSize X SequenceLength); it is applied as a key padding mask in the sparse self attention
Return:
context_layer: a dense tensor containing attention context
"""
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
context_layer = self.sparse_self_attention(query_layer,
key_layer,
value_layer,
key_padding_mask=attention_mask)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size, )
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
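if __name__ == "__main__":
    # Illustrative wiring sketch (not part of the original module): how this layer is typically
    # dropped into a BERT encoder. `_DemoConfig` is a stand-in for the real Bert model config and
    # the shapes/values below are arbitrary assumptions; a CUDA device with triton is assumed.
    import torch
    class _DemoConfig:
        hidden_size = 256
        num_attention_heads = 4
    layer = BertSparseSelfAttention(_DemoConfig()).cuda().half()
    hidden = torch.rand(2, 128, 256, dtype=torch.half, device='cuda')
    mask = torch.zeros(2, 128, dtype=torch.half, device='cuda')  # additive key padding mask (all zeros = no padding)
    out = layer(hidden, mask)
    print(out.shape)  # (2, 128, 256)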
| 3,463 | 43.410256 | 166 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/sparse_attention/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .sparsity_config import SparsityConfig, DenseSparsityConfig, FixedSparsityConfig, VariableSparsityConfig, BigBirdSparsityConfig, BSLongformerSparsityConfig, LocalSlidingWindowSparsityConfig
from .sparse_self_attention import SparseSelfAttention
from .bert_sparse_self_attention import BertSparseSelfAttention
from .sparse_attention_utils import SparseAttentionUtils
| 467 | 45.8 | 194 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/sparse_attention/trsrc/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import sys
import os
def _build_file_index(directory, suffix='.tr'):
"""Build an index of source files and their basenames in a given directory.
Args:
directory (string): the directory to index
suffix (string): index files with this suffix
Returns:
list: A list of tuples of the form [(basename, absolute path), ...]
"""
index = []
for fname in os.listdir(directory):
if fname.endswith(suffix):
basename = fname[:fname.rfind(suffix)] # strip the suffix
path = os.path.join(directory, fname)
index.append((basename, path))
return index
# Go over all local source files and parse them as strings
_module = sys.modules[_build_file_index.__module__]
_directory = os.path.dirname(os.path.realpath(__file__))
for name, fname in _build_file_index(_directory):
with open(fname, 'r') as fin:
setattr(_module, name, fin.read())
| 1,032 | 26.184211 | 79 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/aio/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from ..op_builder import AsyncIOBuilder
| 136 | 18.571429 | 39 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/adam/cpu_adam.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from cpuinfo import get_cpu_info
from deepspeed.utils import logger
from deepspeed.utils.logging import should_log_le
from deepspeed.ops.op_builder import CPUAdamBuilder
class DeepSpeedCPUAdam(torch.optim.Optimizer):
optimizer_id = 0
def __init__(self,
model_params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
adamw_mode=True,
fp32_optimizer_states=True):
"""Fast vectorized implementation of two variations of Adam optimizer on CPU:
* Adam: A Method for Stochastic Optimization: (https://arxiv.org/abs/1412.6980);
* AdamW: Fixing Weight Decay Regularization in Adam (https://arxiv.org/abs/1711.05101)
        DeepSpeed CPU Adam(W) provides between 5x and 7x speedup over torch.optim.Adam(W).
        In order to apply this optimizer, the model's master parameters (in FP32) are required
        to reside in CPU memory.
To train on a heterogeneous system, such as coordinating CPU and GPU, DeepSpeed offers
the ZeRO-Offload technology which efficiently offloads the optimizer states into CPU memory,
        with minimal impact on training throughput. DeepSpeedCPUAdam plays an important role in minimizing
the overhead of the optimizer's latency on CPU. Please refer to ZeRO-Offload tutorial
(https://www.deepspeed.ai/tutorials/zero-offload/) for more information on how to enable this technology.
        When calling the step function, there are two options available: (1) update the optimizer's states only, or (2) update
        the optimizer's states and copy the parameters back to GPU at the same time. We have seen that the second
        option can bring 30% higher throughput than doing the copy separately as in option one.
.. note::
We recommend using our `config
<https://www.deepspeed.ai/docs/config-json/#optimizer-parameters>`_
to allow :meth:`deepspeed.initialize` to build this optimizer
for you.
Arguments:
model_params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in DeepSpeed CPUAdam!
adamw_mode: select between Adam and AdamW implementations (default: AdamW)
            fp32_optimizer_states: creates momentum and variance in full precision regardless of
the precision of the parameters (default: True)
"""
default_args = dict(lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
bias_correction=bias_correction,
amsgrad=amsgrad)
super(DeepSpeedCPUAdam, self).__init__(model_params, default_args)
cpu_info = get_cpu_info()
self.cpu_vendor = cpu_info["vendor_id_raw"].lower() if "vendor_id_raw" in cpu_info else "unknown"
if "amd" in self.cpu_vendor:
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group['params']):
if p.dtype == torch.half:
logger.warning("FP16 params for CPUAdam may not work on AMD CPUs")
break
else:
continue
break
self.opt_id = DeepSpeedCPUAdam.optimizer_id
DeepSpeedCPUAdam.optimizer_id = DeepSpeedCPUAdam.optimizer_id + 1
self.adam_w_mode = adamw_mode
self.fp32_optimizer_states = fp32_optimizer_states
self.ds_opt_adam = CPUAdamBuilder().load()
self.ds_opt_adam.create_adam(self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode,
should_log_le("info"))
def __del__(self):
# need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize
# is used multiple times in the same process (notebook or pytest worker)
self.ds_opt_adam.destroy_adam(self.opt_id)
def __setstate__(self, state):
super(DeepSpeedCPUAdam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None, fp16_param_groups=None):
"""Update the model parameters.
.. note::
This method will be called internally by ZeRO-Offload. DeepSpeed
users should still use ``engine.step()`` as shown in the
`Getting Started
<https://www.deepspeed.ai/getting-started/#training>`_ guide.
Args:
closure (callable, optional): closure to compute the loss.
Defaults to ``None``.
fp16_param_groups: FP16 GPU parameters to update. Performing the
copy here reduces communication time. Defaults to ``None``.
Returns:
loss: if ``closure`` is provided. Otherwise ``None``.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
# intended device for step
device = torch.device('cpu')
        # normalize fp16_param_groups into a list of parameter groups
if type(fp16_param_groups) is list:
if type(fp16_param_groups[0]) is not list:
fp16_param_groups = [fp16_param_groups]
elif fp16_param_groups is not None:
fp16_param_groups = [[fp16_param_groups]]
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group['params']):
if p.grad is None:
continue
assert p.device == device, f"CPUAdam param is on {p.device} and must be 'cpu', make " \
"sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config."
state = self.state[p]
# State initialization
if len(state) == 0:
#print(f'group {group_id} param {param_id} = {p.numel()}')
state['step'] = 0
#use full precision by default unless self.fp32_optimizer_states is off
state_dtype = torch.float if self.fp32_optimizer_states else p.dtype
# gradient momentums
state['exp_avg'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
#memory_format=torch.preserve_format)
# gradient variances
state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
#memory_format=torch.preserve_format)
state['step'] += 1
beta1, beta2 = group['betas']
if fp16_param_groups is not None:
self.ds_opt_adam.adam_update_copy(self.opt_id, state['step'], group['lr'], beta1, beta2,
group['eps'], group['weight_decay'], group['bias_correction'],
p.data, p.grad.data, state['exp_avg'], state['exp_avg_sq'],
fp16_param_groups[group_id][param_id].data)
else:
self.ds_opt_adam.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'],
group['weight_decay'], group['bias_correction'], p.data, p.grad.data,
state['exp_avg'], state['exp_avg_sq'])
return loss
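if __name__ == "__main__":
    # Minimal CPU-only usage sketch (not part of the original module); in real DeepSpeed runs this
    # optimizer is usually constructed for you via the DeepSpeed config and ZeRO-Offload. The model
    # and hyperparameter values below are arbitrary assumptions for demonstration only.
    model = torch.nn.Linear(64, 64)  # parameters stay on CPU, as required by this optimizer
    opt = DeepSpeedCPUAdam(model.parameters(), lr=1e-3, weight_decay=0.01)
    loss = model(torch.randn(8, 64)).pow(2).mean()
    loss.backward()
    opt.step()
    opt.zero_grad()
    print("step ok")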
| 8,544 | 45.950549 | 118 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/adam/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .cpu_adam import DeepSpeedCPUAdam
from .fused_adam import FusedAdam
| 169 | 20.25 | 38 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/adam/fused_adam.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Copyright NVIDIA/apex
This file is adapted from fused adam in NVIDIA/apex, commit 6bd01c4
"""
import torch
from .multi_tensor_apply import MultiTensorApply
multi_tensor_applier = MultiTensorApply(2048 * 32)
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import FusedAdamBuilder
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This version of fused Adam implements 2 fusions.
* Fusion of the Adam update's elementwise operations
* A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
:class:`apex.optimizers.FusedAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,
or ``torch.optim.Adam`` with ``adam_w_mode=False``::
opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
...
opt.step()
:class:`apex.optimizers.FusedAdam` may be used with or without Amp. If you wish to use :class:`FusedAdam` with Amp,
you may choose any ``opt_level``::
opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
        model, opt = amp.initialize(model, opt, opt_level="O0" or "O1" or "O2")
...
opt.step()
In general, ``opt_level="O1"`` is recommended.
.. warning::
A previous version of :class:`FusedAdam` allowed a number of additional arguments to ``step``. These additional arguments
are now deprecated and unnecessary.
    Adam was proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
        adam_w_mode (boolean, optional): Apply L2 regularization or weight decay;
            True for decoupled weight decay (also known as AdamW) (default: True)
        set_grad_none (bool, optional): whether to set grad to None when the zero_grad()
            method is called. (default: True)
.. _Adam - A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
adam_w_mode=True,
weight_decay=0.,
amsgrad=False,
set_grad_none=True):
if amsgrad:
raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay)
super(FusedAdam, self).__init__(params, defaults)
self.adam_w_mode = 1 if adam_w_mode else 0
self.set_grad_none = set_grad_none
fused_adam_cuda = FusedAdamBuilder().load()
# Skip buffer
self._dummy_overflow_buf = get_accelerator().IntTensor([0])
self.multi_tensor_adam = fused_adam_cuda.multi_tensor_adam
def zero_grad(self):
if self.set_grad_none:
for group in self.param_groups:
for p in group['params']:
p.grad = None
else:
super(FusedAdam, self).zero_grad()
def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, grad_scaler=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
"""
if any(p is not None for p in [grads, output_params, scale, grad_norms]):
raise RuntimeError(
'FusedAdam has been updated. Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.'
)
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
if len(group['params']) == 0:
continue
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
            # assume the same step across the group for now to simplify things
            # a per-parameter step can easily be supported by making it a tensor, or by passing a list into the kernel
if 'step' not in group:
group['step'] = 0
# create lists for multi-tensor apply
g_16, p_16, m_16, v_16 = [], [], [], []
g_bf, p_bf, m_bf, v_bf = [], [], [], []
g_32, p_32, m_32, v_32 = [], [], [], []
for p in group['params']:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError(
'FusedAdam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
                    # DeepSpeed ZeRO 3 processes each subgroup at a time, so we need to keep tracking the step count for each tensor separately.
                    # This is not an issue for ZeRO 1 & 2, since they apply a single optimization step to the whole param group at the same time.
                    # In order to keep backward compatibility for existing checkpoints, we use group['step'] to initialize state['step'] if it exists.
state['step'] = group.get('step', 0)
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if p.dtype == torch.float16:
g_16.append(p.grad.data)
p_16.append(p.data)
m_16.append(state['exp_avg'])
v_16.append(state['exp_avg_sq'])
elif p.dtype == torch.bfloat16:
g_bf.append(p.grad)
p_bf.append(p)
m_bf.append(state['exp_avg'])
v_bf.append(state['exp_avg_sq'])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
p_32.append(p.data)
m_32.append(state['exp_avg'])
v_32.append(state['exp_avg_sq'])
else:
                    raise RuntimeError('FusedAdam only supports fp16, bf16 and fp32.')
if len(g_16) > 0:
state['step'] += 1
multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16],
group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
bias_correction, group['weight_decay'])
if len(g_bf) > 0:
state['step'] += 1
multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_bf, p_bf, m_bf, v_bf],
group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
bias_correction, group['weight_decay'])
if len(g_32) > 0:
state['step'] += 1
multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32],
group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
bias_correction, group['weight_decay'])
return loss
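# Minimal usage sketch (illustrative; assumes a CUDA/ROCm accelerator is present and
# the FusedAdamBuilder kernel compiles): FusedAdam is constructed exactly like
# torch.optim.Adam, and step() takes no extra arguments.
if __name__ == "__main__":
    _device = get_accelerator().device_name()
    _model = torch.nn.Linear(16, 16).to(_device)
    _opt = FusedAdam(_model.parameters(), lr=1e-3, adam_w_mode=True)
    _model(torch.randn(4, 16, device=_device)).sum().backward()
    _opt.step()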
| 8,767 | 43.734694 | 155 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/adam/multi_tensor_apply.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Copyright NVIDIA/apex
This file is adapted from NVIDIA/apex, commit a109f85
"""
class MultiTensorApply(object):
def __init__(self, chunk_size):
self.chunk_size = chunk_size
def __call__(self, op, noop_flag_buffer, tensor_lists, *args):
return op(self.chunk_size, noop_flag_buffer, tensor_lists, *args)
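# Usage sketch: MultiTensorApply only curries a chunk size into a fused kernel
# launch. With a loaded FusedAdamBuilder op (an assumption here), a call mirrors
# the one made in fused_adam.py:
#   applier = MultiTensorApply(2048 * 32)
#   applier(fused_adam_cuda.multi_tensor_adam, overflow_buf,
#           [grads, params, exp_avgs, exp_avg_sqs],
#           lr, beta1, beta2, eps, step, adam_w_mode, bias_correction, weight_decay)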
| 429 | 22.888889 | 73 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/quantizer/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .quantizer import ds_quantizer
| 132 | 18 | 38 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/quantizer/quantizer.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.op_builder import QuantizerBuilder
# Cuda modules will be imported if needed
quantizer_cuda_module = None
def ds_quantizer(input, groups=1, bit_num=8, sr=False, asym=False):
# Load cuda modules if needed
global quantizer_cuda_module
if quantizer_cuda_module is None:
quantizer_cuda_module = QuantizerBuilder().load()
if sr:
if asym:
quantize_func = quantizer_cuda_module.ds_sr_quantize_asym_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_sr_quantize_asym_fp32
else:
quantize_func = quantizer_cuda_module.ds_sr_quantize_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_sr_quantize_fp32
else:
if asym:
quantize_func = quantizer_cuda_module.ds_quantize_asym_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_quantize_asym_fp32
else:
quantize_func = quantizer_cuda_module.ds_quantize_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_quantize_fp32
return quantize_func(input, groups, bit_num)
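# Minimal usage sketch (assumes the QuantizerBuilder kernel compiles and a CUDA
# device is available): symmetric, non-stochastic 8-bit quantization of an fp16 tensor.
if __name__ == "__main__":
    _x = torch.randn(4, 256, dtype=torch.half, device='cuda')
    _q = ds_quantizer(_x, groups=4, bit_num=8)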
| 1,193 | 38.8 | 155 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/random_ltd/dropping_utils.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.op_builder import RandomLTDBuilder
"""
Returns:
sampled_indices: [layers, batch_size, reserved_length]
new_mask: [batch_size, 1, reserved_length, reserved_length]
"""
random_ltd_module = None
def gpt_sample_tokens(reserved_length: int,
seq_length: int,
batch_size: int,
layers: int = 1,
device: str = 'cpu',
attn_mask: torch.Tensor = None):
prob_dist = torch.ones((layers * batch_size, seq_length), device=device)
sampled_indices = torch.multinomial(prob_dist, reserved_length)
sampled_indices = sampled_indices.reshape(layers, batch_size, reserved_length).to(torch.int32)
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
sampled_indices = random_ltd_module.token_sort_(sampled_indices, seq_length)
    # The optimized gather kernel is not clearly better here, since it can break
    # alignment when the sequence length is not divisible by 16.
    # new_mask = random_ltd_module.mask_gather_gpt(attn_mask, reserved_length)
if attn_mask is not None:
new_mask = attn_mask[:, :, :reserved_length, :reserved_length]
else:
new_mask = None
return sampled_indices, new_mask
"""
Returns:
sampled_indices: [layers, batch_size, reserved_length]
new_mask: [layers, batch_size, 1, reserved_length, reserved_length]
"""
def bert_sample_tokens(reserved_length: int,
seq_length: int,
batch_size: int,
layers: int = 1,
device: str = 'cpu',
attn_mask: torch.Tensor = None):
assert attn_mask is not None
prob_dist = torch.ones((layers * batch_size, seq_length), device=device)
sampled_indices = torch.multinomial(prob_dist, reserved_length)
sampled_indices = sampled_indices.reshape(layers, batch_size, reserved_length).to(torch.int32)
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
sampled_indices = random_ltd_module.token_sort_(sampled_indices, seq_length)
dtype = sampled_indices.dtype
sampled_indices = sampled_indices.to(torch.long)
new_mask = []
for l in range(layers):
tmp_mask_list = []
for i in range(batch_size):
mask_tmp = attn_mask[i:i + 1, :, sampled_indices[l][i], :]
tmp_mask_list.append(mask_tmp[:, :, :, sampled_indices[l][i]])
new_mask.append(torch.cat(tmp_mask_list, dim=0))
return sampled_indices.to(dtype), new_mask
class GatherTokens(torch.autograd.Function):
@staticmethod
def forward(ctx, activations: torch.Tensor, sorted_indices: torch.Tensor, batch_first: bool):
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
ctx.save_for_backward(activations, sorted_indices)
ctx.batch_first = batch_first
return activations, random_ltd_module.token_gather(activations, sorted_indices, batch_first)
@staticmethod
def backward(ctx, a_gradients: torch.Tensor, g_gradients: torch.Tensor):
g_gradients = g_gradients.contiguous()
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
activations, sorted_indices = ctx.saved_tensors
batch_first = ctx.batch_first
return random_ltd_module.token_scatter_(a_gradients, g_gradients, sorted_indices, batch_first), None, None
class ScatterTokens(torch.autograd.Function):
@staticmethod
def forward(ctx, all_activations: torch.Tensor, layer_activations: torch.Tensor, sorted_indices: torch.Tensor,
batch_first: bool):
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
scatter_results = random_ltd_module.token_scatter_(all_activations.clone(), layer_activations, sorted_indices,
batch_first)
ctx.save_for_backward(sorted_indices)
ctx.batch_first = batch_first
return scatter_results
@staticmethod
def backward(ctx, out_gradients: torch.Tensor):
out_gradients = out_gradients.contiguous()
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
sorted_indices, = ctx.saved_tensors
batch_first = ctx.batch_first
ret_val = random_ltd_module.token_gather(out_gradients, sorted_indices, batch_first)
return out_gradients, ret_val, None, None
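# Minimal usage sketch (illustrative shapes; assumes the RandomLTDBuilder kernel
# compiles and a CUDA device is available): sample per-layer token indices, gather
# the reserved tokens, then scatter the (possibly transformed) tokens back.
if __name__ == "__main__":
    _idx, _ = gpt_sample_tokens(reserved_length=64, seq_length=128, batch_size=2, layers=1, device='cuda')
    _acts = torch.randn(2, 128, 512, device='cuda', requires_grad=True)
    _acts, _reserved = GatherTokens.apply(_acts, _idx[0], True)  # batch_first=True
    _acts = ScatterTokens.apply(_acts, _reserved, _idx[0], True)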
| 4,902 | 35.864662 | 118 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/random_ltd/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .dropping_utils import gpt_sample_tokens, bert_sample_tokens, GatherTokens, ScatterTokens
| 191 | 26.428571 | 94 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/adagrad/cpu_adagrad.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.op_builder import CPUAdagradBuilder
from deepspeed.utils.logging import should_log_le
class DeepSpeedCPUAdagrad(torch.optim.Optimizer):
optimizer_id = 0
def __init__(self, model_params, lr=1e-2, eps=1e-10, weight_decay=0, amsgrad=False, fp32_optimizer_states=True):
default_args = dict(lr=lr, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
super(DeepSpeedCPUAdagrad, self).__init__(model_params, default_args)
self.opt_id = DeepSpeedCPUAdagrad.optimizer_id
DeepSpeedCPUAdagrad.optimizer_id = DeepSpeedCPUAdagrad.optimizer_id + 1
self.fp32_optimizer_states = fp32_optimizer_states
self.ds_opt_adagrad = CPUAdagradBuilder().load()
self.ds_opt_adagrad.create_adagrad(self.opt_id, lr, eps, weight_decay, should_log_le("info"))
def __del__(self):
# need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize
# is used multiple times in the same process (notebook or pytest worker)
self.ds_opt_adagrad.destroy_adagrad(self.opt_id)
def __setstate__(self, state):
super(DeepSpeedCPUAdagrad, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None, fp16_param_groups=None):
"""Update the model parameters.
.. note::
This method will be called internally by ZeRO-Offload. DeepSpeed
users should still use ``engine.step()`` as shown in the
`Getting Started
<https://www.deepspeed.ai/getting-started/#training>`_ guide.
Args:
closure (callable, optional): closure to compute the loss.
Defaults to ``None``.
fp16_param_groups: FP16 GPU parameters to update. Performing the
copy here reduces communication time. Defaults to ``None``.
Returns:
loss: if ``closure`` is provided. Otherwise ``None``.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
# intended device for step
device = torch.device('cpu')
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group['params']):
if p.grad is None:
continue
assert p.device == device, f"CPUAdagrad param is on {p.device} and must be 'cpu', make " \
"sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config."
state = self.state[p]
# State initialization
if len(state) == 0:
#print(f'group {group_id} param {param_id} = {p.numel()}')
state['step'] = 0
#use full precision by default unless self.fp32_optimizer_states is off
state_dtype = torch.float if self.fp32_optimizer_states else p.dtype
#memory_format=torch.preserve_format)
# gradient variances
state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device='cpu')
#memory_format=torch.preserve_format)
state['step'] += 1
                if p.grad.is_sparse:
sparse_param = p.sparse_mask(p.grad)
sparse_exp_avg_sq = state['exp_avg_sq'].sparse_mask(p.grad)
self.ds_opt_adagrad.adagrad_update(self.opt_id, state['step'], group['lr'], group['eps'],
group['weight_decay'], sparse_param.values(), p.grad.values(),
sparse_exp_avg_sq.values())
p[sparse_param.indices()] = sparse_param.values()
state['exp_avg_sq'][sparse_exp_avg_sq.indices()] = sparse_exp_avg_sq.values()
if fp16_param_groups is not None:
fp16_param_groups[group_id][param_id][sparse_param.indices()] = sparse_param.values()
else:
if fp16_param_groups is not None:
self.ds_opt_adagrad.adagrad_update_copy(self.opt_id, state['step'], group['lr'], group['eps'],
group['weight_decay'], p.data, p.grad.data,
state['exp_avg_sq'],
fp16_param_groups[group_id][param_id].data)
else:
self.ds_opt_adagrad.adagrad_update(self.opt_id, state['step'], group['lr'], group['eps'],
group['weight_decay'], p.data, p.grad.data,
state['exp_avg_sq'])
return loss
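# Minimal usage sketch (illustrative; assumes the CPUAdagradBuilder kernel compiles;
# parameters must live on the CPU, as asserted in step() above):
if __name__ == "__main__":
    _model = torch.nn.Linear(8, 8)
    _opt = DeepSpeedCPUAdagrad(_model.parameters(), lr=1e-2)
    _model(torch.randn(4, 8)).sum().backward()
    _opt.step()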
| 5,089 | 45.272727 | 118 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/adagrad/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .cpu_adagrad import DeepSpeedCPUAdagrad
| 141 | 19.285714 | 44 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/transformer.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import json
import math
import torch
from torch import nn
from torch.autograd import Function
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import TransformerBuilder, StochasticTransformerBuilder
# Cuda modules will be imported if needed
transformer_cuda_module = None
stochastic_transformer_cuda_module = None
class TransformerConfig():
def __init__(self, batch_size, hidden_size, intermediate_size, heads, attn_dropout_ratio, hidden_dropout_ratio,
num_hidden_layers, initializer_range):
self.layer_id = -1
self.batch_size = batch_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.heads = heads
self.attn_dropout_ratio = attn_dropout_ratio
self.hidden_dropout_ratio = hidden_dropout_ratio
self.num_hidden_layers = num_hidden_layers
self.initializer_range = initializer_range
class DeepSpeedTransformerConfig(TransformerConfig):
"""Initialize the DeepSpeed Transformer Config.
Arguments:
batch_size: The maximum batch size used for running the kernel on each GPU
hidden_size: The hidden size of the transformer layer
intermediate_size: The intermediate size of the feed-forward part of transformer layer
heads: The number of heads in the self-attention of the transformer layer
attn_dropout_ratio: The ratio of dropout for the attention's output
hidden_dropout_ratio: The ratio of dropout for the transformer's output
num_hidden_layers: The number of transformer layers
initializer_range: BERT model's initializer range for initializing parameter data
            local_rank: Optional: The rank of the GPU running the transformer kernel. It is not
                required if the model has already set the current device; otherwise it needs to be
                set so that the transformer kernel can work on the right device
seed: The random seed for the dropout layers
fp16: Enable half-precision computation
pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture
normalize_invertible: Optional: Enable invertible LayerNorm execution (dropping the input activation),
default is False
gelu_checkpoint: Optional: Enable checkpointing of Gelu activation output to save memory,
default is False
            adjust_init_range: Optional: Set to True (default) if the model adjusts the initial weight values of
                its self-attention output and layer output; False leaves the initializer_range unchanged.
See the adjustment below:
output_std = self.config.initializer_range / math.sqrt(2.0 * num_layers)
attn_dropout_checkpoint: Optional: Enable checkpointing of attention dropout to save memory,
default is False
            stochastic_mode: Enable for high performance; note that this flag has some level of
                non-determinism and can produce different results on different runs. However, we have seen
                that enabling it does not affect pretraining tasks such as BERT, which can still reach
                a high accuracy level. For downstream tasks such as fine-tuning, we recommend
                turning it off in order to reproduce the same result through the regular kernel execution.
return_tuple: Enable if using the return_tuple interface style for sending out the forward results.
training: Enable for training rather than inference.
"""
def __init__(self,
batch_size=-1,
hidden_size=-1,
intermediate_size=-1,
heads=-1,
attn_dropout_ratio=-1,
hidden_dropout_ratio=-1,
num_hidden_layers=-1,
initializer_range=-1,
layer_norm_eps=1e-12,
local_rank=-1,
seed=-1,
fp16=False,
pre_layer_norm=True,
normalize_invertible=False,
gelu_checkpoint=False,
adjust_init_range=True,
attn_dropout_checkpoint=False,
stochastic_mode=False,
return_tuple=False,
training=True):
super(DeepSpeedTransformerConfig,
self).__init__(batch_size, hidden_size,
(intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads,
attn_dropout_ratio, hidden_dropout_ratio, num_hidden_layers, initializer_range)
self.fp16 = fp16
self.pre_layer_norm = pre_layer_norm
self.local_rank = local_rank
self.seed = seed
self.normalize_invertible = normalize_invertible
self.gelu_checkpoint = gelu_checkpoint # True: if higher batch size is required
self.adjust_init_range = adjust_init_range
self.test_gemm = False
self.layer_norm_eps = layer_norm_eps
self.training = training
self.is_grad_enabled = True
self.attn_dropout_checkpoint = attn_dropout_checkpoint
self.stochastic_mode = stochastic_mode
self.return_tuple = return_tuple
@classmethod
def from_dict(cls, json_object):
config = DeepSpeedTransformerConfig()
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
        with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
class DeepSpeedTransformerFunction(Function):
@staticmethod
def forward(ctx, input, input_mask, self, grads, layer_id, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw,
attn_nb, inter_w, inter_b, output_w, output_b, norm_w, norm_b, config):
cuda_module = stochastic_transformer_cuda_module if config.stochastic_mode else transformer_cuda_module
forward_func = cuda_module.forward_fp16 if config.fp16 else cuda_module.forward_fp32
inp_size = input.size()
if inp_size[1] % 16 != 0:
input = torch.cat(
(input,
torch.randn(
(inp_size[0], (16 - (inp_size[1] % 16)), inp_size[2]), device=input.device, dtype=input.dtype)),
1)
input_mask = torch.cat((input_mask, torch.ones((inp_size[0], input_mask.shape[1], input_mask.shape[2], \
(16 - (inp_size[1] % 16))), device=input_mask.device, dtype=input_mask.dtype) * -10000), 3)
(output, inp_norm, qkv_tf, soft_inp, ctx_bufB, attn_o_inp, add_res, ff1_inp, gelu_inp, ff2_inp,
attn_prob_dropout_mask, attn_output_dropout_mask, layer_output_dropout_mask, attn_layer_norm_var,
attn_layer_norm_mean, layer_norm_var, layer_norm_mean) = forward_func(
config.layer_id, input, input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w,
inter_b, output_w, output_b, norm_w, norm_b, config.training and config.is_grad_enabled,
config.pre_layer_norm, config.attn_dropout_checkpoint, config.normalize_invertible,
config.gelu_checkpoint)
# For testing only.
if grads is not None:
for i in [2]:
attn_qkvw.register_hook(lambda x, i=i, self=self: grads.append([
x[i * attn_ow.size(0):(i + 1) * attn_ow.size(0)], ("Q_W" if i == 0 else "K_W" if i == 1 else "V_W")
]))
for i in [2]:
attn_qkvb.register_hook(lambda x, i=i, self=self: grads.append([
x[i * attn_ow.size(0):(i + 1) * attn_ow.size(0)], ("Q_B" if i == 0 else "K_B" if i == 1 else "V_B")
]))
attn_ow.register_hook(lambda x, self=self: grads.append([x, "O_W"]))
attn_ob.register_hook(lambda x, self=self: grads.append([x, "O_B"]))
attn_nw.register_hook(lambda x, self=self: grads.append([x, "N2_W"]))
attn_nb.register_hook(lambda x, self=self: grads.append([x, "N2_B"]))
inter_w.register_hook(lambda x, self=self: grads.append([x, "int_W"]))
inter_b.register_hook(lambda x, self=self: grads.append([x, "int_B"]))
output_w.register_hook(lambda x, self=self: grads.append([x, "out_W"]))
output_b.register_hook(lambda x, self=self: grads.append([x, "out_B"]))
norm_w.register_hook(lambda x, self=self: grads.append([x, "norm_W"]))
norm_b.register_hook(lambda x, self=self: grads.append([x, "norm_B"]))
if config.is_grad_enabled and config.training:
if (config.pre_layer_norm and config.normalize_invertible):
ctx.save_for_backward(input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w,
inter_b, output_w, output_b, norm_w, norm_b)
else:
ctx.save_for_backward(output, input, input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw,
attn_nb, inter_w, inter_b, output_w, output_b, norm_w, norm_b)
ctx.config = config
if (config.pre_layer_norm or not config.normalize_invertible):
ctx.inp_norm = inp_norm
ctx.qkv_tf = qkv_tf
ctx.soft_inp = soft_inp
if not config.attn_dropout_checkpoint:
ctx.ctx_bufB = ctx_bufB
ctx.attn_o_inp = attn_o_inp
if not config.normalize_invertible:
ctx.add_res = add_res
ctx.attn_layer_norm_mean = attn_layer_norm_mean
ctx.layer_norm_mean = layer_norm_mean
ctx.ff1_inp = ff1_inp
if not config.gelu_checkpoint:
ctx.gelu_inp = gelu_inp
ctx.ff2_inp = ff2_inp
ctx.attn_prob_dropout_mask = attn_prob_dropout_mask
ctx.attn_output_dropout_mask = attn_output_dropout_mask
ctx.layer_output_dropout_mask = layer_output_dropout_mask
ctx.attn_layer_norm_var = attn_layer_norm_var
ctx.layer_norm_var = layer_norm_var
if inp_size[1] % 16 != 0:
output = torch.narrow(output, 1, 0, inp_size[1])
if config.return_tuple:
return (output, ) # outputs -> (output) : outputs[0] = output
else:
return output
@staticmethod
def backward(ctx, grad_output):
bsz = grad_output.shape[0]
grad_output_shape = grad_output.size()
if grad_output_shape[1] % 16 != 0:
grad_output = torch.cat((grad_output, torch.zeros((bsz, (16 - (grad_output_shape[1] % 16)), \
grad_output_shape[2]), device=grad_output.device, dtype=grad_output.dtype)), 1)
assert ctx.config.training
if (ctx.config.pre_layer_norm and ctx.config.normalize_invertible):
(input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w, inter_b, output_w,
output_b, norm_w, norm_b) = ctx.saved_tensors
else:
(output, input, input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w, inter_b,
output_w, output_b, norm_w, norm_b) = ctx.saved_tensors
cuda_module = stochastic_transformer_cuda_module if ctx.config.stochastic_mode else transformer_cuda_module
backward_func = cuda_module.backward_fp16 if ctx.config.fp16 else cuda_module.backward_fp32
(grad_input, grad_attn_qkvw, grad_attn_qkvb, grad_attn_ow, grad_attn_ob, grad_attn_nw, grad_attn_nb,
grad_inter_w, grad_inter_b, grad_output_w, grad_output_b, grad_norm_w, grad_norm_b) = backward_func(
ctx.config.layer_id, grad_output,
(ctx.inp_norm if (ctx.config.pre_layer_norm and ctx.config.normalize_invertible) else output),
(ctx.inp_norm if (ctx.config.pre_layer_norm or not ctx.config.normalize_invertible) else input),
ctx.qkv_tf, ctx.soft_inp, (ctx.soft_inp if ctx.config.attn_dropout_checkpoint else ctx.ctx_bufB),
ctx.attn_o_inp, (ctx.ff1_inp if ctx.config.normalize_invertible else ctx.add_res), ctx.ff1_inp,
(ctx.ff2_inp if ctx.config.gelu_checkpoint else ctx.gelu_inp), ctx.ff2_inp, ctx.attn_prob_dropout_mask,
ctx.attn_output_dropout_mask, ctx.layer_output_dropout_mask, ctx.attn_layer_norm_var,
ctx.attn_layer_norm_mean, ctx.layer_norm_var, ctx.layer_norm_mean,
(ctx.inp_norm if
(ctx.config.pre_layer_norm and ctx.config.normalize_invertible) else input), input_mask, attn_qkvw,
attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w, inter_b, output_w, output_b, norm_w, norm_b)
# This appears to be an effective way to release context memory
ctx.qkv_tf = None
ctx.soft_inp = None
ctx.ctx_bufB = None
ctx.gelu_inp = None
ctx.ff2_inp = None
ctx.attn_o_inp = None
ctx.ff1_inp = None
ctx.add_res = None
ctx.inp_norm = None
ctx.config = None
ctx.attn_layer_norm_mean = None
ctx.layer_norm_mean = None
ctx.attn_prob_dropout_mask = None
ctx.attn_output_dropout_mask = None
ctx.layer_output_dropout_mask = None
ctx.attn_layer_norm_var = None
ctx.layer_norm_var = None
if grad_output_shape[1] % 16 != 0:
grad_input = torch.narrow(grad_input, 1, 0, grad_output_shape[1])
return (grad_input, None, None, None, None, grad_attn_qkvw, grad_attn_qkvb, grad_attn_ow, grad_attn_ob,
grad_attn_nw, grad_attn_nb, grad_inter_w, grad_inter_b, grad_output_w, grad_output_b, grad_norm_w,
grad_norm_b, None)
class DeepSpeedTransformerLayer(nn.Module):
"""Initialize the DeepSpeed Transformer Layer.
Static variable:
layer_id: The layer-index counter starting from 0 and incrementing by 1 every time a layer object is instantiated,
e.g. if a model has 24 transformer layers, layer_id goes from 0 to 23.
Arguments:
config: An object of DeepSpeedTransformerConfig
initial_weights: Optional: Only used for unit test
initial_biases: Optional: Only used for unit test
"""
layer_id = 0
def __init__(self, config, initial_weights=None, initial_biases=None):
super(DeepSpeedTransformerLayer, self).__init__()
self.config = config
self.config.layer_id = DeepSpeedTransformerLayer.layer_id
DeepSpeedTransformerLayer.layer_id = DeepSpeedTransformerLayer.layer_id + 1
print("DeepSpeed Transformer config is ", self.config.__dict__)
if self.config.local_rank >= 0:
get_accelerator().set_device(self.config.local_rank)
if initial_weights is None and initial_biases is None:
self.attn_qkvw = nn.Parameter(torch.Tensor(self.config.hidden_size * 3, self.config.hidden_size))
self.attn_qkvb = nn.Parameter(torch.Tensor(self.config.hidden_size * 3))
self.attn_ow = nn.Parameter(torch.Tensor(self.config.hidden_size, self.config.hidden_size))
self.attn_ob = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.inter_w = nn.Parameter(torch.Tensor(self.config.intermediate_size, self.config.hidden_size))
self.inter_b = nn.Parameter(torch.Tensor(self.config.intermediate_size))
self.output_w = nn.Parameter(torch.Tensor(self.config.hidden_size, self.config.intermediate_size))
self.output_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.norm_w = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.norm_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.init_transformer_weights(self.config.adjust_init_range)
else:
# For testing only.
q = initial_weights[0].data
k = initial_weights[1].data
v = initial_weights[2].data
self.attn_qkvw = nn.Parameter(torch.cat((q, k, v)))
#self.attn_qkvw[i * self.config.hidden_size:(i + 1) * self.config.hidden_size] = \
# initial_weights[i].clone()
#torch.empty_like(initial_weights[i]).data.copy_(initial_weights[i].data)
self.attn_qkvb = nn.Parameter(torch.Tensor(self.config.hidden_size * 3))
self.attn_qkvb.data.zero_()
self.attn_ow = initial_weights[3]
self.attn_ob = initial_biases[3]
self.attn_nw = initial_weights[4]
self.attn_nb = initial_biases[4]
self.inter_w = initial_weights[5]
self.inter_b = initial_biases[5]
self.output_w = initial_weights[6]
self.output_b = initial_biases[6]
self.norm_w = initial_weights[7]
self.norm_b = initial_biases[7]
# Load cuda modules if needed
global transformer_cuda_module, stochastic_transformer_cuda_module
if transformer_cuda_module is None and not self.config.stochastic_mode:
transformer_cuda_module = TransformerBuilder().load()
if stochastic_transformer_cuda_module is None and self.config.stochastic_mode:
stochastic_transformer_cuda_module = StochasticTransformerBuilder().load()
# create the layer in cuda kernels.
cuda_module = stochastic_transformer_cuda_module if self.config.stochastic_mode else transformer_cuda_module
create_layer_func = cuda_module.create_transformer_layer_fp16 if self.config.fp16 else cuda_module.create_transformer_layer_fp32
create_layer_func(self.config.layer_id, self.config.batch_size, self.config.hidden_size, self.config.heads,
self.config.intermediate_size, self.config.attn_dropout_ratio,
self.config.hidden_dropout_ratio, self.config.layer_norm_eps, self.config.seed,
self.config.pre_layer_norm, self.config.test_gemm, self.config.attn_dropout_checkpoint,
self.config.normalize_invertible, self.config.gelu_checkpoint, self.config.stochastic_mode)
def init_transformer_weights(self, adjust_init_range=False):
num_layers = self.config.num_hidden_layers
output_std = self.config.initializer_range
if adjust_init_range and self.config.local_rank == 0:
print("Accounting for accumulation on the residual path")
output_std = self.config.initializer_range / math.sqrt(2.0 * num_layers)
self.attn_qkvw.data.normal_(mean=0.0, std=self.config.initializer_range)
self.attn_qkvb.data.zero_()
self.attn_ow.data.normal_(mean=0.0, std=output_std)
self.attn_ob.data.zero_()
self.attn_nw.data.fill_(1.0)
self.attn_nb.data.zero_()
self.inter_w.data.normal_(mean=0.0, std=self.config.initializer_range)
self.inter_b.data.zero_()
self.output_w.data.normal_(mean=0.0, std=output_std)
self.output_b.data.zero_()
self.norm_w.data.fill_(1.0)
self.norm_b.data.zero_()
def forward(self,
hidden_states,
attention_mask=None,
head_mask=None,
layer_head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
grads=None):
self.config.is_grad_enabled = torch.is_grad_enabled()
self.config.training = self.training
return DeepSpeedTransformerFunction.apply(hidden_states, attention_mask, self, grads, self.config.layer_id,
self.attn_qkvw, self.attn_qkvb, self.attn_ow, self.attn_ob,
self.attn_nw, self.attn_nb, self.inter_w, self.inter_b,
self.output_w, self.output_b, self.norm_w, self.norm_b, self.config)
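# Minimal construction sketch (illustrative sizes; assumes a CUDA device and that
# the TransformerBuilder kernel compiles). The sequence length is kept divisible
# by 16 so the padding path in DeepSpeedTransformerFunction is not exercised.
if __name__ == "__main__":
    _config = DeepSpeedTransformerConfig(batch_size=8, hidden_size=512, intermediate_size=2048, heads=8,
                                         attn_dropout_ratio=0.1, hidden_dropout_ratio=0.1, num_hidden_layers=12,
                                         initializer_range=0.02, fp16=True, pre_layer_norm=True)
    _layer = DeepSpeedTransformerLayer(_config).half().to('cuda')
    _hidden = torch.randn(8, 128, 512, dtype=torch.half, device='cuda')
    _mask = torch.zeros(8, 1, 128, 128, dtype=torch.half, device='cuda')  # additive mask, all zeros
    _out = _layer(_hidden, _mask)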
| 20,600 | 48.881356 | 136 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
from .inference.config import DeepSpeedInferenceConfig
from ...model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
from .inference.moe_inference import DeepSpeedMoEInferenceConfig, DeepSpeedMoEInference
| 413 | 40.4 | 94 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/bias_add.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from typing import Optional
import torch
from deepspeed.ops.op_builder import SpatialInferenceBuilder
spatial_cuda_module = None
def nhwc_bias_add(activation: torch.Tensor,
bias: torch.Tensor,
other: Optional[torch.Tensor] = None,
other_bias: Optional[torch.Tensor] = None) -> torch.Tensor:
global spatial_cuda_module
if spatial_cuda_module is None:
spatial_cuda_module = SpatialInferenceBuilder().load()
if other is None:
return spatial_cuda_module.nhwc_bias_add(activation, bias)
elif other_bias is None:
return spatial_cuda_module.nhwc_bias_add_add(activation, bias, other)
else:
return spatial_cuda_module.nhwc_bias_add_bias_add(activation, bias, other, other_bias)
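# Minimal usage sketch (assumes the SpatialInferenceBuilder kernel compiles and a
# CUDA device is available; activations use a channels-last NHWC layout):
if __name__ == "__main__":
    _act = torch.randn(1, 64, 64, 320, dtype=torch.half, device='cuda')
    _bias = torch.randn(320, dtype=torch.half, device='cuda')
    _out = nhwc_bias_add(_act, _bias)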
| 876 | 31.481481 | 94 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/triton_ops.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Inspired by original Triton implementation:
https://github.com/openai/triton/blob/b244db06da24a87453a40ad35b085ee37dac3705/python/tutorials/06-fused-attention.py
"""
import torch
import triton
import triton.language as tl
@triton.jit
def _fwd_kernel(
Q,
K,
V,
sm_scale,
TMP,
Out,
stride_qz,
stride_qh,
stride_qm,
stride_qk,
stride_kz,
stride_kh,
stride_kn,
stride_kk,
stride_vz,
stride_vh,
stride_vk,
stride_vn,
stride_oz,
stride_oh,
stride_om,
stride_on,
Z,
H,
N_CTX,
BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_DMODEL)
off_q = off_hz * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk
off_k = off_hz * stride_kh + offs_n[:, None] * stride_kn + offs_d[None, :] * stride_kk
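    # note: the V offsets below reuse the Q strides, which assumes Q and V share an identical memory layout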
off_v = off_hz * stride_vh + offs_n[:, None] * stride_qm + offs_d[None, :] * stride_qk
# Initialize pointers to Q, K, V
q_ptrs = Q + off_q
k_ptrs = K + off_k
v_ptrs = V + off_v
# initialize pointer to m and l
t_ptrs = TMP + off_hz * N_CTX + offs_m
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# load q: it will stay in SRAM throughout
q = tl.load(q_ptrs)
# loop over k, v and update accumulator
for start_n in range(0, N_CTX, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(k_ptrs + start_n * stride_kn)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k, trans_b=True)
qk *= sm_scale
# -- compute m_ij, p, l_ij
m_ij = tl.max(qk, 1)
p = tl.exp(qk - m_ij[:, None])
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
m_i_new = tl.maximum(m_i, m_ij)
alpha = tl.exp(m_i - m_i_new)
beta = tl.exp(m_ij - m_i_new)
l_i_new = alpha * l_i + beta * l_ij
# -- update output accumulator --
# scale p
p_scale = beta / l_i_new
p = p * p_scale[:, None]
# scale acc
acc_scale = l_i / l_i_new * alpha
tl.store(t_ptrs, acc_scale)
acc_scale = tl.load(t_ptrs) # BUG: have to store and immediately load
acc = acc * acc_scale[:, None]
# update acc
v = tl.load(v_ptrs + start_n * stride_vk)
p = p.to(tl.float16)
acc += tl.dot(p, v)
# update m_i and l_i
l_i = l_i_new
m_i = m_i_new
# initialize pointers to output
offs_n = tl.arange(0, BLOCK_DMODEL)
off_o = off_hz * stride_oh + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on
out_ptrs = Out + off_o
tl.store(out_ptrs, acc)
class triton_flash_attn(torch.nn.Module):
def __init__(self, ):
super(triton_flash_attn, self).__init__()
def forward(self, q, k, v, sm_scale, block_128=True):
BLOCK = 128 if block_128 else 64
# shape constraints
Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
o = torch.empty_like(q)
grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1])
tmp = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
num_warps = 4 if Lk <= 64 else 8
_fwd_kernel[grid](
q,
k,
v,
sm_scale,
tmp,
o,
q.stride(0),
q.stride(1),
q.stride(2),
q.stride(3),
k.stride(0),
k.stride(1),
k.stride(2),
k.stride(3),
v.stride(0),
v.stride(1),
v.stride(2),
v.stride(3),
o.stride(0),
o.stride(1),
o.stride(2),
o.stride(3),
k.shape[0],
k.shape[1],
k.shape[2],
BLOCK_M=BLOCK,
BLOCK_N=BLOCK,
BLOCK_DMODEL=Lk,
num_warps=num_warps,
num_stages=1,
)
return o
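# Minimal usage sketch (assumes a CUDA device with Triton installed; inputs are fp16
# with layout [batch, heads, seq, head_dim], seq divisible by the block size, and
# head_dim <= 128):
if __name__ == "__main__":
    _q = torch.randn(1, 8, 256, 64, dtype=torch.half, device='cuda')
    _k = torch.randn_like(_q)
    _v = torch.randn_like(_q)
    _o = triton_flash_attn()(_q, _k, _v, sm_scale=1.0 / (64 ** 0.5))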
| 4,434 | 27.798701 | 117 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/diffusers_2d_transformer.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
class Diffusers2DTransformerConfig():
def __init__(self, int8_quantization=False):
self.int8_quantization = int8_quantization
| 236 | 20.545455 | 50 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/ds_attention.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
import torch
import torch.nn as nn
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
from .op_binding import LinearOp, VectorMatMulOp, SoftmaxContextOp, QKVGemmOp, SoftmaxOp
minus_inf = -10000.0
class DeepSpeedSelfAttention(nn.Module):
num_layers = 0
_qkv_buffers = []
def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1):
super(DeepSpeedSelfAttention, self).__init__()
self.config = config
data_type = self.config.dtype
data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype
self.config.layer_id = DeepSpeedSelfAttention.num_layers
DeepSpeedSelfAttention.num_layers = DeepSpeedSelfAttention.num_layers + 1
device = get_accelerator().current_device_name() #if config.bigscience_bloom else 'cpu'
if self.config.set_empty_params:
self.attn_qw = None
self.attn_qb = None
self.attn_kw = None
self.attn_kb = None
self.attn_vw = None
self.attn_vb = None
self.attn_qkvw = None
self.attn_qkvb = None
self.attn_ow = None
self.attn_ob = None
else:
qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3
self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size,
qkv_size_per_partition,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, dtype=data_type_fp, device=device),
requires_grad=False)
out_size_per_partition = self.config.hidden_size // self.config.mp_size
self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size
self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size
self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads
self.mp_group = mp_group
# used for quantization
self.q_scales = q_scales
self.q_groups = q_groups
self.merge_count = int(math.log2(merge_count))
self.norm_factor = math.sqrt(self.config.hidden_size // self.config.heads)
if not config.use_mup:
self.norm_factor = math.sqrt(self.norm_factor)
if self.config.scale_attn_by_inverse_layer_idx is True:
self.norm_factor *= math.sqrt(self.config.layer_id + 1)
# https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191
self.qkv_func = QKVGemmOp(config)
self.score_context_func = SoftmaxContextOp(config)
self.linear_func = LinearOp(config)
self.vector_matmul_func = VectorMatMulOp(config)
if len(DeepSpeedSelfAttention._qkv_buffers) == 0:
DeepSpeedSelfAttention._qkv_buffers = [
torch.empty(self.hidden_size_per_partition * 3,
self.config.hidden_size,
dtype=data_type_fp,
device=device),
torch.empty(self.hidden_size_per_partition * 3, dtype=data_type_fp, device=device)
]
def compute_attention(self, qkv_out, input_mask, layer_past, alibi):
if isinstance(qkv_out, list) or isinstance(qkv_out, tuple):
qkv_out = qkv_out[0]
no_masking = input_mask is None
if no_masking:
input_mask = torch.empty(1)
attn_key_value = self.score_context_func(
query_key_value=qkv_out,
attn_mask=((1 - input_mask).to(qkv_out.dtype) *
minus_inf) if input_mask.dtype == torch.int64 else input_mask,
heads=self.num_attention_heads_per_partition,
norm_factor=(1 / self.norm_factor if self.config.scale_attention else 1.0),
no_masking=no_masking,
layer_id=self.config.layer_id,
num_layers=DeepSpeedSelfAttention.num_layers,
alibi=alibi)
context_layer, key_layer, value_layer = attn_key_value
return context_layer, key_layer, value_layer
def _merge_qkv(self):
qvkw = DeepSpeedSelfAttention._qkv_buffers[0]
qvkw[:self.hidden_size_per_partition, :] = self.attn_qw # type: ignore
qvkw[self.hidden_size_per_partition:2 * self.hidden_size_per_partition, :] = self.attn_kw # type: ignore
qvkw[2 * self.hidden_size_per_partition:, :] = self.attn_vw # type: ignore
if self.attn_qb is not None:
qvkb = DeepSpeedSelfAttention._qkv_buffers[1]
qvkb[:self.hidden_size_per_partition] = self.attn_qb
qvkb[self.hidden_size_per_partition:2 * self.hidden_size_per_partition] = self.attn_kb # type: ignore
qvkb[2 * self.hidden_size_per_partition:] = self.attn_vb # type: ignore
return DeepSpeedSelfAttention._qkv_buffers
def forward(self,
input,
input_mask,
head_mask=None,
layer_past=None,
get_present=False,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
norm_w=None,
norm_b=None,
alibi=None):
if self.attn_qkvw is None:
self._attn_qkvw, self._attn_qkvb = self._merge_qkv()
else:
self._attn_qkvw = self.attn_qkvw
self._attn_qkvb = self.attn_qkvb
if not self.config.pre_layer_norm:
qkv_out = self.linear_func(input=input,
weight=self._attn_qkvw,
bias=self._attn_qkvb,
add_bias=self.attn_qkvb is not None,
do_flash_attn=False,
num_heads=self.num_attention_heads_per_partition,
num_layers=DeepSpeedSelfAttention.num_layers)
else:
qkv_out = self.qkv_func(input=input,
weight=self._attn_qkvw,
bias=self._attn_qkvb,
gamma=norm_w,
beta=norm_b)
context_layer, key_layer, value_layer = self.compute_attention(qkv_out=qkv_out,
input_mask=input_mask,
layer_past=layer_past,
alibi=alibi)
output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow)
inp_norm = qkv_out[-1]
if self.config.mlp_after_attn and self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1:
dist.all_reduce(output, group=self.mp_group)
return (output, key_layer, value_layer, context_layer, inp_norm)
class BloomSelfAttention(DeepSpeedSelfAttention):
def __init__(self, *args, **kwargs):
super(BloomSelfAttention, self).__init__(*args, **kwargs)
self.softmax_func = SoftmaxOp(self.config)
########### This part is taken/modified form the HF modeling_bloom.py ################
# Reference: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py
def _transpose_for_context(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_layer_shape = x.size()[:-2] + \
(self.hidden_size_per_partition,)
return x.view(*new_x_layer_shape).contiguous()
def _split_tensor_along_last_dim(self, tensor, num_partitions, contiguous_split_chunks=True):
"""Split a tensor along its last dimension.
Args:
tensor: ([`torch.tensor`], *required*):
input tensor to split
num_partitions ([`int`], *required*):
number of partitions to split the tensor
            contiguous_split_chunks ([`bool`], *optional*, default=`True`):
                If True, make each chunk contiguous in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
numerator, denominator = tensor.size()[last_dim], num_partitions
if not (numerator % denominator == 0):
raise ValueError(f"{numerator} is not divisible by {denominator}")
last_dim_size = numerator // denominator
# Split.
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
def compute_attention(self, qkv_out, input_mask, layer_past, alibi):
if isinstance(qkv_out, list) or isinstance(qkv_out, tuple):
qkv_out = qkv_out[0]
no_masking = input_mask is None
if no_masking:
input_mask = torch.empty(1)
mixed_x_layer = qkv_out
alibi = alibi.to(get_accelerator().current_device_name())
head_dim = self.hidden_size_per_partition // self.num_attention_heads_per_partition
new_tensor_shape = mixed_x_layer.size()[:-1] + (self.num_attention_heads_per_partition, 3 * head_dim)
mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
query_layer, key_layer, value_layer = self._split_tensor_along_last_dim(mixed_x_layer, 3)
# [batch_size, head_dim, q_length, k_length]
output_size = (query_layer.size(0), query_layer.size(2), query_layer.size(1), key_layer.size(1))
# [batch_size, q_length, num_heads, head_dim] -> [q_length, batch_size * num_heads, head_dim]
query_layer = query_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[2], -1)
# [batch_size, k_length, num_heads, head_dim] -> [k_length, batch_size * num_heads, head_dim]
key_layer = key_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[3],
-1).transpose(-1, -2)
value_layer = value_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[3], -1)
if layer_past is not None:
past_key, past_value = layer_past
# concatenate along seq_length dimension -> [batch_size, qk_length, num_heads, head_dim]
key_layer = torch.cat((past_key.type_as(key_layer), key_layer), dim=-1)
value_layer = torch.cat((past_value.type_as(value_layer), value_layer), dim=-2)
presents = (key_layer, value_layer)
# Raw attention scores. [batch_size * num_heads, q_length, k_length]
matmul_result = torch.matmul(query_layer, key_layer)
# change view to [batch_size, num_heads, q_length, k_length]
attention_scores = matmul_result.view(output_size[0], output_size[1], output_size[2], -1)
offset = dist.get_rank() * self.num_attention_heads_per_partition if dist.is_initialized() else 0
target_dtype = torch.float16 if self.config.dtype == torch.int8 else self.config.dtype
attention_probs = self.softmax_func(attn_scores=attention_scores,
attn_mask=((1 - input_mask).to(target_dtype) * minus_inf),
alibi=alibi,
triangular=(self.config.triangular_masking
and (attention_scores.shape[-2] > 1)),
recompute=False,
local_attention=False,
window_size=1,
async_op=False,
layer_scale=1 / (self.norm_factor * self.norm_factor),
head_offset=offset)
# change view [batch_size x num_heads, q_length, k_length]
attention_probs_reshaped = attention_probs.view(*matmul_result.shape)
# matmul: [batch_size * num_heads, q_length, head_dim]
context_layer = torch.bmm(attention_probs_reshaped, value_layer)
# change view [batch_size, num_heads, q_length, head_dim]
context_layer = context_layer.view(
context_layer.size(0) // self.num_attention_heads_per_partition, self.num_attention_heads_per_partition,
context_layer.size(1), context_layer.shape[-1])
context_layer = self._transpose_for_context(context_layer)
key_layer = presents[0]
value_layer = presents[1]
return context_layer, key_layer, value_layer
###################### End of HF modeling_bloom addition ########################
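# Minimal sketch of the per-head QKV split that _split_tensor_along_last_dim performs
# above, using plain torch (illustrative shapes; no DeepSpeed kernels required):
if __name__ == "__main__":
    _mixed = torch.randn(2, 8, 3 * 64)  # [batch, seq, 3 * head_dim]
    _q, _k, _v = (c.contiguous() for c in torch.split(_mixed, 64, dim=-1))
    assert _q.shape == _k.shape == _v.shape == (2, 8, 64)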
| 13,853 | 48.478571 | 121 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/moe_inference.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import json
import math
import torch
from torch.autograd import Function
# accelerator modules will be imported if needed
inference_module = None
specialized_mode = None
import torch.nn as nn
from .ds_attention import DeepSpeedSelfAttention
from .config import DeepSpeedInferenceConfig
from ....moe.sharded_moe import TopKGate
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
class DeepSpeedMoEInferenceConfig(DeepSpeedInferenceConfig):
"""Initialize the DeepSpeed Transformer Config.
Arguments:
hidden_size: The hidden size of the transformer layer
intermediate_size: The intermediate size of the feed-forward part of transformer layer
heads: The number of heads in the self-attention of the transformer layer
num_hidden_layers: The number of transformer layers
layer_norm_eps: The epsilon value for the layer norm
            local_rank: Optional: The rank of the GPU running the transformer kernel. It is not
                required if the model has already set the current device; otherwise it needs to be
                set so that the transformer kernel can work on the right device
mp_size (optional): This argument is mainly used to create the parameters on the kernel side
using model-parallel architecture. If the client model already takes care of this, there is no
need to pass this argument.
fp16: Enable half-precision computation
bf16: Enable bf16 floating point computation
pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture
            stochastic_mode: Enable for high performance; note that this flag has some level of
                non-determinism and can produce different results on different runs. However, we have seen
                that enabling it does not affect pretraining tasks such as BERT, which can still reach
                a high accuracy level. For downstream tasks such as fine-tuning, we recommend
                turning it off in order to reproduce the same result through the regular kernel execution.
scale_attention: If true, both q and k are scaled by 1/sqrt(attention_heads) before attention computation.
return_tuple: if True, returns the transformer output as a tuple, otherwise returns as a tensor
"""
def __init__(self,
hidden_size=-1,
intermediate_size=-1,
heads=-1,
num_hidden_layers=-1,
layer_norm_eps=1e-12,
local_rank=-1,
mp_size=1,
fp16=False,
bf16=False,
q_int8=False,
pre_layer_norm=True,
stochastic_mode=False,
scale_attention=True,
triangular_masking=True,
local_attention=False,
window_size=256,
return_tuple=True,
moe_experts=1,
global_experts=1,
k=1,
capacity_factor=1.,
eval_capacity_factor=1.,
min_capacity=1,
noisy_gate_policy=None,
drop_tokens=True,
use_rts=False,
mlp_type='standard',
scale_attn_by_inverse_layer_idx=False):
super(DeepSpeedMoEInferenceConfig,
self).__init__(hidden_size, (intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads,
num_hidden_layers, layer_norm_eps, local_rank, mp_size, fp16, bf16, q_int8,
pre_layer_norm, stochastic_mode, scale_attention, triangular_masking, local_attention,
window_size, return_tuple)
self.moe_experts = moe_experts
self.k = k
self.capacity_factor = capacity_factor
self.eval_capacity_factor = eval_capacity_factor
self.min_capacity = min_capacity
self.noisy_gate_policy = noisy_gate_policy
self.drop_tokens = drop_tokens
self.use_rts = use_rts
self.global_experts = global_experts
self.mlp_type = mlp_type
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
@classmethod
def from_dict(cls, json_object):
        config = DeepSpeedMoEInferenceConfig()
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
class DeepSpeedMLPFunction(Function):
@staticmethod
def forward(ctx, input, inter_w, inter_b, config, output_b, output_w, q_scales, q_groups, merge_count, mp_group,
async_op):
if config.q_int8:
intermediate = inference_module.fused_gemm_gelu_int8(input, inter_w, inter_b, config.epsilon, q_scales[2],
(q_groups * (2**merge_count)), config.pre_layer_norm)
output = inference_module.vector_matmul_int8(intermediate, output_w, q_scales[3], q_groups, (merge_count))
else:
mlp_gemm_func = inference_module.fused_gemm_gelu_fp16 if config.fp16 else \
inference_module.fused_gemm_gelu_fp32
output = mlp_gemm_func(input, inter_w, inter_b, output_w, config.epsilon, config.pre_layer_norm, async_op)
if mp_group is not None and dist.get_world_size(group=mp_group) > 1:
dist.all_reduce(output, group=mp_group, async_op=async_op)
return output + output_b
@staticmethod
def backward(ctx, grad_output):
raise RuntimeError('You are running with DeepSpeed Inference mode. \
Please switch to Training mode for running backward!')
class DeepSpeedMoEMLP(nn.Module):
def __init__(self, config, q_scales=None, q_groups=1, merge_count=1, mlp_extra_grouping=False, mp_group=None):
super(DeepSpeedMoEMLP, self).__init__()
self.config = config
self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size))
interm_size = self.config.intermediate_size // (1 if mp_group is None else dist.get_world_size(group=mp_group))
self.inter_w = nn.Parameter(torch.Tensor(self.config.hidden_size, interm_size))
self.inter_b = nn.Parameter(torch.Tensor(interm_size))
self.output_w = nn.Parameter(torch.Tensor((interm_size), self.config.hidden_size))
self.output_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
# used for quantization
self.q_scales = q_scales
self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups
self.merge_count = int(math.log2(merge_count))
self.mp_group = mp_group
def forward(self, input, async_op=False):
return DeepSpeedMLPFunction.apply(input, self.inter_w, self.inter_b, self.config, self.output_b, self.output_w,
self.q_scales, self.q_groups, self.merge_count, self.mp_group, async_op)
class DeepSpeedMoEInference(nn.Module):
"""Initialize the DeepSpeed MoE Transformer Layer.
Arguments:
layer_id: The layer index starting from 0, e.g. if model has 24 transformer layers,
layer_id will be 0,1,2...23 when each layer object is instantiated
config: An object of DeepSpeedInferenceConfig
mp_group: Model parallelism group initialized on the modeling side.
quantize_scales: This argument groups all the layers' scales used for quantization
quantize_groups: Number of groups used for quantizing the model
merge_count: Shows the number of model-parallel checkpoints merged before running inference.
We use this argument to control the quantization scale for the model parameters if a bigger
quantize-grouping than 1 is used.
mlp_extra_grouping: This flag is used to show a 2x higher number of groups used for the MLP part
of a Transformer layer. We use this feature for quantization to reduce the convergence impact
for specific downstream tasks.
"""
layer_id = 0
def __init__(self,
config,
mp_group=None,
ep_group=None,
expert_mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super(DeepSpeedMoEInference, self).__init__()
self.config = config
self.config.layer_id = DeepSpeedMoEInference.layer_id
global inference_module
global specialized_mode
if inference_module is None:
specialized_mode = False
            # InferenceSpecializedBuilder is not among the DeepSpeed-provided builders yet, so we look it up by builder name string
builder = get_accelerator().create_op_builder("InferenceSpecializedBuilder")
if builder is not None and builder.is_compatible():
inference_module = builder.load()
specialized_mode = True
else:
inference_module = InferenceBuilder().load()
self.config.specialized_mode = specialized_mode
assert self.config.dtype != torch.bfloat16, "DeepSpeed MoE Transformer Inference not yet tested for bfloat support"
DeepSpeedMoEInference.layer_id += 1
self.attention = DeepSpeedSelfAttention(self.config, mp_group, quantize_scales, quantize_groups, merge_count)
self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.norm_w = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.norm_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
if config.mlp_type == 'residual':
self.res_mlp = DeepSpeedMoEMLP(config, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping,
mp_group)
self.res_coef = nn.Parameter(torch.Tensor(self.config.hidden_size, 2))
self.coef_func = inference_module.softmax_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
inference_module.softmax_fp32
self.vector_matmul_func = inference_module.vector_matmul_fp16 if self.config.dtype == torch.float16 else \
inference_module.vector_matmul_fp32
config.mp_size = 1
self.mlp = nn.ModuleList(
DeepSpeedMoEMLP(config, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping, expert_mp_group)
for i in range(self.config.moe_experts))
self.moe_gate = TopKGate(self.config.hidden_size, self.config.global_experts, self.config.k,
self.config.capacity_factor, self.config.eval_capacity_factor,
self.config.min_capacity, self.config.noisy_gate_policy, self.config.drop_tokens,
self.config.use_rts)
self.ep_group = ep_group
self.mp_group = mp_group
self.expert_mp_group = expert_mp_group
print("DeepSpeed MoE Transformer Inference config is ", self.config.__dict__)
self.bias_residual_func = inference_module.bias_residual_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
inference_module.bias_residual_fp32
self.ds_layernorm = inference_module.layer_norm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
inference_module.layer_norm_fp32
self.einsum_sec_sm_ecm = inference_module.einsum_sec_sm_ecm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
inference_module.einsum_sec_sm_ecm_fp32
def res_coef_func(self, inp, async_op):
inp = self.vector_matmul_func(inp, self.res_coef, async_op)
return self.coef_func(inp, torch.empty(1), False, False, False, 256, async_op)
def moe_gate_einsum(self, attention_output):
_, combined_weights, dispatch_mask, _ = self.moe_gate(
attention_output.view(-1, self.config.hidden_size),
None,
)
dispatched_attention = self.einsum_sec_sm_ecm(dispatch_mask.type_as(attention_output),
attention_output.view(-1, self.config.hidden_size))
return dispatched_attention, combined_weights
def expert_exec(self, dispatched_input):
dispatched_input = dispatched_input.reshape(self.config.global_experts // self.config.moe_experts,
self.config.moe_experts, -1, self.config.hidden_size)
chunks = dispatched_input.chunk(self.config.moe_experts, dim=1)
expert_outputs = torch.empty((
self.config.moe_experts,
chunks[0].shape[0],
) + chunks[0].shape[2:],
dtype=dispatched_input.dtype,
device=dispatched_input.device)
for chunk, expert in zip(chunks, range(len(self.mlp))):
expert_outputs[expert] = self.mlp[expert](chunk.view(-1, dispatched_input.shape[-2],
dispatched_input.shape[-1]))
return expert_outputs
def _alltoall(self, dispatched_attention):
if dist.get_world_size(group=self.ep_group) > 1:
dispatched_input = torch.empty_like(dispatched_attention)
dist.all_to_all_single(dispatched_input, dispatched_attention, group=self.ep_group)
return dispatched_input
else:
return dispatched_attention
def scale_expert_output(self, attention_output, expert_output, combined_weights):
combined_output = torch.matmul(
combined_weights.type_as(attention_output).reshape(combined_weights.shape[0], -1),
expert_output.reshape(-1, expert_output.shape[-1]))
return combined_output.reshape(attention_output.shape)
def forward(self,
input,
input_mask=None,
attention_mask=None,
head_mask=None,
layer_past=None,
get_key_value=False,
get_present=False,
encoder_output=None,
enc_dec_attn_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False):
get_present = (get_present or get_key_value or use_cache)
input_mask = input_mask if attention_mask is None else attention_mask
input_type = input.dtype
if (self.config.dtype in [torch.float16, torch.int8]) and input_type == torch.float:
input = input.half()
with torch.no_grad():
attention_output = self.attention(input, input_mask, head_mask, layer_past, get_present,
encoder_hidden_states, encoder_attention_mask, output_attentions,
self.norm_w, self.norm_b)
if get_present:
attention_output, p_key, p_value = attention_output[0:3]
presents = (p_key, p_value)
elif output_attentions:
attention_output, _, _, context_output = attention_output[0:4]
else:
attention_output = attention_output[0]
residual_add = attention_output + self.attention.attn_ob
attention_output = self.ds_layernorm(residual_add, self.attn_nw, self.attn_nb, self.config.epsilon)
if self.config.mlp_type == 'residual':
res_mlp_out = self.res_mlp(attention_output, async_op=True)
res_coef_out = self.res_coef_func(attention_output, async_op=True)
if self.expert_mp_group is not None:
tensor_list = [
torch.empty_like(attention_output) for _ in range(dist.get_world_size(group=self.expert_mp_group))
]
tensor_list[dist.get_rank(group=self.expert_mp_group)] = attention_output
dist.all_gather(tensor_list, attention_output, group=self.expert_mp_group)
attention_output = torch.cat(tensor_list).contiguous()
############## MoE Gating + Experts ###############
dispatched_attention, combined_weights = self.moe_gate_einsum(attention_output)
dispatched_input = self._alltoall(dispatched_attention)
expert_outputs = self.expert_exec(dispatched_input)
expert_output = self._alltoall(expert_outputs)
output = self.scale_expert_output(attention_output, expert_output, combined_weights)
################################################
if self.expert_mp_group is not None:
output = output.split(output.shape[0] // dist.get_world_size(group=self.expert_mp_group),
dim=0)[dist.get_rank(group=self.expert_mp_group)]
if self.config.mlp_type == 'residual':
inference_module.moe_res_matmul(res_mlp_out, res_coef_out, output)
output = self.bias_residual_func(output, residual_add, torch.empty(1))
if not self.config.pre_layer_norm:
output = self.ds_layernorm(output, self.norm_w, self.norm_b, self.config.epsilon)
if input_type != output.dtype:
output = output.to(input_type)
if get_present:
output = (output, presents)
if self.config.return_tuple:
return output if type(output) is tuple else (output, )
else:
return output
| 18,458 | 49.434426 | 131 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/config.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import json
import torch
from deepspeed.utils.types import ActivationFuncType, NormType
class TransformerConfig():
def __init__(self, hidden_size, intermediate_size, heads, num_hidden_layers):
self.layer_id = -1
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.heads = heads
self.num_hidden_layers = num_hidden_layers
class DeepSpeedInferenceConfig(TransformerConfig):
"""Initialize the DeepSpeed Transformer Config.
Arguments:
hidden_size: The hidden size of the transformer layer
intermediate_size: The intermediate size of the feed-forward part of transformer layer
heads: The number of heads in the self-attention of the transformer layer
num_hidden_layers: The number of transformer layers
layer_norm_eps: The epsilon value for the layer norm
            local_rank: Optional: The rank of the GPU running the transformer kernel. It is not required
                if the model has already set the current device; otherwise, it needs to be set so that
                the transformer kernel can work on the right device.
mp_size (optional): This argument is mainly used to create the parameters on the kernel side
using model-parallel architecture. If the client model already takes care of this, there is no
need to pass this argument.
fp16: Enable half-precision computation
bf16: Enable bf16 floating point computation
pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture
            stochastic_mode: Enable for higher performance. Please note that this flag has some level of
                non-determinism and can produce different results on different runs. However, we have seen
                that enabling it does not affect pretraining tasks such as BERT, which can still reach a
                high accuracy level. For downstream tasks such as fine-tuning, we recommend turning it off
                so that the same result can be reproduced through the regular kernel execution.
scale_attention: If true, both q and k are scaled by 1/sqrt(attention_heads) before attention computation.
            return_tuple: If True, returns the transformer output as a tuple; otherwise, returns it as a tensor.
            bigscience_bloom: This flag is added temporarily for supporting the BLOOM-176B model architecture.
            use_triton: This flag enables the use of Triton kernels during inference.
"""
def __init__(self,
hidden_size=-1,
intermediate_size=-1,
heads=-1,
num_hidden_layers=-1,
layer_norm_eps=1e-12,
local_rank=-1,
mp_size=1,
dtype=torch.float16,
pre_layer_norm=True,
norm_type=NormType.LayerNorm,
stochastic_mode=False,
scale_attention=True,
triangular_masking=True,
local_attention=False,
window_size=256,
rotary_dim=-1,
rotate_half=False,
rotate_every_two=True,
return_tuple=True,
mlp_after_attn=True,
mlp_act_func_type=ActivationFuncType.GELU,
training_mp_size=1,
bigscience_bloom=False,
max_out_tokens=1024,
min_out_tokens=1,
enable_qkv_quantization=False,
use_mup=False,
scale_attn_by_inverse_layer_idx=False,
return_single_tuple=False,
set_empty_params=False,
transposed_mode=False,
use_triton=False,
triton_autotune=False):
super(DeepSpeedInferenceConfig,
self).__init__(hidden_size, (intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads,
num_hidden_layers)
self.dtype = dtype
self.pre_layer_norm = pre_layer_norm
self.norm_type = norm_type
self.local_rank = local_rank
self.stochastic_mode = stochastic_mode
self.epsilon = layer_norm_eps
self.mp_size = mp_size
self.scale_attention = scale_attention
self.triangular_masking = triangular_masking
self.local_attention = local_attention
self.window_size = window_size
self.rotary_dim = rotary_dim
self.rotate_half = rotate_half
self.rotate_every_two = rotate_every_two
self.return_tuple = return_tuple
self.mlp_after_attn = mlp_after_attn
self.mlp_act_func_type = mlp_act_func_type
self.specialized_mode = False
self.training_mp_size = training_mp_size
self.bigscience_bloom = bigscience_bloom
self.max_out_tokens = max_out_tokens
self.min_out_tokens = min_out_tokens
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
self.enable_qkv_quantization = enable_qkv_quantization
self.use_mup = use_mup
self.return_single_tuple = return_single_tuple
self.set_empty_params = set_empty_params
self.transposed_mode = transposed_mode
self.use_triton = use_triton
self.triton_autotune = triton_autotune
@classmethod
def from_dict(cls, json_object):
config = DeepSpeedInferenceConfig()
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
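# A small sketch of loading the config from JSON (the file name and keys are illustrative; from_dict
# copies any keys present in the JSON verbatim onto the config object):
#
#   # config.json: {"hidden_size": 1024, "heads": 16, "num_hidden_layers": 24}
#   config = DeepSpeedInferenceConfig.from_json_file("config.json")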
| 5,967 | 44.907692 | 118 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/ds_mlp.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
import torch
import torch.nn as nn
from deepspeed import comm as dist
from deepspeed.utils.types import GATED_ACTIVATION_TYPES
from deepspeed.accelerator import get_accelerator
from .op_binding import MLPGemmOp, VectorMatMulOp, GELUGemmOp, ResidualAddOp
class DeepSpeedMLP(nn.Module):
_inter_w_buffers = []
def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1, mlp_extra_grouping=False):
super(DeepSpeedMLP, self).__init__()
self.config = config
data_type = torch.half if self.config.dtype == torch.int8 else self.config.dtype
data_type_fp = data_type
device = get_accelerator().current_device_name()
proj_factor = 2 if self.config.mlp_act_func_type in GATED_ACTIVATION_TYPES else 1
self.config.intermediate_size = self.config.intermediate_size if self.config.intermediate_size > 0 else 4 * self.config.hidden_size
self.intm_w_sz_per_partition = self.config.intermediate_size * proj_factor // self.config.mp_size
self.intm_o_sz_per_partition = self.config.intermediate_size // self.config.mp_size
if self.config.set_empty_params:
self.attn_nw = None
self.attn_nb = None
self.inter_w = None
self.inter_b = None
self.inter_up_w = None
self.inter_up_b = None
self.inter_gate_w = None
self.inter_gate_b = None
self.output_w = None
self.output_b = None
else:
self.attn_nw = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.attn_nb = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.inter_w = nn.Parameter(torch.empty(self.config.hidden_size,
self.intm_w_sz_per_partition,
dtype=data_type,
device=device),
requires_grad=False)
self.inter_b = nn.Parameter(torch.empty(self.intm_w_sz_per_partition, dtype=data_type_fp, device=device),
requires_grad=False)
self.output_w = nn.Parameter(torch.empty(self.intm_o_sz_per_partition,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.output_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
# used for quantization
self.q_scales = q_scales
self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups
self.merge_count = int(math.log2(merge_count))
self.mp_group = mp_group
self.mlp_gemm_func = MLPGemmOp(config)
self.vector_matmul_func = VectorMatMulOp(config)
self.fused_gemm_gelu = GELUGemmOp(config)
self.residual_add_func = ResidualAddOp(config)
if len(DeepSpeedMLP._inter_w_buffers) == 0:
DeepSpeedMLP._inter_w_buffers = [
torch.empty(self.config.hidden_size, self.intm_w_sz_per_partition, dtype=data_type, device=device),
torch.empty(self.intm_w_sz_per_partition, dtype=data_type_fp, device=device)
]
def _merge_inter_w(self):
inter_w = DeepSpeedMLP._inter_w_buffers[0]
inter_w[:self.intm_w_sz_per_partition, :] = self.inter_up_w # type: ignore
inter_w[self.intm_w_sz_per_partition:, :] = self.inter_gate_w # type: ignore
if self.inter_up_b is not None:
inter_b = DeepSpeedMLP._inter_w_buffers[1]
inter_b[:self.intm_w_sz_per_partition] = self.inter_up_b # type: ignore
inter_b[self.intm_w_sz_per_partition:] = self.inter_gate_b # type: ignore
return DeepSpeedMLP._inter_w_buffers
def forward(self, input, residual, residual_norm, bias):
if self.inter_w is None:
self._inter_w, self._inter_b = self._merge_inter_w()
else:
self._inter_w = self.inter_w
self._inter_b = self.inter_b
residual_add = None
if self.attn_nw is None:
output = self.fused_gemm_gelu(input=residual_norm,
weight=self.inter_w,
bias=self.inter_b,
weight_out=self.output_w)
else:
output, residual_add = self.mlp_gemm_func(input=input,
residual=residual,
weight_interm=self.inter_w,
weight_out=self.output_w,
input_bias=bias,
bias=self.inter_b,
gamma=self.attn_nw,
beta=self.attn_nb)
residual = self.residual_add_func(hidden_state=output,
residual=residual,
add_bias=bias is not None,
attention_output=input,
attention_bias=bias if bias is not None else self.output_b,
final_bias=self.output_b,
residual_add=residual_add)
if self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1:
dist.all_reduce(residual, group=self.mp_group)
return residual
| 6,212 | 48.309524 | 139 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/diffusers_transformer_block.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import torch.nn as nn
from deepspeed import module_inject
from .diffusers_attention import DeepSpeedDiffusersAttention
from .bias_add import nhwc_bias_add
from .diffusers_2d_transformer import Diffusers2DTransformerConfig
from deepspeed.ops.op_builder import InferenceBuilder, SpatialInferenceBuilder
from deepspeed.utils.types import ActivationFuncType
# Ops will be loaded on demand
transformer_cuda_module = None
spatial_cuda_module = None
def load_transformer_module():
global transformer_cuda_module
if transformer_cuda_module is None:
transformer_cuda_module = InferenceBuilder().load()
return transformer_cuda_module
def load_spatial_module():
global spatial_cuda_module
if spatial_cuda_module is None:
spatial_cuda_module = SpatialInferenceBuilder().load()
return spatial_cuda_module
class DeepSpeedDiffusersTransformerBlock(nn.Module):
def __init__(self, equivalent_module: nn.Module, config: Diffusers2DTransformerConfig):
super(DeepSpeedDiffusersTransformerBlock, self).__init__()
self.quantizer = module_inject.GroupQuantizer(q_int8=config.int8_quantization)
# Ensure ops are built by the time we start running
self.config = config
self.ff1_w = self.quantizer.quantize(
nn.Parameter(equivalent_module.ff.net[0].proj.weight.data, requires_grad=False))
self.ff1_b = nn.Parameter(equivalent_module.ff.net[0].proj.bias.data, requires_grad=False)
self.ff2_w = self.quantizer.quantize(nn.Parameter(equivalent_module.ff.net[2].weight.data,
requires_grad=False))
self.ff2_b = nn.Parameter(equivalent_module.ff.net[2].bias.data, requires_grad=False)
self.norm1_g = nn.Parameter(equivalent_module.norm1.weight.data, requires_grad=False)
self.norm1_b = nn.Parameter(equivalent_module.norm1.bias.data, requires_grad=False)
self.norm1_eps = equivalent_module.norm1.eps
self.norm2_g = nn.Parameter(equivalent_module.norm2.weight.data, requires_grad=False)
self.norm2_b = nn.Parameter(equivalent_module.norm2.bias.data, requires_grad=False)
self.norm2_eps = equivalent_module.norm2.eps
self.norm3_g = nn.Parameter(equivalent_module.norm3.weight.data, requires_grad=False)
self.norm3_b = nn.Parameter(equivalent_module.norm3.bias.data, requires_grad=False)
self.norm3_eps = equivalent_module.norm3.eps
self.attn_1 = equivalent_module.attn1
self.attn_2 = equivalent_module.attn2
# Pull the bias in if we can
if isinstance(self.attn_1, DeepSpeedDiffusersAttention):
self.attn_1.do_out_bias = False
self.attn_1_bias = self.attn_1.attn_ob
else:
self.attn_1_bias = nn.Parameter(torch.zeros_like(self.norm2_g), requires_grad=False)
# Pull the bias in if we can
if isinstance(self.attn_2, DeepSpeedDiffusersAttention):
self.attn_2.do_out_bias = False
self.attn_2_bias = self.attn_2.attn_ob
else:
            self.attn_2_bias = nn.Parameter(torch.zeros_like(self.norm3_g), requires_grad=False)
self.transformer_cuda_module = load_transformer_module()
load_spatial_module()
def forward(self, hidden_states, context=None, timestep=None, **kwargs):
        # In v0.12.0 of diffusers, several new kwargs were added. Capturing
# those with kwargs to maintain backward compatibility
# In v0.11.0 of diffusers, the kwarg was changed from 'context' to 'encoder_hidden_states'
# This is so we can support older and newer versions of diffusers
if "encoder_hidden_states" in kwargs and kwargs["encoder_hidden_states"] is not None:
context = kwargs["encoder_hidden_states"]
out_norm_1 = self.transformer_cuda_module.layer_norm(hidden_states, self.norm1_g, self.norm1_b, self.norm1_eps)
out_attn_1 = self.attn_1(out_norm_1)
out_norm_2, out_attn_1 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res(
out_attn_1, self.attn_1_bias, hidden_states, self.norm2_g, self.norm2_b, self.norm2_eps)
out_attn_2 = self.attn_2(out_norm_2, context=context)
out_norm_3, out_attn_2 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res(
out_attn_2, self.attn_2_bias, out_attn_1, self.norm3_g, self.norm3_b, self.norm3_eps)
out_ff1 = nn.functional.linear(out_norm_3, self.ff1_w)
out_geglu = self.transformer_cuda_module.gated_activation(out_ff1, self.ff1_b, ActivationFuncType.GATED_GELU)
out_ff2 = nn.functional.linear(out_geglu, self.ff2_w)
return nhwc_bias_add(out_ff2, self.ff2_b, other=out_attn_2)
| 4,857 | 45.266667 | 119 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .config import DeepSpeedInferenceConfig
from ....model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
from .moe_inference import DeepSpeedMoEInferenceConfig, DeepSpeedMoEInference
| 315 | 34.111111 | 95 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/diffusers_attention.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
import torch
from torch.autograd import Function
import torch.nn as nn
from packaging import version as pkg_version
from deepspeed.utils.logging import log_dist
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
# Cuda modules will be imported if needed
inference_module = None
minus_inf = -10000.0
triton_flash_attn = None
def load_triton_flash_attn():
global triton_flash_attn
try:
import triton
except ImportError:
raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`")
if pkg_version.parse(triton.__version__) < pkg_version.parse("2.0"):
raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`")
from .triton_ops import triton_flash_attn
class DeepSpeedDiffusersAttentionFunction(Function):
@staticmethod
def forward(ctx, input, context, input_mask, config, attn_qkvw, attn_qw, attn_kw, attn_vw, attn_qkvb,
num_attention_heads_per_partition, norm_factor, hidden_size_per_partition, attn_ow, attn_ob,
do_out_bias, score_context_func, linear_func, triton_flash_attn_kernel):
def _transpose_for_context(x):
x = x.permute(0, 2, 1, 3)
new_x_layer_shape = x.size()[:-2] + \
(hidden_size_per_partition,)
return x.reshape(*new_x_layer_shape)
def _transpose_for_scores(x):
attention_head_size = x.shape[-1] // num_attention_heads_per_partition
new_x_shape = x.size()[:-1] + (num_attention_heads_per_partition, attention_head_size)
x = x.reshape(*new_x_shape)
x = x.permute(0, 2, 1, 3)
return x.contiguous()
def selfAttention_fp(input, context, input_mask):
if config.fp16 and input.dtype == torch.float32:
input = input.half()
head_size = input.shape[-1] // config.heads
do_flash_attn = (head_size <= 128)
scale = (1 / norm_factor) * (1 / norm_factor)
if do_flash_attn and context is None:
qkv_out = linear_func(input, attn_qkvw, attn_qkvb if attn_qkvb is not None else attn_qkvw, attn_qkvb
is not None, do_flash_attn, config.heads, False)
context_layer = triton_flash_attn_kernel(qkv_out[0], qkv_out[1], qkv_out[2], scale,
input.shape[-2] % 128 == 0)
context_layer = _transpose_for_context(context_layer[:, :, :, :head_size])
else:
do_flash_attn = False
if context is not None:
query = torch.matmul(input, attn_qw)
key = torch.matmul(context, attn_kw)
value = torch.matmul(context, attn_vw)
else:
qkv = torch.matmul(input, attn_qkvw)
query, key, value = qkv.chunk(3, dim=-1)
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
query, key, value = inference_module.pad_transform_fp16(query, key, value, config.heads, do_flash_attn)
attention_scores = (torch.matmul(query, key.transpose(-1, -2)) * scale).softmax(dim=-1)
context_layer = _transpose_for_context(torch.matmul(attention_scores, value))
output = linear_func(context_layer, attn_ow, attn_ob, do_out_bias, False, config.heads, False)
return output
output = selfAttention_fp(input, context, input_mask)
return output
@staticmethod
def backward(ctx, grad_output, grad_output1, grad_output2, grad_output3):
raise RuntimeError('You are running with DeepSpeed Inference mode. \
Please switch to Training mode for running backward!')
class DeepSpeedDiffusersAttention(nn.Module):
"""Initialize the DeepSpeed Transformer Layer.
Arguments:
layer_id: The layer index starting from 0, e.g. if model has 24 transformer layers,
layer_id will be 0,1,2...23 when each layer object is instantiated
config: An object of DeepSpeedInferenceConfig
"""
layer_id = 0
def __init__(
self,
config,
):
super(DeepSpeedDiffusersAttention, self).__init__()
self.config = config
self.config.layer_id = DeepSpeedDiffusersAttention.layer_id
DeepSpeedDiffusersAttention.layer_id += 1
device = get_accelerator().current_device_name() if config.bigscience_bloom else 'cpu'
qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3
data_type = self.config.dtype
data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype
global inference_module
if inference_module is None:
builder = InferenceBuilder()
inference_module = builder.load()
if DeepSpeedDiffusersAttention.layer_id == 1:
log_dist(f"DeepSpeed-Attention config: {self.config.__dict__}", [0])
self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size,
qkv_size_per_partition,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_kw = nn.Parameter(torch.empty(self.config.hidden_size,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_vw = nn.Parameter(torch.empty(self.config.hidden_size,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_qw = nn.Parameter(torch.empty(self.config.hidden_size,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, dtype=data_type_fp, device=device),
requires_grad=False)
out_size_per_partition = self.config.hidden_size // self.config.mp_size
self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.do_out_bias = True
if triton_flash_attn is None:
load_triton_flash_attn()
self.triton_flash_attn_kernel = triton_flash_attn()
self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size
self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size
self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads
self.norm_factor = math.sqrt(math.sqrt(self.config.hidden_size // self.config.heads))
if self.config.scale_attn_by_inverse_layer_idx is True:
self.norm_factor *= math.sqrt(self.config.layer_id + 1)
# https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191
if self.config.dtype in [torch.float16, torch.int8]:
self.score_context_func = inference_module.softmax_context_fp16
self.linear_func = inference_module.linear_layer_fp16
self.allocate_workspace = inference_module.allocate_workspace_fp16
else:
self.score_context_func = inference_module.softmax_context_fp32
self.linear_func = inference_module.linear_layer_fp32
self.allocate_workspace = inference_module.allocate_workspace_fp32
def forward(self, input, context=None, input_mask=None):
if self.config.layer_id == 0:
self.allocate_workspace(self.config.hidden_size, self.config.heads,
input.size()[1],
input.size()[0], DeepSpeedDiffusersAttention.layer_id, self.config.mp_size, False,
0, self.config.max_out_tokens, self.config.min_out_tokens)
output = DeepSpeedDiffusersAttentionFunction.apply(input, context, input_mask, self.config, self.attn_qkvw,
self.attn_qw, self.attn_kw, self.attn_vw, self.attn_qkvb,
self.num_attention_heads_per_partition, self.norm_factor,
self.hidden_size_per_partition, self.attn_ow, self.attn_ob,
self.do_out_bias, self.score_context_func, self.linear_func,
self.triton_flash_attn_kernel)
return output
| 9,830 | 48.903553 | 121 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/base.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..config import DeepSpeedInferenceConfig
from deepspeed.ops.op_builder import InferenceBuilder
class BaseOp(torch.nn.Module):
inference_module = None
def __init__(self, config: DeepSpeedInferenceConfig):
super(BaseOp, self).__init__()
self.config = config
if BaseOp.inference_module is None:
builder = InferenceBuilder()
BaseOp.inference_module = builder.load()
| 536 | 24.571429 | 57 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/softmax.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import torch.nn.functional as F
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
class SoftmaxOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(SoftmaxOp, self).__init__(config)
self.num_attention_heads_per_partition = config.heads // config.mp_size
try:
if self.config.dtype in [torch.float16, torch.int8]:
self.softmax_func = self.inference_module.softmax_fp16
elif self.config.dtype == torch.bfloat16:
self.softmax_func = self.inference_module.softmax_bf16
else:
self.softmax_func = self.inference_module.softmax_fp32
except AttributeError:
self.softmax_func = self.softmax_fallback
def softmax_fallback(self, attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size,
async_op, layer_scale, head_offset, mp_size):
if os.environ.get('DS_KI_FALLBACK') == 'True':
alibi = alibi[head_offset:head_offset + self.num_attention_heads_per_partition]
input_dtype = attn_scores.dtype
if (triangular):
tri = ~torch.tril(torch.ones(attn_scores.size(), device=attn_scores.device)).to(bool)
attn_scores = torch.masked_fill(attn_scores * layer_scale, tri, torch.finfo(input_dtype).min)
if alibi is not None:
attn_scores += alibi
if attn_mask is not None:
                # expand attn_mask from 2 dims into 4 dims, inserting two dims in the middle
attn_mask = attn_mask[:, None, None, :]
attn_scores += attn_mask
output = F.softmax(attn_scores, dim=-1, dtype=torch.float32).to(input_dtype)
return output
else:
raise NotImplementedError
def forward(self, attn_scores: torch.Tensor, attn_mask: torch.Tensor, alibi: torch.Tensor, triangular: bool,
recompute: bool, local_attention: bool, window_size: int, async_op: bool, layer_scale: float,
head_offset: int):
output = self.softmax_func(attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size,
async_op, layer_scale, head_offset, self.config.mp_size)
return output
| 2,460 | 44.574074 | 118 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import torch.nn.functional as F
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
import deepspeed
from deepspeed.utils.types import NormType
class QKVGemmOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(QKVGemmOp, self).__init__(config)
try:
if self.config.norm_type == NormType.LayerNorm:
if self.config.dtype in [torch.float16, torch.int8]:
if deepspeed.HAS_TRITON and self.config.use_triton and self.config.dtype == torch.float16:
from deepspeed.ops.transformer.inference.triton.ops import qkv_gemm_func as _triton_qkv_gemm_func
self.qkv_gemm_func = _triton_qkv_gemm_func
triton_autotune = config.triton_autotune and config.layer_id == 0
if triton_autotune:
__class__._triton_autotune(2, self.config.max_out_tokens, self.config.hidden_size)
else:
self.qkv_gemm_func = self.inference_module.qkv_gemm_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.qkv_gemm_func = self.inference_module.qkv_gemm_bf16
else:
self.qkv_gemm_func = self.inference_module.qkv_gemm_fp32 # type: ignore
elif self.config.norm_type == NormType.RMSNorm:
if self.config.dtype in [torch.float16, torch.int8]:
self.qkv_gemm_func = self.inference_module.rms_qkv_gemm_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.qkv_gemm_func = self.inference_module.rms_qkv_gemm_bf16
else:
self.qkv_gemm_func = self.inference_module.rms_qkv_gemm_fp32 # type: ignore
except AttributeError:
if self.config.norm_type == NormType.LayerNorm:
self.qkv_gemm_func = self.qkv_gemm_fallback
elif self.config.norm_type == NormType.RMSNorm:
self.qkv_gemm_func = self.rms_qkv_gemm_fallback
@staticmethod
def _triton_autotune(min_seqlen, max_seqlen, hidden_size, dtype=torch.float16):
from deepspeed.ops.transformer.inference.triton.matmul_ext import Fp16Matmul, matmul
seqlen = [(min_seqlen + i)
for i in range(0, max_seqlen - min_seqlen + Fp16Matmul._cache_stride + 1, Fp16Matmul._cache_stride)]
Fp16Matmul._read_autotune_table()
for N in seqlen:
A = torch.randn((N, hidden_size), dtype=dtype, device='cuda')
B = torch.randn((hidden_size, 3 * hidden_size), dtype=dtype, device='cuda')
matmul(A, B)
Fp16Matmul._update_autotune_table()
def qkv_gemm_fallback(self, input, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose):
if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose:
inp_norm = F.layer_norm(input, (input.shape[2], ), gamma, beta, eps)
tmp = torch.matmul(inp_norm, weight)
if add_bias:
tmp += bias
output = [tmp, inp_norm]
return output
else:
raise NotImplementedError
def rms_qkv_gemm_fallback(self, input, weight, q_scale, gamma, eps, q_int8, transpose):
raise NotImplementedError
def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, gamma: torch.Tensor,
beta: torch.Tensor):
add_bias = bias is not None
bias = bias if add_bias else torch.empty(1) # type: ignore
q_scale = weight.scale if hasattr(weight, 'scale') else torch.empty(1) # type: ignore
q_int8 = self.config.dtype == torch.int8
if self.config.norm_type == NormType.LayerNorm:
output, norm = self.qkv_gemm_func(input, weight, q_scale, bias, gamma, beta, self.config.epsilon, add_bias,
q_int8, self.config.transposed_mode)
else:
output, norm = self.qkv_gemm_func(input, weight, q_scale, gamma, self.config.epsilon, q_int8,
self.config.transposed_mode)
return output, norm
| 4,369 | 48.101124 | 121 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
import deepspeed
class GELUGemmOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(GELUGemmOp, self).__init__(config)
try:
if self.config.dtype in [torch.float16, torch.int8]:
if deepspeed.HAS_TRITON and self.config.use_triton and self.config.dtype == torch.float16:
from deepspeed.ops.transformer.inference.triton.ops import fused_gemm_gelu as _triton_fused_gemm_gelu
self.fused_gemm_gelu = _triton_fused_gemm_gelu # type: ignore
else:
self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_bf16 # type: ignore
else:
self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_fp32 # type: ignore
except AttributeError:
self.fused_gemm_gelu = self.gelu_gemm_fallback
def gelu_gemm_fallback(self, input, weight, scale, bias, out, out_scale, dtype, transpose):
raise NotImplementedError
def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, weight_out: torch.Tensor):
output = self.fused_gemm_gelu(
input,
weight,
weight.scale if hasattr(weight, 'scale') else torch.empty(1), # type: ignore
bias,
weight_out,
weight_out.scale if hasattr(weight_out, 'scale') else torch.empty(1), # type: ignore
self.config.dtype == torch.int8,
self.config.transposed_mode)
return output
| 1,867 | 39.608696 | 121 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/softmax_context.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed import comm as dist
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
class SoftmaxContextOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(SoftmaxContextOp, self).__init__(config)
try:
if self.config.dtype in [torch.float16, torch.int8]:
self.softmax_context_func = self.inference_module.softmax_context_fp16
elif self.config.dtype == torch.bfloat16:
self.softmax_context_func = self.inference_module.softmax_context_bf16
else:
self.softmax_context_func = self.inference_module.softmax_context_fp32
except AttributeError:
self.softmax_context_func = self.softmax_context_fallback
    def softmax_context_fallback(self, query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads,
norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id,
num_layers, alibi):
raise NotImplementedError
def forward(self, query_key_value: torch.Tensor, attn_mask: torch.Tensor, heads: int, norm_factor: float,
no_masking: bool, layer_id: int, num_layers: int, alibi: torch.Tensor):
if alibi is not None:
batch_heads = query_key_value.shape[0] * heads
offset = dist.get_rank() * batch_heads if dist.is_initialized() else 0
alibi = alibi[offset:batch_heads + offset, :, :]
else:
alibi = torch.empty(1)
output = self.softmax_context_func(query_key_value, attn_mask, self.config.rotary_dim, self.config.rotate_half,
self.config.rotate_every_two, heads, norm_factor,
self.config.triangular_masking, self.config.local_attention,
self.config.window_size, no_masking, layer_id, num_layers, alibi)
return output
| 2,131 | 44.361702 | 119 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/linear.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
import deepspeed
class LinearOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(LinearOp, self).__init__(config)
try:
if self.config.dtype in [torch.float16, torch.int8]:
if deepspeed.HAS_TRITON and self.config.use_triton and self.config.dtype == torch.float16:
from deepspeed.ops.transformer.inference.triton.ops import linear_func as _triton_linear_func
self.linear_func = _triton_linear_func
triton_autotune = config.triton_autotune and config.layer_id == 0
if triton_autotune:
__class__._triton_autotune(2, self.config.max_out_tokens, self.config.hidden_size)
else:
                    self.linear_func = self.inference_module.linear_layer_fp16
elif self.config.dtype == torch.bfloat16:
self.linear_func = self.inference_module.linear_layer_bf16
else:
self.linear_func = self.inference_module.linear_layer_fp32
except AttributeError:
self.linear_func = self.linear_fallback
def linear_fallback(self, input, weight, bias, add_bias, do_flash_attn, num_heads, transpose):
raise NotImplementedError
def forward(self,
input: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor,
add_bias: bool,
do_flash_attn: bool,
num_heads: int,
external_cache: bool = None,
num_layers: int = None):
qkv_out = self.linear_func(input, weight, bias, add_bias, do_flash_attn, num_heads,
self.config.transposed_mode)
return qkv_out
@staticmethod
def _triton_autotune(min_seqlen, max_seqlen, hidden_size, dtype=torch.float16):
from deepspeed.ops.transformer.inference.triton.matmul_ext import Fp16Matmul, matmul
seqlen = [(min_seqlen + i)
for i in range(0, max_seqlen - min_seqlen + Fp16Matmul._cache_stride + 1, Fp16Matmul._cache_stride)]
Fp16Matmul._read_autotune_table()
for N in seqlen:
A = torch.randn((N, hidden_size), dtype=dtype, device='cuda')
B = torch.randn((hidden_size, 3 * hidden_size), dtype=dtype, device='cuda')
matmul(A, B)
Fp16Matmul._update_autotune_table()
| 2,683 | 43 | 118 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from typing import Optional
import os
import torch
import torch.nn.functional as F
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
from deepspeed.utils.types import NormType
class MLPGemmOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(MLPGemmOp, self).__init__(config)
try:
if self.config.norm_type == NormType.LayerNorm:
if self.config.dtype in [
torch.float16, torch.int8
]: # non-triton cuda kernel has a higher performance in MLP than mlp_gemm_func in triton.ops
self.mlp_gemm_func = self.inference_module.mlp_gemm_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.mlp_gemm_func = self.inference_module.mlp_gemm_bf16
else:
self.mlp_gemm_func = self.inference_module.mlp_gemm_fp32 # type: ignore
elif self.config.norm_type == NormType.RMSNorm:
if self.config.dtype in [torch.float16, torch.int8]:
self.mlp_gemm_func = self.inference_module.rms_mlp_gemm_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.mlp_gemm_func = self.inference_module.rms_mlp_gemm_bf16
else:
self.mlp_gemm_func = self.inference_module.rms_mlp_gemm_fp32 # type: ignore
except AttributeError:
if self.config.norm_type == NormType.LayerNorm:
self.mlp_gemm_func = self.mlp_gemm_fallback
elif self.config.norm_type == NormType.RMSNorm:
self.mlp_gemm_func = self.rms_mlp_gemm_fallback
def mlp_gemm_fallback(self, input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps,
pre_layer_norm, mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type,
transpose):
if os.environ.get('DS_KI_FALLBACK') == 'True' and mlp_after_attn and not transpose:
residual_add = F.layer_norm(input + residual + input_bias, (input.shape[2], ), gamma, beta,
self.config.epsilon)
tmp = torch.matmul(residual_add, weight_interm)
tmp = F.gelu(tmp + bias)
output = torch.matmul(tmp, weight_out)
return (output, residual_add)
else:
raise NotImplementedError
def rms_mlp_gemm_fallback(self, input, residual, weight_interm, weight_out, gamma, eps, interm_scale, out_scale,
dtype, mlp_act_func_type, transpose):
raise NotImplementedError
def forward(self,
input: torch.Tensor,
residual: torch.Tensor,
weight_interm: torch.Tensor,
weight_out: torch.Tensor,
input_bias: Optional[torch.Tensor] = None,
bias: Optional[torch.Tensor] = None,
gamma: Optional[torch.Tensor] = None,
beta: Optional[torch.Tensor] = None):
if self.config.norm_type == NormType.LayerNorm:
output, residual_add = self.mlp_gemm_func(
input,
residual,
input_bias,
weight_interm,
weight_out,
bias,
gamma,
beta,
self.config.epsilon,
self.config.pre_layer_norm,
self.config.mlp_after_attn,
weight_interm.scale if hasattr(weight_interm, 'scale') else torch.empty(1), # type: ignore
weight_out.scale if hasattr(weight_out, 'scale') else torch.empty(1), # type: ignore
self.config.dtype == torch.int8,
self.config.mlp_act_func_type,
self.config.transposed_mode)
else:
output, residual_add = self.mlp_gemm_func(
input,
residual,
weight_interm,
weight_out,
gamma,
self.config.epsilon,
weight_interm.scale if hasattr(weight_interm, 'scale') else torch.empty(1), # type: ignore
weight_out.scale if hasattr(weight_out, 'scale') else torch.empty(1), # type: ignore
self.config.dtype == torch.int8,
self.config.mlp_act_func_type,
self.config.transposed_mode)
return output, residual_add
| 4,605 | 44.60396 | 116 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .linear import LinearOp
from .vector_matmul import VectorMatMulOp
from .softmax_context import SoftmaxContextOp
from .qkv_gemm import QKVGemmOp
from .softmax import SoftmaxOp
from .mlp_gemm import MLPGemmOp
from .gelu_gemm import GELUGemmOp
from .residual_add import ResidualAddOp
| 382 | 26.357143 | 45 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/residual_add.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
from typing import Optional
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
class ResidualAddOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(ResidualAddOp, self).__init__(config)
try:
if self.config.dtype in [torch.float16, torch.int8]:
self.residual_add_func = self.inference_module.residual_add_bias_fp16
elif self.config.dtype == torch.bfloat16:
self.residual_add_func = self.inference_module.residual_add_bias_bf16
else:
self.residual_add_func = self.inference_module.residual_add_bias_fp32
except AttributeError:
self.residual_add_func = None
try:
self._vector_add = self.inference_module._vector_add
except AttributeError:
self._vector_add = None
def forward(self,
hidden_state: torch.Tensor,
residual: torch.Tensor,
add_bias: bool,
attention_output: Optional[torch.Tensor] = None,
residual_add: Optional[torch.Tensor] = None,
attention_bias: Optional[torch.Tensor] = None,
final_bias: Optional[torch.Tensor] = None):
if self.residual_add_func is not None:
if final_bias is None:
residual = self._vector_add(residual, hidden_state, 1.0 / self.config.mp_size)
else:
if not self.config.pre_layer_norm and residual_add is not None:
                # only use residual_add if it is set and we are not using pre-layer-norm
residual = residual_add
self.residual_add_func(hidden_state, residual, attention_output, attention_bias, final_bias,
self.config.mp_size, self.config.mlp_after_attn, add_bias,
self.config.pre_layer_norm)
else:
# fallback
if os.environ.get('DS_KI_FALLBACK') == 'True' and self.config.mlp_after_attn:
if self.config.pre_layer_norm:
tmp = (residual.float() + attention_output.float() + attention_bias.float() +
final_bias.float()) / self.config.mp_size + hidden_state.float()
else:
tmp = residual.float() + hidden_state.float() + final_bias.float()
input_dtype = hidden_state.dtype
residual = tmp.to(input_dtype)
else:
raise NotImplementedError
return residual
| 2,708 | 40.676923 | 108 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
import deepspeed
class VectorMatMulOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(VectorMatMulOp, self).__init__(config)
try:
if self.config.dtype == torch.float16:
if deepspeed.HAS_TRITON and config.use_triton:
from deepspeed.ops.transformer.inference.triton.ops import vector_matmul_func as _triton_vector_matmul_func
self.vector_matmul_func = _triton_vector_matmul_func
triton_autotune = config.triton_autotune and config.layer_id == 0
if triton_autotune:
__class__._triton_autotune(2, self.config.max_out_tokens, self.config.hidden_size)
else:
self.vector_matmul_func = self.inference_module.vector_matmul_fp16
elif self.config.dtype == torch.int8:
self.vector_matmul_func = self.inference_module.vector_matmul_fp16
elif self.config.dtype == torch.bfloat16:
self.vector_matmul_func = self.inference_module.vector_matmul_bf16
else:
self.vector_matmul_func = self.inference_module.vector_matmul_fp32
except AttributeError:
self.vector_matmul_func = self.vector_matmul_fallback
def vector_matmul_fallback(self, input, weight, async_op, q_scale, q_int8, transpose):
if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose:
return torch.matmul(input, weight)
else:
raise NotImplementedError
def forward(self, input: torch.Tensor, weight: torch.Tensor, async_op: bool = False):
q_scale = weight.scale if hasattr(weight, 'scale') else torch.empty(1)
q_int8 = self.config.dtype == torch.int8
output = self.vector_matmul_func(input, weight, async_op, q_scale, q_int8, self.config.transposed_mode)
return output
@staticmethod
def _triton_autotune(min_seqlen, max_seqlen, hidden_size, dtype=torch.float16):
from deepspeed.ops.transformer.inference.triton.matmul_ext import Fp16Matmul, matmul
seqlen = [(min_seqlen + i)
for i in range(0, max_seqlen - min_seqlen + Fp16Matmul._cache_stride + 1, Fp16Matmul._cache_stride)]
Fp16Matmul._read_autotune_table()
for N in seqlen:
A = torch.randn((N, hidden_size), dtype=dtype, device='cuda')
B = torch.randn((hidden_size, hidden_size), dtype=dtype, device='cuda')
matmul(A, B)
Fp16Matmul._update_autotune_table()
| 2,750 | 45.627119 | 127 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/triton/softmax.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import triton
import triton.language as tl
'''
Softmax kernels adapted from the Triton fused-softmax tutorial:
https://github.com/openai/triton/blob/34817ecc954a6f4ca7b4dfb352fdde1f8bd49ca5/python/tutorials/02-fused-softmax.py
'''
@triton.jit
def softmax_kernel(output_ptr, input_ptr, stride, n_cols, BLOCK_SIZE: tl.constexpr):
row_idx = tl.program_id(0)
row_start_ptr = input_ptr + row_idx * stride
col_offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = row_start_ptr + col_offsets
row = tl.load(input_ptrs, mask=col_offsets < n_cols, other=-float('inf')).to(tl.float32)
row_minus_max = row - tl.max(row, axis=0)
numerator = tl.exp(row_minus_max)
denominator = tl.sum(numerator, axis=0)
softmax_output = numerator / denominator
output_row_start_ptr = output_ptr + row_idx * stride
output_ptrs = output_row_start_ptr + col_offsets
tl.store(output_ptrs, softmax_output, mask=col_offsets < n_cols)
@triton.jit
def masked_softmax_kernel(output_ptr, input_ptr, stride, mask_ptr, mask_stride, n_cols, BLOCK_SIZE: tl.constexpr):
row_idx = tl.program_id(0)
row_start_ptr = input_ptr + row_idx * stride
col_offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = row_start_ptr + col_offsets
mask_ptrs = mask_ptr + col_offsets + row_idx * mask_stride # mask_stride is 0 for 1d mask
row = tl.load(input_ptrs, mask=col_offsets < n_cols, other=-float('inf')).to(tl.float32)
mask = tl.load(mask_ptrs, mask=col_offsets < n_cols, other=0).to(tl.float32)
row_minus_max = row - tl.max(row, axis=0)
row_minus_max = row_minus_max + mask
numerator = tl.exp(row_minus_max)
denominator = tl.sum(numerator, axis=0)
softmax_output = numerator / denominator
output_row_start_ptr = output_ptr + row_idx * stride
output_ptrs = output_row_start_ptr + col_offsets
tl.store(output_ptrs, softmax_output, mask=col_offsets < n_cols)
def softmax(input: torch.Tensor, mask: torch.Tensor = None, dim=-1) -> torch.Tensor:
assert input.is_contiguous()
assert (dim == -1) or (dim == len(input.shape) - 1), "Only dim=-1 is supported"
use_mask = False if mask is None else True
input_arg = input.view(-1, input.shape[-1])
n_rows, n_cols = input_arg.shape
BLOCK_SIZE = max(triton.next_power_of_2(n_cols), 2)
num_warps = 4
if BLOCK_SIZE >= 2048:
num_warps = 8
if BLOCK_SIZE >= 4096:
num_warps = 16
# Allocate output
output = torch.empty_like(input)
if use_mask:
assert mask.is_contiguous()
mask = mask.view(-1, mask.shape[-1])
mask_stride = mask.shape[-1] if mask.shape[-2] > 1 else 0
masked_softmax_kernel[(n_rows, )](
output,
input,
input_arg.stride(0),
mask,
mask_stride,
n_cols,
num_warps=num_warps,
BLOCK_SIZE=BLOCK_SIZE,
)
else:
softmax_kernel[(n_rows, )](
output,
input,
input_arg.stride(0),
n_cols,
num_warps=num_warps,
BLOCK_SIZE=BLOCK_SIZE,
)
return output
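# A quick usage sketch (illustrative shapes; requires a CUDA device since these are Triton kernels):
#
#   scores = torch.randn(64, 128, dtype=torch.float16, device='cuda')  # rows of attention scores
#   mask = torch.zeros(1, 128, dtype=torch.float16, device='cuda')     # additive mask, broadcast over rows
#   probs = softmax(scores, mask)   # softmax over the last dimension, same shape as scores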
| 3,208 | 34.655556 | 115 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/triton/matmul_ext.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import triton
import os
from filelock import FileLock
import deepspeed.ops.transformer.inference.triton.triton_matmul_kernel as triton_matmul_kernel
import pickle
from io import open
import deepspeed
from pathlib import Path
import atexit
# -----------------------------------------------------------------------------
# util class/functions for triton
def _default_cache_dir():
return os.path.join(Path.home(), ".triton", "autotune")
def bias_add_activation(C, bias=None, activation=""):
if bias is not None:
C += bias
# activation
if activation == "relu":
        relu = torch.nn.ReLU()
C = relu(C)
elif activation == "leaky_relu":
leaky_relu = torch.nn.LeakyReLU(0.01)
C = leaky_relu(C)
elif activation == "gelu":
sigmoid = torch.nn.Sigmoid()
C = sigmoid(1.702 * C) * C
elif activation == "sigmoid":
sigmoid = torch.nn.Sigmoid()
C = sigmoid(C)
return C
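# Note: the "gelu" branch above uses the sigmoid approximation gelu(x) ~= x * sigmoid(1.702 * x).
# A quick CPU-side reference sketch (illustrative only):
#
#   x = torch.randn(8, 16)
#   approx = bias_add_activation(x.clone(), bias=None, activation="gelu")
#   exact = torch.nn.functional.gelu(x)   # approx and exact agree to within a small tolerance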
class AutotuneCacheManager:
"""
Cache manager for autotune
"""
def __init__(self, key):
self.key = key
self.file_path = None
self.lock_path = None
# if caching is enabled, get the lock and bin path
self.cache_dir = os.environ.get('TRITON_CACHE_DIR', _default_cache_dir())
if self.cache_dir:
os.makedirs(self.cache_dir, exist_ok=True)
if self.cache_dir:
self.file_path = os.path.join(self.cache_dir, self.key + ".pickle")
self.lock_path = self.file_path + ".lock"
def has_file(self):
return self.file_path and os.path.exists(self.file_path)
def put(self, table):
if self.file_path:
assert self.lock_path is not None
with FileLock(self.lock_path):
with open(self.file_path + ".tmp", 'wb') as handle:
pickle.dump(table, handle)
os.rename(self.file_path + ".tmp", self.file_path)
def load(self):
if os.path.exists(self.file_path):
with open(self.file_path, 'rb') as handle:
loaded_dict = pickle.load(handle)
return loaded_dict
else:
return None
# -----------------------------------------------------------------------------
# triton matmul class
class MatmulExt(torch.autograd.Function):
"""
a wrapper class that can call different triton matmul kernels depending on the input parameters
"""
@staticmethod
def forward(A, B, bias=None, activation="", use_triton=True, update_autotune_table=False):
"""
A: input, activation matrix A
B: input, weight matrix B
"""
matmul = None
quantize_activation = False
Batch = 0
if len(A.shape) == 3: # if A is 3d-tensor where batch index is given as 0-axis
assert A.is_contiguous(), "matrix A must be contiguous"
Batch, M, K = A.shape
A = A.view(-1, K)
# fp16 activation and fp16 weight matmul into fp16 output
matmul = fp16_matmul
C = matmul.forward(A, B, use_triton=use_triton, bias=bias, activation=activation)
if matmul and update_autotune_table:
matmul._update_autotune_table()
if Batch > 0:
C = C.view(Batch, M, -1)
return C
class TritonMatmul(torch.autograd.Function):
"""
triton matmul kernel superclass
"""
def __init__(self):
pass
@staticmethod
def _ref_forward(A, B, ref_dtype=torch.float32):
C = torch.matmul(A.type(ref_dtype), B.type(ref_dtype))
return C
@staticmethod
def _read_autotune_table(cache_key, triton_kernel):
cache_manager = AutotuneCacheManager(cache_key)
table = cache_manager.load()
if table:
triton_kernel.cache = table
@staticmethod
def _write_autotune_table(cache_key, triton_kernel):
cache_manager = AutotuneCacheManager(cache_key)
cache_manager.put(triton_kernel.cache)
@staticmethod
def _update_autotune_table(cache_key, triton_kernel):
cache_manager = AutotuneCacheManager(cache_key)
autotune_table = cache_manager.load()
if autotune_table is None:
autotune_table = dict()
autotune_table.update(triton_kernel.cache) # always overwrite with the new autotune results
cache_manager = AutotuneCacheManager(cache_key)
cache_manager.put(autotune_table)
@staticmethod
def forward(
A,
B,
ref_dtype=torch.float32, # fp32 only
bias=None,
activation=""):
C = torch.matmul(A.type(ref_dtype), B.type(ref_dtype))
C = bias_add_activation(C, bias, activation)
return C
class Fp16Matmul(TritonMatmul):
"""
fp16 matrix multiplication kernel
dtypes: fp16 x fp16 = fp16
"""
_2d_kernel = triton_matmul_kernel._fp_matmul
_4d_kernel = triton_matmul_kernel.matmul_4d_kernel
_cache_stride = 32
def __init__(self, read_cache=True):
super().__init__()
if read_cache:
__class__._read_autotune_table()
def skip_autotune(self):
__class__._2d_kernel.configs = [__class__._2d_kernel.configs[0]]
__class__._4d_kernel.configs = [__class__._4d_kernel.configs[0]]
@staticmethod
def forward(A, B, use_triton=True, bias=None, activation=""):
if use_triton:
device = A.device
# handle non-contiguous inputs if necessary
if A.stride(0) > 1 and A.stride(1) > 1:
A = A.contiguous()
if B.stride(0) > 1 and B.stride(1) > 1:
B = B.contiguous()
# checks constraints
assert A.shape[1] == B.shape[0], "incompatible dimensions"
M, K = A.shape
_, N = B.shape
# allocates output
C = torch.empty((M, N), device=device, dtype=A.dtype)
# accumulator types
ACC_TYPE = triton.language.float32 if A.dtype in [torch.float16, torch.bfloat16, torch.float32
] else triton.language.int32
# launch kernel
grid = lambda META: (triton.cdiv(M, META['BLOCK_M']) * triton.cdiv(N, META['BLOCK_N']), META['SPLIT_K'])
__class__._2d_kernel[grid](A,
B,
C,
M,
N,
K,
bias,
A.stride(0),
A.stride(1),
B.stride(0),
B.stride(1),
C.stride(0),
C.stride(1),
M // __class__._cache_stride,
N // __class__._cache_stride,
K // __class__._cache_stride,
GROUP_M=8,
ACC_TYPE=ACC_TYPE,
BIAS_ADD=(0 if bias is None else 1),
ACTIVATION=activation)
else:
C = torch.matmul(A, B)
return C
@staticmethod
def _matmul_4d(a, b):
assert a.shape[-1] == b.shape[-2], "incompatible dimensions"
assert a.is_contiguous(), "matrix A must be contiguous"
assert b.is_contiguous(), "matrix B must be contiguous"
B, H, M, K = a.shape
B, H, K, N = b.shape
assert K > 1, "inner-product dimension K should be larger than 1"
c = torch.empty((B, H, M, N), device=a.device, dtype=a.dtype)
grid = lambda META: (
triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),
H,
B,
)
__class__._4d_kernel[grid](
a,
b,
c,
M,
N,
K,
M // __class__._cache_stride,
N // __class__._cache_stride,
K // __class__._cache_stride,
a.stride(0),
a.stride(1),
a.stride(2),
a.stride(3),
b.stride(0),
b.stride(1),
b.stride(2),
b.stride(3),
c.stride(0),
c.stride(1),
c.stride(2),
c.stride(3),
scale=-1.0,
MASK=False,
)
return c
@staticmethod
def _score_4d_matmul(input, head_size, input_mask, scale=-1.0):
assert input.is_contiguous(), "matrix input must be contiguous"
batches = input.shape[0]
d_model = input.shape[-1] // 3
num_of_heads = d_model // head_size
q = input[:, :, :d_model]
k = input[:, :, d_model:d_model * 2]
q = q.view(batches, -1, num_of_heads, head_size)
k = k.view(batches, -1, num_of_heads, head_size)
# checks constraints
assert q.shape == k.shape, "incompatible dimensions"
B, M, H, K = q.shape
B, N, H, K = k.shape
assert K > 1, "inner-product dimension K should be larger than 1"
# allocates output
output = torch.empty((B, H, M, N), device=q.device, dtype=q.dtype)
grid = lambda META: (
triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]),
H,
B,
)
__class__._4d_kernel[grid](
q,
k,
output,
M,
N,
K,
M // __class__._cache_stride,
N // __class__._cache_stride,
K // __class__._cache_stride,
q.stride(0),
q.stride(2),
q.stride(1),
q.stride(3),
k.stride(0),
k.stride(2),
k.stride(3),
k.stride(1),
output.stride(0),
output.stride(1),
output.stride(2),
output.stride(3),
scale=scale,
MASK=False,
)
return output
@staticmethod
def _context_4d_matmul(prob, input, head_size):
assert prob.is_contiguous(), "matrix prob must be contiguous"
assert input.is_contiguous(), "matrix input must be contiguous"
batches = input.shape[0]
d_model = input.shape[-1] // 3
num_of_heads = d_model // head_size
v = input[:, :, d_model * 2:]
v = v.view(batches, -1, num_of_heads, head_size)
# checks constraints
assert (prob.shape[0] == v.shape[0] and prob.shape[1] == v.shape[2] and prob.shape[2] == v.shape[1]
and prob.shape[3] == v.shape[1]), "incompatible dimensions"
B, H, M, K = prob.shape
B, K, H, N = v.shape
assert K > 1, "inner-product dimension K should be larger than 1"
# allocates output
output = torch.empty((B, M, H, N), device=v.device, dtype=v.dtype)
grid = lambda META: (
triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]),
H,
B,
)
__class__._4d_kernel[grid](
prob,
v,
output,
M,
N,
K,
M // __class__._cache_stride,
N // __class__._cache_stride,
K // __class__._cache_stride,
prob.stride(0),
prob.stride(1),
prob.stride(2),
prob.stride(3),
v.stride(0),
v.stride(2),
v.stride(1),
v.stride(3),
# Here we also transpose the output when writing to memory.
output.stride(0),
output.stride(2),
output.stride(1),
output.stride(3),
scale=-1,
MASK=False,
)
return output.view(batches, -1, d_model)
@staticmethod
def _ref_forward(A, B, ref_dtype=torch.float32, bias=None, activation=""):
C = torch.matmul(A.type(ref_dtype), B.type(ref_dtype))
C = bias_add_activation(C, bias, activation)
return C
@staticmethod
def _check_parity(A,
B,
output_dtype,
SA=None,
SB=None,
qblock_size=None,
ref_dtype=torch.float32,
tol=0.01,
use_triton=True,
bias=None,
activation=""):
torch_output = __class__._ref_forward(A, B, ref_dtype=ref_dtype, bias=bias, activation=activation)
triton_output = __class__.forward(A, B, use_triton=use_triton, bias=bias, activation=activation)
assert triton.testing.allclose(triton_output.cpu().type(torch_output.dtype), torch_output.cpu(), tol=tol)
print(f"{__class__.__name__}: PASSed the parity check")
return triton_output, torch_output
@staticmethod
def _read_autotune_table():
TritonMatmul._read_autotune_table(__class__.__name__ + "_2d_kernel", __class__._2d_kernel)
TritonMatmul._read_autotune_table(__class__.__name__ + "_4d_kernel", __class__._4d_kernel)
@staticmethod
def _write_autotune_table():
TritonMatmul._write_autotune_table(__class__.__name__ + "_2d_kernel", __class__._2d_kernel)
TritonMatmul._write_autotune_table(__class__.__name__ + "_4d_kernel", __class__._4d_kernel)
@staticmethod
def _update_autotune_table():
TritonMatmul._update_autotune_table(__class__.__name__ + "_2d_kernel", __class__._2d_kernel)
TritonMatmul._update_autotune_table(__class__.__name__ + "_4d_kernel", __class__._4d_kernel)
# -----------------------------------------------------------------------------
# mapping
if deepspeed.HAS_TRITON:
fp16_matmul = Fp16Matmul()
matmul = MatmulExt.forward
matmul_4d = fp16_matmul._matmul_4d
score_4d_matmul = fp16_matmul._score_4d_matmul
context_4d_matmul = fp16_matmul._context_4d_matmul
else:
fp16_matmul = None
matmul = None
matmul_4d = None
score_4d_matmul = None
context_4d_matmul = None
@atexit.register
def matmul_ext_update_autotune_table():
if deepspeed.HAS_TRITON:
fp16_matmul._update_autotune_table()
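

# Illustrative sketch (not part of the upstream module): how the exported
# `matmul` wrapper can fuse a bias add and a GeLU activation into a single
# fp16 GEMM when triton is available. The shapes, device and dtype below are
# assumptions chosen purely for demonstration.
def _example_matmul_usage():
    if not deepspeed.HAS_TRITON:
        return None
    a = torch.randn(128, 256, dtype=torch.float16, device='cuda')
    w = torch.randn(256, 512, dtype=torch.float16, device='cuda')
    b = torch.randn(512, dtype=torch.float16, device='cuda')
    # reference result would be: bias_add_activation(torch.matmul(a, w), b, "gelu")
    return matmul(a, w, bias=b, activation="gelu", use_triton=True)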
| 14,630 | 31.878652 | 116 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/triton/gelu.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import triton
import triton.language as tl
from deepspeed.accelerator import get_accelerator
@triton.jit
def gelu_functor(x):
    # The sigmoid-based approximation below is faster but introduces larger
    # parity errors, so the exact erf formulation is used instead.
    # return tl.sigmoid(1.702 * x) * x
return x * 0.5 * (1.0 + tl.libdevice.erf(x / 1.41421356237))
@triton.jit
def gelu_kernel(x_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask)
output = gelu_functor(x)
tl.store(output_ptr + offsets, output, mask=mask)
def gelu(activations: torch.Tensor) -> torch.Tensor:
assert activations.is_contiguous()
assert get_accelerator().on_accelerator(activations)
output = torch.empty_like(activations)
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
gelu_kernel[grid](activations, output, n_elements, BLOCK_SIZE=1024)
return output
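

# Illustrative sketch (not part of the upstream module): the wrapper expects a
# contiguous tensor that already lives on the accelerator; the shape and dtype
# below are assumptions chosen purely for demonstration.
def _example_gelu_usage():
    x = torch.randn(8, 4096, dtype=torch.float16,
                    device=get_accelerator().current_device_name())
    return gelu(x)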
| 1,157 | 28.692308 | 73 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/triton/mlp.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import math
import torch.nn as nn
from deepspeed.accelerator import get_accelerator
from deepspeed import comm as dist
from ..op_binding import MLPGemmOp, VectorMatMulOp, GELUGemmOp, ResidualAddOp
class TritonMLP(nn.Module):
def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1, mlp_extra_grouping=False):
super(TritonMLP, self).__init__()
self.config = config
data_type = self.config.dtype
data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype
device = get_accelerator().current_device_name()
self.attn_nw = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.attn_nb = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
intm_size_per_partition = self.config.intermediate_size // self.config.mp_size
self.inter_w = nn.Parameter(torch.empty(self.config.hidden_size,
intm_size_per_partition,
dtype=data_type,
device=device),
requires_grad=False)
self.inter_b = nn.Parameter(torch.empty(intm_size_per_partition, dtype=data_type_fp, device=device),
requires_grad=False)
self.output_w = nn.Parameter(torch.empty(intm_size_per_partition,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.output_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
# used for quantization
self.q_scales = q_scales
self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups
self.merge_count = int(math.log2(merge_count))
self.mp_group = mp_group
self.mlp_gemm_func = MLPGemmOp(config)
self.vector_matmul_func = VectorMatMulOp(config)
self.fused_gemm_gelu = GELUGemmOp(config)
self.residual_add_func = ResidualAddOp(config)
def forward(self, input, residual, residual_norm, bias):
residual_add = None
if self.attn_nw is None:
output = self.fused_gemm_gelu(input=residual_norm,
weight=self.inter_w,
bias=self.inter_b,
weight_out=self.output_w)
else:
output, residual_add = self.mlp_gemm_func(input=input,
residual=residual,
input_bias=bias,
weight_interm=self.inter_w,
weight_out=self.output_w,
bias=self.inter_b,
gamma=self.attn_nw,
beta=self.attn_nb)
residual = self.residual_add_func(hidden_state=output,
residual=residual,
attention_output=input,
attention_bias=bias if bias is not None else self.output_b,
final_bias=self.output_b,
add_bias=bias is not None,
residual_add=residual_add)
if self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1:
dist.all_reduce(residual, group=self.mp_group)
return residual
| 4,225 | 50.536585 | 114 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/triton/triton_matmul_kernel.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import triton
import triton.language as tl
from .gelu import gelu_functor
import torch
AUTOTUNE_TOP_K = 10
SKIP_AUTOTUNE = False
def _fp16_matmul_prune_config(configs, named_args, skip_autotune=SKIP_AUTOTUNE):
if skip_autotune:
configs = [configs[0]]
else:
configs = triton.ops.matmul_perf_model.early_config_prune(configs, named_args)
return configs
"""
fp16 matmul implementation is adapted from triton matmul:
https://github.com/openai/triton/blob/34817ecc954a6f4ca7b4dfb352fdde1f8bd49ca5/python/triton/ops/matmul.py
"""
@triton.autotune(
configs=[
# basic configs for compute-bound matmuls
triton.Config({
'BLOCK_M': 128,
'BLOCK_N': 256,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=3, num_warps=8),
triton.Config({
'BLOCK_M': 256,
'BLOCK_N': 128,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=3, num_warps=8),
triton.Config({
'BLOCK_M': 256,
'BLOCK_N': 64,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=4, num_warps=4),
triton.Config({
'BLOCK_M': 64,
'BLOCK_N': 256,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=4, num_warps=4),
triton.Config({
'BLOCK_M': 128,
'BLOCK_N': 128,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=4, num_warps=4),
triton.Config({
'BLOCK_M': 128,
'BLOCK_N': 64,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=4, num_warps=4),
triton.Config({
'BLOCK_M': 64,
'BLOCK_N': 128,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=4, num_warps=4),
triton.Config({
'BLOCK_M': 128,
'BLOCK_N': 32,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=4, num_warps=4),
triton.Config({
'BLOCK_M': 64,
'BLOCK_N': 32,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=5, num_warps=2),
],
key=['CACHE_M', 'CACHE_N', 'CACHE_K'],
prune_configs_by={
'early_config_prune': _fp16_matmul_prune_config,
'perf_model': None,
'top_k': AUTOTUNE_TOP_K
},
)
@triton.heuristics({
'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] * args['SPLIT_K']) == 0,
})
@triton.jit
def _fp_matmul(
A,
B,
C,
M,
N,
K,
bias,
stride_am,
stride_ak,
stride_bk,
stride_bn,
stride_cm,
stride_cn,
CACHE_M,
CACHE_N,
CACHE_K,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
BLOCK_K: tl.constexpr,
GROUP_M: tl.constexpr,
SPLIT_K: tl.constexpr,
EVEN_K: tl.constexpr,
ACC_TYPE: tl.constexpr,
BIAS_ADD: tl.constexpr,
ACTIVATION: tl.constexpr,
):
# matrix multiplication
pid = tl.program_id(0)
pid_z = tl.program_id(1)
grid_m = (M + BLOCK_M - 1) // BLOCK_M
grid_n = (N + BLOCK_N - 1) // BLOCK_N
# re-order program ID for better L2 performance
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + (pid % group_size)
pid_n = (pid % width) // (group_size)
# do matrix multiplication
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
# pointers
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
for k in range(K, 0, -BLOCK_K * SPLIT_K):
if EVEN_K:
a = tl.load(A)
b = tl.load(B)
else:
a = tl.load(A, mask=rk[None, :] < k, other=0.)
b = tl.load(B, mask=rk[:, None] < k, other=0.)
acc += tl.dot(a, b)
A += BLOCK_K * SPLIT_K * stride_ak
B += BLOCK_K * SPLIT_K * stride_bk
# bias addition
if BIAS_ADD:
bias_offset = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
bias_ptr = bias + bias_offset
b = tl.load(bias_ptr, mask=bias_offset < N)
acc = acc + b[None, :]
# activation
if ACTIVATION == "relu":
acc = tl.where(acc >= 0, acc, 0)
elif ACTIVATION == "leaky_relu":
acc = tl.where(acc >= 0, acc, 0.01 * acc)
elif ACTIVATION == "gelu":
#acc = tl.sigmoid(1.702 * acc) * acc
acc = gelu_functor(acc)
elif ACTIVATION == "sigmoid":
acc = tl.sigmoid(acc) # sigmoid
acc = acc.to(C.dtype.element_ty)
# rematerialize rm and rn to save registers
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
mask = (rm < M)[:, None] & (rn < N)[None, :]
# handles write-back with reduction-splitting
if SPLIT_K == 1:
tl.store(C, acc, mask=mask)
else:
tl.atomic_add(C, acc, mask=mask)
def matmul_4d_prune_config(configs, named_args, skip_autotune=SKIP_AUTOTUNE):
if skip_autotune:
configs = [configs[0]]
else:
device = torch.cuda.current_device() #ignore-cuda
capability = torch.cuda.get_device_capability() #ignore-cuda
# BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages
dtsize = named_args['a_ptr'].element_size()
dtype = named_args['a_ptr'].dtype
# make sure we have enough smem
pruned_configs = []
for config in configs:
kw = config.kwargs
BLOCK_M, BLOCK_N, BLOCK_K, num_stages = \
kw['BLOCK_SIZE_M'], kw['BLOCK_SIZE_N'], kw['BLOCK_SIZE_K'], config.num_stages
triton.compiler.init_cuda_utils()
max_shared_memory = triton.compiler.cuda_utils.get_device_properties(device)["max_shared_mem"]
required_shared_memory = (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize
if required_shared_memory <= max_shared_memory:
pruned_configs.append(config)
configs = pruned_configs
return configs
@triton.autotune(
configs=[
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 64,
"GROUP_SIZE_M": 8
},
            num_stages=1,  # mainly for unit tests, to minimize the shared memory usage
num_warps=8),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=5,
num_warps=2,
),
triton.Config(
{
"BLOCK_SIZE_M": 32,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=5,
num_warps=2,
),
],
key=['CACHE_M', 'CACHE_N', 'CACHE_K'],
prune_configs_by={
'early_config_prune': matmul_4d_prune_config,
'perf_model': None,
'top_k': AUTOTUNE_TOP_K
},
)
@triton.jit
def matmul_4d_kernel(
# Pointers to matrices
a_ptr,
b_ptr,
c_ptr,
# Matrix dimensions
M,
N,
K,
CACHE_M,
CACHE_N,
CACHE_K,
stride_ab,
stride_ah,
stride_am,
stride_ak,
stride_bb,
stride_bh,
stride_bk,
stride_bn,
stride_cb,
stride_ch,
stride_cm,
stride_cn,
scale,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
MASK: tl.constexpr,
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
pid = tl.program_id(axis=0)
head = tl.program_id(axis=1)
batch = tl.program_id(axis=2)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
if MASK:
if (pid_m + 1) * BLOCK_SIZE_M - 1 < pid_n * BLOCK_SIZE_N:
c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=c_ptr.dtype.element_ty) - float("inf")
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = (c_ptr + batch * stride_cb + head * stride_ch + stride_cm * offs_cm[:, None] +
stride_cn * offs_cn[None, :])
tl.store(c_ptrs, c)
return
offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = (a_ptr + batch * stride_ab + head * stride_ah +
(offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak))
b_ptrs = (b_ptr + batch * stride_bb + head * stride_bh +
(offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn))
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, K, BLOCK_SIZE_K):
a_mask = (offs_am[:, None] < M) & (offs_k[None, :] + k < K)
b_mask = (offs_k[:, None] + k < K) & (offs_bn[None, :] < N)
a = tl.load(a_ptrs, mask=a_mask, other=0.)
b = tl.load(b_ptrs, mask=b_mask, other=0.)
accumulator += tl.dot(a, b)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
c = accumulator.to(c_ptr.dtype.element_ty)
if scale > 0:
c = c * scale.to(c_ptr.dtype.element_ty)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
if MASK:
c += tl.where(offs_cm[:, None] >= offs_cn[None, :], 0, float("-inf"))
c_ptrs = (c_ptr + batch * stride_cb + head * stride_ch + stride_cm * offs_cm[:, None] +
stride_cn * offs_cn[None, :])
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
| 11,738 | 30.055556 | 106 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/triton/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .residual_add import residual_add_bias
from .layer_norm import layer_norm, layer_norm_residual
from .gelu import gelu
from .softmax import softmax
from .ops import *
from .matmul_ext import fp16_matmul, matmul_4d, score_4d_matmul, context_4d_matmul
| 350 | 28.25 | 82 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/triton/ops.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import deepspeed
from deepspeed.ops.op_builder import InferenceBuilder
import deepspeed.ops.transformer.inference.triton.matmul_ext as matmul_ext
from deepspeed.ops.transformer.inference.triton.layer_norm import layer_norm, layer_norm_residual
inference_module = None
def vector_matmul_func(input, weight, async_op, q_scale, q_int8, transposed_mode):
assert not transposed_mode and not async_op and not q_int8
return matmul_ext.matmul(input, weight, bias=None, activation="", use_triton=True)
def fused_gemm_gelu(input,
weight,
weight_scale,
bias,
weight_out,
weight_out_scale,
epsilon,
pre_layer_norm,
q_int8,
async_op,
transposed_mode,
use_triton_ln=True):
assert not transposed_mode
# activation
activation = "gelu"
# intermediate fc in FF
intm_out = matmul_ext.matmul(input, weight, bias=bias, activation=activation, use_triton=True)
# output fc in FF
ff_out = matmul_ext.matmul(
intm_out,
weight_out,
bias=None,
activation="", # bias added layer with residual_add + bias + layerNorm layer
use_triton=True)
return ff_out
def linear_func(input, weight, bias, add_bias, do_flash_attn, num_heads, transposed_mode=False):
assert not transposed_mode and not do_flash_attn
qkv_out = matmul_ext.matmul(input, weight, bias=(bias if add_bias else None), activation="", use_triton=True)
return qkv_out
def mlp_gemm_func(input,
residual,
input_bias,
weight_interm,
weight_out,
bias,
gamma,
beta,
epsilon,
pre_layer_norm,
mlp_after_attn,
weight_interm_scale,
weight_out_scale,
q_int8,
mlp_act_func_type,
transposed_mode,
use_triton_ln=True):
assert not transposed_mode
# residual add and layerNorm after attention
if use_triton_ln:
mlp_input = layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon)
else:
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
mlp_input = inference_module._layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon)
# activation
if deepspeed.utils.types.ActivationFuncType(mlp_act_func_type) == deepspeed.utils.types.ActivationFuncType.GELU:
activation = "gelu"
elif deepspeed.utils.types.ActivationFuncType(mlp_act_func_type) == deepspeed.utils.types.ActivationFuncType.ReLU:
activation = "relu"
else:
activation = ""
# intermediate fc in FF
intm_out = matmul_ext.matmul(mlp_input, weight_interm, bias=bias, activation=activation, use_triton=True)
# output fc in FF
ff_out = matmul_ext.matmul(
intm_out,
weight_out,
bias=None,
activation="", # bias added layer with residual_add + bias + layerNorm layer
use_triton=True)
return ff_out, mlp_input
def qkv_gemm_func(
input,
weight,
q_scale,
bias,
gamma,
beta,
epsilon,
add_bias,
q_int8,
transposed_mode=False,
use_triton_ln=True,
):
assert not transposed_mode
# residual add and layerNorm after attention
if use_triton_ln:
qkv_input = layer_norm(input, gamma, beta, epsilon)
else:
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
qkv_input = inference_module.layer_norm(input, gamma, beta, epsilon)
qkv_out = matmul_ext.matmul(qkv_input, weight, bias=(bias if add_bias else None), activation="", use_triton=True)
return qkv_out, qkv_input
| 4,125 | 30.257576 | 118 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/triton/layer_norm.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import triton
import triton.language as tl
'''
layer-normalization
adapted from the triton kernel in
https://github.com/openai/triton/blob/34817ecc954a6f4ca7b4dfb352fdde1f8bd49ca5/python/tutorials/05-layer-norm.py
'''
@triton.jit
def layer_norm_kernel(
Out,
A,
Weight,
Bias,
stride,
N,
eps,
BLOCK_SIZE: tl.constexpr,
):
# position of elements processed by this program
row = tl.program_id(0)
Out += row * stride
A += row * stride
# compute mean
mean = 0
_mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32)
_mean += a
mean = tl.sum(_mean, axis=0) / N
# compute variance
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32)
a = tl.where(cols < N, a - mean, 0.0)
_var += a * a
var = tl.sum(_var, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
# multiply by weight and add bias
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
weight = tl.load(Weight + cols, mask=mask)
bias = tl.load(Bias + cols, mask=mask)
a = tl.load(A + cols, mask=mask, other=0.0).to(tl.float32)
a_hat = (a - mean) * rstd
out = a_hat * weight + bias
        # write-back
tl.store(Out + cols, out, mask=mask)
@triton.jit
def layer_norm_residual_kernel(
Out,
A,
Residual,
ln_input,
Weight,
Bias,
stride,
N,
eps,
BLOCK_SIZE: tl.constexpr,
):
# position of elements processed by this program
row = tl.program_id(0)
Out += row * stride
A += row * stride
Residual += row * stride
ln_input += row * stride
# compute mean
mean = 0
_mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32)
res = tl.load(Residual + cols, mask=cols < N, other=0.0).to(tl.float32)
a = a + res
tl.store(ln_input + cols, a, mask=cols < N)
_mean += a
mean = tl.sum(_mean, axis=0) / N
# compute variance
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(ln_input + cols, mask=cols < N, other=0.0).to(tl.float32)
a = tl.where(cols < N, a - mean, 0.0)
_var += a * a
var = tl.sum(_var, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
# multiply by weight and add bias
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
weight = tl.load(Weight + cols, mask=mask)
bias = tl.load(Bias + cols, mask=mask)
a = tl.load(ln_input + cols, mask=mask, other=0.0).to(tl.float32)
a_hat = (a - mean) * rstd
out = a_hat * weight + bias
# write-back
tl.store(Out + cols, out, mask=mask)
@triton.jit
def layer_norm_residual_bias_kernel(
Out,
A,
Residual,
InputBias,
ln_input,
Weight,
Bias,
stride,
N,
eps,
BLOCK_SIZE: tl.constexpr,
):
# position of elements processed by this program
row = tl.program_id(0)
Out += row * stride
A += row * stride
Residual += row * stride
ln_input += row * stride
# compute mean
mean = 0
_mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32)
res = tl.load(Residual + cols, mask=cols < N, other=0.0).to(tl.float32)
b = tl.load(InputBias + cols, mask=cols < N, other=0.0).to(tl.float32)
a = a + b + res
tl.store(ln_input + cols, a, mask=cols < N)
_mean += a
mean = tl.sum(_mean, axis=0) / N
# compute variance
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(ln_input + cols, mask=cols < N, other=0.0).to(tl.float32)
a = tl.where(cols < N, a - mean, 0.0)
_var += a * a
var = tl.sum(_var, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
# multiply by weight and add bias
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
weight = tl.load(Weight + cols, mask=mask)
bias = tl.load(Bias + cols, mask=mask)
a = tl.load(ln_input + cols, mask=mask, other=0.0).to(tl.float32)
a_hat = (a - mean) * rstd
out = a_hat * weight + bias
# write-back
tl.store(Out + cols, out, mask=mask)
def layer_norm(a, weight, bias, eps):
assert a.is_contiguous()
assert weight.is_contiguous()
assert bias.is_contiguous()
# allocate output
out = torch.empty_like(a)
# reshape input data into 2D tensor
a_arg = a.view(-1, a.shape[-1])
M, N = a_arg.shape
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // a.element_size()
BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
BLOCK_SIZE = max(BLOCK_SIZE, 128)
BLOCK_SIZE = min(BLOCK_SIZE, 4096)
BLOCK_SIZE = BLOCK_SIZE if N <= 4096 else 8192
# heuristics for number of warps
num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
layer_norm_kernel[(M, )](
out,
a_arg,
weight,
bias,
a_arg.stride(0),
N,
eps,
BLOCK_SIZE=BLOCK_SIZE,
num_warps=num_warps,
)
return out
def layer_norm_residual(a, input_bias, residual, weight, bias, eps):
assert a.is_contiguous()
assert weight.is_contiguous()
assert bias.is_contiguous()
assert residual.is_contiguous()
# allocate output and scratch-pad for residual addition
out = torch.empty_like(a)
ln_input = torch.empty_like(a)
# reshape input data into 2D tensor
a_arg = a.view(-1, a.shape[-1])
residual = residual.view(-1, residual.shape[-1])
M, N = a_arg.shape
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // a.element_size()
BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
BLOCK_SIZE = max(BLOCK_SIZE, 128)
BLOCK_SIZE = min(BLOCK_SIZE, 4096)
BLOCK_SIZE = BLOCK_SIZE if N <= 4096 else 8192
# heuristics for number of warps
num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
if input_bias is None:
layer_norm_residual_kernel[(M, )](
out,
a_arg,
residual,
ln_input,
weight,
bias,
a_arg.stride(0),
N,
eps,
BLOCK_SIZE=BLOCK_SIZE,
num_warps=num_warps,
)
else:
layer_norm_residual_bias_kernel[(M, )](
out,
a_arg,
residual,
input_bias,
ln_input,
weight,
bias,
a_arg.stride(0),
N,
eps,
BLOCK_SIZE=BLOCK_SIZE,
num_warps=num_warps,
)
return out
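

# Illustrative sketch (not part of the upstream module): both wrappers normalize
# row-wise over the last dimension, and passing input_bias=None selects the
# bias-free residual kernel. The shapes, dtype, device and epsilon below are
# assumptions chosen purely for demonstration.
def _example_layer_norm_usage():
    hidden = 1024
    x = torch.randn(8, 128, hidden, dtype=torch.float16, device='cuda')
    weight = torch.ones(hidden, dtype=torch.float16, device='cuda')
    bias = torch.zeros(hidden, dtype=torch.float16, device='cuda')
    residual = torch.randn_like(x)
    y = layer_norm(x, weight, bias, 1e-5)
    y_res = layer_norm_residual(x, None, residual, weight, bias, 1e-5)
    return y, y_res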
| 7,512 | 29.052 | 112 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/triton/residual_add.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import triton
import triton.language as tl
from deepspeed.accelerator import get_accelerator
@triton.jit
def residual_add_bias_kernel(
hidden_state_ptr,
residual_ptr,
attn_output_ptr,
hidden_state_size,
attn_bias_ptr,
final_bias_ptr,
bias_size,
output_ptr,
mp_size: tl.constexpr,
mlp_after_attn: tl.constexpr,
pre_attn_norm: tl.constexpr,
add_attn_bias: tl.constexpr,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < hidden_state_size
bias_offsets = offsets % bias_size
bias_mask = bias_offsets < bias_size
tl_hidden_state = tl.load(hidden_state_ptr + offsets, mask=mask)
tl_residual = tl.load(residual_ptr + offsets, mask=mask)
tl_attn_output = tl.load(attn_output_ptr + offsets, mask=mask)
tl_attn_bias = tl.load(attn_bias_ptr + bias_offsets, mask=bias_mask)
tl_final_bias = tl.load(final_bias_ptr + bias_offsets, mask=bias_mask)
if mlp_after_attn:
if pre_attn_norm:
output = tl_hidden_state + (tl_residual + tl_final_bias + tl_attn_output + tl_attn_bias) / mp_size
else:
output = tl_hidden_state + tl_residual + tl_final_bias
else:
output = tl_hidden_state + tl_attn_output + (tl_residual + tl_final_bias) / mp_size
if add_attn_bias:
output += tl_attn_bias / mp_size
tl.store(output_ptr + offsets, output, mask=mask)
def residual_add_bias(hidden_state: torch.Tensor, residual: torch.Tensor, attn_output: torch.Tensor,
attn_bias: torch.Tensor, final_bias: torch.Tensor, mp_size: int, mlp_after_attn: bool,
add_attn_bias: bool, pre_attn_norm: bool):
# check that all tensors are on the same device
assert get_accelerator().on_accelerator(hidden_state) \
and get_accelerator().on_accelerator(residual) \
and get_accelerator().on_accelerator(attn_output) \
and get_accelerator().on_accelerator(attn_bias) \
and get_accelerator().on_accelerator(final_bias)
# check that all tensors have the same dtype
assert hidden_state.dtype == residual.dtype == attn_output.dtype \
== attn_bias.dtype == final_bias.dtype
# check that all tensors have the right shape
assert hidden_state.shape == residual.shape == attn_output.shape
assert attn_bias.shape == final_bias.shape
assert attn_bias.shape[0] == hidden_state.shape[2]
output = torch.empty_like(hidden_state)
hidden_state_size = output.numel()
bias_size = attn_bias.numel()
grid = lambda meta: (triton.cdiv(hidden_state_size, meta['BLOCK_SIZE']), )
residual_add_bias_kernel[grid](hidden_state, residual, attn_output, hidden_state_size,\
attn_bias, final_bias, bias_size, output, mp_size, mlp_after_attn, pre_attn_norm, \
add_attn_bias, \
BLOCK_SIZE=1024)
return output
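

# Illustrative sketch (not part of the upstream module): all activation tensors
# must share one (batch, seq, hidden) shape and the two bias vectors must match
# the hidden dimension, as checked by the asserts above. The sizes and flag
# values below are assumptions chosen purely for demonstration.
def _example_residual_add_bias():
    device = get_accelerator().current_device_name()
    hidden_state = torch.randn(1, 128, 1024, dtype=torch.float16, device=device)
    residual = torch.randn_like(hidden_state)
    attn_output = torch.randn_like(hidden_state)
    attn_bias = torch.randn(1024, dtype=torch.float16, device=device)
    final_bias = torch.randn(1024, dtype=torch.float16, device=device)
    return residual_add_bias(hidden_state, residual, attn_output, attn_bias, final_bias,
                             mp_size=1, mlp_after_attn=True, add_attn_bias=True, pre_attn_norm=True)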
| 3,118 | 34.044944 | 110 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/transformer/inference/triton/attention.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
import torch
import torch.nn as nn
from deepspeed.accelerator import get_accelerator
from deepspeed import comm as dist
from deepspeed.ops.transformer.inference.op_binding import LinearOp, VectorMatMulOp, SoftmaxContextOp, QKVGemmOp
from deepspeed.ops.transformer.inference.triton import (
softmax,
score_4d_matmul,
context_4d_matmul,
)
minus_inf = -10000.0
class TritonSelfAttention(nn.Module):
num_layers = 0
def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1, qkv_merging=False):
super(TritonSelfAttention, self).__init__()
self.config = config
data_type = self.config.dtype
data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype
        assert data_type_fp == torch.half, "triton attention only supports fp16"
self.config.layer_id = TritonSelfAttention.num_layers
TritonSelfAttention.num_layers = TritonSelfAttention.num_layers + 1
device = get_accelerator().current_device_name() #if config.bigscience_bloom else 'cpu'
        assert config.mp_size == 1, "triton attention does not support mp_size > 1 yet"
if self.config.set_empty_params:
self.attn_qw = None
self.attn_qb = None
self.attn_kw = None
self.attn_kb = None
self.attn_vw = None
self.attn_vb = None
self.attn_qkvw = None
self.attn_qkvb = None
self.attn_ow = None
self.attn_ob = None
else:
qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3
self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size,
qkv_size_per_partition,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, dtype=data_type_fp, device=device),
requires_grad=False)
            # self-output weights
out_size_per_partition = self.config.hidden_size // self.config.mp_size
self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size
self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size
self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads
self.mp_group = mp_group
self.use_flash = False
# used for quantization
self.q_scales = q_scales
self.q_groups = q_groups
self.merge_count = int(math.log2(merge_count))
self.norm_factor = math.sqrt(self.config.hidden_size // self.config.heads)
if not config.use_mup:
self.norm_factor = math.sqrt(self.norm_factor)
if self.config.scale_attn_by_inverse_layer_idx is True:
self.norm_factor *= math.sqrt(self.config.layer_id + 1)
# https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191
triton_autotune = self.config.triton_autotune and self.config.layer_id == 0
self.qkv_func = QKVGemmOp(config)
self.score_context_func = SoftmaxContextOp(config)
self.linear_func = LinearOp(config)
self.vector_matmul_func = VectorMatMulOp(config)
self.hidden_size = config.hidden_size
self.head_size = config.hidden_size // config.heads
self.scale = (1 / self.norm_factor / self.norm_factor if self.config.scale_attention else 1.0
) # making it back to 1/sqrt(head_size)
self.triangular_masking = self.config.triangular_masking
# triton autotune table update for score/context matmul
if triton_autotune:
print(f"running triton autotune for attention")
__class__._triton_autotune(2, self.config.max_out_tokens, self.head_size, self.config.hidden_size,
self.triangular_masking, self.scale)
@staticmethod
def _triton_autotune(min_seqlen,
max_seqlen,
head_size,
hidden_size,
triangular_masking,
scale,
dtype=torch.float16):
from deepspeed.ops.transformer.inference.triton.matmul_ext import Fp16Matmul, score_4d_matmul, context_4d_matmul
seqlen = [(min_seqlen + i)
for i in range(0, max_seqlen - min_seqlen + Fp16Matmul._cache_stride + 1, Fp16Matmul._cache_stride)]
Fp16Matmul._read_autotune_table()
for N in seqlen:
qkv = torch.randn((1, N, 3 * hidden_size), dtype=dtype, device='cuda')
output = score_4d_matmul(qkv, head_size, triangular_masking, scale)
context_4d_matmul(output, qkv, head_size)
Fp16Matmul._update_autotune_table()
def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi):
if isinstance(qkv_out, list):
qkv_out = qkv_out[0]
no_masking = input_mask is None
if no_masking:
input_mask = torch.empty(1)
attn_key_value = self.score_context_func(
query_key_value=qkv_out,
attn_mask=((1 - input_mask).to(qkv_out.dtype) *
minus_inf) if input_mask.dtype == torch.int64 else input_mask,
heads=self.num_attention_heads_per_partition,
norm_factor=(1 / self.norm_factor if self.config.scale_attention else 1.0),
no_masking=no_masking,
layer_id=self.config.layer_id,
num_layers=TritonSelfAttention.num_layers,
alibi=alibi)
context_layer, key_layer, value_layer = attn_key_value
return context_layer, key_layer, value_layer
def forward(
self,
input,
input_mask,
head_mask=None,
layer_past=None,
get_present=False, # not used
encoder_hidden_states=None, # not used
encoder_attention_mask=None, # not used
            output_attentions=False,  # not used
norm_w=None,
norm_b=None,
alibi=None,
use_triton_attention=True):
if not self.config.pre_layer_norm:
qkv_out = self.linear_func(input=input,
weight=self.attn_qkvw,
bias=self.attn_qkvb,
add_bias=self.attn_qkvb is not None,
do_flash_attn=False,
num_heads=self.num_attention_heads_per_partition,
num_layers=TritonSelfAttention.num_layers)
qkv = qkv_out
else:
qkv_out = self.qkv_func(input=input,
weight=self.attn_qkvw,
bias=(self.attn_qkvb if self.attn_qkvb is not None else norm_b),
gamma=norm_w,
beta=norm_b)
qkv = qkv_out[0]
if use_triton_attention and (alibi is None):
context_layer = compute_attention(qkv=qkv,
input_mask=input_mask,
scale=self.scale,
layer_past=layer_past,
alibi=alibi,
head_size=self.head_size,
use_triton_flash=self.use_flash,
use_cuda_flash=False,
triangular=self.triangular_masking)
key_layer, value_layer = qkv[:, :, self.hidden_size:2 * self.hidden_size], qkv[:, :, 2 * self.hidden_size:]
else:
context_layer, key_layer, value_layer = self.ds_compute_attention(qkv_out=qkv_out,
input_mask=input_mask,
layer_past=layer_past,
alibi=alibi)
output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow)
inp_norm = qkv_out[-1]
if self.config.mlp_after_attn and self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1:
dist.all_reduce(output, group=self.mp_group)
return (output, key_layer, value_layer, context_layer, inp_norm)
global inference_module
def compute_attention(qkv,
input_mask,
layer_past,
alibi,
scale,
head_size,
triangular=False,
use_cuda_flash=False,
use_triton_flash=False,
use_ds_attention=False):
if isinstance(qkv, list):
qkv = qkv[0]
#assert layer_past is None, "layer_past not supported in triton yet"
    assert alibi is None, "alibi is not supported in the triton attention path yet"
output = score_4d_matmul(qkv, head_size, triangular, scale)
if triangular:
output = softmax(output)
else:
output = softmax(output, input_mask)
output = context_4d_matmul(output, qkv, head_size)
return output
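

# Illustrative sketch (not part of the upstream module): `compute_attention`
# consumes a fused QKV activation of shape (batch, seqlen, 3 * hidden_size),
# mirroring the tensors built in `_triton_autotune` above. The sizes, dtype and
# scale below are assumptions chosen purely for demonstration.
def _example_compute_attention():
    batch, seqlen, hidden_size, heads = 1, 128, 1024, 16
    head_size = hidden_size // heads
    qkv = torch.randn(batch, seqlen, 3 * hidden_size, dtype=torch.float16, device='cuda')
    return compute_attention(qkv,
                             input_mask=None,
                             layer_past=None,
                             alibi=None,
                             scale=1.0 / math.sqrt(head_size),
                             head_size=head_size,
                             triangular=True)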
| 10,335 | 43.93913 | 121 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/lamb/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .fused_lamb import FusedLamb
| 130 | 17.714286 | 38 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/ops/lamb/fused_lamb.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Copyright NVIDIA/apex
This file is adapted from NVIDIA/apex/optimizer/fused_adam and implements the LAMB optimizer
"""
import types
import torch
from deepspeed.ops.op_builder import FusedLambBuilder
class FusedLamb(torch.optim.Optimizer):
"""Implements the LAMB algorithm. Currently GPU-only.
    LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`
    (https://arxiv.org/abs/1904.00962).
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
bias_correction (bool, optional): bias correction (default: True)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
max_grad_norm (float, optional): value used to clip global grad norm
(default: 0.0)
max_coeff(float, optional): maximum value of the lamb coefficient (default: 10.0)
min_coeff(float, optional): minimum value of the lamb coefficient (default: 0.01)
amsgrad (boolean, optional): NOT SUPPORTED in FusedLamb!
"""
def __init__(self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.,
max_grad_norm=0.,
max_coeff=10.0,
min_coeff=0.01,
amsgrad=False):
self.fused_lamb_cuda = FusedLambBuilder().load()
if amsgrad:
raise RuntimeError('FusedLamb does not support the AMSGrad variant.')
defaults = dict(lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
max_grad_norm=max_grad_norm,
max_coeff=max_coeff,
min_coeff=min_coeff)
super(FusedLamb, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
self.lamb_coeffs = []
def step(self, closure=None, grads=None, output_params=None, scale=1., grad_norms=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
            output_params (list of tensors, optional): A reduced precision copy
of the updated weights written out in addition to the regular
updated weights. Have to be of same type as gradients. (default: None)
scale (float, optional): factor to divide gradient tensor values
by before applying to weights. (default: 1)
"""
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None] * len(self.param_groups)
# backward compatibility
# assuming a list/generator of parameter means single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
if output_params is None:
output_params_group = [None] * len(self.param_groups)
elif isinstance(output_params, types.GeneratorType):
output_params_group = [output_params]
elif type(output_params[0]) != list:
output_params_group = [output_params]
else:
output_params_group = output_params
if grad_norms is None:
grad_norms = [None] * len(self.param_groups)
#remove the previous coeffs
del self.lamb_coeffs[:]
for group, grads_this_group, output_params_this_group, grad_norm_group in zip(
self.param_groups, grads_group, output_params_group, grad_norms):
if grads_this_group is None:
grads_this_group = [None] * len(group['params'])
if output_params_this_group is None:
output_params_this_group = [None] * len(group['params'])
if grad_norm_group is None:
grad_norm_group = [None] * len(group['params'])
elif not isinstance(grad_norm_group, list):
grad_norm_group = [grad_norm_group]
bias_correction = 1 if group['bias_correction'] else 0
for p, grad, output_param, grad_norm in zip(group['params'], grads_this_group, output_params_this_group,
grad_norm_group):
# compute combined scale factor for this group
combined_scale = scale
if group['max_grad_norm'] > 0:
# norm is in fact norm*scale
clip = ((grad_norm / scale) + 1e-6) / group['max_grad_norm']
if clip > 1:
combined_scale = clip * scale
                # note: for correct operation of the mixed-precision optimizer, which sometimes sends None gradients, p.grad should never be set
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('FusedLamb does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
max_coeff = group['max_coeff']
min_coeff = group['min_coeff']
state['step'] += 1
out_p = torch.tensor([], dtype=torch.float) if output_param is None else output_param
lamb_coeff = self.fused_lamb_cuda.lamb(p.data, out_p, exp_avg, exp_avg_sq, grad, group['lr'], beta1,
beta2, max_coeff, min_coeff, group['eps'], combined_scale,
state['step'], self.eps_mode, bias_correction,
group['weight_decay'])
self.lamb_coeffs.append(lamb_coeff)
return loss
def get_lamb_coeffs(self):
lamb_coeffs = [lamb_coeff.item() for lamb_coeff in self.lamb_coeffs]
return lamb_coeffs
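

# Illustrative sketch (not part of the upstream module): standalone use of the
# optimizer with fp32 parameters; DeepSpeed normally constructs it from the
# engine config rather than by hand. The model, data and hyperparameters below
# are assumptions chosen purely for demonstration.
def _example_fused_lamb_usage():
    import torch.nn as nn
    model = nn.Linear(1024, 1024).cuda()
    optimizer = FusedLamb(model.parameters(), lr=1e-3, weight_decay=0.01)
    loss = model(torch.randn(8, 1024, device='cuda')).sum()
    loss.backward()
    optimizer.step()
    # per-parameter trust-ratio coefficients computed during the last step
    return optimizer.get_lamb_coeffs()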
| 7,815 | 43.662857 | 139 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .transformers.ds_transformer import DeepSpeedTransformerInference
from .transformers.clip_encoder import DSClipEncoder
| 220 | 26.625 | 70 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/features/cuda_graph.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import ABC, abstractmethod
class CUDAGraph(ABC):
def __init__(self, enable_cuda_graph=False):
super().__init__()
self.enable_cuda_graph = enable_cuda_graph
@abstractmethod
def _create_cuda_graph(self):
"""
Create CUDA graph(s)
"""
raise NotImplementedError
@abstractmethod
def _graph_replay(self):
"""
Replay CUDA graph(s)
"""
raise NotImplementedError
| 563 | 19.142857 | 50 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/features/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
| 140 | 22.5 | 44 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/diffusers/vae.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..features.cuda_graph import CUDAGraph
class DSVAE(CUDAGraph, torch.nn.Module):
def __init__(self, vae, enable_cuda_graph=True):
super().__init__(enable_cuda_graph=enable_cuda_graph)
self.vae = vae
self.config = vae.config
self.device = self.vae.device
self.dtype = self.vae.dtype
self.vae.requires_grad_(requires_grad=False)
self.decoder_cuda_graph_created = False
self.encoder_cuda_graph_created = False
self.all_cuda_graph_created = False
def _graph_replay_decoder(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_decoder_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_decoder_kwargs[k].copy_(kwargs[k])
self._decoder_cuda_graph.replay()
return self.static_decoder_output
def _decode(self, x, return_dict=True):
return self.vae.decode(x, return_dict=return_dict)
def _create_cuda_graph_decoder(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._decode(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._decoder_cuda_graph = torch.cuda.CUDAGraph()
self.static_decoder_inputs = inputs
self.static_decoder_kwargs = kwargs
with torch.cuda.graph(self._decoder_cuda_graph):
self.static_decoder_output = self._decode(*self.static_decoder_inputs, **self.static_decoder_kwargs)
self.decoder_cuda_graph_created = True
def decode(self, *inputs, **kwargs):
if self.enable_cuda_graph:
if self.decoder_cuda_graph_created:
outputs = self._graph_replay_decoder(*inputs, **kwargs)
else:
self._create_cuda_graph_decoder(*inputs, **kwargs)
outputs = self._graph_replay_decoder(*inputs, **kwargs)
return outputs
else:
return self._decode(*inputs, **kwargs)
def _graph_replay_encoder(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_encoder_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_encoder_kwargs[k].copy_(kwargs[k])
self._encoder_cuda_graph.replay()
return self.static_encoder_output
def _encode(self, x, return_dict=True):
return self.vae.encode(x, return_dict=return_dict)
def _create_cuda_graph_encoder(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._encode(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._encoder_cuda_graph = torch.cuda.CUDAGraph()
self.static_encoder_inputs = inputs
self.static_encoder_kwargs = kwargs
with torch.cuda.graph(self._encoder_cuda_graph):
self.static_encoder_output = self._encode(*self.static_encoder_inputs, **self.static_encoder_kwargs)
self.encoder_cuda_graph_created = True
def encode(self, *inputs, **kwargs):
if self.enable_cuda_graph:
if self.encoder_cuda_graph_created:
outputs = self._graph_replay_encoder(*inputs, **kwargs)
else:
self._create_cuda_graph_encoder(*inputs, **kwargs)
outputs = self._graph_replay_encoder(*inputs, **kwargs)
return outputs
else:
return self._encode(*inputs, **kwargs)
def _graph_replay(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_kwargs[k].copy_(kwargs[k])
self._all_cuda_graph.replay()
return self.static_output
def forward(self, *inputs, **kwargs):
if self.enable_cuda_graph:
            if self.all_cuda_graph_created:
outputs = self._graph_replay(*inputs, **kwargs)
else:
self._create_cuda_graph(*inputs, **kwargs)
outputs = self._graph_replay(*inputs, **kwargs)
return outputs
else:
return self._forward(*inputs, **kwargs)
def _create_cuda_graph(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._forward(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._all_cuda_graph = torch.cuda.CUDAGraph()
self.static_inputs = inputs
self.static_kwargs = kwargs
with torch.cuda.graph(self._all_cuda_graph):
self.static_output = self._forward(*self.static_inputs, **self.static_kwargs)
self.all_cuda_graph_created = True
def _forward(self, sample, timestamp, encoder_hidden_states, return_dict=True):
return self.vae(sample, timestamp, encoder_hidden_states, return_dict)
| 6,025 | 38.907285 | 112 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/diffusers/unet.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..features.cuda_graph import CUDAGraph
class DSUNet(CUDAGraph, torch.nn.Module):
def __init__(self, unet, enable_cuda_graph=True):
super().__init__(enable_cuda_graph=enable_cuda_graph)
self.unet = unet
# SD pipeline accesses this attribute
self.in_channels = unet.in_channels
self.device = self.unet.device
self.dtype = self.unet.dtype
self.config = self.unet.config
self.fwd_count = 0
self.unet.requires_grad_(requires_grad=False)
self.unet.to(memory_format=torch.channels_last)
self.cuda_graph_created = False
def _graph_replay(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_kwargs[k].copy_(kwargs[k])
self._cuda_graphs.replay()
return self.static_output
def forward(self, *inputs, **kwargs):
if self.enable_cuda_graph:
if self.cuda_graph_created:
outputs = self._graph_replay(*inputs, **kwargs)
else:
self._create_cuda_graph(*inputs, **kwargs)
outputs = self._graph_replay(*inputs, **kwargs)
return outputs
else:
return self._forward(*inputs, **kwargs)
def _create_cuda_graph(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._forward(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._cuda_graphs = torch.cuda.CUDAGraph()
self.static_inputs = inputs
self.static_kwargs = kwargs
with torch.cuda.graph(self._cuda_graphs):
self.static_output = self._forward(*self.static_inputs, **self.static_kwargs)
self.cuda_graph_created = True
def _forward(self, sample, timestamp, encoder_hidden_states, return_dict=True, cross_attention_kwargs=None):
if cross_attention_kwargs:
return self.unet(sample,
timestamp,
encoder_hidden_states,
return_dict,
cross_attention_kwargs=cross_attention_kwargs)
else:
return self.unet(sample, timestamp, encoder_hidden_states, return_dict)
| 2,792 | 36.743243 | 112 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/diffusers/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
| 140 | 22.5 | 44 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/transformers/ds_transformer.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import torch.nn as nn
from deepspeed import comm as dist
from deepspeed.utils.logging import log_dist
from deepspeed.ops.transformer.inference.ds_mlp import DeepSpeedMLP
from deepspeed.ops.transformer.inference.ds_attention import DeepSpeedSelfAttention, BloomSelfAttention
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
import deepspeed
if deepspeed.HAS_TRITON:
from deepspeed.ops.transformer.inference.triton.mlp import TritonMLP
from deepspeed.ops.transformer.inference.triton.attention import TritonSelfAttention
inference_module = None
class DeepSpeedTransformerInference(nn.Module):
"""Initialize the DeepSpeed Transformer Layer.
Arguments:
layer_id: The layer index starting from 0, e.g. if model has 24 transformer layers,
layer_id will be 0,1,2...23 when each layer object is instantiated
config: An object of DeepSpeedInferenceConfig
mp_group: Model parallelism group initialized on the modeling side.
quantize_scales: This argument groups all the layers' scales used for quantization
quantize_groups: Number of groups used for quantizing the model
merge_count: Shows the number of model-parallel checkpoints merged before running inference.
We use this argument to control the quantization scale for the model parameters if a bigger
quantize-grouping than 1 is used.
mlp_extra_grouping: This flag is used to show a 2x higher number of groups used for the MLP part
of a Transformer layer. We use this feature for quantization to reduce the convergence impact
for specific downstream tasks.
"""
layer_id = 0
def __init__(self,
config,
mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super(DeepSpeedTransformerInference, self).__init__()
self.config = config
self.config.layer_id = DeepSpeedTransformerInference.layer_id
DeepSpeedTransformerInference.layer_id += 1
data_type = torch.half if self.config.dtype == torch.int8 else self.config.dtype
global inference_module
if inference_module is None:
builder = InferenceBuilder()
inference_module = builder.load()
if DeepSpeedTransformerInference.layer_id == 1:
log_dist(f"DeepSpeed-Inference config: {self.config.__dict__}", [0])
if deepspeed.HAS_TRITON and self.config.use_triton:
log_dist(f"Injecting Triton kernels ...", [0])
if self.config.bigscience_bloom:
self.attention = BloomSelfAttention(self.config, mp_group, quantize_scales, quantize_groups, merge_count)
assert not self.config.use_triton
else:
if deepspeed.HAS_TRITON and self.config.use_triton:
self.attention = TritonSelfAttention(self.config)
else:
self.attention = DeepSpeedSelfAttention(self.config, mp_group, quantize_scales, quantize_groups,
merge_count)
if deepspeed.HAS_TRITON and self.config.use_triton:
self.mlp = TritonMLP(self.config)
else:
self.mlp = DeepSpeedMLP(self.config, mp_group, quantize_scales, quantize_groups, merge_count,
mlp_extra_grouping)
device = get_accelerator().current_device_name() # if config.bigscience_bloom else 'cpu'
if self.config.set_empty_params:
self.norm_w = None
self.norm_b = None
else:
self.norm_w = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device),
requires_grad=False)
self.norm_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device),
requires_grad=False)
self.layer_past = None
try:
if config.dtype == torch.float32:
self.allocate_workspace = inference_module.allocate_workspace_fp32
elif config.dtype == torch.bfloat16:
self.allocate_workspace = inference_module.allocate_workspace_bf16
else:
                self.allocate_workspace = inference_module.allocate_workspace_fp16
self._alloc_workspace = True
except AttributeError:
self.allocate_workspace = None
self._alloc_workspace = False
@classmethod
def reset_cache(cls):
if inference_module is not None:
inference_module.reset_cache()
def forward(
self,
input=None,
input_mask=None,
attention_mask=None,
attn_mask=None,
head_mask=None,
layer_past=None,
get_key_value=False,
get_present=False,
encoder_output=None,
enc_dec_attn_mask=None,
x=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
alibi=None,
output_attentions=False,
# TODO(arashb): 'layer_head_mask' and 'past_key_value' are only added to satisfy the OPT models API.
# This needs to be redesigned later!
layer_head_mask=None,
past_key_value=None,
**kwargs):
if x is not None:
input = x
if "hidden_states" in kwargs:
input = kwargs["hidden_states"]
input_mask = (input_mask if attn_mask is None else attn_mask) if attention_mask is None else attention_mask
# Allocate memory only on first layer forward
if self.config.layer_id == 0 and self._alloc_workspace:
self.allocate_workspace(self.config.hidden_size, self.config.heads,
input.size()[1],
input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size,
self.config.bigscience_bloom,
dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens,
self.config.min_out_tokens)
self._alloc_workspace = False
get_present = (get_present or get_key_value or use_cache)
input_mask = input_mask if attention_mask is None else attention_mask
# We set the prev key/value to None when there is a prompt
if input.shape[1] > 1:
self.layer_past = None
layer_past = layer_past if layer_past is not None else self.layer_past
head_mask = layer_head_mask if layer_head_mask is not None else head_mask
attn_mask = None
if isinstance(input, tuple):
attn_mask = input[1]
input = input[0]
input_type = input.dtype
if (self.config.dtype in [torch.float16, torch.bfloat16, torch.int8]) \
and input.dtype == torch.float:
            target_dtype = torch.half if self.config.dtype == torch.int8 else self.config.dtype
input = input.to(target_dtype)
with torch.no_grad():
attention_output, key, value, context_outputtn_ctx, inp_norm = \
self.attention(input,
input_mask,
head_mask,
layer_past,
get_present,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
self.norm_w,
self.norm_b,
alibi)
presents = (key, value)
self.layer_past = presents if layer_past is None else None
output = self.mlp(attention_output, input, inp_norm, self.attention.attn_ob)
if not self.config.pre_layer_norm:
output = inference_module.layer_norm(output, self.norm_w, self.norm_b, self.config.epsilon)
output = output.to(input_type)
if get_present:
output = (output, presents)
if self.config.return_single_tuple:
return (output, )
elif self.config.return_tuple:
return output if type(output) is tuple else (output, attn_mask)
else:
return output
| 8,909 | 43.55 | 117 |
py
|
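The forward pass above drops the cached key/values whenever the incoming sequence length exceeds 1 (a prompt) and reuses self.layer_past for single-token decode steps. A minimal sketch of that cache policy, with placeholder projections standing in for the real attention call:

import torch

class CachePolicyDemo:
    def __init__(self):
        self.layer_past = None  # (key, value) from the previous step

    def forward(self, hidden_states, layer_past=None):
        # A prompt (seq_len > 1) invalidates any cached key/values.
        if hidden_states.shape[1] > 1:
            self.layer_past = None
        layer_past = layer_past if layer_past is not None else self.layer_past
        key = value = hidden_states  # placeholder for the real k/v projections
        # Mirror the original assignment: cache the new present only when the
        # caller did not pass an explicit layer_past.
        self.layer_past = (key, value) if layer_past is None else None
        return hidden_states

demo = CachePolicyDemo()
demo.forward(torch.randn(1, 8, 16))  # seq_len=8 prompt: resets the cache
demo.forward(torch.randn(1, 1, 16))  # seq_len=1 decode: reuses the cache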
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/transformers/clip_encoder.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.accelerator import get_accelerator
from ..features.cuda_graph import CUDAGraph
class DSClipEncoder(CUDAGraph, torch.nn.Module):
def __init__(self, enc, enable_cuda_graph=False):
super().__init__(enable_cuda_graph=enable_cuda_graph)
enc.text_model._build_causal_attention_mask = self._build_causal_attention_mask
self.enc = enc
self.device = self.enc.device
self.dtype = self.enc.dtype
self.cuda_graph_created = [False, False]
self.static_inputs = [None, None]
self.static_kwargs = [None, None]
self.static_output = [None, None]
self._cuda_graphs = [None, None]
self.iter = 0
self.config = self.enc.config
def _build_causal_attention_mask(self, bsz, seq_len, dtype):
mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype, device=get_accelerator().current_device_name())
mask.fill_(torch.tensor(torch.finfo(dtype).min))
mask.triu_(1)
mask = mask.unsqueeze(1)
return mask
def _graph_replay(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_inputs[self.iter][i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_kwargs[self.iter][k].copy_(kwargs[k])
self._cuda_graphs[self.iter].replay()
return self.static_output[self.iter]
def forward(self, *inputs, **kwargs):
if self.enable_cuda_graph:
if self.cuda_graph_created[self.iter]:
outputs = self._graph_replay(*inputs, **kwargs)
else:
self._create_cuda_graph(*inputs, **kwargs)
outputs = self._graph_replay(*inputs, **kwargs)
self.iter = (self.iter + 1) % 2
return outputs
else:
return self.enc(*inputs, **kwargs)
def _create_cuda_graph(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._forward(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._cuda_graphs[self.iter] = torch.cuda.CUDAGraph()
self.static_inputs[self.iter] = inputs
self.static_kwargs[self.iter] = kwargs
with torch.cuda.graph(self._cuda_graphs[self.iter]):
self.static_output[self.iter] = self._forward(*self.static_inputs[self.iter],
**self.static_kwargs[self.iter])
self.cuda_graph_created[self.iter] = True
def _forward(self, *inputs, **kwargs):
return self.enc(*inputs, **kwargs)
| 3,045 | 38.051282 | 110 |
py
|
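A minimal, self-contained sketch of the capture/replay pattern implemented by _create_cuda_graph above (warmup on a side stream, capture into torch.cuda.CUDAGraph, then replay with copied-in static inputs); it assumes a CUDA device is available:

import torch

def make_graphed_matmul(weight):
    static_in = torch.zeros(4, weight.shape[0], device='cuda')
    # Warm up on a side stream so lazily-created handles and workspaces
    # (e.g. cuBLAS) exist before capture begins.
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        for _ in range(3):
            static_in @ weight
    torch.cuda.current_stream().wait_stream(s)
    # Capture one forward into a graph; inputs/outputs become static buffers.
    g = torch.cuda.CUDAGraph()
    with torch.cuda.graph(g):
        static_out = static_in @ weight

    def run(x):
        static_in.copy_(x)  # refresh the captured input buffer
        g.replay()          # re-execute the recorded kernels
        return static_out

    return run

run = make_graphed_matmul(torch.randn(8, 8, device='cuda'))
y = run(torch.randn(4, 8, device='cuda'))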
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/transformers/ds_base.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch.nn as nn
class DeepSpeedTransformerBase(nn.Module):
def __init__(self):
pass
# this would be the new clean base class that will replace DeepSpeedTransformerInference.
# we currently don't know what it will look like, but we keep it here as a placeholder.
| 388 | 23.3125 | 93 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/transformers/ds_megatron_gpt.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
class DeepSpeedMegatronGPTInference(DeepSpeedTransformerInference):
"""Initialize the DeepSpeed Megatron GPT Transformer Layer.
"""
def __init__(self,
config,
mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
| 682 | 31.52381 | 109 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/transformers/ds_bloom.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
class DeepSpeedBloomInference(DeepSpeedTransformerInference):
"""Initialize the DeepSpeed Bloom Transformer Layer.
"""
def __init__(self,
config,
mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
| 669 | 30.904762 | 109 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/transformers/ds_gpt.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
class DeepSpeedGPTInference(DeepSpeedTransformerInference):
"""Initialize the DeepSpeed GPT Transformer Layer.
"""
def __init__(self,
config,
mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
| 665 | 30.714286 | 109 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/transformers/ds_bert.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
class DeepSpeedBERTInference(DeepSpeedTransformerInference):
"""Initialize the DeepSpeed BERT Transformer Layer.
"""
def __init__(self,
config,
mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
| 667 | 30.809524 | 109 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/transformers/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
| 140 | 22.5 | 44 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/model_implementations/transformers/ds_opt.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
class DeepSpeedOPTInference(DeepSpeedTransformerInference):
"""Initialize the DeepSpeed OPT Transformer Layer.
"""
def __init__(self,
config,
mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
| 665 | 30.714286 | 109 |
py
|
DeepSpeed
|
DeepSpeed-master/deepspeed/nebula/constants.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#########################################
# nebula
#########################################
# Nebula. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
NEBULA_FORMAT = '''
nebula should be enabled as:
"session_params": {
"nebula": {
"enabled": true,
"persistent_storage_path": "/foo/bar",
"persistent_time_interval": 100,
"num_of_version_in_retention": 2,
"enable_nebula_load": true
}
}
'''
NEBULA = "nebula"
NEBULA_ENABLED = "enabled"
NEBULA_ENABLED_DEFAULT = False
# There are cases where a customer wants to load a checkpoint saved by
# raw torch. Nebula cannot load a torch checkpoint directly because the
# two use different folder structures, even though the checkpoint data
# itself is byte-identical between torch and nebula saving.
# In such cases, nebula load must be disabled so that raw torch load is
# used instead. The customer can simply set NEBULA_ENABLE_NEBULA_LOAD to
# False and then load the usual DeepSpeed way, i.e. via the "--load" value.
NEBULA_ENABLE_NEBULA_LOAD = "enable_nebula_load"
NEBULA_ENABLE_NEBULA_LOAD_DEFAULT = True
# When you want to resume from a previous checkpoint saved by nebula,
# you can set NEBULA_LOAD_PATH to the parent folder of the checkpoint.
# If NEBULA_LOAD_PATH is None, NEBULA_PERSISTENT_STORAGE_PATH is used
# as the default load path.
NEBULA_LOAD_PATH = "nebula_load_path"
NEBULA_LOAD_PATH_DEFAULT = None
# Nebula saves checkpoints under NEBULA_PERSISTENT_STORAGE_PATH
# asynchronously.
NEBULA_PERSISTENT_STORAGE_PATH = "persistent_storage_path"
NEBULA_PERSISTENT_STORAGE_PATH_DEFAULT = None
# Time interval to trigger the nebula persistence.
NEBULA_PERSISTENT_TIME_INTERVAL = "persistent_time_interval"
NEBULA_PERSISTENT_TIME_INTERVAL_DEFAULT = 100
# Number of checkpoint versions kept in memory. For example, with a
# value of 2, once checkpoints 1 and 2 are ready and checkpoint 3
# arrives, checkpoint 1 is removed from memory provided it has been
# persisted to disk.
NEBULA_NUM_OF_VERSION_IN_RETENTION = "num_of_version_in_retention"
NEBULA_NUM_OF_VERSION_IN_RETENTION_DEFAULT = 2
# Nebula envs
NEBULA_EXPORT_ENVS = [
'DLTS_JOB_ID', 'DLTS_NUM_WORKER', 'NEBULA_PERSISTENT_STORAGE_PATH', 'NEBULA_PERSISTENT_TIME_INTERVAL',
'AML_RUN_ID', 'AZUREML_RUN_TOKEN', 'AZUREML_WORKSPACE_SCOPE', 'AZUREML_EXPERIMENT_SCOPE',
'AZUREML_RUN_HISTORY_SERVICE_ENDPOINT', 'AZUREML_RUN_ID', 'NEBULA_MEMORY_BUFFER_SIZE',
'AZUREML_PARAMETER_ITPJOB_NAME', 'FC_TASKROLE_NAME', 'FC_TASK_INDEX', 'MASTER_HOST', 'LOCAL_HOST',
'AZUREML_BLOB_ACCOUNT_NAME', 'AZUREML_BLOB_ACCOUNT_KEY'
]
# ITP env files
DLTS_POD_ENV_PATH = '/dlts-runtime/env/pod.env'
| 2,786 | 36.662162 | 106 |
py
|
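A hedged illustration of a ds_config fragment matching NEBULA_FORMAT above, written as a Python dict with illustrative values; enable_nebula_load is set to False here to show the raw-torch fallback described in the comments:

ds_config = {
    'session_params': {
        'nebula': {
            'enabled': True,
            'persistent_storage_path': '/foo/bar',
            'persistent_time_interval': 100,
            'num_of_version_in_retention': 2,
            # False -> load checkpoints saved by raw torch via '--load'.
            'enable_nebula_load': False,
        }
    }
}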
DeepSpeed
|
DeepSpeed-master/deepspeed/nebula/config.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.runtime.config_utils import get_scalar_param, DeepSpeedConfigObject
from deepspeed.nebula.constants import *
class DeepSpeedNebulaConfig(DeepSpeedConfigObject):
def __init__(self, param_dict):
super(DeepSpeedNebulaConfig, self).__init__()
self.enabled = None
self.persistent_storage_path = None
self.persistent_time_interval = None
self.num_of_version_in_retention = None
self.enable_nebula_load = None
if NEBULA in param_dict.keys():
nebula_dict = param_dict[NEBULA]
else:
nebula_dict = {}
self._initialize(nebula_dict)
def _initialize(self, nebula_dict):
self.enabled = get_scalar_param(nebula_dict, NEBULA_ENABLED, NEBULA_ENABLED_DEFAULT)
self.load_path = get_scalar_param(nebula_dict, NEBULA_LOAD_PATH, NEBULA_LOAD_PATH_DEFAULT)
self.enable_nebula_load = get_scalar_param(nebula_dict, NEBULA_ENABLE_NEBULA_LOAD,
NEBULA_ENABLE_NEBULA_LOAD_DEFAULT)
self.persistent_storage_path = get_scalar_param(nebula_dict, NEBULA_PERSISTENT_STORAGE_PATH,
NEBULA_PERSISTENT_STORAGE_PATH_DEFAULT)
self.persistent_time_interval = get_scalar_param(nebula_dict, NEBULA_PERSISTENT_TIME_INTERVAL,
NEBULA_PERSISTENT_TIME_INTERVAL_DEFAULT)
self.num_of_version_in_retention = get_scalar_param(nebula_dict, NEBULA_NUM_OF_VERSION_IN_RETENTION,
NEBULA_NUM_OF_VERSION_IN_RETENTION_DEFAULT)
| 1,764 | 39.113636 | 108 |
py
|
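A small usage sketch of the config class above, assuming deepspeed is importable and that get_scalar_param performs a plain keyed lookup with a default:

from deepspeed.nebula.config import DeepSpeedNebulaConfig

param_dict = {'nebula': {'enabled': True, 'persistent_time_interval': 50}}
cfg = DeepSpeedNebulaConfig(param_dict)
assert cfg.enabled is True
assert cfg.persistent_time_interval == 50
# Absent keys fall back to their *_DEFAULT constants.
assert cfg.num_of_version_in_retention == 2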
DeepSpeed
|
DeepSpeed-master/deepspeed/nebula/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
| 140 | 22.5 | 44 |
py
|
DeepSpeed
|
DeepSpeed-master/accelerator/mps_accelerator.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from .abstract_accelerator import DeepSpeedAccelerator
# During the setup stage torch may not be installed; passing on a missing
# torch allows the op builder related APIs to be executed.
try:
import torch.mps
except ImportError:
pass
class MPS_Accelerator(DeepSpeedAccelerator):
def __init__(self):
self._name = "mps"
self._communication_backend_name = None
def is_synchronized_device(self):
return False
# Device APIs
def device_name(self, device_index=None):
        if device_index is None:
return "mps"
return "mps:{}".format(device_index)
def device(self, device_index):
return torch.device("mps", index=0)
def set_device(self, device_index):
return
def current_device(self):
return torch.device("mps", index=0)
def current_device_name(self):
return "mps:0"
def device_count(self):
return 1
def synchronize(self, device_index=None):
return torch.mps.synchronize()
# RNG APIs
def random(self):
return torch.random
def set_rng_state(self, new_state, device_index=None):
return torch.mps.set_rng_state(new_state)
def get_rng_state(self, device_index=None):
return torch.mps.get_rng_state()
def manual_seed(self, seed):
return torch.mps.manual_seed(seed)
def manual_seed_all(self, seed):
return torch.mps.manual_seed(seed)
def seed(self):
return torch.mps.seed()
def initial_seed(self, seed):
return
def default_generator(self, device_index):
return
# Streams/Events
@property
def Stream(self):
return None
def stream(self, stream):
return None
def current_stream(self, device_index=None):
return None
def default_stream(self, device_index=None):
return None
@property
def Event(self):
return None
# Memory management
def empty_cache(self):
return torch.mps.empty_cache()
def memory_allocated(self, device_index=None):
return torch.mps.current_allocated_memory()
def max_memory_allocated(self, device_index=None):
return torch.mps.driver_allocated_memory()
def set_per_process_memory_fraction(self, fraction):
return torch.mps.set_per_process_memory_fraction(fraction)
def reset_max_memory_allocated(self, device_index=None):
return
def memory_cached(self, device_index=None):
return
def max_memory_cached(self, device_index=None):
return
def reset_max_memory_cached(self, device_index=None):
return
def memory_stats(self, device_index=None):
return
def reset_peak_memory_stats(self, device_index=None):
return
def memory_reserved(self, device_index=None):
return
def max_memory_reserved(self, device_index=None):
return
def total_memory(self, device_index=None):
return
# Data types
def is_bf16_supported(self):
return False
def is_fp16_supported(self):
return False
# Misc
def amp(self):
return
def is_available(self):
return hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
def range_push(self, msg):
return
def range_pop(self):
return
def lazy_call(self, callback):
return
def communication_backend_name(self):
return self._communication_backend_name
# Tensor operations
@property
def BFloat16Tensor(self):
return
@property
def ByteTensor(self):
return
@property
def DoubleTensor(self):
return
@property
def FloatTensor(self):
return
@property
def HalfTensor(self):
return
@property
def IntTensor(self):
return
@property
def LongTensor(self):
return
def pin_memory(self, tensor):
return tensor.pin_memory()
def on_accelerator(self, tensor):
device_str = str(tensor.device)
if device_str.startswith("mps"):
return True
else:
return False
def op_builder_dir(self):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
return "op_builder"
except ImportError:
return "deepspeed.ops.op_builder"
# create an instance of op builder, specified by class_name
def create_op_builder(self, op_name):
builder_class = self.get_op_builder(op_name)
        if builder_class is not None:
return builder_class()
return None
# return an op builder class, specified by class_name
def get_op_builder(self, class_name):
from deepspeed.ops.op_builder.cpu import NotImplementedBuilder
return NotImplementedBuilder
def build_extension(self):
from torch.utils.cpp_extension import BuildExtension
return BuildExtension
| 5,266 | 22.30531 | 102 |
py
|
DeepSpeed
|
DeepSpeed-master/accelerator/real_accelerator.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
try:
# Importing logger currently requires that torch is installed, hence the try...except
# TODO: Remove logger dependency on torch.
from deepspeed.utils import logger as accel_logger
except ImportError as e:
accel_logger = None
try:
from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1
except ImportError as e:
dsa1 = None
try:
from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa2
except ImportError as e:
dsa2 = None
ds_accelerator = None
def _validate_accelerator(accel_obj):
    # abstract_accelerator lives at a different path at build time
    # (accelerator.abstract_accelerator) and at run time
    # (deepspeed.accelerator.abstract_accelerator), and an extension imports
    # the run-time abstract_accelerator/DeepSpeedAccelerator as its base
    # class, so we need to compare accel_obj against both base classes.
    # If accel_obj is an instance of DeepSpeedAccelerator from either
    # accelerator.abstract_accelerator or
    # deepspeed.accelerator.abstract_accelerator, consider it a conforming
    # object.
    if not ((dsa1 is not None and isinstance(accel_obj, dsa1)) or (dsa2 is not None and isinstance(accel_obj, dsa2))):
raise AssertionError(f"{accel_obj.__class__.__name__} accelerator is not subclass of DeepSpeedAccelerator")
# TODO: turn off is_available test since this breaks tests
# assert accel_obj.is_available(), \
# f'{accel_obj.__class__.__name__} accelerator fails is_available() test'
def get_accelerator():
global ds_accelerator
if ds_accelerator is not None:
return ds_accelerator
accelerator_name = None
ds_set_method = None
# 1. Detect whether there is override of DeepSpeed accelerators from environment variable.
# DS_ACCELERATOR = 'cuda'|'xpu'|'cpu'
if "DS_ACCELERATOR" in os.environ.keys():
accelerator_name = os.environ["DS_ACCELERATOR"]
if accelerator_name == "xpu":
try:
from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401
except ImportError as e:
raise ValueError(
f"XPU_Accelerator requires intel_extension_for_deepspeed, which is not installed on this system.")
elif accelerator_name == "cpu":
try:
import intel_extension_for_pytorch # noqa: F401
except ImportError as e:
raise ValueError(
f"CPU_Accelerator requires intel_extension_for_pytorch, which is not installed on this system.")
elif accelerator_name == "cuda":
pass
elif accelerator_name == "mps":
try:
import torch.mps
                # torch.mps.is_available() would be preferable if it existed; this call is used as a proxy
torch.mps.current_allocated_memory()
except (RuntimeError, ImportError) as e:
raise ValueError(f"MPS_Accelerator requires torch.mps, which is not installed on this system.")
else:
            raise ValueError(
                f'DS_ACCELERATOR must be one of "cuda", "cpu", "xpu", or "mps". Value "{accelerator_name}" is not supported')
ds_set_method = "override"
# 2. If no override, detect which accelerator to use automatically
    if accelerator_name is None:
try:
from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401,F811
accelerator_name = "xpu"
except ImportError as e:
# We need a way to choose between CUDA_Accelerator and CPU_Accelerator
# Currently we detect whether intel_extension_for_pytorch is installed
# in the environment and use CPU_Accelerator if the answer is True.
# An alternative might be detect whether CUDA device is installed on
# the system but this comes with two pitfalls:
# 1. the system may not have torch pre-installed, so
# get_accelerator().is_available() may not work.
# 2. Some scenario like install on login node (without CUDA device)
# and run on compute node (with CUDA device) may cause mismatch
# between installation time and runtime.
try:
import intel_extension_for_pytorch # noqa: F401,F811
accelerator_name = "cpu"
except ImportError as e:
try:
import torch.mps
                    # torch.mps.is_available() would be preferable if it existed; this call is used as a proxy
torch.mps.current_allocated_memory()
accelerator_name = "mps"
except (RuntimeError, ImportError) as e:
accelerator_name = "cuda"
ds_set_method = "auto detect"
# 3. Set ds_accelerator accordingly
if accelerator_name == "cuda":
from .cuda_accelerator import CUDA_Accelerator
ds_accelerator = CUDA_Accelerator()
elif accelerator_name == "cpu":
from .cpu_accelerator import CPU_Accelerator
ds_accelerator = CPU_Accelerator()
elif accelerator_name == "xpu":
# XPU_Accelerator is already imported in detection stage
ds_accelerator = XPU_Accelerator()
elif accelerator_name == "mps":
from .mps_accelerator import MPS_Accelerator
ds_accelerator = MPS_Accelerator()
_validate_accelerator(ds_accelerator)
if accel_logger is not None:
accel_logger.info(f"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})")
return ds_accelerator
def set_accelerator(accel_obj):
global ds_accelerator
_validate_accelerator(accel_obj)
if accel_logger is not None:
accel_logger.info(f"Setting ds_accelerator to {accel_obj._name} (model specified)")
ds_accelerator = accel_obj
"""
-----------[code] test_get.py -----------
from deepspeed.accelerator import get_accelerator
my_accelerator = get_accelerator()
logger.info(f'{my_accelerator._name=}')
logger.info(f'{my_accelerator._communication_backend=}')
logger.info(f'{my_accelerator.HalfTensor().device=}')
logger.info(f'{my_accelerator.total_memory()=}')
-----------[code] test_get.py -----------
---[output] python test_get.py---------
my_accelerator.name()='cuda'
my_accelerator.communication_backend='nccl'
my_accelerator.HalfTensor().device=device(type='cuda', index=0)
my_accelerator.total_memory()=34089730048
---[output] python test_get.py---------
**************************************************************************
-----------[code] test_set.py -----------
from deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator
cu_accel = CUDA_Accelerator()
logger.info(f'{id(cu_accel)=}')
from deepspeed.accelerator import set_accelerator, get_accelerator
set_accelerator(cu_accel)
my_accelerator = get_accelerator()
logger.info(f'{id(my_accelerator)=}')
logger.info(f'{my_accelerator._name=}')
logger.info(f'{my_accelerator._communication_backend=}')
logger.info(f'{my_accelerator.HalfTensor().device=}')
logger.info(f'{my_accelerator.total_memory()=}')
-----------[code] test_set.py -----------
---[output] python test_set.py---------
id(cu_accel)=139648165478304
my_accelerator=<deepspeed.accelerator.cuda_accelerator.CUDA_Accelerator object at 0x7f025f4bffa0>
my_accelerator.name='cuda'
my_accelerator.communication_backend='nccl'
my_accelerator.HalfTensor().device=device(type='cuda', index=0)
my_accelerator.total_memory()=34089730048
---[output] python test_set.py---------
"""
| 7,666 | 39.781915 | 119 |
py
|
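A minimal sketch of the environment override checked in step 1 of get_accelerator(); the variable must be set before the first call because the chosen accelerator is cached in the module-level ds_accelerator:

import os
os.environ['DS_ACCELERATOR'] = 'cuda'  # must precede the first get_accelerator()

from deepspeed.accelerator import get_accelerator
acc = get_accelerator()
print(acc.device_name(), acc.communication_backend_name())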
DeepSpeed
|
DeepSpeed-master/accelerator/cpu_accelerator.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator
import oneccl_bindings_for_pytorch # noqa: F401
import psutil
import os
# accelerator for Intel CPU
class CPU_Accelerator(DeepSpeedAccelerator):
def __init__(self):
self._name = 'cpu'
self._communication_backend_name = 'ccl'
self.max_mem = psutil.Process().memory_info().rss
def is_synchronized_device(self):
return True
# Device APIs
def device_name(self, device_index=None):
return 'cpu'
def device(self, device_index=None):
return None
def set_device(self, device_index):
return
def current_device(self):
return os.environ.get('LOCAL_RANK', 0)
def current_device_name(self):
return 'cpu'
def device_count(self):
device_count = int(os.environ.get('LOCAL_SIZE', 0))
if device_count > 0:
return device_count
else:
from deepspeed.utils.numa import get_numa_cores
            # Count NUMA nodes to determine the number of cpu accelerators.
            # On a machine with HBM in flat mode, the HBM sits in a separate
            # NUMA node that has no cores; ignore such core-less NUMA nodes.
numa_core_lists = get_numa_cores()
numa_count = 0
for core_list in numa_core_lists:
if len(core_list) > 0:
numa_count += 1
return numa_count
def synchronize(self, device_index=None):
return
# RNG APIs
def random(self):
return torch.random
def set_rng_state(self, new_state, device_index=None):
        if device_index is None:
return torch.set_rng_state(new_state)
return torch.set_rng_state(new_state, device_index)
def get_rng_state(self, device_index=None):
return torch.get_rng_state()
def manual_seed(self, seed):
return torch.manual_seed(seed)
def manual_seed_all(self, seed):
return torch.manual_seed(seed)
def initial_seed(self, seed):
return torch.initial_seed(seed)
def default_generator(self, device_index):
return torch.default_generator
# Streams/Events
@property
def Stream(self):
return None
def stream(self, stream):
from deepspeed.runtime.utils import noop_decorator
return noop_decorator
def current_stream(self, device_index=None):
return None
def default_stream(self, device_index=None):
return None
@property
def Event(self):
return None
# Memory management
def empty_cache(self):
return
def get_rss(self):
mem = psutil.Process().memory_info().rss
if mem > self.max_mem:
self.max_mem = mem
return mem
def reset_rss(self):
mem = psutil.Process().memory_info().rss
self.max_mem = mem
return mem
def memory_allocated(self, device_index=None):
return self.get_rss()
def max_memory_allocated(self, device_index=None):
self.get_rss()
return self.max_mem
def reset_max_memory_allocated(self, device_index=None):
self.reset_rss()
return
def memory_cached(self, device_index=None):
return self.get_rss()
def max_memory_cached(self, device_index=None):
self.get_rss()
return self.max_mem
def reset_max_memory_cached(self, device_index=None):
self.reset_rss()
return
def memory_stats(self, device_index=None):
return self.get_rss()
def reset_peak_memory_stats(self, device_index=None):
self.reset_rss()
return
def memory_reserved(self, device_index=None):
return self.get_rss()
def max_memory_reserved(self, device_index=None):
self.get_rss()
return self.max_mem
def total_memory(self, device_index=None):
return psutil.virtual_memory().total
# Misc
def amp(self):
return torch.cpu.amp
def is_available(self):
return True
def range_push(self, msg):
# TODO itt is currently not supported yet
# return torch.profiler.itt.range_push(msg)
return
def range_pop(self):
# TODO itt is currently not supported yet
# return torch.profiler.itt.range_pop()
return
def lazy_call(self, callback):
return callback()
def communication_backend_name(self):
return self._communication_backend_name
# Data types
def is_bf16_supported(self):
return True
def is_fp16_supported(self):
return True
# Tensor operations
@property
def BFloat16Tensor(self):
return torch.BFloat16Tensor
@property
def ByteTensor(self):
return torch.ByteTensor
@property
def DoubleTensor(self):
return torch.DoubleTensor
@property
def FloatTensor(self):
return torch.FloatTensor
@property
def HalfTensor(self):
return torch.HalfTensor
@property
def IntTensor(self):
return torch.IntTensor
@property
def LongTensor(self):
return torch.LongTensor
def pin_memory(self, tensor):
return tensor
def op_builder_dir(self):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
return "op_builder.cpu"
except ImportError:
return "deepspeed.ops.op_builder.cpu"
def on_accelerator(self, tensor):
device_str = str(tensor.device)
if device_str.startswith('cpu'):
return True
else:
return False
# create an instance of op builder and return, name specified by class_name
def create_op_builder(self, op_name):
builder_class = self.get_op_builder(op_name)
        if builder_class is not None:
return builder_class()
return None
# return an op builder class, name specified by class_name
def get_op_builder(self, class_name):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
from op_builder.cpu import CCLCommBuilder, NotImplementedBuilder
except ImportError:
from deepspeed.ops.op_builder.cpu import CCLCommBuilder, NotImplementedBuilder
if class_name == "CCLCommBuilder":
return CCLCommBuilder
else:
# return a NotImplementedBuilder to avoid get NoneType[Name] in unit tests
return NotImplementedBuilder
def build_extension(self):
from torch.utils.cpp_extension import BuildExtension
return BuildExtension
| 7,124 | 26.298851 | 102 |
py
|
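CPU_Accelerator above emulates peak-memory queries with psutil RSS plus a running high-water mark; the same idea in isolation (a sketch, not DeepSpeed API):

import psutil

class RssPeakTracker:
    def __init__(self):
        self.max_mem = psutil.Process().memory_info().rss

    def current(self):
        mem = psutil.Process().memory_info().rss
        self.max_mem = max(self.max_mem, mem)
        return mem

    def peak(self):
        self.current()  # refresh the high-water mark before reporting
        return self.max_mem

    def reset_peak(self):
        self.max_mem = psutil.Process().memory_info().rss

tracker = RssPeakTracker()
buf = bytearray(64 * 1024 * 1024)  # grow RSS by ~64 MiB
print(f'current={tracker.current()} peak={tracker.peak()}')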
DeepSpeed
|
DeepSpeed-master/accelerator/npu_accelerator.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .abstract_accelerator import DeepSpeedAccelerator
# During the setup stage torch may not be installed; passing on a missing
# torch allows the op builder related APIs to be executed.
try:
import torch.npu
except ImportError:
pass
class NPU_Accelerator(DeepSpeedAccelerator):
def __init__(self):
self._name = 'npu'
self._communication_backend_name = 'hccl'
def is_synchronized_device(self):
return False
# Device APIs
def device_name(self, device_index=None):
        if device_index is None:
return 'npu'
return 'npu:{}'.format(device_index)
def device(self, device_index=None):
return torch.npu.device(device_index)
def set_device(self, device_index):
torch.npu.set_device(device_index)
def current_device(self):
return torch.npu.current_device()
def current_device_name(self):
return 'npu:{}'.format(torch.npu.current_device())
def device_count(self):
return torch.npu.device_count()
def synchronize(self, device_index=None):
return torch.npu.synchronize(device_index)
# RNG APIs
def random(self):
return torch.random
def set_rng_state(self, new_state, device_index=None):
if device_index is None:
return torch.npu.set_rng_state(new_state)
return torch.npu.set_rng_state(new_state, device_index)
def get_rng_state(self, device_index=None):
if device_index is None:
return torch.npu.get_rng_state()
return torch.npu.get_rng_state(device_index)
def manual_seed(self, seed):
return torch.npu.manual_seed(seed)
def manual_seed_all(self, seed):
return torch.npu.manual_seed_all(seed)
def initial_seed(self, seed):
return torch.npu.initial_seed(seed)
def default_generator(self, device_index):
return torch.npu.default_generators[device_index]
# Streams/Events
@property
def Stream(self):
return torch.npu.Stream
def stream(self, stream):
return torch.npu.stream(stream)
def current_stream(self, device_index=None):
return torch.npu.current_stream(device_index)
def default_stream(self, device_index=None):
return torch.npu.default_stream(device_index)
@property
def Event(self):
return torch.npu.Event
# Memory management
def empty_cache(self):
return torch.npu.empty_cache()
def memory_allocated(self, device_index=None):
return torch.npu.memory_allocated(device_index)
def max_memory_allocated(self, device_index=None):
return torch.npu.max_memory_allocated(device_index)
def reset_max_memory_allocated(self, device_index=None):
return torch.npu.reset_max_memory_allocated(device_index)
def memory_cached(self, device_index=None):
return torch.npu.memory_cached(device_index)
def max_memory_cached(self, device_index=None):
return torch.npu.max_memory_cached(device_index)
def reset_max_memory_cached(self, device_index=None):
return torch.npu.reset_max_memory_cached(device_index)
def memory_stats(self, device_index=None):
if hasattr(torch.npu, 'memory_stats'):
return torch.npu.memory_stats(device_index)
def reset_peak_memory_stats(self, device_index=None):
if hasattr(torch.npu, 'reset_peak_memory_stats'):
return torch.npu.reset_peak_memory_stats(device_index)
def memory_reserved(self, device_index=None):
if hasattr(torch.npu, 'memory_reserved'):
return torch.npu.memory_reserved(device_index)
def max_memory_reserved(self, device_index=None):
if hasattr(torch.npu, 'max_memory_reserved'):
return torch.npu.max_memory_reserved(device_index)
def total_memory(self, device_index=None):
return torch.npu.get_device_properties(device_index).total_memory
# Data types
def is_bf16_supported(self):
return torch.npu.is_bf16_supported()
def is_fp16_supported(self):
return True
# Misc
def amp(self):
if hasattr(torch.npu, 'amp'):
return torch.npu.amp
return None
def is_available(self):
return torch.npu.is_available()
def range_push(self, msg):
return
def range_pop(self):
return
def lazy_call(self, callback):
return torch.npu._lazy_call(callback)
def communication_backend_name(self):
return self._communication_backend_name
# Tensor operations
@property
def BFloat16Tensor(self):
return torch.npu.BFloat16Tensor
@property
def ByteTensor(self):
return torch.npu.ByteTensor
@property
def DoubleTensor(self):
return torch.npu.DoubleTensor
@property
def FloatTensor(self):
return torch.npu.FloatTensor
@property
def HalfTensor(self):
return torch.npu.HalfTensor
@property
def IntTensor(self):
return torch.npu.IntTensor
@property
def LongTensor(self):
return torch.npu.LongTensor
def pin_memory(self, tensor):
return tensor.pin_memory()
def on_accelerator(self, tensor):
device_str = str(tensor.device)
if device_str.startswith('npu:'):
return True
else:
return False
def op_builder_dir(self):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
return "op_builder.npu"
except ImportError:
return "deepspeed.ops.op_builder.npu"
# dict that holds class name <--> class type mapping i.e.
# 'AsyncIOBuilder': <class 'op_builder.async_io.AsyncIOBuilder'>
# this dict will be filled at init stage
class_dict = None
def _lazy_init_class_dict(self):
        if self.class_dict is not None:
return
else:
self.class_dict = {}
# create an instance of op builder and return, name specified by class_name
def create_op_builder(self, class_name):
self._lazy_init_class_dict()
if class_name in self.class_dict:
return self.class_dict[class_name]()
else:
return None
# return an op builder class, name specified by class_name
def get_op_builder(self, class_name):
self._lazy_init_class_dict()
if class_name in self.class_dict:
return self.class_dict[class_name]
else:
return None
def build_extension(self):
from torch.utils.cpp_extension import BuildExtension
return BuildExtension
| 6,893 | 27.487603 | 102 |
py
|
DeepSpeed
|
DeepSpeed-master/accelerator/cuda_accelerator.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import pkgutil
import importlib
from .abstract_accelerator import DeepSpeedAccelerator
# During the setup stage torch may not be installed; passing on a missing
# torch allows the op builder related APIs to be executed.
try:
import torch.cuda
except ImportError:
pass
class CUDA_Accelerator(DeepSpeedAccelerator):
def __init__(self):
self._name = 'cuda'
self._communication_backend_name = 'nccl'
def is_synchronized_device(self):
return False
# Device APIs
def device_name(self, device_index=None):
        if device_index is None:
return 'cuda'
return 'cuda:{}'.format(device_index)
def device(self, device_index=None):
return torch.cuda.device(device_index)
def set_device(self, device_index):
torch.cuda.set_device(device_index)
def current_device(self):
return torch.cuda.current_device()
def current_device_name(self):
return 'cuda:{}'.format(torch.cuda.current_device())
def device_count(self):
return torch.cuda.device_count()
def synchronize(self, device_index=None):
return torch.cuda.synchronize(device_index)
# RNG APIs
def random(self):
return torch.random
def set_rng_state(self, new_state, device_index=None):
if device_index is None:
return torch.cuda.set_rng_state(new_state)
return torch.cuda.set_rng_state(new_state, device_index)
def get_rng_state(self, device_index=None):
if device_index is None:
return torch.cuda.get_rng_state()
return torch.cuda.get_rng_state(device_index)
def manual_seed(self, seed):
return torch.cuda.manual_seed(seed)
def manual_seed_all(self, seed):
return torch.cuda.manual_seed_all(seed)
def initial_seed(self, seed):
return torch.cuda.initial_seed(seed)
def default_generator(self, device_index):
return torch.cuda.default_generators[device_index]
# Streams/Events
@property
def Stream(self):
return torch.cuda.Stream
def stream(self, stream):
return torch.cuda.stream(stream)
def current_stream(self, device_index=None):
return torch.cuda.current_stream(device_index)
def default_stream(self, device_index=None):
return torch.cuda.default_stream(device_index)
@property
def Event(self):
return torch.cuda.Event
# Memory management
def empty_cache(self):
return torch.cuda.empty_cache()
def memory_allocated(self, device_index=None):
return torch.cuda.memory_allocated(device_index)
def max_memory_allocated(self, device_index=None):
return torch.cuda.max_memory_allocated(device_index)
def reset_max_memory_allocated(self, device_index=None):
return torch.cuda.reset_max_memory_allocated(device_index)
def memory_cached(self, device_index=None):
return torch.cuda.memory_cached(device_index)
def max_memory_cached(self, device_index=None):
return torch.cuda.max_memory_cached(device_index)
def reset_max_memory_cached(self, device_index=None):
return torch.cuda.reset_max_memory_cached(device_index)
def memory_stats(self, device_index=None):
if hasattr(torch.cuda, 'memory_stats'):
return torch.cuda.memory_stats(device_index)
def reset_peak_memory_stats(self, device_index=None):
if hasattr(torch.cuda, 'reset_peak_memory_stats'):
return torch.cuda.reset_peak_memory_stats(device_index)
def memory_reserved(self, device_index=None):
if hasattr(torch.cuda, 'memory_reserved'):
return torch.cuda.memory_reserved(device_index)
def max_memory_reserved(self, device_index=None):
if hasattr(torch.cuda, 'max_memory_reserved'):
return torch.cuda.max_memory_reserved(device_index)
def total_memory(self, device_index=None):
return torch.cuda.get_device_properties(device_index).total_memory
# Data types
def is_bf16_supported(self):
return torch.cuda.is_bf16_supported()
def is_fp16_supported(self):
major, _ = torch.cuda.get_device_capability()
if major >= 7:
return True
else:
return False
# Misc
def amp(self):
if hasattr(torch.cuda, 'amp'):
return torch.cuda.amp
return None
def is_available(self):
return torch.cuda.is_available()
def range_push(self, msg):
if hasattr(torch.cuda.nvtx, 'range_push'):
return torch.cuda.nvtx.range_push(msg)
def range_pop(self):
if hasattr(torch.cuda.nvtx, 'range_pop'):
return torch.cuda.nvtx.range_pop()
def lazy_call(self, callback):
return torch.cuda._lazy_call(callback)
def communication_backend_name(self):
return self._communication_backend_name
# Tensor operations
@property
def BFloat16Tensor(self):
return torch.cuda.BFloat16Tensor
@property
def ByteTensor(self):
return torch.cuda.ByteTensor
@property
def DoubleTensor(self):
return torch.cuda.DoubleTensor
@property
def FloatTensor(self):
return torch.cuda.FloatTensor
@property
def HalfTensor(self):
return torch.cuda.HalfTensor
@property
def IntTensor(self):
return torch.cuda.IntTensor
@property
def LongTensor(self):
return torch.cuda.LongTensor
def pin_memory(self, tensor):
return tensor.pin_memory()
def on_accelerator(self, tensor):
device_str = str(tensor.device)
if device_str.startswith('cuda:'):
return True
else:
return False
def op_builder_dir(self):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
return "op_builder"
except ImportError:
return "deepspeed.ops.op_builder"
# dict that holds class name <--> class type mapping i.e.
# 'AsyncIOBuilder': <class 'op_builder.async_io.AsyncIOBuilder'>
# this dict will be filled at init stage
class_dict = None
def _lazy_init_class_dict(self):
        if self.class_dict is not None:
return
else:
self.class_dict = {}
# begin initialize for create_op_builder()
# put all valid class name <--> class type mapping into class_dict
op_builder_dir = self.op_builder_dir()
op_builder_module = importlib.import_module(op_builder_dir)
for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(op_builder_module.__file__)]):
# avoid self references
if module_name != 'all_ops' and module_name != 'builder' and module_name != 'cpu':
module = importlib.import_module("{}.{}".format(op_builder_dir, module_name))
for member_name in module.__dir__():
if member_name.endswith(
'Builder'
) and member_name != "OpBuilder" and member_name != "CUDAOpBuilder" and member_name != "TorchCPUOpBuilder": # avoid abstract classes
if not member_name in self.class_dict:
self.class_dict[member_name] = getattr(module, member_name)
# end initialize for create_op_builder()
# create an instance of op builder and return, name specified by class_name
def create_op_builder(self, class_name):
self._lazy_init_class_dict()
if class_name in self.class_dict:
return self.class_dict[class_name]()
else:
return None
# return an op builder class, name specified by class_name
def get_op_builder(self, class_name):
self._lazy_init_class_dict()
if class_name in self.class_dict:
return self.class_dict[class_name]
else:
return None
def build_extension(self):
from torch.utils.cpp_extension import BuildExtension
return BuildExtension
| 8,389 | 30.423221 | 157 |
py
|
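_lazy_init_class_dict above is a small plugin-discovery loop; a self-contained sketch of the same pkgutil/importlib pattern, with the package name as an assumption:

import importlib
import os
import pkgutil

def discover_builders(package_name='op_builder'):
    # Map class name -> class for every *Builder in the package's modules,
    # skipping abstract bases, as CUDA_Accelerator._lazy_init_class_dict does.
    skip = {'OpBuilder', 'CUDAOpBuilder', 'TorchCPUOpBuilder'}
    pkg = importlib.import_module(package_name)
    found = {}
    for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(pkg.__file__)]):
        if module_name in ('all_ops', 'builder', 'cpu'):
            continue
        module = importlib.import_module(f'{package_name}.{module_name}')
        for name in dir(module):
            if name.endswith('Builder') and name not in skip and name not in found:
                found[name] = getattr(module, name)
    return found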
DeepSpeed
|
DeepSpeed-master/accelerator/abstract_accelerator.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import abc
from abc import ABC
class DeepSpeedAccelerator(ABC):
def __init__(self):
self._name = None
self._communication_backend_name = None
@abc.abstractmethod
def is_synchronized_device(self):
...
# Device APIs
@abc.abstractmethod
def device_name(self, device_index):
...
@abc.abstractmethod
def device(self, device_index):
...
@abc.abstractmethod
def set_device(self, device_index):
...
@abc.abstractmethod
def current_device(self):
...
@abc.abstractmethod
def current_device_name(self):
...
@abc.abstractmethod
def device_count(self):
...
@abc.abstractmethod
def synchronize(self, device_index=None):
...
# RNG APIs
@abc.abstractmethod
def random(self):
...
@abc.abstractmethod
def set_rng_state(self, new_state, device_index=None):
...
@abc.abstractmethod
def get_rng_state(self, device_index=None):
...
@abc.abstractmethod
def manual_seed(self, seed):
...
@abc.abstractmethod
def manual_seed_all(self, seed):
...
@abc.abstractmethod
def initial_seed(self, seed):
...
@abc.abstractmethod
def default_generator(self, device_index):
...
# Streams/Events
@property
@abc.abstractmethod
def Stream(self):
...
@abc.abstractmethod
def stream(self, stream):
...
@abc.abstractmethod
def current_stream(self, device_index=None):
...
@abc.abstractmethod
def default_stream(self, device_index=None):
...
@property
@abc.abstractmethod
def Event(self):
...
# Memory management
@abc.abstractmethod
def empty_cache(self):
...
@abc.abstractmethod
def memory_allocated(self, device_index=None):
...
@abc.abstractmethod
def max_memory_allocated(self, device_index=None):
...
@abc.abstractmethod
def reset_max_memory_allocated(self, device_index=None):
...
@abc.abstractmethod
def memory_cached(self, device_index=None):
...
@abc.abstractmethod
def max_memory_cached(self, device_index=None):
...
@abc.abstractmethod
def reset_max_memory_cached(self, device_index=None):
...
@abc.abstractmethod
def memory_stats(self, device_index=None):
...
@abc.abstractmethod
def reset_peak_memory_stats(self, device_index=None):
...
@abc.abstractmethod
def memory_reserved(self, device_index=None):
...
@abc.abstractmethod
def max_memory_reserved(self, device_index=None):
...
@abc.abstractmethod
def total_memory(self, device_index=None):
...
# Data types
@abc.abstractmethod
def is_bf16_supported(self):
...
@abc.abstractmethod
def is_fp16_supported(self):
...
# Misc
@abc.abstractmethod
def amp(self):
...
@abc.abstractmethod
def is_available(self):
...
@abc.abstractmethod
def range_push(self, msg):
...
@abc.abstractmethod
def range_pop(self):
...
@abc.abstractmethod
def lazy_call(self, callback):
...
@abc.abstractmethod
def communication_backend_name(self):
...
# Tensor operations
@property
@abc.abstractmethod
def BFloat16Tensor(self):
...
@property
@abc.abstractmethod
def ByteTensor(self):
...
@property
@abc.abstractmethod
def DoubleTensor(self):
...
@property
@abc.abstractmethod
def FloatTensor(self):
...
@property
@abc.abstractmethod
def HalfTensor(self):
...
@property
@abc.abstractmethod
def IntTensor(self):
...
@property
@abc.abstractmethod
def LongTensor(self):
...
@abc.abstractmethod
def pin_memory(self, tensor):
...
@abc.abstractmethod
def on_accelerator(self, tensor):
...
@abc.abstractmethod
def op_builder_dir(self):
...
# create an instance of op builder, specified by class_name
@abc.abstractmethod
def create_op_builder(self, class_name):
...
# return an op builder class, specified by class_name
@abc.abstractmethod
def get_op_builder(self, class_name):
...
@abc.abstractmethod
def build_extension(self):
...
| 4,607 | 17.808163 | 63 |
py
|
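The point of this abstract interface is that framework code calls get_accelerator() rather than torch.cuda directly; a hedged sketch of the device-agnostic pattern:

import torch
from deepspeed.accelerator import get_accelerator

acc = get_accelerator()
device = torch.device(acc.device_name())  # 'cuda', 'cpu', 'xpu', ...
x = torch.ones(4, 4, device=device)
if not acc.is_synchronized_device():
    acc.synchronize()  # only needed on asynchronous backends
print(acc.memory_allocated())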
DeepSpeed
|
DeepSpeed-master/accelerator/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .abstract_accelerator import DeepSpeedAccelerator
from .real_accelerator import get_accelerator, set_accelerator
| 214 | 25.875 | 62 |
py
|
DeepSpeed
|
DeepSpeed-master/op_builder/stochastic_transformer.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .transformer import TransformerBuilder
class StochasticTransformerBuilder(TransformerBuilder):
BUILD_VAR = "DS_BUILD_STOCHASTIC_TRANSFORMER"
NAME = "stochastic_transformer"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.transformer.{self.NAME}_op'
def nvcc_args(self):
args = super().nvcc_args()
args.append('-D__STOCHASTIC_MODE__')
return args
| 565 | 23.608696 | 58 |
py
|
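BUILD_VAR above follows DeepSpeed's DS_BUILD_* convention for pre-compiling ops at install time instead of JIT-compiling on first use; an illustrative invocation, assuming the builder is exported from deepspeed.ops.op_builder:

# Pre-compile at install time:
#   DS_BUILD_STOCHASTIC_TRANSFORMER=1 pip install deepspeed
# Or JIT-build explicitly from Python:
from deepspeed.ops.op_builder import StochasticTransformerBuilder
op_module = StochasticTransformerBuilder().load()  # compiles on first use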
DeepSpeed
|
DeepSpeed-master/op_builder/transformer_inference.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CUDAOpBuilder, installed_cuda_version
class InferenceBuilder(CUDAOpBuilder):
BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE"
NAME = "transformer_inference"
def __init__(self, name=None):
name = self.NAME if name is None else name
super().__init__(name=name)
def absolute_name(self):
return f'deepspeed.ops.transformer.inference.{self.NAME}_op'
def is_compatible(self, verbose=True):
try:
import torch
except ImportError:
self.warning("Please install torch if trying to pre-compile inference kernels")
return False
cuda_okay = True
if not self.is_rocm_pytorch() and torch.cuda.is_available():
sys_cuda_major, _ = installed_cuda_version()
torch_cuda_major = int(torch.version.cuda.split('.')[0])
cuda_capability = torch.cuda.get_device_properties(0).major
if cuda_capability < 6:
self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
cuda_okay = False
if cuda_capability >= 8:
if torch_cuda_major < 11 or sys_cuda_major < 11:
self.warning("On Ampere and higher architectures please use CUDA 11+")
cuda_okay = False
return super().is_compatible(verbose) and cuda_okay
def filter_ccs(self, ccs):
ccs_retained = []
ccs_pruned = []
for cc in ccs:
if int(cc[0]) >= 6:
ccs_retained.append(cc)
else:
ccs_pruned.append(cc)
if len(ccs_pruned) > 0:
self.warning(f"Filtered compute capabilities {ccs_pruned}")
return ccs_retained
def sources(self):
return [
'csrc/transformer/inference/csrc/pt_binding.cpp',
'csrc/transformer/inference/csrc/gelu.cu',
'csrc/transformer/inference/csrc/relu.cu',
'csrc/transformer/inference/csrc/layer_norm.cu',
'csrc/transformer/inference/csrc/rms_norm.cu',
'csrc/transformer/inference/csrc/softmax.cu',
'csrc/transformer/inference/csrc/dequantize.cu',
'csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu',
'csrc/transformer/inference/csrc/transform.cu',
'csrc/transformer/inference/csrc/pointwise_ops.cu',
]
def extra_ldflags(self):
if not self.is_rocm_pytorch():
return ['-lcurand']
else:
return []
def include_paths(self):
return ['csrc/transformer/inference/includes', 'csrc/includes']
| 2,745 | 35.613333 | 100 |
py
|
DeepSpeed
|
DeepSpeed-master/op_builder/cpu_adagrad.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from .builder import TorchCPUOpBuilder
class CPUAdagradBuilder(TorchCPUOpBuilder):
BUILD_VAR = "DS_BUILD_CPU_ADAGRAD"
NAME = "cpu_adagrad"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.adagrad.{self.NAME}_op'
def sources(self):
if self.build_for_cpu:
return ['csrc/adagrad/cpu_adagrad.cpp']
return ['csrc/adagrad/cpu_adagrad.cpp', 'csrc/common/custom_cuda_kernel.cu']
def libraries_args(self):
args = super().libraries_args()
if self.build_for_cpu:
return args
if not self.is_rocm_pytorch():
args += ['curand']
return args
def include_paths(self):
import torch
if self.build_for_cpu:
CUDA_INCLUDE = []
elif not self.is_rocm_pytorch():
CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")]
else:
CUDA_INCLUDE = [
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include"),
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "rocrand"),
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "hiprand"),
]
return ['csrc/includes'] + CUDA_INCLUDE
| 1,420 | 28.604167 | 89 |
py
|