# coding=utf-8 | |
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. | |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. | |
# | |
# Licensed under the Apache License, Version 2.0 (the "License"); | |
# you may not use this file except in compliance with the License. | |
# You may obtain a copy of the License at | |
# | |
# http://www.apache.org/licenses/LICENSE-2.0 | |
# | |
# Unless required by applicable law or agreed to in writing, software | |
# distributed under the License is distributed on an "AS IS" BASIS, | |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
# See the License for the specific language governing permissions and | |
# limitations under the License. | |
import inspect | |
import os | |
import re | |
import warnings | |
from contextlib import contextmanager | |
from dataclasses import dataclass | |
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union | |
import torch | |
from torch import Tensor, device, nn | |
from torch.nn import CrossEntropyLoss | |
from .activations import get_activation | |
from .configuration_utils import PretrainedConfig | |
from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled | |
from .file_utils import ( | |
DUMMY_INPUTS, | |
FLAX_WEIGHTS_NAME, | |
TF2_WEIGHTS_NAME, | |
TF_WEIGHTS_NAME, | |
WEIGHTS_NAME, | |
ModelOutput, | |
PushToHubMixin, | |
cached_path, | |
copy_func, | |
hf_bucket_url, | |
is_offline_mode, | |
is_remote_url, | |
replace_return_docstrings, | |
) | |
from .generation_utils import GenerationMixin | |
from .utils import logging | |
logger = logging.get_logger(__name__) | |
_init_weights = True | |
@contextmanager
def no_init_weights(_enable=True):
""" | |
Context manager to globally disable weight initialization to speed up loading large models. | |
TODO(Patrick): Delete safety argument `_enable=True` at the next major version.
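Example (illustrative sketch; assumes :class:`~transformers.BertConfig` and :class:`~transformers.BertModel` are importable)::
>>> from transformers import BertConfig, BertModel
>>> config = BertConfig()
>>> with no_init_weights():
...     model = BertModel(config)  # skips the model-specific weight initialization to speed up construction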
""" | |
global _init_weights | |
if _enable: | |
_init_weights = False | |
try: | |
yield | |
finally: | |
_init_weights = True | |
try: | |
from torch.nn import Identity | |
except ImportError: | |
# Older PyTorch compatibility | |
class Identity(nn.Module): | |
r"""A placeholder identity operator that is argument-insensitive.""" | |
def __init__(self, *args, **kwargs): | |
super().__init__() | |
def forward(self, input): | |
return input | |
def find_pruneable_heads_and_indices( | |
heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int] | |
) -> Tuple[Set[int], torch.LongTensor]: | |
""" | |
Finds the heads and their indices taking :obj:`already_pruned_heads` into account. | |
Args: | |
heads (:obj:`List[int]`): List of the indices of heads to prune. | |
n_heads (:obj:`int`): The number of heads in the model. | |
head_size (:obj:`int`): The size of each head. | |
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads. | |
Returns: | |
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices. | |
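Example (illustrative sketch with small made-up sizes)::
>>> heads, index = find_pruneable_heads_and_indices(heads=[0, 2], n_heads=4, head_size=2, already_pruned_heads={1})
>>> heads
{0, 2}
>>> index  # flattened positions to keep once heads 0 and 2 are pruned
tensor([4, 5, 6, 7])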
""" | |
mask = torch.ones(n_heads, head_size) | |
heads = set(heads) - already_pruned_heads # Convert to set and remove already pruned heads | |
for head in heads: | |
# Compute how many pruned heads are before the head and move the index accordingly | |
head = head - sum(1 if h < head else 0 for h in already_pruned_heads) | |
mask[head] = 0 | |
mask = mask.view(-1).contiguous().eq(1) | |
index: torch.LongTensor = torch.arange(len(mask))[mask].long() | |
return heads, index | |
def get_parameter_device(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): | |
try: | |
return next(parameter.parameters()).device | |
except StopIteration: | |
# For nn.DataParallel compatibility in PyTorch 1.5 | |
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: | |
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] | |
return tuples | |
gen = parameter._named_members(get_members_fn=find_tensor_attributes) | |
first_tuple = next(gen) | |
return first_tuple[1].device | |
def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): | |
try: | |
return next(parameter.parameters()).dtype | |
except StopIteration: | |
# For nn.DataParallel compatibility in PyTorch 1.5 | |
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: | |
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] | |
return tuples | |
gen = parameter._named_members(get_members_fn=find_tensor_attributes) | |
first_tuple = next(gen) | |
return first_tuple[1].dtype | |
class ModuleUtilsMixin: | |
""" | |
A few utilities for :obj:`torch.nn.Modules`, to be used as a mixin. | |
""" | |
@staticmethod
def _hook_rss_memory_pre_forward(module, *args, **kwargs):
try: | |
import psutil | |
except (ImportError): | |
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") | |
process = psutil.Process(os.getpid()) | |
mem = process.memory_info() | |
module.mem_rss_pre_forward = mem.rss | |
return None | |
@staticmethod
def _hook_rss_memory_post_forward(module, *args, **kwargs):
try: | |
import psutil | |
except (ImportError): | |
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") | |
process = psutil.Process(os.getpid()) | |
mem = process.memory_info() | |
module.mem_rss_post_forward = mem.rss | |
mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward | |
module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0) | |
return None | |
def add_memory_hooks(self): | |
""" | |
Add a memory hook before and after each sub-module forward pass to record increase in memory consumption. | |
Increase in memory consumption is stored in a :obj:`mem_rss_diff` attribute for each module and can be reset to | |
zero with :obj:`model.reset_memory_hooks_state()`. | |
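Example (illustrative sketch; assumes ``psutil`` is installed and ``model``/``inputs`` are already defined)::
>>> model.add_memory_hooks()
>>> _ = model(**inputs)
>>> rss_increase = model.mem_rss_diff  # RSS increase in bytes recorded during the forward pass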
""" | |
for module in self.modules(): | |
module.register_forward_pre_hook(self._hook_rss_memory_pre_forward) | |
module.register_forward_hook(self._hook_rss_memory_post_forward) | |
self.reset_memory_hooks_state() | |
def reset_memory_hooks_state(self): | |
""" | |
Reset the :obj:`mem_rss_diff` attribute of each module (see | |
:func:`~transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks`). | |
""" | |
for module in self.modules(): | |
module.mem_rss_diff = 0 | |
module.mem_rss_post_forward = 0 | |
module.mem_rss_pre_forward = 0 | |
@property
def device(self) -> device:
""" | |
:obj:`torch.device`: The device on which the module is (assuming that all the module parameters are on the same | |
device). | |
""" | |
return get_parameter_device(self) | |
@property
def dtype(self) -> torch.dtype:
""" | |
:obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). | |
""" | |
return get_parameter_dtype(self) | |
def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor: | |
""" | |
Invert an attention mask (e.g., switches 0. and 1.). | |
Args: | |
encoder_attention_mask (:obj:`torch.Tensor`): An attention mask. | |
Returns: | |
:obj:`torch.Tensor`: The inverted attention mask. | |
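Example (illustrative sketch; ``model`` is assumed to be an already-loaded model running in float32)::
>>> mask = torch.tensor([[1, 1, 0]])
>>> inverted = model.invert_attention_mask(mask)  # 0. for tokens to attend to, a large negative value for masked ones
>>> inverted.shape
torch.Size([1, 1, 1, 3])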
""" | |
if encoder_attention_mask.dim() == 3: | |
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] | |
if encoder_attention_mask.dim() == 2: | |
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] | |
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition | |
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow | |
# /transformer/transformer_layers.py#L270 | |
# encoder_extended_attention_mask = (encoder_extended_attention_mask == | |
# encoder_extended_attention_mask.transpose(-1, -2)) | |
encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility | |
if self.dtype == torch.float16: | |
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4 | |
elif self.dtype == torch.float32: | |
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9 | |
else: | |
raise ValueError( | |
f"{self.dtype} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`" | |
) | |
return encoder_extended_attention_mask | |
def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device) -> Tensor: | |
""" | |
Makes broadcastable attention and causal masks so that future and masked tokens are ignored. | |
Arguments: | |
attention_mask (:obj:`torch.Tensor`): | |
Mask with ones indicating tokens to attend to, zeros for tokens to ignore. | |
input_shape (:obj:`Tuple[int]`): | |
The shape of the input to the model. | |
device: (:obj:`torch.device`): | |
The device of the input to the model. | |
Returns: | |
:obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
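Example (illustrative sketch for an encoder-only model; ``model`` and the padding mask are assumptions)::
>>> attention_mask = torch.tensor([[1, 1, 1, 0]])
>>> extended = model.get_extended_attention_mask(attention_mask, attention_mask.shape, attention_mask.device)
>>> extended.shape  # broadcastable to [batch_size, num_heads, seq_length, seq_length]
torch.Size([1, 1, 1, 4])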
""" | |
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] | |
# ourselves in which case we just need to make it broadcastable to all heads. | |
if attention_mask.dim() == 3: | |
extended_attention_mask = attention_mask[:, None, :, :] | |
elif attention_mask.dim() == 2: | |
# Provided a padding mask of dimensions [batch_size, seq_length] | |
# - if the model is a decoder, apply a causal mask in addition to the padding mask | |
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] | |
if self.config.is_decoder: | |
batch_size, seq_length = input_shape | |
seq_ids = torch.arange(seq_length, device=device) | |
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] | |
# in case past_key_values are used we need to add a prefix ones mask to the causal mask | |
# causal and attention masks must have same type with pytorch version < 1.3 | |
causal_mask = causal_mask.to(attention_mask.dtype) | |
if causal_mask.shape[1] < attention_mask.shape[1]: | |
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] | |
causal_mask = torch.cat( | |
[ | |
torch.ones( | |
(batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype | |
), | |
causal_mask, | |
], | |
axis=-1, | |
) | |
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] | |
else: | |
extended_attention_mask = attention_mask[:, None, None, :] | |
else: | |
raise ValueError( | |
f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})" | |
) | |
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for | |
# masked positions, this operation will create a tensor which is 0.0 for | |
# positions we want to attend and -10000.0 for masked positions. | |
# Since we are adding it to the raw scores before the softmax, this is | |
# effectively the same as removing these entirely. | |
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility | |
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 | |
return extended_attention_mask | |
def get_head_mask( | |
self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False | |
) -> Tensor: | |
""" | |
Prepare the head mask if needed. | |
Args: | |
head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`): | |
The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). | |
num_hidden_layers (:obj:`int`): | |
The number of hidden layers in the model. | |
is_attention_chunked (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the attention scores are computed by chunks.
Returns: | |
:obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or | |
list with :obj:`[None]` for each layer. | |
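Example (illustrative sketch; assumes ``model`` has 12 layers with 12 attention heads each)::
>>> head_mask = torch.ones(12)  # one value per head, shared across layers
>>> head_mask = model.get_head_mask(head_mask, num_hidden_layers=12)
>>> head_mask.shape
torch.Size([12, 1, 12, 1, 1])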
""" | |
if head_mask is not None: | |
head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) | |
if is_attention_chunked is True: | |
head_mask = head_mask.unsqueeze(-1) | |
else: | |
head_mask = [None] * num_hidden_layers | |
return head_mask | |
def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): | |
"""-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]""" | |
if head_mask.dim() == 1: | |
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) | |
head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1) | |
elif head_mask.dim() == 2: | |
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer | |
assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}" | |
head_mask = head_mask.to(dtype=self.dtype) # switch to float if need + fp16 compatibility | |
return head_mask | |
def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int: | |
""" | |
Get number of (optionally, trainable or non-embeddings) parameters in the module. | |
Args: | |
only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`): | |
Whether or not to return only the number of trainable parameters | |
exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`): | |
Whether or not to return only the number of non-embeddings parameters | |
Returns: | |
:obj:`int`: The number of parameters. | |
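Example (illustrative sketch; ``model`` is assumed to be an already-instantiated model)::
>>> n_total = model.num_parameters()
>>> n_trainable = model.num_parameters(only_trainable=True)
>>> n_non_embedding = model.num_parameters(exclude_embeddings=True)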
""" | |
if exclude_embeddings: | |
embedding_param_names = [ | |
f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding) | |
] | |
non_embedding_parameters = [ | |
parameter for name, parameter in self.named_parameters() if name not in embedding_param_names | |
] | |
return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable) | |
else: | |
return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable) | |
def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int: | |
""" | |
Helper function to estimate the total number of tokens from the model inputs. | |
Args: | |
input_dict (:obj:`dict`): The model inputs.
Returns: | |
:obj:`int`: The total number of tokens. | |
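Example (illustrative sketch; ``model`` is an assumption)::
>>> input_dict = {"input_ids": torch.ones(2, 10, dtype=torch.long)}
>>> model.estimate_tokens(input_dict)  # 2 sequences of 10 tokens each
20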
""" | |
token_inputs = [tensor for key, tensor in input_dict.items() if "input" in key] | |
if token_inputs: | |
return sum([token_input.numel() for token_input in token_inputs]) | |
else: | |
warnings.warn( | |
"Could not estimate the number of tokens of the input, floating-point operations will not be computed" | |
) | |
return 0 | |
def floating_point_ops( | |
self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True | |
) -> int: | |
""" | |
Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a | |
batch with this transformer model. Default approximation neglects the quadratic dependency on the number of | |
tokens (valid if :obj:`12 * d_model << sequence_length`) as laid out in `this paper | |
<https://arxiv.org/pdf/2001.08361.pdf>`__ section 2.1. Should be overridden for transformers with parameter | |
re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths. | |
Args: | |
input_dict (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The model inputs, used to estimate the total number of tokens in the batch.
exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to exclude the embedding and softmax operations from the count.
Returns: | |
:obj:`int`: The number of floating-point operations. | |
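Example (illustrative sketch; ``model`` is an assumption)::
>>> input_dict = {"input_ids": torch.ones(2, 128, dtype=torch.long)}
>>> flops = model.floating_point_ops(input_dict)  # 6 * number of tokens * number of (non-embedding) parameters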
""" | |
return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings) | |
class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin): | |
r""" | |
Base class for all models. | |
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods | |
for loading, downloading and saving models as well as a few methods common to all models to: | |
* resize the input embeddings, | |
* prune heads in the self-attention heads. | |
Class attributes (overridden by derived classes): | |
- **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of | |
:class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture. | |
- **load_tf_weights** (:obj:`Callable`) -- A python `method` for loading a TensorFlow checkpoint in a PyTorch | |
model, taking as arguments: | |
- **model** (:class:`~transformers.PreTrainedModel`) -- An instance of the model on which to load the | |
TensorFlow checkpoint. | |
- **config** (:class:`~transformers.PreTrainedConfig`) -- An instance of the configuration associated to | |
the model. | |
- **path** (:obj:`str`) -- A path to the TensorFlow checkpoint. | |
- **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in | |
derived classes of the same architecture adding modules on top of the base model. | |
- **is_parallelizable** (:obj:`bool`) -- A flag indicating whether this model supports model parallelization. | |
""" | |
config_class = None | |
base_model_prefix = "" | |
# a list of re patterns of tensor names to ignore from the model when loading the model weights
# (and avoid unnecessary warnings). | |
_keys_to_ignore_on_load_missing = None | |
# a list of re patterns of tensor names to ignore from the weights when loading the model weights
# (and avoid unnecessary warnings). | |
_keys_to_ignore_on_load_unexpected = None | |
# a list of tensor names to ignore when saving the model (useful for keys that aren't
# trained, but which are deterministic, or tied variables) | |
_keys_to_ignore_on_save = None | |
is_parallelizable = False | |
@property
def dummy_inputs(self) -> Dict[str, torch.Tensor]:
""" | |
:obj:`Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network. | |
""" | |
return {"input_ids": torch.tensor(DUMMY_INPUTS)} | |
def __init__(self, config: PretrainedConfig, *inputs, **kwargs): | |
super().__init__() | |
if not isinstance(config, PretrainedConfig): | |
raise ValueError( | |
f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " | |
"`PretrainedConfig`. To create a model from a pretrained model use " | |
f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" | |
) | |
# Save config and origin of the pretrained weights if given in model | |
self.config = config | |
self.name_or_path = config.name_or_path | |
@classmethod
def _from_config(cls, config, **kwargs):
""" | |
All context managers that the model should be initialized under go here. | |
Args: | |
torch_dtype (:obj:`torch.dtype`, `optional`): | |
Override the default ``torch.dtype`` and load the model under this dtype. | |
""" | |
torch_dtype = kwargs.pop("torch_dtype", None) | |
# override default dtype if needed | |
dtype_orig = None | |
if torch_dtype is not None: | |
dtype_orig = cls._set_default_torch_dtype(torch_dtype) | |
if is_deepspeed_zero3_enabled(): | |
import deepspeed | |
logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model") | |
# this immediately partitions the model across all gpus, to avoid the overhead in time | |
# and memory copying it on CPU or each GPU first | |
with deepspeed.zero.Init(config=deepspeed_config()): | |
model = cls(config, **kwargs) | |
else: | |
model = cls(config, **kwargs) | |
# restore default dtype if it was modified | |
if dtype_orig is not None: | |
torch.set_default_dtype(dtype_orig) | |
return model | |
@classmethod
def _set_default_torch_dtype(cls, dtype: torch.dtype) -> torch.dtype:
""" | |
Change the default dtype and return the previous one. This is needed when wanting to instantiate the model | |
under specific dtype. | |
Args: | |
dtype (:obj:`torch.dtype`): | |
a floating dtype to set to. | |
Returns: | |
:obj:`torch.dtype`: the original ``dtype`` that can be used to restore ``torch.set_default_dtype(dtype)`` | |
if it was modified. If it wasn't, returns :obj:`None`. | |
Note that ``set_default_dtype`` currently only works with floating-point types and asserts if, for example,
``torch.int64`` is passed, so this function will throw an exception if a non-float ``dtype`` is passed.
""" | |
if not dtype.is_floating_point: | |
raise ValueError( | |
f"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype" | |
) | |
logger.info(f"Instantiating {cls.__name__} model under default dtype {dtype}.") | |
dtype_orig = torch.get_default_dtype() | |
torch.set_default_dtype(dtype) | |
return dtype_orig | |
@property
def base_model(self) -> nn.Module:
""" | |
:obj:`torch.nn.Module`: The main body of the model. | |
""" | |
return getattr(self, self.base_model_prefix, self) | |
def get_input_embeddings(self) -> nn.Module: | |
""" | |
Returns the model's input embeddings. | |
Returns: | |
:obj:`nn.Module`: A torch module mapping vocabulary to hidden states. | |
""" | |
base_model = getattr(self, self.base_model_prefix, self) | |
if base_model is not self: | |
return base_model.get_input_embeddings() | |
else: | |
raise NotImplementedError | |
def set_input_embeddings(self, value: nn.Module): | |
""" | |
Set model's input embeddings. | |
Args: | |
value (:obj:`nn.Module`): A module mapping vocabulary to hidden states. | |
""" | |
base_model = getattr(self, self.base_model_prefix, self) | |
if base_model is not self: | |
base_model.set_input_embeddings(value) | |
else: | |
raise NotImplementedError | |
def get_output_embeddings(self) -> nn.Module: | |
""" | |
Returns the model's output embeddings. | |
Returns: | |
:obj:`nn.Module`: A torch module mapping hidden states to vocabulary. | |
""" | |
return None # Overwrite for models with output embeddings | |
def _init_weights(self, module): | |
""" | |
Initialize the weights. This method should be overridden by derived class. | |
""" | |
raise NotImplementedError(f"Make sure `_init_weigths` is implemented for {self.__class__}") | |
def tie_weights(self): | |
""" | |
Tie the weights between the input embeddings and the output embeddings. | |
If the :obj:`torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning | |
the weights instead. | |
""" | |
output_embeddings = self.get_output_embeddings() | |
if output_embeddings is not None and self.config.tie_word_embeddings: | |
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) | |
if self.config.is_encoder_decoder and self.config.tie_encoder_decoder: | |
if hasattr(self, self.base_model_prefix): | |
self = getattr(self, self.base_model_prefix) | |
self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix) | |
@staticmethod
def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str):
uninitialized_encoder_weights: List[str] = [] | |
if decoder.__class__ != encoder.__class__: | |
logger.info( | |
f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized." | |
) | |
def tie_encoder_to_decoder_recursively( | |
decoder_pointer: nn.Module, | |
encoder_pointer: nn.Module, | |
module_name: str, | |
uninitialized_encoder_weights: List[str], | |
depth=0, | |
): | |
assert isinstance(decoder_pointer, nn.Module) and isinstance( | |
encoder_pointer, nn.Module | |
), f"{decoder_pointer} and {encoder_pointer} have to be of type nn.Module" | |
if hasattr(decoder_pointer, "weight"): | |
assert hasattr(encoder_pointer, "weight") | |
encoder_pointer.weight = decoder_pointer.weight | |
if hasattr(decoder_pointer, "bias"): | |
assert hasattr(encoder_pointer, "bias") | |
encoder_pointer.bias = decoder_pointer.bias | |
return | |
encoder_modules = encoder_pointer._modules | |
decoder_modules = decoder_pointer._modules | |
if len(decoder_modules) > 0: | |
assert ( | |
len(encoder_modules) > 0 | |
), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}" | |
all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()]) | |
encoder_layer_pos = 0 | |
for name, module in decoder_modules.items(): | |
if name.isdigit(): | |
encoder_name = str(int(name) + encoder_layer_pos) | |
decoder_name = name | |
if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len( | |
encoder_modules | |
) != len(decoder_modules): | |
# this can happen if the name corresponds to the position in a module list of layers;
# in this case the decoder has added a cross-attention that the encoder does not have | |
# thus skip this step and subtract one layer pos from encoder | |
encoder_layer_pos -= 1 | |
continue | |
elif name not in encoder_modules: | |
continue | |
elif depth > 500: | |
raise ValueError( | |
"Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model." | |
) | |
else: | |
decoder_name = encoder_name = name | |
tie_encoder_to_decoder_recursively( | |
decoder_modules[decoder_name], | |
encoder_modules[encoder_name], | |
module_name + "/" + name, | |
uninitialized_encoder_weights, | |
depth=depth + 1, | |
) | |
all_encoder_weights.remove(module_name + "/" + encoder_name) | |
uninitialized_encoder_weights += list(all_encoder_weights) | |
# tie weights recursively | |
tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights) | |
if len(uninitialized_encoder_weights) > 0: | |
logger.warning( | |
f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}" | |
) | |
def _tie_or_clone_weights(self, output_embeddings, input_embeddings): | |
"""Tie or clone module weights depending of whether we are using TorchScript or not""" | |
if self.config.torchscript: | |
output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone()) | |
else: | |
output_embeddings.weight = input_embeddings.weight | |
if getattr(output_embeddings, "bias", None) is not None: | |
output_embeddings.bias.data = nn.functional.pad( | |
output_embeddings.bias.data, | |
( | |
0, | |
output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0], | |
), | |
"constant", | |
0, | |
) | |
if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"): | |
output_embeddings.out_features = input_embeddings.num_embeddings | |
def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding: | |
""" | |
Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`. | |
Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method. | |
Arguments: | |
new_num_tokens (:obj:`int`, `optional`): | |
The number of new tokens in the embedding matrix. Increasing the size will add newly initialized | |
vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, | |
just returns a pointer to the input tokens :obj:`torch.nn.Embedding` module of the model without doing | |
anything. | |
Return: | |
:obj:`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model. | |
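Example (illustrative sketch; ``model`` and ``tokenizer`` are assumed to be a matching pretrained pair)::
>>> num_added = tokenizer.add_tokens(['<new_token>'])
>>> embeddings = model.resize_token_embeddings(len(tokenizer))  # returns the resized input embedding module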
""" | |
model_embeds = self._resize_token_embeddings(new_num_tokens) | |
if new_num_tokens is None: | |
return model_embeds | |
# Update base model and current model config | |
self.config.vocab_size = new_num_tokens | |
self.vocab_size = new_num_tokens | |
# Tie weights again if needed | |
self.tie_weights() | |
return model_embeds | |
def _resize_token_embeddings(self, new_num_tokens): | |
old_embeddings = self.get_input_embeddings() | |
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) | |
self.set_input_embeddings(new_embeddings) | |
# if word embeddings are not tied, make sure that lm head is resized as well | |
if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings: | |
old_lm_head = self.get_output_embeddings() | |
new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens) | |
self.set_output_embeddings(new_lm_head) | |
return self.get_input_embeddings() | |
def _get_resized_embeddings( | |
self, old_embeddings: nn.Embedding, new_num_tokens: Optional[int] = None | |
) -> nn.Embedding: | |
""" | |
Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly | |
initialized vectors at the end. Reducing the size will remove vectors from the end | |
Args: | |
old_embeddings (:obj:`torch.nn.Embedding`): | |
Old embeddings to be resized. | |
new_num_tokens (:obj:`int`, `optional`): | |
New number of tokens in the embedding matrix. | |
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove | |
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens | |
:obj:`torch.nn.Embedding` module of the model without doing anything.
Return: | |
:obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if | |
:obj:`new_num_tokens` is :obj:`None` | |
""" | |
if new_num_tokens is None: | |
return old_embeddings | |
if is_deepspeed_zero3_enabled(): | |
import deepspeed | |
with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None): | |
old_num_tokens, old_embedding_dim = old_embeddings.weight.size() | |
else: | |
old_num_tokens, old_embedding_dim = old_embeddings.weight.size() | |
if old_num_tokens == new_num_tokens: | |
return old_embeddings | |
if not isinstance(old_embeddings, nn.Embedding): | |
raise TypeError( | |
f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}." | |
f"You should either use a different resize function or make sure that `old_embeddings` are an instance of {nn.Embedding}." | |
) | |
# Build new embeddings | |
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim).to( | |
self.device, dtype=old_embeddings.weight.dtype | |
) | |
# initialize all new embeddings (in particular added tokens) | |
self._init_weights(new_embeddings) | |
# Copy token embeddings from the previous weights | |
# numbers of tokens to copy | |
n = min(old_num_tokens, new_num_tokens) | |
if is_deepspeed_zero3_enabled(): | |
import deepspeed | |
with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=0): | |
if torch.distributed.get_rank() == 0: | |
new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :] | |
else: | |
new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :] | |
return new_embeddings | |
def _get_resized_lm_head( | |
self, old_lm_head: nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False | |
) -> nn.Linear: | |
""" | |
Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized | |
vectors at the end. Reducing the size will remove vectors from the end | |
Args: | |
old_lm_head (:obj:`torch.nn.Linear`): | |
Old lm head linear layer to be resized.
new_num_tokens (:obj:`int`, `optional`): | |
New number of tokens in the linear matrix. | |
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove | |
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens | |
:obj:`torch.nn.Linear` module of the model without doing anything.
transposed (:obj:`bool`, `optional`, defaults to :obj:`False`): | |
Whether ``old_lm_head`` is transposed or not. If True ``old_lm_head.size()`` is ``lm_head_dim, | |
vocab_size`` else ``vocab_size, lm_head_dim``. | |
Return: | |
:obj:`torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if | |
:obj:`new_num_tokens` is :obj:`None` | |
""" | |
if new_num_tokens is None: | |
return old_lm_head | |
if is_deepspeed_zero3_enabled(): | |
import deepspeed | |
with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None): | |
old_num_tokens, old_lm_head_dim = ( | |
old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size() | |
) | |
else: | |
old_num_tokens, old_lm_head_dim = ( | |
old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size() | |
) | |
if old_num_tokens == new_num_tokens: | |
return old_lm_head | |
if not isinstance(old_lm_head, nn.Linear): | |
raise TypeError( | |
f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}." | |
f"You should either use a different resize function or make sure that `old_lm_head` are an instance of {nn.Linear}." | |
) | |
# Build new lm head | |
new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim) | |
has_new_lm_head_bias = old_lm_head.bias is not None | |
new_lm_head = nn.Linear(*new_lm_head_shape, bias=has_new_lm_head_bias).to(self.device) | |
# initialize new lm head (in particular added tokens) | |
self._init_weights(new_lm_head) | |
num_tokens_to_copy = min(old_num_tokens, new_num_tokens) | |
# XXX: put the long block of code in a wrapper | |
if is_deepspeed_zero3_enabled(): | |
import deepspeed | |
with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=0): | |
if torch.distributed.get_rank() == 0: | |
# Copy old lm head weights to new lm head | |
if not transposed: | |
new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[ | |
:num_tokens_to_copy, : | |
] | |
else: | |
new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[ | |
:, :num_tokens_to_copy | |
] | |
# Copy bias weights to new lm head | |
if has_new_lm_head_bias: | |
new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy] | |
else: | |
# Copy old lm head weights to new lm head | |
if not transposed: | |
new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :] | |
else: | |
new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy] | |
# Copy bias weights to new lm head | |
if has_new_lm_head_bias: | |
new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy] | |
return new_lm_head | |
def init_weights(self): | |
""" | |
If needed, prunes heads and initializes the weights (unless weight initialization has been disabled).
""" | |
# Prune heads if needed | |
if self.config.pruned_heads: | |
self.prune_heads(self.config.pruned_heads) | |
if _init_weights: | |
# Initialize weights | |
self.apply(self._init_weights) | |
# Tie weights should be skipped when not initializing all weights | |
# since from_pretrained(...) calls tie weights anyways | |
self.tie_weights() | |
def prune_heads(self, heads_to_prune: Dict[int, List[int]]): | |
""" | |
Prunes heads of the base model. | |
Arguments: | |
heads_to_prune (:obj:`Dict[int, List[int]]`): | |
Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of | |
heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads | |
0 and 2 on layer 1 and heads 2 and 3 on layer 2. | |
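Example (illustrative sketch; the layer/head indices are made up)::
>>> model.prune_heads({1: [0, 2], 2: [2, 3]})  # prune heads 0 and 2 of layer 1 and heads 2 and 3 of layer 2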
""" | |
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads | |
for layer, heads in heads_to_prune.items(): | |
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads) | |
self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON | |
self.base_model._prune_heads(heads_to_prune) | |
def save_pretrained( | |
self, | |
save_directory: Union[str, os.PathLike], | |
save_config: bool = True, | |
state_dict: Optional[dict] = None, | |
save_function: Callable = torch.save, | |
push_to_hub: bool = False, | |
**kwargs, | |
): | |
""" | |
Save a model and its configuration file to a directory, so that it can be re-loaded using the | |
:func:`~transformers.PreTrainedModel.from_pretrained` class method.
Arguments: | |
save_directory (:obj:`str` or :obj:`os.PathLike`): | |
Directory to which to save. Will be created if it doesn't exist. | |
save_config (:obj:`bool`, `optional`, defaults to :obj:`True`): | |
Whether or not to save the config of the model. Useful for distributed training (e.g., on TPUs) where this
function needs to be called on all processes. In this case, set :obj:`save_config=True` only on the main
process to avoid race conditions.
state_dict (nested dictionary of :obj:`torch.Tensor`): | |
The state dictionary of the model to save. Will default to :obj:`self.state_dict()`, but can be used to | |
only save parts of the model or if special precautions need to be taken when recovering the state | |
dictionary of a model (like when using model parallelism). | |
save_function (:obj:`Callable`): | |
The function to use to save the state dictionary. Useful for distributed training (e.g., on TPUs) when one
needs to replace :obj:`torch.save` by another method.
push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`): | |
Whether or not to push your model to the Hugging Face model hub after saving it. | |
.. warning:: | |
Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with | |
:obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are | |
pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory | |
instead. | |
kwargs: | |
Additional keyword arguments passed along to the
:meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method. | |
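Example (illustrative sketch; the directory path is an assumption and ``model`` is e.g. a :class:`~transformers.BertModel`)::
>>> model.save_pretrained('./my_model_directory/')  # writes the configuration and the weights to the directory
>>> reloaded = BertModel.from_pretrained('./my_model_directory/')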
""" | |
if os.path.isfile(save_directory): | |
logger.error(f"Provided path ({save_directory}) should be a directory, not a file") | |
return | |
if push_to_hub: | |
commit_message = kwargs.pop("commit_message", None) | |
repo = self._create_or_get_repo(save_directory, **kwargs) | |
os.makedirs(save_directory, exist_ok=True) | |
# Only save the model itself if we are using distributed training | |
model_to_save = unwrap_model(self) | |
# save the string version of dtype to the config, e.g. convert torch.float32 => "float32" | |
# we currently don't use this setting automatically, but may start to use with v5 | |
dtype = get_parameter_dtype(model_to_save) | |
model_to_save.config.torch_dtype = str(dtype).split(".")[1] | |
# Attach architecture to the config | |
model_to_save.config.architectures = [model_to_save.__class__.__name__] | |
# Save the config | |
if save_config: | |
model_to_save.config.save_pretrained(save_directory) | |
# Save the model | |
if state_dict is None: | |
state_dict = model_to_save.state_dict() | |
# Handle the case where some state_dict keys shouldn't be saved | |
if self._keys_to_ignore_on_save is not None: | |
state_dict = {k: v for k, v in state_dict.items() if k not in self._keys_to_ignore_on_save} | |
# If we save using the predefined names, we can load using `from_pretrained` | |
output_model_file = os.path.join(save_directory, WEIGHTS_NAME) | |
save_function(state_dict, output_model_file) | |
logger.info(f"Model weights saved in {output_model_file}") | |
if push_to_hub: | |
url = self._push_to_hub(repo, commit_message=commit_message) | |
logger.info(f"Model pushed to the hub in this commit: {url}") | |
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
r""" | |
Instantiate a pretrained pytorch model from a pre-trained model configuration. | |
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated). To | |
train the model, you should first set it back in training mode with ``model.train()``. | |
The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come | |
pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning | |
task. | |
The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those | |
weights are discarded. | |
Parameters: | |
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`, `optional`): | |
Can be either: | |
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co. | |
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under | |
a user or organization name, like ``dbmdz/bert-base-german-cased``. | |
- A path to a `directory` containing model weights saved using | |
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. | |
- A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In | |
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided | |
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in | |
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. | |
- A path or url to a model folder containing a `flax checkpoint file` in `.msgpack` format (e.g, | |
``./flax_model/`` containing ``flax_model.msgpack``). In this case, ``from_flax`` should be set | |
to :obj:`True`. | |
- :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword | |
arguments ``config`` and ``state_dict``). | |
model_args (sequence of positional arguments, `optional`): | |
All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
config (:obj:`Union[PretrainedConfig, str, os.PathLike]`, `optional`): | |
Can be either: | |
- an instance of a class derived from :class:`~transformers.PretrainedConfig`, | |
- a string or path valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`. | |
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when: | |
- The model is a model provided by the library (loaded with the `model id` string of a pretrained | |
model). | |
- The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded | |
by supplying the save directory. | |
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a | |
configuration JSON file named `config.json` is found in the directory. | |
state_dict (:obj:`Dict[str, torch.Tensor]`, `optional`): | |
A state dictionary to use instead of a state dictionary loaded from saved weights file. | |
This option can be used if you want to create a model from a pretrained configuration but load your own | |
weights. In this case though, you should check if using | |
:func:`~transformers.PreTrainedModel.save_pretrained` and | |
:func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. | |
cache_dir (:obj:`Union[str, os.PathLike]`, `optional`): | |
Path to a directory in which a downloaded pretrained model configuration should be cached if the | |
standard cache should not be used. | |
from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`): | |
Load the model weights from a TensorFlow checkpoint save file (see docstring of | |
``pretrained_model_name_or_path`` argument). | |
from_flax (:obj:`bool`, `optional`, defaults to :obj:`False`): | |
Load the model weights from a Flax checkpoint save file (see docstring of | |
``pretrained_model_name_or_path`` argument). | |
ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`): | |
Whether or not to raise an error if some of the weights from the checkpoint do not have the same size | |
as the weights of the model (if for instance, you are instantiating a model with 10 labels from a | |
checkpoint with 3 labels). | |
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`): | |
Whether or not to force the (re-)download of the model weights and configuration files, overriding the | |
cached versions if they exist. | |
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`): | |
Whether or not to delete incompletely received files. Will attempt to resume the download if such a | |
file exists. | |
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128', | |
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. | |
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`): | |
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`): | |
Whether or not to only look at local files (i.e., do not try to download the model). | |
use_auth_token (:obj:`str` or `bool`, `optional`): | |
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token | |
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`). | |
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`): | |
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a | |
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any | |
identifier allowed by git. | |
mirror(:obj:`str`, `optional`): | |
Mirror source to accelerate downloads in China. If you are from China and have an accessibility | |
problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. | |
Please refer to the mirror site for more information. | |
_fast_init (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to use fast weight initialization; set to :obj:`False` to disable it (see the warning below).
torch_dtype (:obj:`str` or :obj:`torch.dtype`, `optional`): | |
Override the default ``torch.dtype`` and load the model under this dtype. If ``"auto"`` is passed the | |
dtype will be automatically derived from the model's weights. | |
.. warning:: | |
One should only disable `_fast_init` to ensure backwards compatibility with | |
``transformers.__version__ < 4.6.0`` for seeded model initialization. This argument will be removed | |
at the next major version. See `pull request 11471 | |
<https://github.com/huggingface/transformers/pull/11471>`__ for more information. | |
kwargs (remaining dictionary of keyword arguments, `optional`): | |
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., | |
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or | |
automatically loaded: | |
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the | |
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have | |
already been done) | |
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class | |
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of | |
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute | |
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration | |
attribute will be passed to the underlying model's ``__init__`` function. | |
.. note:: | |
Passing :obj:`use_auth_token=True` is required when you want to use a private model. | |
.. note:: | |
Activate the special `"offline-mode" | |
<https://huggingface.co/transformers/installation.html#offline-mode>`__ to use this method in a firewalled | |
environment. | |
Examples:: | |
>>> from transformers import BertConfig, BertModel | |
>>> # Download model and configuration from huggingface.co and cache. | |
>>> model = BertModel.from_pretrained('bert-base-uncased') | |
>>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable). | |
>>> model = BertModel.from_pretrained('./test/saved_model/') | |
>>> # Update configuration during loading. | |
>>> model = BertModel.from_pretrained('bert-base-uncased', output_attentions=True) | |
>>> assert model.config.output_attentions == True | |
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable). | |
>>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json') | |
>>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config) | |
>>> # Loading from a Flax checkpoint file instead of a PyTorch model (slower) | |
>>> model = BertModel.from_pretrained('bert-base-uncased', from_flax=True) | |
""" | |
config = kwargs.pop("config", None) | |
state_dict = kwargs.pop("state_dict", None) | |
cache_dir = kwargs.pop("cache_dir", None) | |
from_tf = kwargs.pop("from_tf", False) | |
from_flax = kwargs.pop("from_flax", False) | |
ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False) | |
force_download = kwargs.pop("force_download", False) | |
resume_download = kwargs.pop("resume_download", False) | |
proxies = kwargs.pop("proxies", None) | |
output_loading_info = kwargs.pop("output_loading_info", False) | |
local_files_only = kwargs.pop("local_files_only", False) | |
use_auth_token = kwargs.pop("use_auth_token", None) | |
revision = kwargs.pop("revision", None) | |
mirror = kwargs.pop("mirror", None) | |
from_pipeline = kwargs.pop("_from_pipeline", None) | |
from_auto_class = kwargs.pop("_from_auto", False) | |
_fast_init = kwargs.pop("_fast_init", True) | |
torch_dtype = kwargs.pop("torch_dtype", None) | |
from_pt = not (from_tf | from_flax) | |
user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class} | |
if from_pipeline is not None: | |
user_agent["using_pipeline"] = from_pipeline | |
if is_offline_mode() and not local_files_only: | |
logger.info("Offline mode: forcing local_files_only=True") | |
local_files_only = True | |
# Load config if we don't provide a configuration | |
if not isinstance(config, PretrainedConfig): | |
config_path = config if config is not None else pretrained_model_name_or_path | |
config, model_kwargs = cls.config_class.from_pretrained( | |
config_path, | |
*model_args, | |
cache_dir=cache_dir, | |
return_unused_kwargs=True, | |
force_download=force_download, | |
resume_download=resume_download, | |
proxies=proxies, | |
local_files_only=local_files_only, | |
use_auth_token=use_auth_token, | |
revision=revision, | |
_from_auto=from_auto_class, | |
_from_pipeline=from_pipeline, | |
**kwargs, | |
) | |
else: | |
model_kwargs = kwargs | |
# Load model | |
if pretrained_model_name_or_path is not None: | |
pretrained_model_name_or_path = str(pretrained_model_name_or_path) | |
if os.path.isdir(pretrained_model_name_or_path): | |
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")): | |
# Load from a TF 1.0 checkpoint in priority if from_tf | |
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index") | |
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): | |
# Load from a TF 2.0 checkpoint in priority if from_tf | |
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) | |
elif from_flax and os.path.isfile(os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)): | |
# Load from a Flax checkpoint in priority if from_flax | |
archive_file = os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME) | |
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): | |
# Load from a PyTorch checkpoint | |
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) | |
else: | |
raise EnvironmentError( | |
f"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + '.index', FLAX_WEIGHTS_NAME]} found in " | |
f"directory {pretrained_model_name_or_path} or `from_tf` and `from_flax` set to False." | |
) | |
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): | |
archive_file = pretrained_model_name_or_path | |
elif os.path.isfile(pretrained_model_name_or_path + ".index"): | |
if not from_tf: | |
raise ValueError( | |
f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set " | |
"from_tf to True to load from this checkpoint." | |
) | |
archive_file = pretrained_model_name_or_path + ".index" | |
else: | |
# set correct filename | |
if from_tf: | |
filename = TF2_WEIGHTS_NAME | |
elif from_flax: | |
filename = FLAX_WEIGHTS_NAME | |
else: | |
filename = WEIGHTS_NAME | |
archive_file = hf_bucket_url( | |
pretrained_model_name_or_path, | |
filename=filename, | |
revision=revision, | |
mirror=mirror, | |
) | |
try: | |
# Load from URL or cache if already cached | |
resolved_archive_file = cached_path( | |
archive_file, | |
cache_dir=cache_dir, | |
force_download=force_download, | |
proxies=proxies, | |
resume_download=resume_download, | |
local_files_only=local_files_only, | |
use_auth_token=use_auth_token, | |
user_agent=user_agent, | |
) | |
except EnvironmentError as err: | |
logger.error(err) | |
msg = ( | |
f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n" | |
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n" | |
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n" | |
) | |
raise EnvironmentError(msg) | |
if resolved_archive_file == archive_file: | |
logger.info(f"loading weights file {archive_file}") | |
else: | |
logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}") | |
else: | |
resolved_archive_file = None | |
# load pt weights early so that we know which dtype to init the model under | |
if from_pt: | |
if state_dict is None: | |
try: | |
state_dict = torch.load(resolved_archive_file, map_location="cpu") | |
except Exception: | |
raise OSError( | |
f"Unable to load weights from pytorch checkpoint file for '{pretrained_model_name_or_path}' " | |
f"at '{resolved_archive_file}'" | |
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. " | |
) | |
# set dtype to instantiate the model under: | |
# 1. If torch_dtype is not None, we use that dtype | |
# 2. If torch_dtype is "auto", we auto-detect dtype from the loaded state_dict, by checking its first | |
# weights entry - we assume all weights are of the same dtype | |
# we also may have config.torch_dtype available, but we won't rely on it till v5 | |
dtype_orig = None | |
if torch_dtype is not None: | |
if isinstance(torch_dtype, str): | |
if torch_dtype == "auto": | |
torch_dtype = next(iter(state_dict.values())).dtype | |
else: | |
raise ValueError( | |
f"`torch_dtype` can be either a `torch.dtype` or `auto`, but received {torch_dtype}" | |
) | |
dtype_orig = cls._set_default_torch_dtype(torch_dtype) | |
config.name_or_path = pretrained_model_name_or_path | |
# Instantiate model. | |
if is_deepspeed_zero3_enabled(): | |
import deepspeed | |
logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model") | |
# this immediately partitions the model across all gpus, to avoid the overhead in time | |
# and memory copying it on CPU or each GPU first | |
with deepspeed.zero.Init(config=deepspeed_config()): | |
with no_init_weights(_enable=_fast_init): | |
model = cls(config, *model_args, **model_kwargs) | |
else: | |
with no_init_weights(_enable=_fast_init): | |
model = cls(config, *model_args, **model_kwargs) | |
if from_pt: | |
# restore default dtype | |
if dtype_orig is not None: | |
torch.set_default_dtype(dtype_orig) | |
if from_tf: | |
if resolved_archive_file.endswith(".index"): | |
# Load from a TensorFlow 1.X checkpoint - provided by original authors | |
model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index' | |
else: | |
# Load from our TensorFlow 2.0 checkpoints | |
try: | |
from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model | |
model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True) | |
except ImportError: | |
logger.error( | |
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see " | |
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." | |
) | |
raise | |
elif from_flax: | |
try: | |
from .modeling_flax_pytorch_utils import load_flax_checkpoint_in_pytorch_model | |
model = load_flax_checkpoint_in_pytorch_model(model, resolved_archive_file) | |
except ImportError: | |
logger.error( | |
"Loading a Flax model in PyTorch, requires both PyTorch and Flax to be installed. Please see " | |
"https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation instructions." | |
) | |
raise | |
elif from_pt: | |
model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_state_dict_into_model( | |
model, | |
state_dict, | |
pretrained_model_name_or_path, | |
ignore_mismatched_sizes=ignore_mismatched_sizes, | |
_fast_init=_fast_init, | |
) | |
# make sure token embedding weights are still tied if needed | |
model.tie_weights() | |
# Set model in evaluation mode to deactivate DropOut modules by default | |
model.eval() | |
if output_loading_info: | |
loading_info = { | |
"missing_keys": missing_keys, | |
"unexpected_keys": unexpected_keys, | |
"mismatched_keys": mismatched_keys, | |
"error_msgs": error_msgs, | |
} | |
return model, loading_info | |
return model | |
@classmethod
def _load_state_dict_into_model(
cls, model, state_dict, pretrained_model_name_or_path, ignore_mismatched_sizes=False, _fast_init=True | |
): | |
# Convert old format to new format if needed from a PyTorch state_dict | |
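# e.g. (hypothetical key names) "encoder.LayerNorm.gamma" -> "encoder.LayerNorm.weight" and
# "encoder.LayerNorm.beta" -> "encoder.LayerNorm.bias"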
old_keys = [] | |
new_keys = [] | |
for key in state_dict.keys(): | |
new_key = None | |
if "gamma" in key: | |
new_key = key.replace("gamma", "weight") | |
if "beta" in key: | |
new_key = key.replace("beta", "bias") | |
if new_key: | |
old_keys.append(key) | |
new_keys.append(new_key) | |
for old_key, new_key in zip(old_keys, new_keys): | |
state_dict[new_key] = state_dict.pop(old_key) | |
# Retrieve missing & unexpected_keys | |
model_state_dict = model.state_dict() | |
expected_keys = list(model_state_dict.keys()) | |
loaded_keys = list(state_dict.keys()) | |
prefix = model.base_model_prefix | |
has_prefix_module = any(s.startswith(prefix) for s in loaded_keys) | |
expects_prefix_module = any(s.startswith(prefix) for s in expected_keys) | |
# key re-naming operations are never done on the keys | |
# that are loaded, but always on the keys of the newly initialized model | |
remove_prefix = not has_prefix_module and expects_prefix_module | |
add_prefix = has_prefix_module and not expects_prefix_module | |
if remove_prefix: | |
expected_keys = [".".join(s.split(".")[1:]) if s.startswith(prefix) else s for s in expected_keys] | |
elif add_prefix: | |
expected_keys = [".".join([prefix, s]) for s in expected_keys] | |
missing_keys = list(set(expected_keys) - set(loaded_keys)) | |
unexpected_keys = list(set(loaded_keys) - set(expected_keys)) | |
# Mismatched keys contain tuples (key, shape_in_checkpoint, shape_in_model) for weights in the checkpoint
# whose shape does not match the corresponding weight in the model.
mismatched_keys = [] | |
if ignore_mismatched_sizes: | |
for checkpoint_key in loaded_keys: | |
model_key = checkpoint_key | |
if remove_prefix and checkpoint_key.startswith(prefix): | |
model_key = ".".join(checkpoint_key.split(".")[1:]) | |
elif add_prefix: | |
model_key = f"{prefix}.{checkpoint_key}" | |
if ( | |
model_key in model_state_dict | |
and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape | |
): | |
mismatched_keys.append( | |
(checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape) | |
) | |
del state_dict[checkpoint_key] | |
# Some models may have keys that are not in the state dict by design; remove them before needlessly warning
# the user.
if cls._keys_to_ignore_on_load_missing is not None: | |
for pat in cls._keys_to_ignore_on_load_missing: | |
missing_keys = [k for k in missing_keys if re.search(pat, k) is None] | |
if cls._keys_to_ignore_on_load_unexpected is not None: | |
for pat in cls._keys_to_ignore_on_load_unexpected: | |
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] | |
if _fast_init: | |
# retrieve uninitialized modules and initialize them
uninitialized_modules = model.retrieve_modules_from_names(
missing_keys, add_prefix=add_prefix, remove_prefix=remove_prefix
)
for module in uninitialized_modules:
model._init_weights(module) | |
# copy state_dict so _load_from_state_dict can modify it | |
metadata = getattr(state_dict, "_metadata", None) | |
state_dict = state_dict.copy() | |
if metadata is not None: | |
state_dict._metadata = metadata | |
error_msgs = [] | |
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants | |
# so we need to apply the function recursively. | |
def load(module: nn.Module, prefix=""): | |
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) | |
args = (state_dict, prefix, local_metadata, True, [], [], error_msgs) | |
if is_deepspeed_zero3_enabled(): | |
import deepspeed | |
# because zero3 puts placeholders in model params, this context | |
# manager gathers (unpartitions) the params of the current layer, then loads from | |
# the state dict and then re-partitions them again | |
with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0): | |
if torch.distributed.get_rank() == 0: | |
module._load_from_state_dict(*args) | |
else: | |
module._load_from_state_dict(*args) | |
for name, child in module._modules.items(): | |
if child is not None: | |
load(child, prefix + name + ".") | |
# Make sure we are able to load base models as well as derived models (with heads) | |
start_prefix = "" | |
model_to_load = model | |
if not hasattr(model, cls.base_model_prefix) and has_prefix_module: | |
start_prefix = cls.base_model_prefix + "." | |
if hasattr(model, cls.base_model_prefix) and not has_prefix_module: | |
model_to_load = getattr(model, cls.base_model_prefix) | |
load(model_to_load, prefix=start_prefix) | |
if len(error_msgs) > 0: | |
error_msg = "\n\t".join(error_msgs) | |
raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}") | |
if len(unexpected_keys) > 0: | |
logger.warning( | |
f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when " | |
f"initializing {model.__class__.__name__}: {unexpected_keys}\n" | |
f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task " | |
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n" | |
f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect " | |
f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." | |
) | |
else: | |
logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") | |
if len(missing_keys) > 0: | |
logger.warning( | |
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} " | |
f"and are newly initialized: {missing_keys}\n" | |
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference." | |
) | |
elif len(mismatched_keys) == 0: | |
logger.info( | |
f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n" | |
f"If your task is similar to the task the model of the checkpoint was trained on, " | |
f"you can already use {model.__class__.__name__} for predictions without further training." | |
) | |
if len(mismatched_keys) > 0: | |
mismatched_warning = "\n".join( | |
[ | |
f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" | |
for key, shape1, shape2 in mismatched_keys | |
] | |
) | |
logger.warning( | |
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} " | |
f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n" | |
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference." | |
) | |
return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs | |
def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False): | |
module_keys = set([".".join(key.split(".")[:-1]) for key in names]) | |
# torch.nn.ParameterList is a special case where two parameter keywords | |
# are appended to the module name, *e.g.* bert.special_embeddings.0 | |
module_keys = module_keys.union(set([".".join(key.split(".")[:-2]) for key in names if key[-1].isdigit()])) | |
retrieved_modules = [] | |
# retrieve all modules that have at least one missing weight name
for name, module in self.named_modules(): | |
if remove_prefix: | |
name = ".".join(name.split(".")[1:]) if name.startswith(self.base_model_prefix) else name | |
elif add_prefix: | |
name = ".".join([self.base_model_prefix, name]) if len(name) > 0 else self.base_model_prefix | |
if name in module_keys: | |
retrieved_modules.append(module) | |
return retrieved_modules | |
# To update the docstring, we need to copy the method, otherwise we change the original docstring. | |
PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub) | |
PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format( | |
object="model", object_class="AutoModel", object_files="model checkpoint" | |
) | |
class Conv1D(nn.Module): | |
""" | |
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). | |
Basically works like a linear layer but the weights are transposed. | |
Args: | |
nf (:obj:`int`): The number of output features. | |
nx (:obj:`int`): The number of input features. | |
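Example (illustrative sketch; random inputs, only the shapes matter)::

    layer = Conv1D(nf=12, nx=8)
    x = torch.rand(2, 5, 8)
    layer(x).shape  # torch.Size([2, 5, 12])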
""" | |
def __init__(self, nf, nx): | |
super().__init__() | |
self.nf = nf | |
w = torch.empty(nx, nf) | |
nn.init.normal_(w, std=0.02) | |
self.weight = nn.Parameter(w) | |
self.bias = nn.Parameter(torch.zeros(nf)) | |
def forward(self, x): | |
size_out = x.size()[:-1] + (self.nf,) | |
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) | |
x = x.view(*size_out) | |
return x | |
class PoolerStartLogits(nn.Module): | |
""" | |
Compute SQuAD start logits from sequence hidden states. | |
Args: | |
config (:class:`~transformers.PretrainedConfig`): | |
The config used by the model, will be used to grab the :obj:`hidden_size` of the model. | |
""" | |
def __init__(self, config: PretrainedConfig): | |
super().__init__() | |
self.dense = nn.Linear(config.hidden_size, 1) | |
def forward( | |
self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None | |
) -> torch.FloatTensor: | |
""" | |
Args: | |
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`): | |
The final hidden states of the model. | |
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`): | |
Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
token should be masked.
Returns: | |
:obj:`torch.FloatTensor`: The start logits for SQuAD. | |
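Example (illustrative sketch with a hypothetical minimal config; only the shapes matter)::

    config = PretrainedConfig(hidden_size=8)
    pooler = PoolerStartLogits(config)
    hidden_states = torch.rand(2, 7, 8)
    p_mask = torch.zeros(2, 7)
    p_mask[:, 0] = 1.0  # e.g. mask a leading special token
    start_logits = pooler(hidden_states, p_mask=p_mask)  # shape (2, 7)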
""" | |
x = self.dense(hidden_states).squeeze(-1) | |
if p_mask is not None: | |
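# masked positions get a large negative offset; in fp16 the offset must stay within the
# half-precision range (largest finite value ~65504), hence the smaller constant below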
if get_parameter_dtype(self) == torch.float16: | |
x = x * (1 - p_mask) - 65500 * p_mask | |
else: | |
x = x * (1 - p_mask) - 1e30 * p_mask | |
return x | |
class PoolerEndLogits(nn.Module): | |
""" | |
Compute SQuAD end logits from sequence hidden states. | |
Args: | |
config (:class:`~transformers.PretrainedConfig`): | |
The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the | |
:obj:`layer_norm_eps` to use. | |
""" | |
def __init__(self, config: PretrainedConfig): | |
super().__init__() | |
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) | |
self.activation = nn.Tanh() | |
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) | |
self.dense_1 = nn.Linear(config.hidden_size, 1) | |
def forward( | |
self, | |
hidden_states: torch.FloatTensor, | |
start_states: Optional[torch.FloatTensor] = None, | |
start_positions: Optional[torch.LongTensor] = None, | |
p_mask: Optional[torch.FloatTensor] = None, | |
) -> torch.FloatTensor: | |
""" | |
Args: | |
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`): | |
The final hidden states of the model. | |
start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`): | |
The hidden states of the first tokens for the labeled span. | |
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): | |
The position of the first token for the labeled span. | |
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`): | |
Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
token should be masked.
.. note:: | |
One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
``start_positions`` overrides ``start_states``.
Returns: | |
:obj:`torch.FloatTensor`: The end logits for SQuAD. | |
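Example (illustrative sketch with a hypothetical minimal config; only the shapes matter)::

    config = PretrainedConfig(hidden_size=8, layer_norm_eps=1e-12)
    pooler = PoolerEndLogits(config)
    hidden_states = torch.rand(2, 7, 8)
    start_positions = torch.tensor([1, 3])
    end_logits = pooler(hidden_states, start_positions=start_positions)  # shape (2, 7)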
""" | |
assert ( | |
start_states is not None or start_positions is not None | |
), "One of start_states, start_positions should be not None" | |
if start_positions is not None: | |
slen, hsz = hidden_states.shape[-2:] | |
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) | |
start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz) | |
start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz) | |
x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1)) | |
x = self.activation(x) | |
x = self.LayerNorm(x) | |
x = self.dense_1(x).squeeze(-1) | |
if p_mask is not None: | |
if get_parameter_dtype(self) == torch.float16: | |
x = x * (1 - p_mask) - 65500 * p_mask | |
else: | |
x = x * (1 - p_mask) - 1e30 * p_mask | |
return x | |
class PoolerAnswerClass(nn.Module): | |
""" | |
Compute SQuAD 2.0 answer class from classification and start tokens hidden states. | |
Args: | |
config (:class:`~transformers.PretrainedConfig`): | |
The config used by the model, will be used to grab the :obj:`hidden_size` of the model. | |
""" | |
def __init__(self, config): | |
super().__init__() | |
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) | |
self.activation = nn.Tanh() | |
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) | |
def forward( | |
self, | |
hidden_states: torch.FloatTensor, | |
start_states: Optional[torch.FloatTensor] = None, | |
start_positions: Optional[torch.LongTensor] = None, | |
cls_index: Optional[torch.LongTensor] = None, | |
) -> torch.FloatTensor: | |
""" | |
Args: | |
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`): | |
The final hidden states of the model. | |
start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`): | |
The hidden states of the first tokens for the labeled span. | |
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): | |
The position of the first token for the labeled span. | |
cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): | |
Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token. | |
.. note:: | |
One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
``start_positions`` overrides ``start_states``.
Returns: | |
:obj:`torch.FloatTensor`: The SQuAD 2.0 answer class. | |
""" | |
# No dependency on end_feature so that we can obtain one single `cls_logits` for each sample. | |
hsz = hidden_states.shape[-1] | |
assert ( | |
start_states is not None or start_positions is not None | |
), "One of start_states, start_positions should be not None" | |
if start_positions is not None: | |
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) | |
start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz) | |
if cls_index is not None: | |
cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) | |
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz) | |
else: | |
cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz) | |
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)) | |
x = self.activation(x) | |
x = self.dense_1(x).squeeze(-1) | |
return x | |
@dataclass
class SquadHeadOutput(ModelOutput):
""" | |
Base class for outputs of question answering models using a :class:`~transformers.modeling_utils.SQuADHead`. | |
Args: | |
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided): | |
Classification loss as the sum of start token, end token (and is_impossible if provided) classification | |
losses. | |
start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): | |
Log probabilities for the top config.start_n_top start token possibilities (beam-search). | |
start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): | |
Indices for the top config.start_n_top start token possibilities (beam-search). | |
end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): | |
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities | |
(beam-search). | |
end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): | |
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). | |
cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): | |
Log probabilities for the ``is_impossible`` label of the answers. | |
""" | |
loss: Optional[torch.FloatTensor] = None | |
start_top_log_probs: Optional[torch.FloatTensor] = None | |
start_top_index: Optional[torch.LongTensor] = None | |
end_top_log_probs: Optional[torch.FloatTensor] = None | |
end_top_index: Optional[torch.LongTensor] = None | |
cls_logits: Optional[torch.FloatTensor] = None | |
class SQuADHead(nn.Module): | |
r""" | |
A SQuAD head inspired by XLNet. | |
Args: | |
config (:class:`~transformers.PretrainedConfig`): | |
The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the | |
:obj:`layer_norm_eps` to use. | |
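Example (illustrative sketch with a hypothetical minimal config; exercises the inference branch)::

    config = PretrainedConfig(hidden_size=8, layer_norm_eps=1e-12, start_n_top=2, end_n_top=2)
    head = SQuADHead(config)
    hidden_states = torch.rand(2, 7, 8)
    outputs = head(hidden_states, return_dict=True)
    outputs.start_top_log_probs.shape  # torch.Size([2, 2])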
""" | |
def __init__(self, config): | |
super().__init__() | |
self.start_n_top = config.start_n_top | |
self.end_n_top = config.end_n_top | |
self.start_logits = PoolerStartLogits(config) | |
self.end_logits = PoolerEndLogits(config) | |
self.answer_class = PoolerAnswerClass(config) | |
@replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig)
def forward(
self, | |
hidden_states: torch.FloatTensor, | |
start_positions: Optional[torch.LongTensor] = None, | |
end_positions: Optional[torch.LongTensor] = None, | |
cls_index: Optional[torch.LongTensor] = None, | |
is_impossible: Optional[torch.LongTensor] = None, | |
p_mask: Optional[torch.FloatTensor] = None, | |
return_dict: bool = False, | |
) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]: | |
""" | |
Args: | |
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`): | |
Final hidden states of the model on the sequence tokens. | |
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): | |
Positions of the first token for the labeled span. | |
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): | |
Positions of the last token for the labeled span. | |
cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): | |
Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token. | |
is_impossible (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): | |
Whether the question has a possible answer in the paragraph or not. | |
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`): | |
Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
token should be masked.
return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`): | |
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. | |
Returns: | |
""" | |
start_logits = self.start_logits(hidden_states, p_mask=p_mask) | |
if start_positions is not None and end_positions is not None: | |
# If we are on multi-GPU, let's remove the dimension added by batch splitting | |
for x in (start_positions, end_positions, cls_index, is_impossible): | |
if x is not None and x.dim() > 1: | |
x.squeeze_(-1) | |
# during training, compute the end logits based on the ground truth of the start position | |
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) | |
loss_fct = CrossEntropyLoss() | |
start_loss = loss_fct(start_logits, start_positions) | |
end_loss = loss_fct(end_logits, end_positions) | |
total_loss = (start_loss + end_loss) / 2 | |
if cls_index is not None and is_impossible is not None: | |
# Predict answerability from the representation of CLS and START | |
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) | |
loss_fct_cls = nn.BCEWithLogitsLoss() | |
cls_loss = loss_fct_cls(cls_logits, is_impossible) | |
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss | |
total_loss += cls_loss * 0.5 | |
return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,) | |
else: | |
# during inference, compute the end logits based on beam search | |
bsz, slen, hsz = hidden_states.size() | |
start_log_probs = nn.functional.softmax(start_logits, dim=-1) # shape (bsz, slen) | |
start_top_log_probs, start_top_index = torch.topk( | |
start_log_probs, self.start_n_top, dim=-1 | |
) # shape (bsz, start_n_top) | |
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) | |
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) | |
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) | |
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as( | |
start_states | |
) # shape (bsz, slen, start_n_top, hsz) | |
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None | |
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) | |
end_log_probs = nn.functional.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) | |
end_top_log_probs, end_top_index = torch.topk( | |
end_log_probs, self.end_n_top, dim=1 | |
) # shape (bsz, end_n_top, start_n_top) | |
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) | |
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) | |
start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs) | |
cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) | |
if not return_dict: | |
return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) | |
else: | |
return SquadHeadOutput( | |
start_top_log_probs=start_top_log_probs, | |
start_top_index=start_top_index, | |
end_top_log_probs=end_top_log_probs, | |
end_top_index=end_top_index, | |
cls_logits=cls_logits, | |
) | |
class SequenceSummary(nn.Module): | |
r""" | |
Compute a single vector summary of a sequence hidden states. | |
Args: | |
config (:class:`~transformers.PretrainedConfig`): | |
The config used by the model. Relevant arguments in the config class of the model are (refer to the actual | |
config class of your model for the default values it uses): | |
- **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are: | |
- :obj:`"last"` -- Take the last token hidden state (like XLNet) | |
- :obj:`"first"` -- Take the first token hidden state (like Bert) | |
- :obj:`"mean"` -- Take the mean of all tokens hidden states | |
- :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) | |
- :obj:`"attn"` -- Not implemented now, use multi-head attention | |
- **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction. | |
- **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to | |
:obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`). | |
- **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the | |
output; any other string or :obj:`None` will add no activation.
- **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and | |
activation. | |
- **summary_last_dropout** (:obj:`float`) -- Optional dropout probability after the projection and
activation. | |
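Example (illustrative sketch with a hypothetical minimal config using only the attributes above)::

    config = PretrainedConfig(hidden_size=8, summary_type="mean", summary_use_proj=False)
    summary = SequenceSummary(config)
    hidden_states = torch.rand(2, 5, 8)
    pooled = summary(hidden_states)  # shape (2, 8)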
""" | |
def __init__(self, config: PretrainedConfig): | |
super().__init__() | |
self.summary_type = getattr(config, "summary_type", "last") | |
if self.summary_type == "attn": | |
# We should use a standard multi-head attention module with absolute positional embedding for that. | |
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 | |
# We can probably just use the multi-head attention module of PyTorch >=1.1.0 | |
raise NotImplementedError | |
self.summary = Identity() | |
if hasattr(config, "summary_use_proj") and config.summary_use_proj: | |
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: | |
num_classes = config.num_labels | |
else: | |
num_classes = config.hidden_size | |
self.summary = nn.Linear(config.hidden_size, num_classes) | |
activation_string = getattr(config, "summary_activation", None) | |
self.activation: Callable = get_activation(activation_string) if activation_string else Identity() | |
self.first_dropout = Identity() | |
if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0: | |
self.first_dropout = nn.Dropout(config.summary_first_dropout) | |
self.last_dropout = Identity() | |
if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: | |
self.last_dropout = nn.Dropout(config.summary_last_dropout) | |
def forward( | |
self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None | |
) -> torch.FloatTensor: | |
""" | |
Compute a single vector summary of a sequence hidden states. | |
Args: | |
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`[batch_size, seq_len, hidden_size]`): | |
The hidden states of the last layer. | |
cls_index (:obj:`torch.LongTensor` of shape :obj:`[batch_size]` or :obj:`[batch_size, ...]` where ... are optional leading dimensions of :obj:`hidden_states`, `optional`): | |
Used if :obj:`summary_type == "cls_index"`; position of the classification token for each example. If
:obj:`None`, the last token of the sequence is used.
Returns: | |
:obj:`torch.FloatTensor`: The summary of the sequence hidden states. | |
""" | |
if self.summary_type == "last": | |
output = hidden_states[:, -1] | |
elif self.summary_type == "first": | |
output = hidden_states[:, 0] | |
elif self.summary_type == "mean": | |
output = hidden_states.mean(dim=1) | |
elif self.summary_type == "cls_index": | |
if cls_index is None: | |
cls_index = torch.full_like( | |
hidden_states[..., :1, :], | |
hidden_states.shape[-2] - 1, | |
dtype=torch.long, | |
) | |
else: | |
cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) | |
cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) | |
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states | |
output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) | |
elif self.summary_type == "attn": | |
raise NotImplementedError | |
output = self.first_dropout(output) | |
output = self.summary(output) | |
output = self.activation(output) | |
output = self.last_dropout(output) | |
return output | |
def unwrap_model(model: nn.Module) -> nn.Module: | |
""" | |
Recursively unwraps a model from potential containers (as used in distributed training). | |
Args: | |
model (:obj:`torch.nn.Module`): The model to unwrap. | |
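Example (illustrative sketch; :obj:`nn.DataParallel` exposes the wrapped model via its ``module`` attribute)::

    model = nn.Linear(4, 4)
    wrapped = nn.DataParallel(model)
    unwrap_model(wrapped) is model  # True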
""" | |
# since there could be multiple levels of wrapping, unwrap recursively | |
if hasattr(model, "module"): | |
return unwrap_model(model.module) | |
else: | |
return model | |
def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear: | |
""" | |
Prune a linear layer to keep only entries in index. | |
Used to remove heads. | |
Args: | |
layer (:obj:`torch.nn.Linear`): The layer to prune. | |
index (:obj:`torch.LongTensor`): The indices to keep in the layer. | |
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices. | |
Returns: | |
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`. | |
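Example (illustrative sketch; keeps 3 of the 4 output features)::

    layer = nn.Linear(6, 4)
    index = torch.tensor([0, 2, 3])
    pruned = prune_linear_layer(layer, index, dim=0)
    pruned.weight.shape  # torch.Size([3, 6])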
""" | |
index = index.to(layer.weight.device) | |
W = layer.weight.index_select(dim, index).clone().detach() | |
if layer.bias is not None: | |
if dim == 1: | |
b = layer.bias.clone().detach() | |
else: | |
b = layer.bias[index].clone().detach() | |
new_size = list(layer.weight.size()) | |
new_size[dim] = len(index) | |
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device) | |
new_layer.weight.requires_grad = False | |
new_layer.weight.copy_(W.contiguous()) | |
new_layer.weight.requires_grad = True | |
if layer.bias is not None: | |
new_layer.bias.requires_grad = False | |
new_layer.bias.copy_(b.contiguous()) | |
new_layer.bias.requires_grad = True | |
return new_layer | |
def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D: | |
""" | |
Prune a Conv1D layer to keep only entries in index. A Conv1D works like a linear layer (used e.g. in GPT-2) but
with transposed weights.
Used to remove heads. | |
Args: | |
layer (:class:`~transformers.modeling_utils.Conv1D`): The layer to prune. | |
index (:obj:`torch.LongTensor`): The indices to keep in the layer. | |
dim (:obj:`int`, `optional`, defaults to 1): The dimension on which to keep the indices. | |
Returns: | |
:class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with :obj:`requires_grad=True`. | |
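Example (illustrative sketch; note the transposed weight layout compared to :obj:`nn.Linear`)::

    layer = Conv1D(nf=4, nx=6)  # weight has shape (6, 4)
    index = torch.tensor([0, 2, 3])
    pruned = prune_conv1d_layer(layer, index, dim=1)
    pruned.weight.shape  # torch.Size([6, 3])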
""" | |
index = index.to(layer.weight.device) | |
W = layer.weight.index_select(dim, index).clone().detach() | |
if dim == 0: | |
b = layer.bias.clone().detach() | |
else: | |
b = layer.bias[index].clone().detach() | |
new_size = list(layer.weight.size()) | |
new_size[dim] = len(index) | |
new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device) | |
new_layer.weight.requires_grad = False | |
new_layer.weight.copy_(W.contiguous()) | |
new_layer.weight.requires_grad = True | |
new_layer.bias.requires_grad = False | |
new_layer.bias.copy_(b.contiguous()) | |
new_layer.bias.requires_grad = True | |
return new_layer | |
def prune_layer( | |
layer: Union[nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None | |
) -> Union[nn.Linear, Conv1D]: | |
""" | |
Prune a Conv1D or linear layer to keep only entries in index. | |
Used to remove heads. | |
Args: | |
layer (:obj:`Union[torch.nn.Linear, Conv1D]`): The layer to prune. | |
index (:obj:`torch.LongTensor`): The indices to keep in the layer. | |
dim (:obj:`int`, `optional`): The dimension on which to keep the indices. | |
Returns: | |
:obj:`torch.nn.Linear` or :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with | |
:obj:`requires_grad=True`. | |
""" | |
if isinstance(layer, nn.Linear): | |
return prune_linear_layer(layer, index, dim=0 if dim is None else dim) | |
elif isinstance(layer, Conv1D): | |
return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim) | |
else: | |
raise ValueError(f"Can't prune layer of class {layer.__class__}") | |
def apply_chunking_to_forward( | |
forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors | |
) -> torch.Tensor: | |
""" | |
This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the | |
dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory. | |
If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as | |
directly applying :obj:`forward_fn` to :obj:`input_tensors`. | |
Args: | |
forward_fn (:obj:`Callable[..., torch.Tensor]`): | |
The forward function of the model. | |
chunk_size (:obj:`int`): | |
The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`. | |
chunk_dim (:obj:`int`): | |
The dimension over which the :obj:`input_tensors` should be chunked. | |
input_tensors (:obj:`Tuple[torch.Tensor]`): | |
The input tensors of ``forward_fn`` which will be chunked | |
Returns: | |
:obj:`torch.Tensor`: A tensor with the same shape as the one :obj:`forward_fn` would have given if applied directly.
Examples:: | |
# rename the usual forward() fn to forward_chunk() | |
def forward_chunk(self, hidden_states): | |
hidden_states = self.decoder(hidden_states) | |
return hidden_states | |
# implement a chunked forward function | |
def forward(self, hidden_states): | |
return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states) | |
""" | |
assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors" | |
tensor_shape = input_tensors[0].shape[chunk_dim] | |
assert all( | |
input_tensor.shape[chunk_dim] == tensor_shape for input_tensor in input_tensors | |
), "All input tenors have to be of the same shape" | |
# inspect.signature has existed since Python 3.5 and is a Python method -> no problem with backward compatibility
num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters) | |
if num_args_in_forward_chunk_fn != len(input_tensors): | |
raise ValueError( | |
f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input " | |
"tensors are given" | |
) | |
if chunk_size > 0: | |
if input_tensors[0].shape[chunk_dim] % chunk_size != 0: | |
raise ValueError( | |
f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk " | |
f"size {chunk_size}" | |
) | |
num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size | |
# chunk input tensor into tuples | |
input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors) | |
# apply forward fn to every tuple | |
output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks)) | |
# concatenate output at same dimension | |
return torch.cat(output_chunks, dim=chunk_dim) | |
return forward_fn(*input_tensors) | |