Dataset columns (one row per source file):

Column            Type                  Range / values
repo              string                length 2 - 99
file              string                length 13 - 225
code              string                length 0 - 18.3M
file_length       int64                 0 - 18.3M
avg_line_length   float64               0 - 1.36M
max_line_length   int64                 0 - 4.26M
extension_type    string (categorical)  1 distinct value
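The rows below hold one source file each, in the column order above. As a rough, hypothetical sketch of how a dataset with this schema could be inspected with the `datasets` library (the dataset id below is a placeholder, not the real one):

from datasets import load_dataset

# "org/slt-fai-code" is a placeholder id; substitute the actual dataset path.
ds = load_dataset("org/slt-fai-code", split="train")
for row in ds.select(range(3)):
    print(row["repo"], row["file"], row["file_length"], row["avg_line_length"], row["extension_type"])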
repo: SLT-FAI
file: SLT-FAI-main/transformers/convert_bert_original_tf2_checkpoint_to_pytorch.py
""" This script can be used to convert a head-less TF2.x Bert model to PyTorch, as published on the official GitHub: https://github.com/tensorflow/models/tree/master/official/nlp/bert TF2.x uses different variable names from the original BERT (TF 1.4) implementation. The script re-maps the TF2.x Bert weight names to the original names, so the model can be imported with Huggingface/transformer. You may adapt this script to include classification/MLM/NSP/etc. heads. """ import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def load_tf2_weights_in_bert(model, tf_checkpoint_path, config): tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] layer_depth = [] for full_name, shape in init_vars: # logger.info("Loading TF weight {} with shape {}".format(name, shape)) name = full_name.split("/") if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(f"Skipping non-model layer {full_name}") continue if "optimizer" in full_name: logger.info(f"Skipping optimization layer {full_name}") continue if name[0] == "model": # ignore initial 'model' name = name[1:] # figure out how many levels deep the name is depth = 0 for _name in name: if _name.startswith("layer_with_weights"): depth += 1 else: break layer_depth.append(depth) # read data array = tf.train.load_variable(tf_path, full_name) names.append("/".join(name)) arrays.append(array) logger.info(f"Read a total of {len(arrays):,} layers") # Sanity check if len(set(layer_depth)) != 1: raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})") layer_depth = list(set(layer_depth))[0] if layer_depth != 1: raise ValueError( "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP heads." 
) # convert layers logger.info("Converting weights...") for full_name, array in zip(names, arrays): name = full_name.split("/") pointer = model trace = [] for i, m_name in enumerate(name): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("layer_with_weights"): layer_num = int(m_name.split("-")[-1]) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["embeddings", "LayerNorm"]) pointer = getattr(pointer, "embeddings") pointer = getattr(pointer, "LayerNorm") elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["encoder", "layer", str(layer_num - 4)]) pointer = getattr(pointer, "encoder") pointer = getattr(pointer, "layer") pointer = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["pooler", "dense"]) pointer = getattr(pointer, "pooler") pointer = getattr(pointer, "dense") elif m_name == "embeddings": trace.append("embeddings") pointer = getattr(pointer, "embeddings") if layer_num == 0: trace.append("word_embeddings") pointer = getattr(pointer, "word_embeddings") elif layer_num == 1: trace.append("position_embeddings") pointer = getattr(pointer, "position_embeddings") elif layer_num == 2: trace.append("token_type_embeddings") pointer = getattr(pointer, "token_type_embeddings") else: raise ValueError("Unknown embedding layer with name {full_name}") trace.append("weight") pointer = getattr(pointer, "weight") elif m_name == "_attention_layer": # self-attention layer trace.extend(["attention", "self"]) pointer = getattr(pointer, "attention") pointer = getattr(pointer, "self") elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["attention", "output", "LayerNorm"]) pointer = getattr(pointer, "attention") pointer = getattr(pointer, "output") pointer = getattr(pointer, "LayerNorm") elif m_name == "_attention_output_dense": # output attention dense trace.extend(["attention", "output", "dense"]) pointer = getattr(pointer, "attention") pointer = getattr(pointer, "output") pointer = getattr(pointer, "dense") elif m_name == "_output_dense": # output dense trace.extend(["output", "dense"]) pointer = getattr(pointer, "output") pointer = getattr(pointer, "dense") elif m_name == "_output_layer_norm": # output dense trace.extend(["output", "LayerNorm"]) pointer = getattr(pointer, "output") pointer = getattr(pointer, "LayerNorm") elif m_name == "_key_dense": # attention key trace.append("key") pointer = getattr(pointer, "key") elif m_name == "_query_dense": # attention query trace.append("query") pointer = getattr(pointer, "query") elif m_name == "_value_dense": # attention value trace.append("value") pointer = getattr(pointer, "value") elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(["intermediate", "dense"]) pointer = getattr(pointer, "intermediate") pointer = getattr(pointer, "dense") elif m_name == "_output_layer_norm": # output layer norm trace.append("output") pointer = getattr(pointer, "output") # weights & biases elif m_name in ["bias", "beta"]: trace.append("bias") pointer = getattr(pointer, "bias") elif m_name in ["kernel", "gamma"]: trace.append("weight") pointer = getattr(pointer, "weight") else: logger.warning(f"Ignored {m_name}") # for certain layers reshape is necessary trace = ".".join(trace) if 
re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match( r"(\S+)\.attention\.output\.dense\.weight", trace ): array = array.reshape(pointer.data.shape) if "kernel" in full_name: array = array.transpose() if pointer.shape == array.shape: pointer.data = torch.from_numpy(array) else: raise ValueError( f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape: {array.shape}" ) logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}") return model def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path): # Instantiate model logger.info(f"Loading model based on config from {config_path}...") config = BertConfig.from_json_file(config_path) model = BertModel(config) # Load weights from checkpoint logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...") load_tf2_weights_in_bert(model, tf_checkpoint_path, config) # Save pytorch-model logger.info(f"Saving PyTorch model to {pytorch_dump_path}...") torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model (must include filename).", ) args = parser.parse_args() convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
file_length: 9,664 | avg_line_length: 41.577093 | max_line_length: 131 | extension_type: py
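Besides the argparse command-line interface, the converter defined in the file above can be driven from Python; a minimal sketch, assuming the script's directory is importable and with every path as a placeholder:

# Sketch of calling the entry point defined above; all paths are placeholders.
from convert_bert_original_tf2_checkpoint_to_pytorch import convert_tf2_checkpoint_to_pytorch

convert_tf2_checkpoint_to_pytorch(
    tf_checkpoint_path="path/to/tf2_checkpoint",    # TF2.x checkpoint prefix
    config_path="path/to/bert_config.json",         # BERT config json describing the architecture
    pytorch_dump_path="path/to/pytorch_model.bin",  # where the converted weights are written
)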
repo: SLT-FAI
file: SLT-FAI-main/transformers/hf_api.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import io import os from os.path import expanduser from typing import Dict, List, Optional, Tuple from tqdm import tqdm import requests ENDPOINT = "https://huggingface.co" class S3Obj: """ Data structure that represents a file belonging to the current user. """ def __init__(self, filename: str, LastModified: str, ETag: str, Size: int, **kwargs): self.filename = filename self.LastModified = LastModified self.ETag = ETag self.Size = Size class PresignedUrl: def __init__(self, write: str, access: str, type: str, **kwargs): self.write = write self.access = access self.type = type # mime-type to send to S3. class S3Object: """ Data structure that represents a public file accessible on our S3. """ def __init__( self, key: str, # S3 object key etag: str, lastModified: str, size: int, rfilename: str, # filename relative to config.json **kwargs ): self.key = key self.etag = etag self.lastModified = lastModified self.size = size self.rfilename = rfilename for k, v in kwargs.items(): setattr(self, k, v) class ModelInfo: """ Info about a public model accessible from our S3. """ def __init__( self, modelId: str, # id of model key: str, # S3 object key of config.json author: Optional[str] = None, downloads: Optional[int] = None, tags: List[str] = [], pipeline_tag: Optional[str] = None, siblings: Optional[List[Dict]] = None, # list of files that constitute the model **kwargs ): self.modelId = modelId self.key = key self.author = author self.downloads = downloads self.tags = tags self.pipeline_tag = pipeline_tag self.siblings = [S3Object(**x) for x in siblings] if siblings is not None else None for k, v in kwargs.items(): setattr(self, k, v) class HfApi: def __init__(self, endpoint=None): self.endpoint = endpoint if endpoint is not None else ENDPOINT def login(self, username: str, password: str) -> str: """ Call HF API to sign in a user and get a token if credentials are valid. Outputs: token if credentials are valid Throws: requests.exceptions.HTTPError if credentials are invalid """ path = "{}/api/login".format(self.endpoint) r = requests.post(path, json={"username": username, "password": password}) r.raise_for_status() d = r.json() return d["token"] def whoami(self, token: str) -> Tuple[str, List[str]]: """ Call HF API to know "whoami" """ path = "{}/api/whoami".format(self.endpoint) r = requests.get(path, headers={"authorization": "Bearer {}".format(token)}) r.raise_for_status() d = r.json() return d["user"], d["orgs"] def logout(self, token: str) -> None: """ Call HF API to log out. """ path = "{}/api/logout".format(self.endpoint) r = requests.post(path, headers={"authorization": "Bearer {}".format(token)}) r.raise_for_status() def presign(self, token: str, filename: str, organization: Optional[str] = None) -> PresignedUrl: """ Call HF API to get a presigned url to upload `filename` to S3. 
""" path = "{}/api/presign".format(self.endpoint) r = requests.post( path, headers={"authorization": "Bearer {}".format(token)}, json={"filename": filename, "organization": organization}, ) r.raise_for_status() d = r.json() return PresignedUrl(**d) def presign_and_upload(self, token: str, filename: str, filepath: str, organization: Optional[str] = None) -> str: """ Get a presigned url, then upload file to S3. Outputs: url: Read-only url for the stored file on S3. """ urls = self.presign(token, filename=filename, organization=organization) # streaming upload: # https://2.python-requests.org/en/master/user/advanced/#streaming-uploads # # Even though we presign with the correct content-type, # the client still has to specify it when uploading the file. with open(filepath, "rb") as f: pf = TqdmProgressFileReader(f) data = f if pf.total_size > 0 else "" r = requests.put(urls.write, data=data, headers={"content-type": urls.type}) r.raise_for_status() pf.close() return urls.access def list_objs(self, token: str, organization: Optional[str] = None) -> List[S3Obj]: """ Call HF API to list all stored files for user (or one of their organizations). """ path = "{}/api/listObjs".format(self.endpoint) params = {"organization": organization} if organization is not None else None r = requests.get(path, params=params, headers={"authorization": "Bearer {}".format(token)}) r.raise_for_status() d = r.json() return [S3Obj(**x) for x in d] def delete_obj(self, token: str, filename: str, organization: Optional[str] = None): """ Call HF API to delete a file stored by user """ path = "{}/api/deleteObj".format(self.endpoint) r = requests.delete( path, headers={"authorization": "Bearer {}".format(token)}, json={"filename": filename, "organization": organization}, ) r.raise_for_status() def model_list(self) -> List[ModelInfo]: """ Get the public list of all the models on huggingface, including the community models """ path = "{}/api/models".format(self.endpoint) r = requests.get(path) r.raise_for_status() d = r.json() return [ModelInfo(**x) for x in d] class TqdmProgressFileReader: """ Wrap an io.BufferedReader `f` (such as the output of `open(…, "rb")`) and override `f.read()` so as to display a tqdm progress bar. see github.com/huggingface/transformers/pull/2078#discussion_r354739608 for implementation details. """ def __init__(self, f: io.BufferedReader): self.f = f self.total_size = os.fstat(f.fileno()).st_size self.pbar = tqdm(total=self.total_size, leave=False) self.read = f.read f.read = self._read def _read(self, n=-1): self.pbar.update(n) return self.read(n) def close(self): self.pbar.close() class HfFolder: path_token = expanduser("~/.huggingface/token") @classmethod def save_token(cls, token): """ Save token, creating folder as needed. """ os.makedirs(os.path.dirname(cls.path_token), exist_ok=True) with open(cls.path_token, "w+") as f: f.write(token) @classmethod def get_token(cls): """ Get token or None if not existent. """ try: with open(cls.path_token, "r") as f: return f.read() except FileNotFoundError: pass @classmethod def delete_token(cls): """ Delete token. Do not fail if token does not exist. """ try: os.remove(cls.path_token) except FileNotFoundError: pass
file_length: 8,176 | avg_line_length: 29.973485 | max_line_length: 118 | extension_type: py
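A short usage sketch of the HfApi client defined in hf_api.py above; the import path assumes the vendored transformers package, and the credentials, filename, and file path are placeholders:

from transformers.hf_api import HfApi, HfFolder

api = HfApi()
token = api.login(username="your-username", password="your-password")  # placeholder credentials
HfFolder.save_token(token)                        # cache the token under ~/.huggingface/token
url = api.presign_and_upload(token, filename="weights.bin", filepath="./weights.bin")
print("stored at:", url)                          # read-only S3 url returned by the upload
print(api.list_objs(token))                       # S3Obj entries for the current user
api.logout(token)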
repo: SLT-FAI
file: SLT-FAI-main/transformers/configuration_albert.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ALBERT model configuration """ from .configuration_utils import PretrainedConfig ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "albert-base-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-config.json", "albert-large-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v1-config.json", "albert-xlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v1-config.json", "albert-xxlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v1-config.json", "albert-base-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-config.json", "albert-large-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-config.json", "albert-xlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-config.json", "albert-xxlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-config.json", } class AlbertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a :class:`~transformers.AlbertModel` or a :class:`~transformers.TFAlbertModel`. It is used to instantiate an ALBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ALBERT `xxlarge <https://huggingface.co/albert-xxlarge-v2>`__ architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 30000): Vocabulary size of the ALBERT model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.AlbertModel` or :class:`~transformers.TFAlbertModel`. embedding_size (:obj:`int`, `optional`, defaults to 128): Dimensionality of vocabulary embeddings. hidden_size (:obj:`int`, `optional`, defaults to 4096): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (:obj:`int`, `optional`, defaults to 12): Number of hidden layers in the Transformer encoder. num_hidden_groups (:obj:`int`, `optional`, defaults to 1): Number of groups for the hidden layers, parameters in the same group are shared. num_attention_heads (:obj:`int`, `optional`, defaults to 64): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (:obj:`int`, `optional`, defaults to 16384): The dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. inner_group_num (:obj:`int`, `optional`, defaults to 1): The number of inner repetition of attention and ffn. 
hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu_new"`): The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"swish"` and :obj:`"gelu_new"` are supported. hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0): The dropout ratio for the attention probabilities. max_position_embeddings (:obj:`int`, `optional`, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large (e.g., 512 or 1024 or 2048). type_vocab_size (:obj:`int`, `optional`, defaults to 2): The vocabulary size of the :obj:`token_type_ids` passed when calling :class:`~transformers.AlbertModel` or :class:`~transformers.TFAlbertModel`. initializer_range (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12): The epsilon used by the layer normalization layers. classifier_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout ratio for attached classifiers. Examples:: >>> from transformers import AlbertConfig, AlbertModel >>> # Initializing an ALBERT-xxlarge style configuration >>> albert_xxlarge_configuration = AlbertConfig() >>> # Initializing an ALBERT-base style configuration >>> albert_base_configuration = AlbertConfig( ... hidden_size=768, ... num_attention_heads=12, ... intermediate_size=3072, ... ) >>> # Initializing a model from the ALBERT-base style configuration >>> model = AlbertModel(albert_xxlarge_configuration) >>> # Accessing the model configuration >>> configuration = model.config """ model_type = "albert" def __init__( self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_hidden_groups = num_hidden_groups self.num_attention_heads = num_attention_heads self.inner_group_num = inner_group_num self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.classifier_dropout_prob = classifier_dropout_prob
file_length: 7,570 | avg_line_length: 50.856164 | max_line_length: 118 | extension_type: py
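In addition to the docstring example above, an AlbertConfig can be round-tripped through JSON with the PretrainedConfig helpers it inherits; a small sketch with a placeholder output directory:

from transformers.configuration_albert import AlbertConfig

config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
config.save_pretrained("./albert-base-config")               # writes config.json (placeholder dir)
reloaded = AlbertConfig.from_pretrained("./albert-base-config")
assert reloaded.hidden_size == 768 and reloaded.model_type == "albert"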
repo: SLT-FAI
file: SLT-FAI-main/transformers/tokenization_albert_fast.py
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization classes for ALBERT model.""" import os from shutil import copyfile from typing import List, Optional, Tuple from .file_utils import is_sentencepiece_available from .tokenization_utils_fast import PreTrainedTokenizerFast from .utils import logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: AlbertTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "albert-base-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-spiece.model", "albert-large-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v1-spiece.model", "albert-xlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v1-spiece.model", "albert-xxlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v1-spiece.model", "albert-base-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-spiece.model", "albert-large-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-spiece.model", "albert-xlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-spiece.model", "albert-xxlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-spiece.model", }, "tokenizer_file": { "albert-base-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-tokenizer.json", "albert-large-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v1-tokenizer.json", "albert-xlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v1-tokenizer.json", "albert-xxlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v1-tokenizer.json", "albert-base-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-tokenizer.json", "albert-large-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-tokenizer.json", "albert-xlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-tokenizer.json", "albert-xxlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "albert-base-v1": 512, "albert-large-v1": 512, "albert-xlarge-v1": 512, "albert-xxlarge-v1": 512, "albert-base-v2": 512, "albert-large-v2": 512, "albert-xlarge-v2": 512, "albert-xxlarge-v2": 512, } SPIECE_UNDERLINE = "▁" class AlbertTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" ALBERT tokenizer (backed by HuggingFace's `tokenizers` library). Based on `SentencePiece <https://github.com/google/sentencepiece>`__. This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that contains the vocabulary necessary to instantiate a tokenizer. do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to lowercase the input when tokenizing. remove_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to strip the text when tokenizing (removing excess spaces before and after the string). keep_accents (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to keep accents when tokenizing. bos_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. .. note:: When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the :obj:`cls_token`. eos_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`): The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the :obj:`sep_token`. unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`): The token used for padding, for example when batching sequences of different lengths. cls_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (:obj:`str`, `optional`, defaults to :obj:`"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. Attributes: sp_model (:obj:`SentencePieceProcessor`): The `SentencePiece` processor that is used for every conversion (string, tokens and IDs). 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = AlbertTokenizer def __init__( self, vocab_file, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs ): super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, ) self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.vocab_file = vocab_file def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An ALBERT sequence has the following format: - single sequence: ``[CLS] X [SEP]`` - pair of sequences: ``[CLS] A [SEP] B [SEP]`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` method. Args: token_ids_0 (:obj:`List[int]`): List of ids. token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True if the token list is already formatted with special tokens for the model Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | if token_ids_1 is None, only returns the first portion of the mask (0s). Args: token_ids_0 (:obj:`List[int]`): List of ids. 
token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
file_length: 12,321 | avg_line_length: 46.210728 | max_line_length: 119 | extension_type: py
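The special-token helpers above implement the [CLS] X [SEP] and [CLS] A [SEP] B [SEP] layouts; a sketch with toy token-id lists, assuming a tokenizer can be instantiated from local SentencePiece/tokenizer files (the file names below are placeholders):

from transformers.tokenization_albert_fast import AlbertTokenizerFast

tok = AlbertTokenizerFast(vocab_file="spiece.model", tokenizer_file="tokenizer.json")  # placeholder files

ids_a, ids_b = [10, 11, 12], [20, 21]                            # toy token ids
print(tok.build_inputs_with_special_tokens(ids_a, ids_b))        # [CLS] A [SEP] B [SEP]
print(tok.create_token_type_ids_from_sequences(ids_a, ids_b))    # 0s over [CLS] A [SEP], 1s over B [SEP]
print(tok.get_special_tokens_mask(ids_a, ids_b))                 # 1 marks the special-token positions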
repo: SLT-FAI
file: SLT-FAI-main/transformers/tokenization_bert.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Bert.""" import collections import os import unicodedata from typing import List, Optional, Tuple from .tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from .utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", "bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", "bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", "bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", "bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", "bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", "bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", "bert-base-german-cased": "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt", "bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt", "bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt", "bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt", "bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt", "bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt", "bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-vocab.txt", "bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-vocab.txt", "TurkuNLP/bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/vocab.txt", "TurkuNLP/bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/vocab.txt", "wietsedv/bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "bert-base-uncased": 512, "bert-large-uncased": 512, "bert-base-cased": 512, "bert-large-cased": 512, "bert-base-multilingual-uncased": 512, 
"bert-base-multilingual-cased": 512, "bert-base-chinese": 512, "bert-base-german-cased": 512, "bert-large-uncased-whole-word-masking": 512, "bert-large-cased-whole-word-masking": 512, "bert-large-uncased-whole-word-masking-finetuned-squad": 512, "bert-large-cased-whole-word-masking-finetuned-squad": 512, "bert-base-cased-finetuned-mrpc": 512, "bert-base-german-dbmdz-cased": 512, "bert-base-german-dbmdz-uncased": 512, "TurkuNLP/bert-base-finnish-cased-v1": 512, "TurkuNLP/bert-base-finnish-uncased-v1": 512, "wietsedv/bert-base-dutch-cased": 512, } PRETRAINED_INIT_CONFIGURATION = { "bert-base-uncased": {"do_lower_case": True}, "bert-large-uncased": {"do_lower_case": True}, "bert-base-cased": {"do_lower_case": False}, "bert-large-cased": {"do_lower_case": False}, "bert-base-multilingual-uncased": {"do_lower_case": True}, "bert-base-multilingual-cased": {"do_lower_case": False}, "bert-base-chinese": {"do_lower_case": False}, "bert-base-german-cased": {"do_lower_case": False}, "bert-large-uncased-whole-word-masking": {"do_lower_case": True}, "bert-large-cased-whole-word-masking": {"do_lower_case": False}, "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True}, "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False}, "bert-base-cased-finetuned-mrpc": {"do_lower_case": False}, "bert-base-german-dbmdz-cased": {"do_lower_case": False}, "bert-base-german-dbmdz-uncased": {"do_lower_case": True}, "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False}, "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True}, "wietsedv/bert-base-dutch-cased": {"do_lower_case": False}, } def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class BertTokenizer(PreTrainedTokenizer): r""" Construct a BERT tokenizer. Based on WordPiece. This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): File containing the vocabulary. do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to do basic tokenization before WordPiece. never_split (:obj:`Iterable`, `optional`): Collection of tokens which will never be split during tokenization. Only has an effect when :obj:`do_basic_tokenize=True` unk_token (:obj:`str`, `optional`, defaults to :obj:`"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. 
pad_token (:obj:`str`, `optional`, defaults to :obj:`"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (:obj:`str`, `optional`, defaults to :obj:`"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this `issue <https://github.com/huggingface/transformers/issues/328>`__). strip_accents: (:obj:`bool`, `optional`): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for :obj:`lowercase` (as in the original BERT). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs ): super().__init__( unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, ) if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file) ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): # If the token is part of the never_split set if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. 
""" out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: ``[CLS] X [SEP]`` - pair of sequences: ``[CLS] A [SEP] B [SEP]`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` method. Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the token list is already formatted with special tokens for the model. Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formated with special tokens for the model." ) return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given sequence(s). 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( "Saving vocabulary to {}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!".format(vocab_file) ) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) class BasicTokenizer(object): """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to lowercase the input when tokenizing. never_split (:obj:`Iterable`, `optional`): Collection of tokens which will never be split during tokenization. Only has an effect when :obj:`do_basic_tokenize=True` tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this `issue <https://github.com/huggingface/transformers/issues/328>`__). strip_accents: (:obj:`bool`, `optional`): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for :obj:`lowercase` (as in the original BERT). """ def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents def tokenize(self, text, never_split=None): """Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. Args: **never_split**: (`optional`) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`. Returns: A list of wordpiece tokens. 
""" output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens
file_length: 24,880 | avg_line_length: 43.509839 | max_line_length: 183 | extension_type: py
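The WordPiece docstring above uses "unaffable" as its worked example; that behaviour can be reproduced directly with the classes defined in this file using only a toy vocabulary (the import path assumes the vendored transformers package):

from transformers.tokenization_bert import BasicTokenizer, WordpieceTokenizer

vocab = {"[UNK]": 0, "un": 1, "##aff": 2, "##able": 3}            # toy vocabulary
wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
print(wordpiece.tokenize("unaffable"))                            # ['un', '##aff', '##able']

basic = BasicTokenizer(do_lower_case=True)
print(basic.tokenize("Hello, World!"))                            # ['hello', ',', 'world', '!']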
repo: SLT-FAI
file: SLT-FAI-main/transformers/activations_tf.py
import math

import tensorflow as tf


def gelu(x):
    """Gaussian Error Linear Unit.
    Original Implementation of the gelu activation function in Google Bert repo when initially created.
    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
    return x * cdf


def gelu_new(x):
    """Gaussian Error Linear Unit. This is a smoother version of the GELU.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
        x: float Tensor to perform activation.

    Returns:
        `x` with the GELU activation applied.
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    # Fast GELU approximation: 0.5 * x * (1 + tanh(sqrt(2 / pi) * x * (1 + 0.044715 * x^2))),
    # where sqrt(2 / pi) ~= 0.7978845608.
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


ACT2FN = {
    "gelu": tf.keras.layers.Activation(gelu),
    "relu": tf.keras.activations.relu,
    "swish": tf.keras.activations.swish,
    "gelu_new": tf.keras.layers.Activation(gelu_new),
    "mish": tf.keras.layers.Activation(mish),
    "tanh": tf.keras.activations.tanh,
    "gelu_fast": tf.keras.layers.Activation(gelu_fast),
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError("function {} not found in ACT2FN mapping {}".format(activation_string, list(ACT2FN.keys())))
file_length: 1,924 | avg_line_length: 28.166667 | max_line_length: 115 | extension_type: py
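A brief usage sketch for the activation registry defined above (the import path again assumes the vendored transformers package):

import tensorflow as tf
from transformers.activations_tf import get_tf_activation

act = get_tf_activation("gelu_new")          # look up an activation by name
x = tf.constant([-1.0, 0.0, 1.0])
print(act(x))                                # GELU applied elementwise
# get_tf_activation("unknown") would raise a KeyError listing the supported names.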
repo: SLT-FAI
file: SLT-FAI-main/transformers/modeling_fsmt.py
# coding=utf-8 # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Original implementation: https://github.com/pytorch/fairseq/tree/master/examples/wmt19 # Authors: # - @alexeib Alexei Baevski # - @edunov Sergey Edunov # - @michaelauli Michael Auli # - @myleott Myle Ott # - @nng555 Nathan Ng # - David Grangier # - Kyra Yee # # Paper: Facebook FAIR's WMT19 News Translation Task Submission https://arxiv.org/abs/1907.06616 # """PyTorch Fairseq model, ported from https://github.com/pytorch/fairseq/tree/master/examples/wmt19""" import math import random import warnings from typing import Any, Dict, List, Optional, Tuple import torch import torch.nn.functional as F from torch import Tensor, nn from torch.nn import CrossEntropyLoss from .activations import ACT2FN from .configuration_fsmt import FSMTConfig from .file_utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings, ) from .modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, Seq2SeqLMOutput, Seq2SeqModelOutput from .modeling_utils import PreTrainedModel from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "FSMTConfig" _TOKENIZER_FOR_DOC = "FSMTTokenizer" # See all FSMT models at https://huggingface.co/models?filter=fsmt # Porting notes: # this one is modeled after BartModel* # # Currently only translation (fairseq also has weights for LM) # # fairseq provides weights for ru-en, en-ru and de-en, en-de pairs. All have been ported. # - ru-en, en-ru use asymmetric vocab # - de-en, en-de use a merged single vocab (but the code works as if they are separate) # # Differences with Bart: # - not using bos token # - 2 separate vocabs (src and target) # - embed weights aren't tied # - uses a model Ensemble (but that part isn't ported/implemented yet) - so we # aren't getting as good of a BLEU score # - uses a projection layer at the end of the decoder # - doesn't use final_logits_bias # - beam search: stops as soon as num_beams == len(hypos) (whereas transformers # is not satisfied there and will continue searching until the next cycles # aren't promising something better), comparing BLEU scores - the transformers # algorithm is slightly superior, therefore using the latter. But if you want # to match fairseq outputs, you need to pass ``early_stopping=True`` to ``generate()``. # # SinusoidalPositionalEmbedding is slightly different from Bart's - generates # different embeddings. This implementation is copied verbatim from fairseq with # some small changes to make it work here. 
# # Other changes: # - doesn't support use_cache as Bart's version does # # # FSMTConfig changes with BartConfig # # Differences with BART: # - src/tgt vocabs aren't shared # - token embeddings aren't shared # - needs a language pair # - scale_embedding are True # # some unused args were removed too # # # TODO: # - port model ensemble (fs uses 4 model checkpoints) # - solve beam search discrepancies """ Here is how to compare BLEU scores against fairseq implementation: # en-ru export PAIR=en-ru export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=50 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS # (fairseq BLEU: 36.4 http://matrix.statmt.org/matrix/output/1914?score_id=37605) # ru-en export PAIR=ru-en export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=50 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS # (fairseq BLEU: 41.3 http://matrix.statmt.org/matrix/output/1907?run_id=6937) # de-en export PAIR=de-en export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=50 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS # (fairseq BLEU: 42.3 http://matrix.statmt.org/matrix/output/1902?run_id=6750) # en-de export PAIR=en-de export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS # (fairseq BLEU: 43.1 http://matrix.statmt.org/matrix/output/1909?run_id=6862) """ FSMT_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.FSMTConfig`): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ FSMT_GENERATION_EXAMPLE = r""" Translation example:: from transformers import FSMTTokenizer, FSMTForConditionalGeneration mname = "facebook/wmt19-ru-en" model = FSMTForConditionalGeneration.from_pretrained(mname) tokenizer = FSMTTokenizer.from_pretrained(mname) src_text = "Машинное обучение - это здорово, не так ли?" input_ids = tokenizer.encode(src_text, return_tensors='pt') outputs = model.generate(input_ids, num_beams=5, num_return_sequences=3) for i, output in enumerate(outputs): decoded = tokenizer.decode(output, skip_special_tokens=True) print(f"{i}: {decoded}) # 1: Machine learning is great, isn't it? ... """ FSMT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. IIndices can be obtained using :class:`~transformers.FSTMTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Provide for translation and summarization training. By default, the model will create this tensor by shifting the input_ids right, following the paper. decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`): Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read :func:`modeling_fstm._prepare_fstm_decoder_inputs` and modify. See diagram 1 in the paper for more info on the default strategy encoder_outputs (:obj:`Tuple(torch.FloatTensor)`, `optional`): Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`: :obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (:obj:`Tuple(torch.FloatTensor)` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). 
output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ have_fused_layer_norm = False if torch.cuda.is_available(): try: from apex.normalization import FusedLayerNorm have_fused_layer_norm = True except ImportError: pass LayerNorm = FusedLayerNorm if have_fused_layer_norm else torch.nn.LayerNorm def invert_mask(attention_mask): """Turns 1->0, 0->1, False->True, True-> False""" assert attention_mask.dim() == 2 return attention_mask.eq(0) def _prepare_fsmt_decoder_inputs( config, input_ids, decoder_input_ids=None, decoder_padding_mask=None, causal_mask_dtype=torch.float32 ): """Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if none are provided. This mimics the default behavior in fairseq. To override it pass in masks. Note: this is not called during generation """ pad_token_id = config.pad_token_id if decoder_input_ids is None: decoder_input_ids = shift_tokens_right(input_ids, pad_token_id) bsz, tgt_len = decoder_input_ids.size() if decoder_padding_mask is None: decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id) else: decoder_padding_mask = invert_mask(decoder_padding_mask) causal_mask = torch.triu(fill_with_neg_inf(torch.zeros(tgt_len, tgt_len)), 1).to( dtype=causal_mask_dtype, device=decoder_input_ids.device ) return decoder_input_ids, decoder_padding_mask, causal_mask class PretrainedFSMTModel(PreTrainedModel): config_class = FSMTConfig base_model_prefix = "model" def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, SinusoidalPositionalEmbedding): pass elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = { "attention_mask": input_ids.ne(pad_token), "input_ids": input_ids, } return dummy_inputs def _make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer # Helper Functions, mostly for making masks def _check_shapes(shape_1, shape2): if shape_1 != shape2: raise AssertionError("shape mismatch: {} != {}".format(shape_1, shape2)) def shift_tokens_right(input_ids, pad_token_id): """Shift input ids one token to the right, and wrap the last non pad token (usually <eos>).""" prev_output_tokens = input_ids.clone() index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1) prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze() prev_output_tokens[:, 1:] = input_ids[:, :-1] return prev_output_tokens def make_padding_mask(input_ids, padding_idx=1): """True for pad tokens""" padding_mask = input_ids.eq(padding_idx) if not padding_mask.any(): padding_mask = None return padding_mask # Helper Modules class 
EncoderLayer(nn.Module): def __init__(self, config: FSMTConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = Attention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def forward(self, x, encoder_padding_mask, output_attentions=False): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. for t_tgt, t_src is excluded (or masked out), =0 means it is included in attention Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ residual = x x, attn_weights = self.self_attn( query=x, key=x, key_padding_mask=encoder_padding_mask, output_attentions=output_attentions ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.self_attn_layer_norm(x) residual = x x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.final_layer_norm(x) return x, attn_weights class FSMTEncoder(nn.Module): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a :class:`EncoderLayer`. Args: config: FSMTConfig """ def __init__(self, config: FSMTConfig, embed_tokens): super().__init__() self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.padding_idx = embed_tokens.padding_idx self.embed_tokens = embed_tokens embed_dim = embed_tokens.embedding_dim self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_positions = SinusoidalPositionalEmbedding( config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx ) self.layers = nn.ModuleList( [EncoderLayer(config) for _ in range(config.encoder_layers)] ) # type: List[EncoderLayer] def forward( self, input_ids, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=False ): """ Args: input_ids (LongTensor): tokens in the source language of shape `(batch, src_len)` attention_mask (torch.LongTensor): indicating which indices are padding tokens. Returns: BaseModelOutput or Tuple comprised of: - **x** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_states** (tuple(torch.FloatTensor)): all intermediate hidden states of shape `(src_len, batch, embed_dim)`. Only populated if *output_hidden_states:* is True. - **all_attentions** (tuple(torch.FloatTensor)): Attention weights for each layer. During training might not be of length n_layers because of layer dropout. 
""" # check attention mask and invert if attention_mask is not None: attention_mask = invert_mask(attention_mask) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_ids) x = inputs_embeds + embed_pos x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) encoder_states = [] if output_hidden_states else None all_attentions = () if output_attentions else None for encoder_layer in self.layers: if output_hidden_states: encoder_states.append(x) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): # skip the layer attn = None else: x, attn = encoder_layer(x, attention_mask, output_attentions=output_attentions) if output_attentions: all_attentions = all_attentions + (attn,) if output_hidden_states: encoder_states.append(x) # T x B x C -> B x T x C encoder_states = tuple(hidden_state.transpose(0, 1) for hidden_state in encoder_states) # T x B x C -> B x T x C x = x.transpose(0, 1) if not return_dict: return tuple(v for v in [x, encoder_states, all_attentions] if v is not None) return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions) class DecoderLayer(nn.Module): def __init__(self, config: FSMTConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = Attention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.encoder_attn = Attention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, encoder_decoder_attention=True, ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def forward( self, x, encoder_hidden_states, encoder_attn_mask=None, layer_state=None, causal_mask=None, decoder_padding_mask=None, output_attentions=False, ): residual = x if layer_state is None: layer_state = {} # Self Attention x, self_attn_weights = self.self_attn( query=x, key=x, layer_state=layer_state, # adds keys to layer state key_padding_mask=decoder_padding_mask, attn_mask=causal_mask, output_attentions=output_attentions, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.self_attn_layer_norm(x) # Cross attention residual = x assert self.encoder_attn.cache_key != self.self_attn.cache_key x, _ = self.encoder_attn( query=x, key=encoder_hidden_states, key_padding_mask=encoder_attn_mask, layer_state=layer_state, # mutates layer state ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.encoder_attn_layer_norm(x) # Fully Connected residual = x x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.final_layer_norm(x) return ( x, self_attn_weights, layer_state, ) # just self_attn weights for now, following t5, layer_state = cache for decoding class FSMTDecoder(nn.Module): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`DecoderLayer`. 
Args: config: FSMTConfig embed_tokens (torch.nn.Embedding): output embedding """ def __init__(self, config: FSMTConfig, embed_tokens: nn.Embedding): super().__init__() self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = embed_tokens.padding_idx self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = embed_tokens embed_dim = embed_tokens.embedding_dim self.embed_positions = SinusoidalPositionalEmbedding( config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx ) self.layers = nn.ModuleList( [DecoderLayer(config) for _ in range(config.decoder_layers)] ) # type: List[DecoderLayer] self.output_projection = nn.Linear( self.embed_tokens.weight.shape[1], self.embed_tokens.weight.shape[0], bias=False, ) self.output_projection.weight = self.embed_tokens.weight def forward( self, input_ids, encoder_hidden_states, encoder_padding_mask, decoder_padding_mask, decoder_causal_mask, past_key_values=None, use_cache=False, output_attentions=False, output_hidden_states=False, return_dict=False, **unused, ): """ Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: input_ids (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_hidden_states: output from the encoder, used for encoder-side attention encoder_padding_mask: for ignoring pad tokens past_key_values (dict or None): dictionary used for storing state during generation Returns: BaseModelOutputWithPast or tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - the cache - hidden states - attentions """ if "decoder_cached_states" in unused: warnings.warn( "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) past_key_values = unused.pop("decoder_cached_states") if "decoder_past_key_values" in unused: warnings.warn( "The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) past_key_values = unused.pop("decoder_past_key_values") # check attention mask and invert if encoder_padding_mask is not None: encoder_padding_mask = invert_mask(encoder_padding_mask) # embed positions positions = self.embed_positions(input_ids) # , use_cache=use_cache) if use_cache: input_ids = input_ids[:, -1:] positions = positions[:, -1:] # happens after we embed them # assert input_ids.ne(self.padding_idx).any() x = self.embed_tokens(input_ids) * self.embed_scale x += positions x = F.dropout(x, p=self.dropout, training=self.training) # Convert to FSMT output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim) x = x.transpose(0, 1) encoder_hidden_states = encoder_hidden_states.transpose(0, 1) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = [] for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (x,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue layer_state = past_key_values[idx] if past_key_values is not None else None x, layer_self_attn, layer_past = decoder_layer( x, encoder_hidden_states, encoder_attn_mask=encoder_padding_mask, decoder_padding_mask=decoder_padding_mask, 
layer_state=layer_state, causal_mask=decoder_causal_mask, output_attentions=output_attentions, ) if use_cache: next_decoder_cache.append(layer_past.copy()) if output_attentions: all_self_attns += (layer_self_attn,) # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim) if output_hidden_states: all_hidden_states = tuple(hidden_state.transpose(0, 1) for hidden_state in all_hidden_states) x = x.transpose(0, 1) encoder_hidden_states = encoder_hidden_states.transpose(0, 1) x = self.output_projection(x) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple(v for v in [x, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns ) def _reorder_buffer(attn_cache, new_order): for k, input_buffer_k in attn_cache.items(): if input_buffer_k is not None: attn_cache[k] = input_buffer_k.index_select(0, new_order) return attn_cache class Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim, num_heads, dropout=0.0, bias=True, encoder_decoder_attention=False, # otherwise self_attention ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.encoder_decoder_attention = encoder_decoder_attention self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.cache_key = "encoder_decoder" if self.encoder_decoder_attention else "self" def _shape(self, tensor, seq_len, bsz): return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1) def forward( self, query, key: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, layer_state: Optional[Dict[str, Optional[Tensor]]] = None, attn_mask: Optional[Tensor] = None, output_attentions=False, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time(SeqLen) x Batch x Channel""" static_kv: bool = self.encoder_decoder_attention tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] # get here for encoder decoder cause of static_kv if layer_state is not None: # reuse k,v and encoder_padding_mask saved_state = layer_state.get(self.cache_key, {}) if "prev_key" in saved_state and static_kv: # previous time steps are cached - no need to recompute key and value if they are static key = None else: saved_state = None layer_state = {} q = self.q_proj(query) * self.scaling if static_kv: if key is None: k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: k = self.k_proj(query) v = self.v_proj(query) q = self._shape(q, tgt_len, bsz) if k is not None: k = self._shape(k, -1, bsz) if v is not None: v = self._shape(v, -1, bsz) if saved_state is not None: k, v, key_padding_mask = self._use_saved_state(k, v, saved_state, key_padding_mask, static_kv, bsz) # Update cache layer_state[self.cache_key] = { "prev_key": k.view(bsz, self.num_heads, -1, self.head_dim), "prev_value": v.view(bsz, self.num_heads, -1, self.head_dim), "prev_key_padding_mask": key_padding_mask if not static_kv else None, } assert k 
is not None src_len = k.size(1) attn_weights = torch.bmm(q, k.transpose(1, 2)) assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len) if attn_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) # This is part of a workaround to get around fork/join parallelism not supporting Optional types. if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None assert key_padding_mask is None or key_padding_mask.size()[:2] == ( bsz, src_len, ) if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2) attn_weights = attn_weights.masked_fill(reshaped, float("-inf")) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = F.softmax(attn_weights, dim=-1) attn_probs = F.dropout( attn_weights, p=self.dropout, training=self.training, ) assert v is not None attn_output = torch.bmm(attn_probs, v) assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn_output = self.out_proj(attn_output) if output_attentions: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) else: attn_weights = None return attn_output, attn_weights def _use_saved_state(self, k, v, saved_state, key_padding_mask, static_kv, bsz): # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) assert k is not None and v is not None prev_key_padding_mask: Optional[Tensor] = saved_state.get("prev_key_padding_mask", None) if prev_key_padding_mask is not None: if static_kv: new_key_padding_mask = prev_key_padding_mask else: new_key_padding_mask = torch.cat([prev_key_padding_mask, key_padding_mask], dim=1) else: new_key_padding_mask = key_padding_mask return k, v, new_key_padding_mask def fill_with_neg_inf(t): """FP16-compatible function that fills a input_ids with -inf.""" return t.float().fill_(float("-inf")).type_as(t) # Public API def _get_shape(t): return getattr(t, "shape", None) @add_start_docstrings( "The bare FSMT Model outputting raw hidden-states without any specific head on top.", FSMT_START_DOCSTRING, ) class FSMTModel(PretrainedFSMTModel): def __init__(self, config: FSMTConfig): super().__init__(config) padding_idx = config.pad_token_id encoder_embed_tokens = nn.Embedding(config.src_vocab_size, config.d_model, padding_idx) decoder_embed_tokens = nn.Embedding(config.tgt_vocab_size, config.d_model, padding_idx) self.encoder = FSMTEncoder(config, encoder_embed_tokens) self.decoder = FSMTDecoder(config, decoder_embed_tokens) self.init_weights() @add_start_docstrings_to_callable(FSMT_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="facebook/wmt19-ru-en", output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, 
) def forward( self, input_ids, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs: Optional[Tuple] = None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): if "decoder_past_key_values" in kwargs: warnings.warn( "The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) past_key_values = kwargs.pop("decoder_past_key_values") if decoder_input_ids is None: use_cache = False output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # make masks if user doesn't supply if not use_cache: decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_fsmt_decoder_inputs( self.config, input_ids, decoder_input_ids=decoder_input_ids, decoder_padding_mask=decoder_attention_mask, causal_mask_dtype=self.decoder.embed_tokens.weight.dtype, ) else: decoder_padding_mask, causal_mask = None, None assert decoder_input_ids is not None if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOuput when return_dict=False elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) decoder_outputs = self.decoder( decoder_input_ids, encoder_outputs[0], attention_mask, decoder_padding_mask, decoder_causal_mask=causal_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): return self.encoder.embed_tokens def set_input_embeddings(self, value): self.encoder.embed_tokens = value def get_output_embeddings(self): return self.decoder.embed_tokens def set_output_embeddings(self, value): self.decoder.embed_tokens = value @add_start_docstrings( "The FSMT Model with a language modeling head. 
Can be used for summarization.", FSMT_START_DOCSTRING ) class FSMTForConditionalGeneration(PretrainedFSMTModel): base_model_prefix = "model" authorized_missing_keys = [ "model.encoder.embed_positions.weight", "model.decoder.embed_positions.weight", ] keys_to_never_save = [ "model.encoder.embed_positions.weight", "model.decoder.embed_positions.weight", ] def __init__(self, config: FSMTConfig): super().__init__(config) base_model = FSMTModel(config) self.model = base_model def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens) self.model.encoder.embed_tokens = new_embeddings new_embeddings = super().resize_token_embeddings(new_num_tokens) self.model.decoder.embed_tokens = new_embeddings # XXX: this is not quite correct, as we have 2 different `new_embeddings`, and # only one return value is expected. Needs to be redesigned in the core to support dual dicts raise NotImplementedError("this method needs re-thinking for models with 2 separate dictionaries") return new_embeddings @add_start_docstrings_to_callable(FSMT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(FSMT_GENERATION_EXAMPLE) def forward( self, input_ids, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, past_key_values=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, **unused, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``. Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = outputs[0] masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # TODO(SS): do we need to ignore pad tokens in labels? masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.tgt_vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past, attention_mask, use_cache, encoder_outputs, **kwargs ): return { "input_ids": None, # encoder_outputs is defined. 
input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } def adjust_logits_during_generation(self, logits, cur_len, max_length): if cur_len == max_length - 1 and self.config.eos_token_id is not None: self._force_token_ids_generation(logits, self.config.eos_token_id) return logits def _force_token_ids_generation(self, scores, token_ids) -> None: """force one of token_ids to be generated by setting prob of all other tokens to 0""" if isinstance(token_ids, int): token_ids = [token_ids] all_but_token_ids_mask = torch.tensor( [x for x in range(self.config.tgt_vocab_size) if x not in token_ids], dtype=torch.long, device=next(self.parameters()).device, ) assert len(scores.shape) == 2, "scores should be of rank 2 with shape: [batch_size, vocab_size]" scores[:, all_but_token_ids_mask] = -float("inf") @staticmethod def _reorder_cache(past, beam_idx): reordered_past = [] for layer_past in past: # get the correct batch idx from decoder layer's batch dim for cross and self-attn layer_past_new = { attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items() } reordered_past.append(layer_past_new) return reordered_past def get_encoder(self): return self.model.encoder def get_output_embeddings(self): return self.model.decoder.embed_tokens class SinusoidalPositionalEmbedding(nn.Embedding): """ This module produces sinusoidal positional embeddings of any length. We don't want to save the weight of this embedding since it's not trained (deterministic) and it can be huge. Padding symbols are ignored. These embeddings get automatically extended in forward if more positions is needed. """ def __init__(self, num_positions, embedding_dim, padding_idx): self.make_weight(num_positions, embedding_dim, padding_idx) def make_weight(self, num_positions, embedding_dim, padding_idx): weight = self.get_embedding(num_positions, embedding_dim, padding_idx) if not hasattr(self, "weight"): # in ___init__ super().__init__(num_positions, embedding_dim, padding_idx, _weight=weight) else: # in forward weight = weight.to(self.weight.device) self.weight = nn.Parameter(weight) self.weight.detach_() self.weight.requires_grad = False @staticmethod def get_embedding(num_embeddings, embedding_dim, padding_idx): """Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb @staticmethod def make_positions(tensor, padding_idx: int): """Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. """ # The series of casts and type-conversions here are carefully # balanced to both work with ONNX export and XLA. In particular XLA # prefers ints, cumsum defaults to output longs, and ONNX doesn't know # how to handle the dtype kwarg in cumsum. 
mask = tensor.ne(padding_idx).int() return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx def forward( self, input, incremental_state: Optional[Any] = None, timestep: Optional[Tensor] = None, ): """Input is expected to be of size [bsz x seqlen].""" bsz, seq_len = input.shape[:2] max_pos = self.padding_idx + 1 + seq_len if max_pos > self.weight.size(0): # expand embeddings if needed self.make_weight(max_pos, self.embedding_dim, self.padding_idx) positions = self.make_positions(input, self.padding_idx) return super().forward(positions)
49,644
39.39463
270
py
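The porting notes in the modeling_fsmt.py record above point out that matching fairseq's beam-search behaviour requires passing early_stopping=True to generate(). A minimal translation sketch built from that note and the record's own docstring example (the checkpoint name comes from the docstring; downloading it is assumed):

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-ru-en"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

# encode a Russian source sentence and translate it with beam search
input_ids = tokenizer.encode("Машинное обучение - это здорово, не так ли?", return_tensors="pt")
outputs = model.generate(input_ids, num_beams=5, early_stopping=True)  # early_stopping=True mirrors fairseq's stopping rule
print(tokenizer.decode(outputs[0], skip_special_tokens=True))  # e.g. "Machine learning is great, isn't it?"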
SLT-FAI
SLT-FAI-main/transformers/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Transformer XL checkpoint and datasets."""


import argparse
import os
import pickle
import sys

import torch

import transformers.tokenization_transfo_xl as data_utils
from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    TransfoXLConfig,
    TransfoXLLMHeadModel,
    load_tf_weights_in_transfo_xl,
)
from transformers.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import logging


logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print("Save vocabulary to {}".format(pytorch_vocab_dump_path))
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print("Save dataset to {}".format(pytorch_dataset_dump_path))
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print("Converting Transformer XL checkpoint from {} with config at {}".format(tf_path, config_path))
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print("Building PyTorch model from configuration: {}".format(str(config)))
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help="An optional config json file corresponding to the pre-trained Transformer XL model. \n"
        "This specifies the model architecture.",
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted into a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
4,926
38.103175
121
py
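The conversion script above is normally run from the command line, but the same entry point can also be driven from Python. The sketch below is hypothetical: the import path is assumed from the file's location in this repo, and the checkpoint/output paths are placeholders rather than real files.

from transformers.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
    convert_transfo_xl_checkpoint_to_pytorch,
)

convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path="checkpoints/transfo-xl-wt103/model.ckpt",  # placeholder TF checkpoint prefix
    transfo_xl_config_file="",                                     # "" -> fall back to the default TransfoXLConfig()
    pytorch_dump_folder_path="converted/transfo-xl-wt103",         # existing output directory for weights + config
    transfo_xl_dataset_file="",                                    # "" -> skip the corpus/vocabulary conversion branch
)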
SLT-FAI
SLT-FAI-main/transformers/modeling_mbart.py
from .configuration_mbart import MBartConfig
from .modeling_bart import BartForConditionalGeneration


_CONFIG_FOR_DOC = "MBartConfig"
_TOKENIZER_FOR_DOC = "MBartTokenizer"

MBART_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/mbart-large-cc25",
    "facebook/mbart-large-en-ro",
    # See all multilingual BART models at https://huggingface.co/models?filter=mbart
]


class MBartForConditionalGeneration(BartForConditionalGeneration):
    r"""
    This class overrides :class:`~transformers.BartForConditionalGeneration`. Please check the
    superclass for the appropriate documentation alongside usage examples.

    Examples::
        >>> from transformers import MBartForConditionalGeneration, MBartTokenizer
        >>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
        >>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro")
        >>> article = "UN Chief Says There Is No Military Solution in Syria"
        >>> batch = tokenizer.prepare_seq2seq_batch(src_texts=[article])
        >>> translated_tokens = model.generate(**batch)
        >>> translation = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
        >>> assert translation == "Şeful ONU declară că nu există o soluţie militară în Siria"
    """

    model_type = "mbart"
    config_class = MBartConfig
1,358
41.46875
96
py
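The docstring example in the MBart record above translates a single article; since prepare_seq2seq_batch accepts a list of source texts and batch_decode returns one string per generated sequence, the same calls extend to small batches. A sketch using the same checkpoint as the docstring (downloading it is assumed; the second sentence is only an illustrative input):

from transformers import MBartForConditionalGeneration, MBartTokenizer

model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro")

articles = [
    "UN Chief Says There Is No Military Solution in Syria",
    "The Secretary-General spoke to reporters on Tuesday.",
]
batch = tokenizer.prepare_seq2seq_batch(src_texts=articles)  # padded input_ids / attention_mask tensors
translated_tokens = model.generate(**batch)
for src, translation in zip(articles, tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)):
    print(src, "->", translation)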
SLT-FAI
SLT-FAI-main/transformers/retrieval_rag.py
# coding=utf-8 # Copyright 2020, The RAG Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """RAG Retriever model implementation.""" import os import pickle import time from typing import Iterable, List, Optional, Tuple import numpy as np from .configuration_rag import RagConfig from .file_utils import ( cached_path, is_datasets_available, is_faiss_available, is_remote_url, requires_datasets, requires_faiss, ) from .tokenization_rag import RagTokenizer from .tokenization_utils_base import BatchEncoding from .utils import logging if is_datasets_available(): from datasets import Dataset, load_dataset, load_from_disk if is_faiss_available(): import faiss logger = logging.get_logger(__name__) LEGACY_INDEX_PATH = "https://storage.googleapis.com/huggingface-nlp/datasets/wiki_dpr/" class Index: """ A base class for the Indices encapsulated by the :class:`~transformers.RagRetriever`. """ def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]: """ Returns a list of dictionaries, containing titles and text of the retrieved documents. Args: doc_ids (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs)`): A tensor of document indices. """ raise NotImplementedError def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]: """ For each query in the batch, retrieves ``n_docs`` documents. Args: question_hidden_states (:obj:`np.ndarray` of shape :obj:`(batch_size, vector_size): An array of query vectors. n_docs (:obj:`int`): The number of docs retrieved per query. Returns: :obj:`np.ndarray` of shape :obj:`(batch_size, n_docs)`: A tensor of indices of retrieved documents. :obj:`np.ndarray` of shape :obj:`(batch_size, vector_size)`: A tensor of vector representations of retrieved documents. """ raise NotImplementedError def is_initialized(self): """ Returns :obj:`True` if index is already initialized. """ raise NotImplementedError def init_index(self): """ A function responsible for loading the index into memory. Should be called only once per training run of a RAG model. E.g. if the model is trained on multiple GPUs in a distributed setup, only one of the workers will load the index. """ raise NotImplementedError class LegacyIndex(Index): """ An index which can be deserialized from the files built using https://github.com/facebookresearch/DPR. We use default faiss index parameters as specified in that repository. Args: vector_size (:obj:`int`): The dimension of indexed vectors. 
index_path (:obj:`str`): A path to a `directory` containing index files compatible with :class:`~transformers.retrieval_rag.LegacyIndex` """ INDEX_FILENAME = "hf_bert_base.hnswSQ8_correct_phi_128.c_index" PASSAGE_FILENAME = "psgs_w100.tsv.pkl" def __init__(self, vector_size, index_path): self.index_id_to_db_id = [] self.index_path = index_path self.passages = self._load_passages() self.vector_size = vector_size self.index = None self._index_initialized = False def _resolve_path(self, index_path, filename): assert os.path.isdir(index_path) or is_remote_url(index_path), "Please specify a valid ``index_path``." archive_file = os.path.join(index_path, filename) try: # Load from URL or cache if already cached resolved_archive_file = cached_path(archive_file) if resolved_archive_file is None: raise EnvironmentError except EnvironmentError: msg = ( f"Can't load '{archive_file}'. Make sure that:\n\n" f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}" f"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n" ) raise EnvironmentError(msg) if resolved_archive_file == archive_file: logger.info("loading file {}".format(archive_file)) else: logger.info("loading file {} from cache at {}".format(archive_file, resolved_archive_file)) return resolved_archive_file def _load_passages(self): logger.info("Loading passages from {}".format(self.index_path)) passages_path = self._resolve_path(self.index_path, self.PASSAGE_FILENAME) with open(passages_path, "rb") as passages_file: passages = pickle.load(passages_file) return passages def _deserialize_index(self): logger.info("Loading index from {}".format(self.index_path)) resolved_index_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index.dpr") self.index = faiss.read_index(resolved_index_path) resolved_meta_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index_meta.dpr") with open(resolved_meta_path, "rb") as metadata_file: self.index_id_to_db_id = pickle.load(metadata_file) assert ( len(self.index_id_to_db_id) == self.index.ntotal ), "Deserialized index_id_to_db_id should match faiss index size" def is_initialized(self): return self._index_initialized def init_index(self): index = faiss.IndexHNSWFlat(self.vector_size + 1, 512) index.hnsw.efSearch = 128 index.hnsw.efConstruction = 200 self.index = index self._deserialize_index() self._index_initialized = True def get_doc_dicts(self, doc_ids: np.array): doc_list = [] for doc_ids_i in doc_ids: ids = [str(int(doc_id)) for doc_id in doc_ids_i] docs = [self.passages[doc_id] for doc_id in ids] doc_list.append(docs) doc_dicts = [] for docs in doc_list: doc_dict = {} doc_dict["title"] = [doc[1] for doc in docs] doc_dict["text"] = [doc[0] for doc in docs] doc_dicts.append(doc_dict) return doc_dicts def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]: aux_dim = np.zeros(len(question_hidden_states), dtype="float32").reshape(-1, 1) query_nhsw_vectors = np.hstack((question_hidden_states, aux_dim)) _, docs_ids = self.index.search(query_nhsw_vectors, n_docs) vectors = [[self.index.reconstruct(int(doc_id))[:-1] for doc_id in doc_ids] for doc_ids in docs_ids] ids = [[int(self.index_id_to_db_id[doc_id]) for doc_id in doc_ids] for doc_ids in docs_ids] return np.array(ids), np.array(vectors) class HFIndexBase(Index): def __init__(self, vector_size, dataset, index_initialized=False): self.vector_size = vector_size self.dataset = dataset self._index_initialized = 
index_initialized self._check_dataset_format(with_index=index_initialized) dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True) def _check_dataset_format(self, with_index: bool): if not isinstance(self.dataset, Dataset): raise ValueError("Dataset should be a datasets.Dataset object, but got {}".format(type(self.dataset))) if len({"title", "text", "embeddings"} - set(self.dataset.column_names)) > 0: raise ValueError( "Dataset should be a dataset with the following columns: " "title (str), text (str) and embeddings (arrays of dimension vector_size), " "but got columns {}".format(self.dataset.column_names) ) if with_index and "embeddings" not in self.dataset.list_indexes(): raise ValueError( "Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it " "or `dataset.load_faiss_index` to load one from the disk." ) def init_index(self): raise NotImplementedError() def is_initialized(self): return self._index_initialized def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]: return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])] def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]: _, ids = self.dataset.search_batch("embeddings", question_hidden_states, n_docs) docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids] vectors = [doc["embeddings"] for doc in docs] for i in range(len(vectors)): if len(vectors[i]) < n_docs: vectors[i] = np.vstack([vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))]) return np.array(ids), np.array(vectors) # shapes (batch_size, n_docs) and (batch_size, n_docs, d) class CanonicalHFIndex(HFIndexBase): """ A wrapper around an instance of :class:`~datasets.Datasets`. If ``index_path`` is set to ``None``, we load the pre-computed index available with the :class:`~datasets.arrow_dataset.Dataset`, otherwise, we load the index from the indicated path on disk. Args: vector_size (:obj:`int`): the dimension of the passages embeddings used by the index dataset_name (:obj:`str`, optional, defaults to ``wiki_dpr``): A datatset identifier of the indexed dataset on HuggingFace AWS bucket (list all available datasets and ids with ``datasets.list_datasets()``). dataset_split (:obj:`str`, optional, defaults to ``train``) Which split of the ``dataset`` to load. index_name (:obj:`str`, optional, defaults to ``train``) The index_name of the index associated with the ``dataset``. The index loaded from ``index_path`` will be saved under this name. index_path (:obj:`str`, optional, defaults to ``None``) The path to the serialized faiss index on disk. use_dummy_dataset (:obj:`bool`, optional, defaults to ``False``): If True, use the dummy configuration of the dataset for tests. 
""" def __init__( self, vector_size: int, dataset_name: str = "wiki_dpr", dataset_split: str = "train", index_name: Optional[str] = None, index_path: Optional[str] = None, use_dummy_dataset=False, ): if int(index_path is None) + int(index_name is None) != 1: raise ValueError("Please provide `index_name` or `index_path`.") self.dataset_name = dataset_name self.dataset_split = dataset_split self.index_name = index_name self.index_path = index_path self.use_dummy_dataset = use_dummy_dataset logger.info("Loading passages from {}".format(self.dataset_name)) dataset = load_dataset( self.dataset_name, with_index=False, split=self.dataset_split, dummy=self.use_dummy_dataset ) super().__init__(vector_size, dataset, index_initialized=False) def init_index(self): if self.index_path is not None: logger.info("Loading index from {}".format(self.index_path)) self.dataset.load_faiss_index("embeddings", file=self.index_path) else: logger.info("Loading index from {}".format(self.dataset_name + " with index name " + self.index_name)) self.dataset = load_dataset( self.dataset_name, with_embeddings=True, with_index=True, split=self.dataset_split, index_name=self.index_name, dummy=self.use_dummy_dataset, ) self.dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True) self._index_initialized = True class CustomHFIndex(HFIndexBase): """ A wrapper around an instance of :class:`~datasets.Datasets`. The dataset and the index are both loaded from the indicated paths on disk. Args: vector_size (:obj:`int`): the dimension of the passages embeddings used by the index dataset_path (:obj:`str`): The path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and embeddings (arrays of dimension vector_size) index_path (:obj:`str`) The path to the serialized faiss index on disk. """ def __init__(self, vector_size: int, dataset, index_path=None): super().__init__(vector_size, dataset, index_initialized=index_path is None) self.index_path = index_path @classmethod def load_from_disk(cls, vector_size, dataset_path, index_path): logger.info("Loading passages from {}".format(dataset_path)) if dataset_path is None or index_path is None: raise ValueError( "Please provide ``dataset_path`` and ``index_path`` after calling ``dataset.save_to_disk(dataset_path)`` " "and ``dataset.get_index('embeddings').save(index_path)``." ) dataset = load_from_disk(dataset_path) return cls(vector_size=vector_size, dataset=dataset, index_path=index_path) def init_index(self): if not self.is_initialized(): logger.info("Loading index from {}".format(self.index_path)) self.dataset.load_faiss_index("embeddings", file=self.index_path) self._index_initialized = True class RagRetriever: """ Retriever used to get documents from vector queries. It retrieves the documents embeddings as well as the documents contents, and it formats them to be used with a RagModel. Args: config (:class:`~transformers.RagConfig`): The configuration of the RAG model this Retriever is used with. Contains parameters indicating which ``Index`` to build. You can load your own custom dataset with ``config.index_name="custom"`` or use a canonical one (default) from the datasets library with ``config.index_name="wiki_dpr"`` for example. question_encoder_tokenizer (:class:`~transformers.PreTrainedTokenizer`): The tokenizer that was used to tokenize the question. It is used to decode the question and then use the generator_tokenizer. 
generator_tokenizer (:class:`~transformers.PreTrainedTokenizer`): The tokenizer used for the generator part of the RagModel. index (:class:`~transformers.retrieval_rag.Index`, optional, defaults to the one defined by the configuration): If specified, use this index instead of the one built using the configuration Examples:: >>> # To load the default "wiki_dpr" dataset with 21M passages from wikipedia (index name is 'compressed' or 'exact') >>> from transformers import RagRetriever >>> retriever = RagRetriever.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base', dataset="wiki_dpr", index_name='compressed') >>> # To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py >>> from transformers import RagRetriever >>> dataset = ... # dataset must be a datasets.Datasets object with columns "title", "text" and "embeddings", and it must have a faiss index >>> retriever = RagRetriever.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base', indexed_dataset=dataset) >>> # To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py >>> from transformers import RagRetriever >>> dataset_path = "path/to/my/dataset" # dataset saved via `dataset.save_to_disk(...)` >>> index_path = "path/to/my/index.faiss" # faiss index saved via `dataset.get_index("embeddings").save(...)` >>> retriever = RagRetriever.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base', index_name='custom', passages_path=dataset_path, index_path=index_path) >>> # To load the legacy index built originally for Rag's paper >>> from transformers import RagRetriever >>> retriever = RagRetriever.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base', index_name='legacy') """ _init_retrieval = True def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None): requires_datasets(self) requires_faiss(self) super().__init__() self.index = index or self._build_index(config) self.generator_tokenizer = generator_tokenizer self.question_encoder_tokenizer = question_encoder_tokenizer self.n_docs = config.n_docs self.batch_size = config.retrieval_batch_size self.config = config if self._init_retrieval: self.init_retrieval() @staticmethod def _build_index(config): if config.index_name == "legacy": return LegacyIndex( config.retrieval_vector_size, config.index_path or LEGACY_INDEX_PATH, ) elif config.index_name == "custom": return CustomHFIndex.load_from_disk( vector_size=config.retrieval_vector_size, dataset_path=config.passages_path, index_path=config.index_path, ) else: return CanonicalHFIndex( vector_size=config.retrieval_vector_size, dataset_name=config.dataset, dataset_split=config.dataset_split, index_name=config.index_name, index_path=config.index_path, use_dummy_dataset=config.use_dummy_dataset, ) @classmethod def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kwargs): requires_datasets(cls) requires_faiss(cls) config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs) rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config) question_encoder_tokenizer = rag_tokenizer.question_encoder generator_tokenizer = rag_tokenizer.generator if indexed_dataset is not None: config.index_name = "custom" index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset) else: index = cls._build_index(config) return cls( config, 
question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, ) def save_pretrained(self, save_directory): if isinstance(self.index, CustomHFIndex): if self.config.index_path is None: index_path = os.path.join(save_directory, "hf_dataset_index.faiss") self.index.dataset.get_index("embeddings").save(index_path) self.config.index_path = index_path if self.config.passages_path is None: passages_path = os.path.join(save_directory, "hf_dataset") # datasets don't support save_to_disk with indexes right now faiss_index = self.index.dataset._indexes.pop("embeddings") self.index.dataset.save_to_disk(passages_path) self.index.dataset._indexes["embeddings"] = faiss_index self.config.passages_path = passages_path self.config.save_pretrained(save_directory) rag_tokenizer = RagTokenizer( question_encoder=self.question_encoder_tokenizer, generator=self.generator_tokenizer, ) rag_tokenizer.save_pretrained(save_directory) def init_retrieval(self): """ Retriever initalization function. It loads the index into memory. """ logger.info("initializing retrieval") self.index.init_index() def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None): r""" Postprocessing retrieved ``docs`` and combining them with ``input_strings``. Args: doc_scores (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs)`): Retrieval scores of respective docs - passed for logging. docs (:obj:`dict`): Retrieved documents. input_strings (:obj:`str`): Input strings decoded by ``preprocess_query``. prefix (:obj:`str`): Prefix added at the beginning of each input, typically used with T5-based models. Return: :obj:`tuple(tensors)`: a tuple consisting of two elements: contextualized ``input_ids`` and a compatible ``attention_mask``. 
""" def cat_input_and_doc(doc_title, doc_text, input_string, prefix): # TODO(Patrick): if we train more RAG models, I want to put the input first to take advantage of effortless truncation # TODO(piktus): better handling of truncation if doc_title.startswith('"'): doc_title = doc_title[1:] if doc_title.endswith('"'): doc_title = doc_title[:-1] if prefix is None: prefix = "" out = (prefix + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string).replace( " ", " " ) return out rag_input_strings = [ cat_input_and_doc( docs[i]["title"][j], docs[i]["text"][j], input_strings[i], prefix, ) for i in range(len(docs)) for j in range(n_docs) ] contextualized_inputs = self.generator_tokenizer.batch_encode_plus( rag_input_strings, max_length=self.config.max_combined_length, return_tensors=return_tensors, padding="max_length", truncation=True, ) return contextualized_inputs["input_ids"], contextualized_inputs["attention_mask"] def _chunk_tensor(self, t: Iterable, chunk_size: int) -> List[Iterable]: return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)] def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray]: question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size) ids_batched = [] vectors_batched = [] for question_hidden_states in question_hidden_states_batched: start_time = time.time() ids, vectors = self.index.get_top_docs(question_hidden_states, n_docs) logger.debug( "index search time: {} sec, batch size {}".format( time.time() - start_time, question_hidden_states.shape ) ) ids_batched.extend(ids) vectors_batched.extend(vectors) return ( np.array(ids_batched), np.array(vectors_batched), ) # shapes (batch_size, n_docs) and (batch_size, n_docs, d) def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]: """ Retrieves documents for specified ``question_hidden_states``. Args: question_hidden_states (:obj:`np.ndarray` of shape :obj:`(batch_size, vector_size)`): A batch of query vectors to retrieve with. n_docs (:obj:`int`): The number of docs retrieved per query. Return: :obj:`Tuple[np.ndarray, np.ndarray, List[dict]]`: A tuple with the following objects: - **retrieved_doc_embeds** (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs, dim)`) -- The retrieval embeddings of the retrieved docs per query. - **doc_ids** (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs)`) -- The ids of the documents in the index - **doc_dicts** (:obj:`List[dict]`): The :obj:`retrieved_doc_embeds` examples per query. """ doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids) def __call__( self, question_input_ids: List[List[int]], question_hidden_states: np.ndarray, prefix=None, n_docs=None, return_tensors=None, ) -> BatchEncoding: """ Retrieves documents for specified :obj:`question_hidden_states`. Args: question_input_ids: (:obj:`List[List[int]]`) batch of input ids question_hidden_states (:obj:`np.ndarray` of shape :obj:`(batch_size, vector_size)`: A batch of query vectors to retrieve with. prefix: (:obj:`str`, `optional`): The prefix used by the generator's tokenizer. n_docs (:obj:`int`, `optional`): The number of docs retrieved per query. return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`, defaults to "pt"): If set, will return tensors instead of list of python integers. 
Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. Output: :class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields: - **context_input_ids** -- List of token ids to be fed to a model. `What are input IDs? <../glossary.html#input-ids>`__ - **context_attention_mask** -- List of indices specifying which tokens should be attended to by the model (when :obj:`return_attention_mask=True` or if `"attention_mask"` is in :obj:`self.model_input_names`). `What are attention masks? <../glossary.html#attention-mask>`__ - **retrieved_doc_embeds** -- List of embeddings of the retrieved documents - **doc_ids** -- List of ids of the retrieved documents """ n_docs = n_docs if n_docs is not None else self.n_docs prefix = prefix if prefix is not None else self.config.generator.prefix retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs) input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True) context_input_ids, context_attention_mask = self.postprocess_docs( docs, input_strings, prefix, n_docs, return_tensors=return_tensors ) return BatchEncoding( { "context_input_ids": context_input_ids, "context_attention_mask": context_attention_mask, "retrieved_doc_embeds": retrieved_doc_embeds, "doc_ids": doc_ids, }, tensor_type=return_tensors, )
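

# ---------------------------------------------------------------------------
# Editor's note: illustrative usage sketch, not part of the original module.
# It shows how the RagRetriever defined above is typically plugged into a RAG
# model. The checkpoint name, the "exact" index and the dummy-dataset flag are
# assumptions borrowed from the public RAG examples; running it requires the
# `datasets` and `faiss` packages and network access.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import RagTokenizer, RagTokenForGeneration

    tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    retriever = RagRetriever.from_pretrained(
        "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
    )
    model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)

    # Encode a question, retrieve supporting passages and generate an answer.
    input_dict = tokenizer.prepare_seq2seq_batch(
        "who holds the record in 100m freestyle", return_tensors="pt"
    )
    generated = model.generate(input_ids=input_dict["input_ids"])
    print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])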
27,944
44.36526
170
py
SLT-FAI
SLT-FAI-main/transformers/configuration_ctrl.py
# coding=utf-8 # Copyright 2018 Salesforce and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Salesforce CTRL configuration """ from .configuration_utils import PretrainedConfig from .utils import logging logger = logging.get_logger(__name__) CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://s3.amazonaws.com/models.huggingface.co/bert/ctrl-config.json"} class CTRLConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a :class:`~transformers.CTRLModel` or a :class:`~transformers.TFCTRLModel`. It is used to instantiate a CTRL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `ctrl <https://huggingface.co/ctrl>`__ architecture from SalesForce. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 246534): Vocabulary size of the CTRL model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.CTRLModel` or :class:`~transformers.TFCTRLModel`. n_positions (:obj:`int`, `optional`, defaults to 256): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). n_ctx (:obj:`int`, `optional`, defaults to 256): Dimensionality of the causal mask (usually same as n_positions). n_embd (:obj:`int`, `optional`, defaults to 1280): Dimensionality of the embeddings and hidden states. dff (:obj:`int`, `optional`, defaults to 8192): Dimensionality of the inner dimension of the feed forward networks (FFN). n_layer (:obj:`int`, `optional`, defaults to 48): Number of hidden layers in the Transformer encoder. n_head (:obj:`int`, `optional`, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. resid_pdrop (:obj:`float`, `optional`, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (:obj:`int`, `optional`, defaults to 0.1): The dropout ratio for the embeddings. attn_pdrop (:obj:`float`, `optional`, defaults to 0.1): The dropout ratio for the attention. layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-6): The epsilon to use in the layer normalization layers initializer_range (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
Examples:: >>> from transformers import CTRLModel, CTRLConfig >>> # Initializing a CTRL configuration >>> configuration = CTRLConfig() >>> # Initializing a model from the configuration >>> model = CTRLModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config """ model_type = "ctrl" def __init__( self, vocab_size=246534, n_positions=256, n_ctx=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs ): super().__init__(**kwargs) self.vocab_size = vocab_size self.n_ctx = n_ctx self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.dff = dff self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_first_dropout = summary_first_dropout self.summary_proj_to_labels = summary_proj_to_labels @property def max_position_embeddings(self): return self.n_positions @property def hidden_size(self): return self.n_embd @property def num_attention_heads(self): return self.n_head @property def num_hidden_layers(self): return self.n_layer
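

# ---------------------------------------------------------------------------
# Editor's note: illustrative usage sketch, not part of the original module.
# It exercises the aliased properties defined above and a save/load round trip
# through ``save_pretrained``/``from_pretrained``; the sizes are arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import tempfile

    config = CTRLConfig(n_layer=2, n_head=4, n_embd=128)
    # The generic property names map onto the CTRL-specific attribute names.
    assert config.num_hidden_layers == config.n_layer == 2
    assert config.hidden_size == config.n_embd == 128

    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)  # writes <tmp_dir>/config.json
        reloaded = CTRLConfig.from_pretrained(tmp_dir)
    assert reloaded.n_layer == 2 and reloaded.n_embd == 128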
5,550
39.224638
117
py
SLT-FAI
SLT-FAI-main/transformers/convert_mobilebert_original_tf_checkpoint_to_pytorch.py
import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path): # Initialise PyTorch model config = MobileBertConfig.from_json_file(mobilebert_config_file) print("Building PyTorch model from configuration: {}".format(str(config))) model = MobileBertForPreTraining(config) # Load weights from tf checkpoint model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path) # Save pytorch-model print("Save PyTorch model to {}".format(pytorch_dump_path)) torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--mobilebert_config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained MobileBERT model. \n" "This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
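

# ---------------------------------------------------------------------------
# Editor's note: illustrative invocation, not part of the original script.
# The paths below are placeholders; point them at a real MobileBERT TF
# checkpoint and its config.json.
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/mobilebert/model.ckpt \
#       --mobilebert_config_file /path/to/mobilebert/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin
#
# The same conversion can also be done programmatically:
#
#   convert_tf_checkpoint_to_pytorch(
#       "/path/to/mobilebert/model.ckpt",
#       "/path/to/mobilebert/config.json",
#       "/path/to/output/pytorch_model.bin",
#   )
# ---------------------------------------------------------------------------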
1,586
35.906977
117
py
SLT-FAI
SLT-FAI-main/transformers/tokenization_lxmert_fast.py
# coding=utf-8 # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .tokenization_bert_fast import BertTokenizerFast from .tokenization_lxmert import LxmertTokenizer #################################################### # Mapping from the keyword arguments names of Tokenizer `__init__` # to file names for serializing Tokenizer instances #################################################### VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} #################################################### # Mapping from the keyword arguments names of Tokenizer `__init__` # to pretrained vocabulary URL for all the model shortcut names. #################################################### PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "unc-nlp/lxmert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", }, "tokenizer_file": { "unc-nlp/lxmert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-tokenizer.json", }, } #################################################### # Mapping from model shortcut names to max length of inputs #################################################### PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "unc-nlp/lxmert-base-uncased": 512, } #################################################### # Mapping from model shortcut names to a dictionary of additional # keyword arguments for Tokenizer `__init__`. # To be used for checkpoint specific configurations. #################################################### PRETRAINED_INIT_CONFIGURATION = { "unc-nlp/lxmert-base-uncased": {"do_lower_case": True}, } class LxmertTokenizerFast(BertTokenizerFast): r""" Construct a "fast" LXMERT tokenizer (backed by HuggingFace's `tokenizers` library). :class:`~transformers.LxmertTokenizerFast` is identical to :class:`~transformers.BertTokenizerFast` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION slow_tokenizer_class = LxmertTokenizer
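

# ---------------------------------------------------------------------------
# Editor's note: illustrative usage sketch, not part of the original module.
# Because the class above only re-points the vocab/config maps, it is used
# exactly like BertTokenizerFast; the example sentence is arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
    encoded = tokenizer("A man is riding a horse.")
    print(encoded["input_ids"])  # wordpiece ids, with [CLS]/[SEP] added
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))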
3,003
41.914286
126
py
SLT-FAI
SLT-FAI-main/transformers/tokenization_flaubert.py
# coding=utf-8 # Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Flaubert, based on XLM.""" import unicodedata import six from .tokenization_xlm import XLMTokenizer from .utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "flaubert/flaubert_small_cased": "https://s3.amazonaws.com/models.huggingface.co/bert/flaubert/flaubert_small_cased/vocab.json", "flaubert/flaubert_base_uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/flaubert/flaubert_base_uncased/vocab.json", "flaubert/flaubert_base_cased": "https://s3.amazonaws.com/models.huggingface.co/bert/flaubert/flaubert_base_cased/vocab.json", "flaubert/flaubert_large_cased": "https://s3.amazonaws.com/models.huggingface.co/bert/flaubert/flaubert_large_cased/vocab.json", }, "merges_file": { "flaubert/flaubert_small_cased": "https://s3.amazonaws.com/models.huggingface.co/bert/flaubert/flaubert_small_cased/merges.txt", "flaubert/flaubert_base_uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/flaubert/flaubert_base_uncased/merges.txt", "flaubert/flaubert_base_cased": "https://s3.amazonaws.com/models.huggingface.co/bert/flaubert/flaubert_base_cased/merges.txt", "flaubert/flaubert_large_cased": "https://s3.amazonaws.com/models.huggingface.co/bert/flaubert/flaubert_large_cased/merges.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "flaubert/flaubert_small_cased": 512, "flaubert/flaubert_base_uncased": 512, "flaubert/flaubert_base_cased": 512, "flaubert/flaubert_large_cased": 512, } PRETRAINED_INIT_CONFIGURATION = { "flaubert/flaubert_small_cased": {"do_lowercase": False}, "flaubert/flaubert_base_uncased": {"do_lowercase": True}, "flaubert/flaubert_base_cased": {"do_lowercase": False}, "flaubert/flaubert_large_cased": {"do_lowercase": False}, } def convert_to_unicode(text): """ Converts `text` to Unicode (if it's not already), assuming UTF-8 input. """ # six_ensure_text is copied from https://github.com/benjaminp/six def six_ensure_text(s, encoding="utf-8", errors="strict"): if isinstance(s, six.binary_type): return s.decode(encoding, errors) elif isinstance(s, six.text_type): return s else: raise TypeError("not expecting type '%s'" % type(s)) return six_ensure_text(text, encoding="utf-8", errors="ignore") class FlaubertTokenizer(XLMTokenizer): """ Construct a Flaubert tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following: - Moses preprocessing and tokenization. - Normalizing all inputs text. - The arguments ``special_tokens`` and the function ``set_special_tokens``, can be used to add additional symbols (like "__classify__") to a vocabulary. - The argument :obj:`do_lowercase` controls lower casing (automatically set for pretrained vocabularies). This tokenizer inherits from :class:`~transformers.XLMTokenizer`. 
Please check the superclass for usage examples and documentation regarding arguments. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self, do_lowercase=False, **kwargs): super().__init__(**kwargs) self.do_lowercase = do_lowercase self.do_lowercase_and_remove_accent = False def preprocess_text(self, text): text = text.replace("``", '"').replace("''", '"') text = convert_to_unicode(text) text = unicodedata.normalize("NFC", text) if self.do_lowercase: text = text.lower() return text def _tokenize(self, text, bypass_tokenizer=False): """ Tokenize a string given language code using Moses. Details of tokenization: - [sacremoses](https://github.com/alvations/sacremoses): port of Moses - Install with `pip install sacremoses` Args: - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False) (bool). If True, we only apply BPE. Returns: List of tokens. """ lang = "fr" if lang and self.lang2id and lang not in self.lang2id: logger.error( "Supplied language code not found in lang2id mapping. Please check that your language is supported by the loaded pretrained model." ) if bypass_tokenizer: text = text.split() else: text = self.preprocess_text(text) text = self.moses_pipeline(text, lang=lang) text = self.moses_tokenize(text, lang=lang) split_tokens = [] for token in text: if token: split_tokens.extend([t for t in self.bpe(token).split(" ")]) return split_tokens
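

# ---------------------------------------------------------------------------
# Editor's note: illustrative usage sketch, not part of the original module.
# It shows the preprocessing and Moses+BPE tokenization defined above. It
# assumes ``sacremoses`` is installed and that the pretrained vocab files can
# be downloaded; the example sentences are arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_uncased")
    # The uncased checkpoint sets do_lowercase=True, so quotes are normalized
    # and the text is lowercased before Moses tokenization and BPE.
    print(tokenizer.preprocess_text("``Bonjour'' le Monde"))
    print(tokenizer.tokenize("Bonjour, le monde est petit."))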
5,738
38.308219
150
py
SLT-FAI
SLT-FAI-main/transformers/modeling_tf_transfo_xl_utilities.py
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A TF 2.0 Adaptive Softmax for Transformer XL model. """ import tensorflow as tf from .modeling_tf_utils import shape_list class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer): def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.d_embed = d_embed self.d_proj = d_proj self.cutoffs = cutoffs + [vocab_size] self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1 self.head_size = self.shortlist_size + self.n_clusters self.keep_order = keep_order self.out_layers = [] self.out_projs = [] def build(self, input_shape): if self.n_clusters > 0: self.cluster_weight = self.add_weight( shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight" ) self.cluster_bias = self.add_weight( shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias" ) if self.div_val == 1: for i in range(len(self.cutoffs)): if self.d_proj != self.d_embed: weight = self.add_weight( shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name="out_projs_._{}".format(i), ) self.out_projs.append(weight) else: self.out_projs.append(None) weight = self.add_weight( shape=( self.vocab_size, self.d_embed, ), initializer="zeros", trainable=True, name="out_layers_._{}_._weight".format(i), ) bias = self.add_weight( shape=(self.vocab_size,), initializer="zeros", trainable=True, name="out_layers_._{}_._bias".format(i), ) self.out_layers.append((weight, bias)) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] d_emb_i = self.d_embed // (self.div_val ** i) weight = self.add_weight( shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name="out_projs_._{}".format(i) ) self.out_projs.append(weight) weight = self.add_weight( shape=( r_idx - l_idx, d_emb_i, ), initializer="zeros", trainable=True, name="out_layers_._{}_._weight".format(i), ) bias = self.add_weight( shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name="out_layers_._{}_._bias".format(i), ) self.out_layers.append((weight, bias)) super().build(input_shape) @staticmethod def _logit(x, W, b, proj=None): y = x if proj is not None: y = tf.einsum("ibd,ed->ibe", y, proj) return tf.einsum("ibd,nd->ibn", y, W) + b @staticmethod def _gather_logprob(logprob, target): lp_size = shape_list(logprob) r = tf.range(lp_size[0]) idx = tf.stack([r, target], 1) return tf.gather_nd(logprob, idx) def call(self, hidden, target, return_mean=True, training=False): head_logprob = 0 if self.n_clusters == 0: output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0]) if target is not None: loss = 
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output) out = tf.nn.log_softmax(output, axis=-1) else: hidden_sizes = shape_list(hidden) out = [] loss = tf.zeros(hidden_sizes[:2], dtype=tf.float32) for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: mask = (target >= l_idx) & (target < r_idx) mask_idx = tf.where(mask) cur_target = tf.boolean_mask(target, mask) - l_idx if self.div_val == 1: cur_W = self.out_layers[0][0][l_idx:r_idx] cur_b = self.out_layers[0][1][l_idx:r_idx] else: cur_W = self.out_layers[i][0] cur_b = self.out_layers[i][1] if i == 0: cur_W = tf.concat([cur_W, self.cluster_weight], 0) cur_b = tf.concat([cur_b, self.cluster_bias], 0) head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0]) head_logprob = tf.nn.log_softmax(head_logit) out.append(head_logprob[..., : self.cutoffs[0]]) if target is not None: cur_head_logprob = tf.boolean_mask(head_logprob, mask) cur_logprob = self._gather_logprob(cur_head_logprob, cur_target) else: tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i]) tail_logprob = tf.nn.log_softmax(tail_logit) cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(logprob_i) if target is not None: cur_head_logprob = tf.boolean_mask(head_logprob, mask) cur_tail_logprob = tf.boolean_mask(tail_logprob, mask) cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(mask_idx, -cur_logprob, tf.cast(shape_list(loss), dtype=tf.int64)) out = tf.concat(out, axis=-1) if target is not None: if return_mean: loss = tf.reduce_mean(loss) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(loss) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "") return out
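

# ---------------------------------------------------------------------------
# Editor's note: illustrative usage sketch, not part of the original module.
# It builds the adaptive softmax above on random toy inputs (the sizes and
# cutoffs are arbitrary, div_val keeps its default of 1) and prints the shape
# of the returned log-probabilities.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    vocab_size, d_embed, d_proj = 1000, 32, 32
    adaptive_softmax = TFAdaptiveSoftmaxMask(vocab_size, d_embed, d_proj, cutoffs=[100, 500])

    seq_len, batch_size = 5, 2
    hidden = tf.random.uniform((seq_len, batch_size, d_proj))
    target = tf.random.uniform((seq_len, batch_size), maxval=vocab_size, dtype=tf.int32)

    # Returns log-probabilities over the full vocabulary; the (mean) negative
    # log-likelihood of `target` is tracked via add_loss/add_metric.
    log_probs = adaptive_softmax(hidden, target, return_mean=True, training=False)
    print(log_probs.shape)  # (seq_len, batch_size, vocab_size)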
7,818
41.494565
118
py
SLT-FAI
SLT-FAI-main/transformers/modeling_flax_bert.py
# coding=utf-8 # Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Dict import numpy as np import flax.linen as nn import jax import jax.numpy as jnp from flax.linen import compact from .configuration_bert import BertConfig from .file_utils import add_start_docstrings from .modeling_flax_utils import FlaxPreTrainedModel, gelu from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "BertConfig" _TOKENIZER_FOR_DOC = "BertTokenizer" BERT_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BertTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ class FlaxBertLayerNorm(nn.Module): """Layer normalization (https://arxiv.org/abs/1607.06450). Operates on the last axis of the input data. """ epsilon: float = 1e-6 dtype: jnp.dtype = jnp.float32 bias: bool = True scale: bool = True bias_init: jnp.ndarray = nn.initializers.zeros scale_init: jnp.ndarray = nn.initializers.ones @compact def __call__(self, x): """Applies layer normalization on the input. It normalizes the activations of the layer for each given example in a batch independently, rather than across a batch like Batch Normalization. i.e. applies a transformation that maintains the mean activation within each example close to 0 and the activation standard deviation close to 1. Args: x: the inputs epsilon: A small float added to variance to avoid dividing by zero. dtype: the dtype of the computation (default: float32). bias: If True, bias (beta) is added. scale: If True, multiply by scale (gamma). When the next layer is linear (also e.g. nn.relu), this can be disabled since the scaling will be done by the next layer. bias_init: Initializer for bias, by default, zero. scale_init: Initializer for scale, by default, one. Returns: Normalized inputs (the same shape as inputs). 
""" features = x.shape[-1] mean = jnp.mean(x, axis=-1, keepdims=True) mean2 = jnp.mean(jax.lax.square(x), axis=-1, keepdims=True) var = mean2 - jax.lax.square(mean) mul = jax.lax.rsqrt(var + self.epsilon) if self.scale: mul = mul * jnp.asarray(self.param("gamma", self.scale_init, (features,)), self.dtype) y = (x - mean) * mul if self.bias: y = y + jnp.asarray(self.param("beta", self.bias_init, (features,)), self.dtype) return y class FlaxBertEmbedding(nn.Module): """ Specify a new class for doing the embedding stuff as Flax's one use 'embedding' for the parameter name and PyTorch use 'weight' """ vocab_size: int hidden_size: int emb_init: Callable[..., np.ndarray] = nn.initializers.normal(stddev=0.1) @compact def __call__(self, inputs): embedding = self.param("weight", self.emb_init, (self.vocab_size, self.hidden_size)) return jnp.take(embedding, inputs, axis=0) class FlaxBertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" vocab_size: int hidden_size: int type_vocab_size: int max_length: int @compact def __call__(self, input_ids, token_type_ids, position_ids, attention_mask): # Embed w_emb = FlaxBertEmbedding(self.vocab_size, self.hidden_size, name="word_embeddings")( jnp.atleast_2d(input_ids.astype("i4")) ) p_emb = FlaxBertEmbedding(self.max_length, self.hidden_size, name="position_embeddings")( jnp.atleast_2d(position_ids.astype("i4")) ) t_emb = FlaxBertEmbedding(self.type_vocab_size, self.hidden_size, name="token_type_embeddings")( jnp.atleast_2d(token_type_ids.astype("i4")) ) # Sum all embeddings summed_emb = w_emb + jnp.broadcast_to(p_emb, w_emb.shape) + t_emb # Layer Norm layer_norm = FlaxBertLayerNorm(name="layer_norm")(summed_emb) return layer_norm class FlaxBertAttention(nn.Module): num_heads: int head_size: int @compact def __call__(self, hidden_state, attention_mask): self_att = nn.attention.SelfAttention(num_heads=self.num_heads, qkv_features=self.head_size, name="self")( hidden_state, attention_mask ) layer_norm = FlaxBertLayerNorm(name="layer_norm")(self_att + hidden_state) return layer_norm class FlaxBertIntermediate(nn.Module): output_size: int @compact def __call__(self, hidden_state): # TODO: Add ACT2FN reference to change activation function dense = nn.Dense(features=self.output_size, name="dense")(hidden_state) return gelu(dense) class FlaxBertOutput(nn.Module): @compact def __call__(self, intermediate_output, attention_output): hidden_state = nn.Dense(attention_output.shape[-1], name="dense")(intermediate_output) hidden_state = FlaxBertLayerNorm(name="layer_norm")(hidden_state + attention_output) return hidden_state class FlaxBertLayer(nn.Module): num_heads: int head_size: int intermediate_size: int @compact def __call__(self, hidden_state, attention_mask): attention = FlaxBertAttention(self.num_heads, self.head_size, name="attention")(hidden_state, attention_mask) intermediate = FlaxBertIntermediate(self.intermediate_size, name="intermediate")(attention) output = FlaxBertOutput(name="output")(intermediate, attention) return output class FlaxBertLayerCollection(nn.Module): """ Stores N BertLayer(s) """ num_layers: int num_heads: int head_size: int intermediate_size: int @compact def __call__(self, inputs, attention_mask): assert self.num_layers > 0, f"num_layers should be >= 1, got ({self.num_layers})" # Initialize input / output input_i = inputs # Forward over all encoders for i in range(self.num_layers): layer = FlaxBertLayer(self.num_heads, self.head_size, self.intermediate_size, name=f"{i}") input_i = 
layer(input_i, attention_mask) return input_i class FlaxBertEncoder(nn.Module): num_layers: int num_heads: int head_size: int intermediate_size: int @compact def __call__(self, hidden_state, attention_mask): layer = FlaxBertLayerCollection( self.num_layers, self.num_heads, self.head_size, self.intermediate_size, name="layer" )(hidden_state, attention_mask) return layer class FlaxBertPooler(nn.Module): @compact def __call__(self, hidden_state): cls_token = hidden_state[:, 0] out = nn.Dense(hidden_state.shape[-1], name="dense")(cls_token) return jax.lax.tanh(out) class FlaxBertModule(nn.Module): vocab_size: int hidden_size: int type_vocab_size: int max_length: int num_encoder_layers: int num_heads: int head_size: int intermediate_size: int @compact def __call__(self, input_ids, token_type_ids, position_ids, attention_mask): # Embedding embeddings = FlaxBertEmbeddings( self.vocab_size, self.hidden_size, self.type_vocab_size, self.max_length, name="embeddings" )(input_ids, token_type_ids, position_ids, attention_mask) # N stacked encoding layers encoder = FlaxBertEncoder( self.num_encoder_layers, self.num_heads, self.head_size, self.intermediate_size, name="encoder" )(embeddings, attention_mask) pooled = FlaxBertPooler(name="pooler")(encoder) return encoder, pooled @add_start_docstrings( "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", BERT_START_DOCSTRING, ) class FlaxBertModel(FlaxPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. 
""" model_class = FlaxBertModule config_class = BertConfig base_model_prefix = "bert" @staticmethod def convert_from_pytorch(pt_state: Dict, config: BertConfig) -> Dict: jax_state = dict(pt_state) # Need to change some parameters name to match Flax names so that we don't have to fork any layer for key, tensor in pt_state.items(): # Key parts key_parts = set(key.split(".")) # Every dense layer has "kernel" parameters instead of "weight" if "dense.weight" in key: del jax_state[key] key = key.replace("weight", "kernel") jax_state[key] = tensor # SelfAttention needs also to replace "weight" by "kernel" if {"query", "key", "value"} & key_parts: # Flax SelfAttention decomposes the heads (num_head, size // num_heads) if "bias" in key: jax_state[key] = tensor.reshape((config.num_attention_heads, -1)) elif "weight": del jax_state[key] key = key.replace("weight", "kernel") tensor = tensor.reshape((config.num_attention_heads, -1, config.hidden_size)).transpose((2, 0, 1)) jax_state[key] = tensor # SelfAttention output is not a separate layer, remove one nesting if "attention.output.dense" in key: del jax_state[key] key = key.replace("attention.output.dense", "attention.self.out") jax_state[key] = tensor # SelfAttention output is not a separate layer, remove nesting on layer norm if "attention.output.LayerNorm" in key: del jax_state[key] key = key.replace("attention.output.LayerNorm", "attention.LayerNorm") jax_state[key] = tensor # There are some transposed parameters w.r.t their PyTorch counterpart if "intermediate.dense.kernel" in key or "output.dense.kernel" in key: jax_state[key] = tensor.T # Self Attention output projection needs to be transposed if "out.kernel" in key: jax_state[key] = tensor.reshape((config.hidden_size, config.num_attention_heads, -1)).transpose( 1, 2, 0 ) # Pooler needs to transpose its kernel if "pooler.dense.kernel" in key: jax_state[key] = tensor.T # Handle LayerNorm conversion if "LayerNorm" in key: del jax_state[key] # Replace LayerNorm by layer_norm new_key = key.replace("LayerNorm", "layer_norm") if "weight" in key: new_key = new_key.replace("weight", "gamma") elif "bias" in key: new_key = new_key.replace("bias", "beta") jax_state[new_key] = tensor return jax_state def __init__(self, config: BertConfig, state: dict, seed: int = 0, **kwargs): model = FlaxBertModule( vocab_size=config.vocab_size, hidden_size=config.hidden_size, type_vocab_size=config.type_vocab_size, max_length=config.max_position_embeddings, num_encoder_layers=config.num_hidden_layers, num_heads=config.num_attention_heads, head_size=config.hidden_size, intermediate_size=config.intermediate_size, ) super().__init__(config, model, state, seed) @property def module(self) -> nn.Module: return self._module def __call__(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None): if token_type_ids is None: token_type_ids = jnp.ones_like(input_ids) if position_ids is None: position_ids = jnp.arange(jnp.atleast_2d(input_ids).shape[-1]) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) return self.model.apply( {"params": self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(token_type_ids, dtype="i4"), jnp.array(position_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), )
16,952
37.617312
120
py
SLT-FAI
SLT-FAI-main/transformers/configuration_mbart.py
# coding=utf-8
# Copyright 2020 The Fairseq Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MBART configuration """

from .configuration_bart import BartConfig
from .utils import logging


logger = logging.get_logger(__name__)

MBART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mbart-large-en-ro": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/mbart-large-en-ro/config.json",
    "facebook/mbart-large-cc25": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/mbart-large-cc25/config.json",
}


class MBartConfig(BartConfig):
    """
    This is the configuration class to store the configuration of a
    :class:`~transformers.MBartForConditionalGeneration`. It is used to instantiate an mBART model according to the
    specified arguments, defining the model architecture.

    Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
    outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.

    Args:
        vocab_size (:obj:`int`, `optional`, defaults to 250027):
            Vocabulary size of the MBART model. Defines the number of different tokens that can be represented by the
            :obj:`inputs_ids` passed when calling :class:`~transformers.MBartForConditionalGeneration`.
        d_model (:obj:`int`, `optional`, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (:obj:`int`, `optional`, defaults to 12):
            Number of encoder layers.
        decoder_layers (:obj:`int`, `optional`, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (:obj:`int`, `optional`, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (:obj:`int`, `optional`, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder.
        encoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the encoder.
        activation_function (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string,
            :obj:`"gelu"`, :obj:`"relu"`, :obj:`"swish"` and :obj:`"gelu_new"` are supported.
        dropout (:obj:`float`, `optional`, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (:obj:`float`, `optional`, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (:obj:`float`, `optional`, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        classifier_dropout (:obj:`float`, `optional`, defaults to 0.0):
            The dropout ratio for the classifier.
        max_position_embeddings (:obj:`int`, `optional`, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (:obj:`float`, `optional`, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        add_bias_logits (:obj:`bool`, `optional`, defaults to :obj:`False`):
            To be documented; specific to Marian.
        normalize_before (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to call layernorm before attention ops.
        normalize_embedding (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to call layernorm after embeddings. Only True for Bart.
        static_position_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to use static (sinusoidal) position embeddings instead of learned ones.
        add_final_layer_norm (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to apply a final layer norm to the encoder and decoder outputs.
        scale_embedding (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Scale embeddings by dividing by sqrt(d_model).
        eos_token_id (:obj:`int`, `optional`, defaults to 2):
            End of stream token id.
        pad_token_id (:obj:`int`, `optional`, defaults to 1):
            Padding token id.
        bos_token_id (:obj:`int`, `optional`, defaults to 0):
            Beginning of stream token id.
        encoder_layerdrop (:obj:`float`, `optional`, defaults to 0.0):
            The LayerDrop probability for the encoder. See the `LayerDrop paper
            <https://arxiv.org/abs/1909.11556>`__ for more details.
        decoder_layerdrop (:obj:`float`, `optional`, defaults to 0.0):
            The LayerDrop probability for the decoder. See the `LayerDrop paper
            <https://arxiv.org/abs/1909.11556>`__ for more details.
        extra_pos_embeddings (:obj:`int`, `optional`, defaults to 2):
            How many extra learned positional embeddings to use. Should be equal to :obj:`pad_token_id+1`.
        is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether this is an encoder/decoder model.
        force_bos_token_to_be_generated (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to force the BOS token to be generated at step 1 (after ``decoder_start_token_id``).
    """

    model_type = "mbart"
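

# ---------------------------------------------------------------------------
# Editor's note: illustrative usage sketch, not part of the original module.
# MBartConfig only overrides ``model_type``; unspecified arguments fall back to
# the BartConfig defaults, so mBART-specific sizes must be passed explicitly
# (or loaded from a pretrained checkpoint such as "facebook/mbart-large-cc25").
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = MBartConfig(vocab_size=250027, d_model=1024, encoder_layers=12, decoder_layers=12)
    print(config.model_type)  # "mbart"
    print(config.vocab_size, config.d_model, config.encoder_layers)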
6,234
57.820755
127
py
SLT-FAI
SLT-FAI-main/transformers/generation_tf_utils.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import tensorflow as tf from .utils import logging logger = logging.get_logger(__name__) class TFGenerationMixin: """ A class contraining all of the functions supporting generation, to be used as a mixin in :class:`~transfomers.TFPreTrainedModel`. """ def prepare_inputs_for_generation(self, inputs, **kwargs): """ Implement in subclasses of :class:`~transfomers.TFPreTrainedModel` for custom behavior to prepare inputs in the generate method. """ return {"inputs": inputs} def _use_cache(self, outputs, use_cache): """During generation, decide whether to pass the `past` variable to the next forward pass.""" if len(outputs) <= 1 or use_cache is False: return False if hasattr(self.config, "mem_len") and self.config.mem_len == 0: return False return True def generate( self, input_ids=None, max_length=None, min_length=None, do_sample=None, early_stopping=None, num_beams=None, temperature=None, top_k=None, top_p=None, repetition_penalty=None, bad_words_ids=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, length_penalty=None, no_repeat_ngram_size=None, num_return_sequences=None, attention_mask=None, decoder_start_token_id=None, use_cache=None, ): r""" Generates sequences for models with a language modeling head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling. Adapted in part from `Facebook's XLM beam search code <https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529>`__. Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values indicated are the default values of those config. Most of these parameters are explained in more detail in `this blog post <https://huggingface.co/blog/how-to-generate>`__. Parameters: input_ids (:obj:`tf.Tensor` of :obj:`dtype=tf.int32` and shape :obj:`(batch_size, sequence_length)`, `optional`): The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty :obj:`tf.Tensor` of shape :obj:`(1,)`. max_length (:obj:`int`, `optional`, defaults to 20): The maximum length of the sequence to be generated. min_length (:obj:`int`, `optional`, defaults to 10): The minimum length of the sequence to be generated. do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use sampling ; use greedy decoding otherwise. early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not. num_beams (:obj:`int`, `optional`, defaults to 1): Number of beams for beam search. 1 means no beam search. 
temperature (:obj:`float`, `optional`, defaults to 1.0): The value used to module the next token probabilities. top_k (:obj:`int`, `optional`, defaults to 50): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (:obj:`float`, `optional`, defaults to 1.0): If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or higher are kept for generation. repetition_penalty (:obj:`float`, `optional`, defaults to 1.0): The parameter for repetition penalty. 1.0 means no penalty. See `this paper <https://arxiv.org/pdf/1909.05858.pdf>`__ for more details. pad_token_id (:obj:`int`, `optional`): The id of the `padding` token. bos_token_id (:obj:`int`, `optional`): The id of the `beginning-of-sequence` token. eos_token_id (:obj:`int`, `optional`): The id of the `end-of-sequence` token. length_penalty (:obj:`float`, `optional`, defaults to 1.0): Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer sequences. no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0): If set to int > 0, all ngrams of that size can only occur once. bad_words_ids(:obj:`List[int]`, `optional`): List of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`. num_return_sequences(:obj:`int`, `optional`, defaults to 1): The number of independently computed returned sequences for each element in the batch. attention_mask (:obj:`tf.Tensor` of :obj:`dtype=tf.int32` and shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape as :obj:`input_ids` that masks the pad token. `What are attention masks? <../glossary.html#attention-mask>`__ decoder_start_token_id (:obj:`int`, `optional`): If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token. use_cache: (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding. model_specific_kwargs: Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. Return: :obj:`tf.Tensor` of :obj:`dtype=tf.int32` and shape :obj:`(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. Examples:: tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache. outputs = model.generate(max_length=40) # do greedy decoding print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache. 
input_context = 'The dog' input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog' for i in range(3): # 3 output sequences were generated print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache. input_context = 'The dog' input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # generate 3 candidates using sampling for i in range(3): # 3 output sequences were generated print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache. input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer model = TFAutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache. input_context = 'My cute dog' bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']] input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated """ # We cannot generate if the model does not have a LM head if self.get_output_embeddings() is None: raise AttributeError( "You tried to generate sequences with a model that does not have a LM Head." "Please use another model class (e.g. 
`TFOpenAIGPTLMHeadModel`, `TFXLNetLMHeadModel`, `TFGPT2LMHeadModel`, `TFCTRLLMHeadModel`, `TFT5ForConditionalGeneration`, `TFTransfoXLLMHeadModel`)" ) max_length = max_length if max_length is not None else self.config.max_length min_length = min_length if min_length is not None else self.config.min_length do_sample = do_sample if do_sample is not None else self.config.do_sample early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping use_cache = use_cache if use_cache is not None else self.config.use_cache num_beams = num_beams if num_beams is not None else self.config.num_beams temperature = temperature if temperature is not None else self.config.temperature top_k = top_k if top_k is not None else self.config.top_k top_p = top_p if top_p is not None else self.config.top_p repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty no_repeat_ngram_size = ( no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size ) bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id ) if input_ids is not None: batch_size = shape_list(input_ids)[0] # overriden by the input batch_size else: batch_size = 1 assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictely positive integer." assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer." assert isinstance(do_sample, bool), "`do_sample` should be a boolean." assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean." assert isinstance(use_cache, bool), "`use_cache` should be a boolean." assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer." assert temperature > 0, "`temperature` should be strictely positive." assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer." assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1." assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1." assert input_ids is not None or ( isinstance(bos_token_id, int) and bos_token_id >= 0 ), "If input_ids is not defined, `bos_token_id` should be a positive integer." assert pad_token_id is None or ( isinstance(pad_token_id, int) and (pad_token_id >= 0) ), "`pad_token_id` should be a positive integer." assert (eos_token_id is None) or ( isinstance(eos_token_id, int) and (eos_token_id >= 0) ), "`eos_token_id` should be a positive integer." assert length_penalty > 0, "`length_penalty` should be strictely positive." assert ( isinstance(num_return_sequences, int) and num_return_sequences > 0 ), "`num_return_sequences` should be a strictely positive integer." 
assert ( bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list) ), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated" if input_ids is None: assert isinstance(bos_token_id, int) and bos_token_id >= 0, ( "you should either supply a context to complete as `input_ids` input " "or a `bos_token_id` (integer >= 0) as a first token to start the generation." ) input_ids = tf.fill((batch_size, 1), bos_token_id) else: assert len(shape_list(input_ids)) == 2, "Input prompt should be of shape (batch_size, sequence length)." # not allow to duplicate outputs when greedy decoding if do_sample is False: if num_beams == 1: # no_beam_search greedy generation conditions assert ( num_return_sequences == 1 ), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1" else: # beam_search greedy generation conditions assert ( num_beams >= num_return_sequences ), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences" # create attention mask if necessary # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140 if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids.numpy()): attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), dtype=tf.int32) elif attention_mask is None: attention_mask = tf.ones_like(input_ids) if pad_token_id is None and eos_token_id is not None: logger.warning( "Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id) ) pad_token_id = eos_token_id # current position and vocab size cur_len = shape_list(input_ids)[1] # unused vocab_size = self.config.vocab_size # set effective batch size and effective batch multiplier according to do_sample if do_sample: effective_batch_size = batch_size * num_return_sequences effective_batch_mult = num_return_sequences else: effective_batch_size = batch_size effective_batch_mult = 1 if self.config.is_encoder_decoder: if decoder_start_token_id is None: decoder_start_token_id = bos_token_id assert ( decoder_start_token_id is not None ), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation" assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self) assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder) # get encoder and store encoder outputs encoder = self.get_encoder() encoder_outputs = encoder(input_ids, attention_mask=attention_mask) # Expand input ids if num_beams > 1 or num_return_sequences > 1 if num_return_sequences > 1 or num_beams > 1: input_ids_len = shape_list(input_ids)[-1] input_ids = tf.broadcast_to( tf.expand_dims(input_ids, 1), (batch_size, effective_batch_mult * num_beams, input_ids_len) ) attention_mask = tf.broadcast_to( tf.expand_dims(attention_mask, 1), (batch_size, effective_batch_mult * num_beams, input_ids_len) ) input_ids = tf.reshape( input_ids, (effective_batch_size * num_beams, input_ids_len) ) # shape: (batch_size * num_return_sequences * num_beams, cur_len) attention_mask = tf.reshape( attention_mask, (effective_batch_size * num_beams, input_ids_len) ) # shape: (batch_size * num_return_sequences * num_beams, cur_len) if self.config.is_encoder_decoder: # create empty decoder_input_ids input_ids = ( tf.ones( (effective_batch_size * num_beams, 1), dtype=tf.int32, ) * 
decoder_start_token_id ) cur_len = 1 assert ( batch_size == encoder_outputs[0].shape[0] ), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} " # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1) expanded_batch_idxs = tf.reshape( tf.repeat(tf.expand_dims(tf.range(batch_size), -1), repeats=num_beams * effective_batch_mult, axis=1), shape=(-1,), ) # expand encoder_outputs encoder_outputs = (tf.gather(encoder_outputs[0], expanded_batch_idxs, axis=0), *encoder_outputs[1:]) else: encoder_outputs = None cur_len = shape_list(input_ids)[-1] assert ( cur_len < max_length ), f"The context has {cur_len} number of tokens, but `max_length` is only {max_length}. Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`" if num_beams > 1: output = self._generate_beam_search( input_ids, cur_len=cur_len, max_length=max_length, min_length=min_length, do_sample=do_sample, early_stopping=early_stopping, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, pad_token_id=pad_token_id, eos_token_id=eos_token_id, batch_size=effective_batch_size, num_return_sequences=num_return_sequences, length_penalty=length_penalty, num_beams=num_beams, vocab_size=vocab_size, encoder_outputs=encoder_outputs, attention_mask=attention_mask, use_cache=use_cache, ) else: output = self._generate_no_beam_search( input_ids, cur_len=cur_len, max_length=max_length, min_length=min_length, do_sample=do_sample, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, pad_token_id=pad_token_id, eos_token_id=eos_token_id, batch_size=effective_batch_size, vocab_size=vocab_size, encoder_outputs=encoder_outputs, attention_mask=attention_mask, use_cache=use_cache, ) return output def _generate_no_beam_search( self, input_ids, cur_len, max_length, min_length, do_sample, temperature, top_k, top_p, repetition_penalty, no_repeat_ngram_size, bad_words_ids, pad_token_id, eos_token_id, batch_size, vocab_size, encoder_outputs, attention_mask, use_cache, ): """Generate sequences for each example without beam search (num_beams == 1). All returned sequence are generated independantly. 
""" # length of generated sentences / unfinished sentences unfinished_sents = tf.ones_like(input_ids[:, 0]) sent_lengths = tf.ones_like(input_ids[:, 0]) * max_length past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models while cur_len < max_length: model_inputs = self.prepare_inputs_for_generation( input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache ) outputs = self(**model_inputs) next_token_logits = outputs[0][:, -1, :] # if model has past, then set the past variable to speed up decoding if self._use_cache(outputs, use_cache): past = outputs[1] # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858) if repetition_penalty != 1.0: next_token_logits_penalties = _create_next_token_logits_penalties( input_ids, next_token_logits, repetition_penalty ) next_token_logits = tf.math.multiply(next_token_logits, next_token_logits_penalties) if no_repeat_ngram_size > 0: # calculate a list of banned tokens to prevent repetitively generating the same ngrams # from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345 banned_tokens = calc_banned_ngram_tokens(input_ids, batch_size, no_repeat_ngram_size, cur_len) # create banned_tokens boolean mask banned_tokens_indices_mask = [] for banned_tokens_slice in banned_tokens: banned_tokens_indices_mask.append( [True if token in banned_tokens_slice else False for token in range(vocab_size)] ) next_token_logits = set_tensor_by_indices_to_value( next_token_logits, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf") ) if bad_words_ids is not None: # calculate a list of banned tokens according to bad words banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids) banned_tokens_indices_mask = [] for banned_tokens_slice in banned_tokens: banned_tokens_indices_mask.append( [True if token in banned_tokens_slice else False for token in range(vocab_size)] ) next_token_logits = set_tensor_by_indices_to_value( next_token_logits, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf") ) # set eos token prob to zero if min_length is not reached if eos_token_id is not None and cur_len < min_length: # create eos_token_id boolean mask is_token_logit_eos_token = tf.convert_to_tensor( [True if token is eos_token_id else False for token in range(vocab_size)], dtype=tf.bool ) eos_token_indices_mask = tf.broadcast_to(is_token_logit_eos_token, [batch_size, vocab_size]) next_token_logits = set_tensor_by_indices_to_value( next_token_logits, eos_token_indices_mask, -float("inf") ) if do_sample: # Temperature (higher temperature => more likely to sample low probability tokens) if temperature != 1.0: next_token_logits = next_token_logits / temperature # Top-p/top-k filtering next_token_logits = tf_top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p) # Sample next_token = tf.squeeze( tf.random.categorical(next_token_logits, dtype=tf.int32, num_samples=1), axis=1 ) else: # Greedy decoding next_token = tf.math.argmax(next_token_logits, axis=-1, output_type=tf.int32) # update generations and finished sentences if eos_token_id is not None: # pad finished sentences if eos_token_id exist tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents) else: tokens_to_add = next_token # add token and increase length by one input_ids = tf.concat([input_ids, tf.expand_dims(tokens_to_add, -1)], 1) cur_len = cur_len + 1 if eos_token_id is not None: eos_in_sents = 
tokens_to_add == eos_token_id # if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length is_sents_unfinished_and_token_to_add_is_eos = tf.math.multiply( unfinished_sents, tf.cast(eos_in_sents, tf.int32) ) sent_lengths = ( sent_lengths * (1 - is_sents_unfinished_and_token_to_add_is_eos) + cur_len * is_sents_unfinished_and_token_to_add_is_eos ) # unfinished_sents is set to zero if eos in sentence unfinished_sents -= is_sents_unfinished_and_token_to_add_is_eos # stop when there is a </s> in each sentence, or if we exceed the maximul length if tf.math.reduce_max(unfinished_sents) == 0: break # extend attention_mask for new generated input if only decoder if self.config.is_encoder_decoder is False: attention_mask = tf.concat( [attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1 ) # if there are different sentences lengths in the batch, some batches have to be padded min_sent_length = tf.math.reduce_min(sent_lengths) max_sent_length = tf.math.reduce_max(sent_lengths) if min_sent_length != max_sent_length: assert pad_token_id is not None, "`Pad_token_id` has to be defined if batches have different lengths" # finished sents are filled with pad_token padding = tf.ones([batch_size, max_sent_length.numpy()], dtype=tf.int32) * pad_token_id # create length masks for tf.where operation broad_casted_sent_lengths = tf.broadcast_to( tf.expand_dims(sent_lengths, -1), [batch_size, max_sent_length] ) broad_casted_range = tf.transpose( tf.broadcast_to(tf.expand_dims(tf.range(max_sent_length), -1), [max_sent_length, batch_size]) ) decoded = tf.where(broad_casted_range < broad_casted_sent_lengths, input_ids, padding) else: decoded = input_ids return decoded def _generate_beam_search( self, input_ids, cur_len, max_length, min_length, do_sample, early_stopping, temperature, top_k, top_p, repetition_penalty, no_repeat_ngram_size, bad_words_ids, pad_token_id, eos_token_id, batch_size, num_return_sequences, length_penalty, num_beams, vocab_size, encoder_outputs, attention_mask, use_cache, ): """Generate sequences for each example with beam search.""" # generated hypotheses generated_hyps = [ BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping) for _ in range(batch_size) ] # for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times if do_sample is False: beam_scores_begin = tf.zeros((batch_size, 1), dtype=tf.float32) beam_scores_end = tf.ones((batch_size, num_beams - 1), dtype=tf.float32) * (-1e9) beam_scores = tf.concat([beam_scores_begin, beam_scores_end], -1) else: beam_scores = tf.zeros((batch_size, num_beams), dtype=tf.float32) beam_scores = tf.reshape(beam_scores, (batch_size * num_beams,)) # cache compute states past = encoder_outputs # to stay similar to torch : past = (encoder_outputs, None) if encoder_outputs is not None else None # done sentences done = [False for _ in range(batch_size)] while cur_len < max_length: model_inputs = self.prepare_inputs_for_generation( input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache ) outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size) next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size) # if model has past, then set the past variable to speed up decoding if self._use_cache(outputs, use_cache): past = outputs[1] # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858) if repetition_penalty != 1.0: 
next_token_logits_penalties = _create_next_token_logits_penalties( input_ids, next_token_logits, repetition_penalty ) next_token_logits = tf.math.multiply(next_token_logits, next_token_logits_penalties) # Temperature (higher temperature => more likely to sample low probability tokens) if temperature != 1.0: next_token_logits = next_token_logits / temperature # calculate log softmax score scores = tf.nn.log_softmax(next_token_logits, axis=-1) # (batch_size * num_beams, vocab_size) # set eos token prob to zero if min_length is not reached if eos_token_id is not None and cur_len < min_length: # create eos_token_id boolean mask num_batch_hypotheses = batch_size * num_beams is_token_logit_eos_token = tf.convert_to_tensor( [True if token is eos_token_id else False for token in range(vocab_size)], dtype=tf.bool ) eos_token_indices_mask = tf.broadcast_to(is_token_logit_eos_token, [num_batch_hypotheses, vocab_size]) scores = set_tensor_by_indices_to_value(scores, eos_token_indices_mask, -float("inf")) if no_repeat_ngram_size > 0: # calculate a list of banned tokens to prevent repetitively generating the same ngrams # from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345 num_batch_hypotheses = batch_size * num_beams banned_tokens = calc_banned_ngram_tokens( input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len ) # create banned_tokens boolean mask banned_tokens_indices_mask = [] for banned_tokens_slice in banned_tokens: banned_tokens_indices_mask.append( [True if token in banned_tokens_slice else False for token in range(vocab_size)] ) scores = set_tensor_by_indices_to_value( scores, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf") ) if bad_words_ids is not None: # calculate a list of banned tokens according to bad words banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids) banned_tokens_indices_mask = [] for banned_tokens_slice in banned_tokens: banned_tokens_indices_mask.append( [True if token in banned_tokens_slice else False for token in range(vocab_size)] ) scores = set_tensor_by_indices_to_value( scores, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf") ) assert shape_list(scores) == [batch_size * num_beams, vocab_size] if do_sample: _scores = scores + tf.broadcast_to( beam_scores[:, None], (batch_size * num_beams, vocab_size) ) # (batch_size * num_beams, vocab_size) # Top-p/top-k filtering _scores = tf_top_k_top_p_filtering( _scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2 ) # (batch_size * num_beams, vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search) _scores = tf.reshape(_scores, (batch_size, num_beams * vocab_size)) next_tokens = sample_without_replacement( _scores, num_samples=2 * num_beams ) # (batch_size, 2 * num_beams) # Compute next scores next_scores = tf.gather(_scores, next_tokens, batch_dims=1) # (batch_size, 2 * num_beams) # sort the sampled vector to make sure that the first num_beams samples are the best next_scores_indices = tf.argsort(next_scores, direction="DESCENDING", axis=1) next_scores = tf.gather(next_scores, next_scores_indices, batch_dims=1) # (batch_size, num_beams * 2) next_tokens = tf.gather(next_tokens, next_scores_indices, batch_dims=1) # (batch_size, num_beams * 2) else: # Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product) next_scores = scores + tf.broadcast_to( 
beam_scores[:, None], (batch_size * num_beams, vocab_size) ) # (batch_size * num_beams, vocab_size) # re-organize to group the beam together (we are keeping top hypothesis accross beams) next_scores = tf.reshape( next_scores, (batch_size, num_beams * vocab_size) ) # (batch_size, num_beams * vocab_size) next_scores, next_tokens = tf.math.top_k(next_scores, k=2 * num_beams, sorted=True) assert shape_list(next_scores) == shape_list(next_tokens) == [batch_size, 2 * num_beams] # next batch beam content next_batch_beam = [] # for each sentence for batch_idx in range(batch_size): # if we are done with this sentence if done[batch_idx]: assert ( len(generated_hyps[batch_idx]) >= num_beams ), "Batch can only be done if at least {} beams have been generated".format(num_beams) assert ( eos_token_id is not None and pad_token_id is not None ), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined" next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch continue # next sentence beam content next_sent_beam = [] # next tokens for this sentence for beam_token_rank, (beam_token_id, beam_token_score) in enumerate( zip(next_tokens[batch_idx], next_scores[batch_idx]) ): # get beam and token IDs beam_id = beam_token_id // vocab_size token_id = beam_token_id % vocab_size effective_beam_id = batch_idx * num_beams + beam_id # add to generated hypotheses if end of sentence or last iteration if (eos_token_id is not None) and (token_id.numpy() == eos_token_id): # if beam_token does not belong to top num_beams tokens, it should not be added is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams if is_beam_token_worse_than_top_num_beams: continue generated_hyps[batch_idx].add( tf.identity(input_ids[effective_beam_id]), beam_token_score.numpy() ) else: # add next predicted token if it is not eos_token next_sent_beam.append((beam_token_score, token_id, effective_beam_id)) # the beam for next step is full if len(next_sent_beam) == num_beams: break # Check if we are done so that we can save a pad step if all(done) done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done( tf.reduce_max(next_scores[batch_idx]).numpy(), cur_len ) # update next beam content assert len(next_sent_beam) == num_beams, "Beam should always be full" next_batch_beam.extend(next_sent_beam) assert len(next_batch_beam) == num_beams * (batch_idx + 1) # stop when we are done with each sentence if all(done): break # sanity check / prepare next batch assert len(next_batch_beam) == batch_size * num_beams beam_scores = tf.convert_to_tensor([x[0] for x in next_batch_beam], dtype=tf.float32) beam_tokens = tf.convert_to_tensor([x[1] for x in next_batch_beam], dtype=tf.int32) beam_idx = tf.convert_to_tensor([x[2] for x in next_batch_beam], dtype=tf.int32) # re-order batch and update current length input_ids = tf.stack([tf.identity(input_ids[x, :]) for x in beam_idx]) input_ids = tf.concat([input_ids, tf.expand_dims(beam_tokens, 1)], axis=-1) cur_len = cur_len + 1 # re-order internal states if past is not None: past = self._reorder_cache(past, beam_idx) # extend attention_mask for new generated input if only decoder if self.config.is_encoder_decoder is False: attention_mask = tf.concat( [attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1 ) # finalize all open beam hypotheses and end to generated hypotheses for batch_idx in range(batch_size): # Add all open beam hypothesis to generated_hyps if done[batch_idx]: continue # test that beam scores match previously 
calculated scores if not eos and batch_idx not done if eos_token_id is not None and all( (token_id % vocab_size).numpy().item() != eos_token_id for token_id in next_tokens[batch_idx] ): assert tf.reduce_all( next_scores[batch_idx, :num_beams] == tf.reshape(beam_scores, (batch_size, num_beams))[batch_idx] ), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format( next_scores[:, :num_beams][batch_idx], tf.reshape(beam_scores, (batch_size, num_beams))[batch_idx] ) # need to add best num_beams hypotheses to generated hyps for beam_id in range(num_beams): effective_beam_id = batch_idx * num_beams + beam_id final_score = beam_scores[effective_beam_id].numpy().item() final_tokens = input_ids[effective_beam_id] generated_hyps[batch_idx].add(final_tokens, final_score) # depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch output_batch_size = batch_size if do_sample else batch_size * num_return_sequences output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences # select the best hypotheses sent_lengths_list = [] best = [] # retrieve best hypotheses for i, hypotheses in enumerate(generated_hyps): sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0]) for j in range(output_num_return_sequences_per_batch): best_hyp = sorted_hyps.pop()[1] sent_lengths_list.append(len(best_hyp)) best.append(best_hyp) assert output_batch_size == len(best), "Output batch size {} must match output beam hypotheses {}".format( output_batch_size, len(best) ) sent_lengths = tf.convert_to_tensor(sent_lengths_list, dtype=tf.int32) # shorter batches are filled with pad_token if tf.reduce_min(sent_lengths).numpy() != tf.reduce_max(sent_lengths).numpy(): assert pad_token_id is not None, "`Pad_token_id` has to be defined" sent_max_len = min(tf.reduce_max(sent_lengths).numpy() + 1, max_length) decoded_list = [] # fill with hypothesis and eos_token_id if necessary for i, hypo in enumerate(best): assert sent_lengths[i] == shape_list(hypo)[0] # if sent_length is max_len do not pad if sent_lengths[i] == sent_max_len: decoded_slice = hypo else: # else pad to sent_max_len num_pad_tokens = sent_max_len - sent_lengths[i] padding = pad_token_id * tf.ones((num_pad_tokens,), dtype=tf.int32) decoded_slice = tf.concat([hypo, padding], axis=-1) # finish sentence with EOS token if sent_lengths[i] < max_length: decoded_slice = tf.where( tf.range(sent_max_len, dtype=tf.int32) == sent_lengths[i], eos_token_id * tf.ones((sent_max_len,), dtype=tf.int32), decoded_slice, ) # add to list decoded_list.append(decoded_slice) decoded = tf.stack(decoded_list) else: # none of the hypotheses have an eos_token assert (len(hypo) == max_length for hypo in best) decoded = tf.stack(best) return decoded @staticmethod def _reorder_cache(past, beam_idx): return tuple(tf.gather(layer_past, beam_idx, axis=1) for layer_past in past) def _create_next_token_logits_penalties(input_ids, logits, repetition_penalty): # create logit penalties for already seen input_ids token_penalties = np.ones(shape_list(logits)) prev_input_ids = [np.unique(input_id) for input_id in input_ids.numpy()] for i, prev_input_id in enumerate(prev_input_ids): logit_penalized = logits[i].numpy()[prev_input_id] logit_penalties = np.zeros(logit_penalized.shape) # if previous logit score is < 0 then multiply repetition penalty else divide logit_penalties[logit_penalized < 0] = repetition_penalty logit_penalties[logit_penalized > 0] = 1 / repetition_penalty 
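        # Scatter the computed penalties back into row i of the penalty matrix at the previously generated token ids.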
np.put(token_penalties[i], prev_input_id, logit_penalties) return tf.convert_to_tensor(token_penalties, dtype=tf.float32) def calc_banned_ngram_tokens(prev_input_ids, num_hypos, no_repeat_ngram_size, cur_len): # Copied from fairseq for no_repeat_ngram in beam_search""" if cur_len + 1 < no_repeat_ngram_size: # return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet return [[] for _ in range(num_hypos)] generated_ngrams = [{} for _ in range(num_hypos)] for idx in range(num_hypos): gen_tokens = prev_input_ids[idx].numpy().tolist() generated_ngram = generated_ngrams[idx] for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]): prev_ngram_tuple = tuple(ngram[:-1]) generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] def _get_generated_ngrams(hypo_idx): # Before decoding the next token, prevent decoding of ngrams that have already appeared start_idx = cur_len + 1 - no_repeat_ngram_size ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].numpy().tolist()) return generated_ngrams[hypo_idx].get(ngram_idx, []) banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)] return banned_tokens def calc_banned_bad_words_ids(prev_input_ids, bad_words_ids): banned_tokens = [] def _tokens_match(prev_tokens, tokens): if len(tokens) == 0: # if bad word tokens is just one token always ban it return True if len(tokens) > len(prev_tokens): # if bad word tokens are longer than prev tokens they can't be equal return False if prev_tokens[-len(tokens) :] == tokens: # if tokens match return True else: return False for prev_input_ids_slice in prev_input_ids: banned_tokens_slice = [] for banned_token_seq in bad_words_ids: assert len(banned_token_seq) > 0, "Banned words token sequences {} cannot have an empty list".format( bad_words_ids ) if _tokens_match(prev_input_ids_slice.numpy().tolist(), banned_token_seq[:-1]) is False: # if tokens do not match continue continue banned_tokens_slice.append(banned_token_seq[-1]) banned_tokens.append(banned_tokens_slice) return banned_tokens def tf_top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1): """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) if top_k > 0: keep only top k tokens with highest probability (top-k filtering). if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. 
(http://arxiv.org/abs/1904.09751) Make sure we keep at least min_tokens_to_keep per batch example in the output From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ logits_shape = shape_list(logits) if top_k > 0: top_k = min(max(top_k, min_tokens_to_keep), logits_shape[-1]) # Safety check # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = logits < tf.math.top_k(logits, k=top_k)[0][..., -1, None] logits = set_tensor_by_indices_to_value(logits, indices_to_remove, filter_value) if top_p < 1.0: sorted_indices = tf.argsort(logits, direction="DESCENDING") sorted_logits = tf.gather( logits, sorted_indices, axis=-1, batch_dims=1 ) # expects logits to be of dim (batch_size, vocab_size) cumulative_probs = tf.math.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1) # Remove tokens with cumulative probability above the threshold (token with 0 are kept) sorted_indices_to_remove = cumulative_probs > top_p if min_tokens_to_keep > 1: # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) sorted_indices_to_remove = tf.concat( [ tf.zeros_like(sorted_indices_to_remove[:, :min_tokens_to_keep]), sorted_indices_to_remove[:, min_tokens_to_keep:], ], -1, ) # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove = tf.roll(sorted_indices_to_remove, 1, axis=-1) sorted_indices_to_remove = tf.concat( [tf.zeros_like(sorted_indices_to_remove[:, :1]), sorted_indices_to_remove[:, 1:]], -1, ) # scatter sorted tensors to original indexing indices_to_remove = scatter_values_on_batch_indices(sorted_indices_to_remove, sorted_indices) logits = set_tensor_by_indices_to_value(logits, indices_to_remove, filter_value) return logits def scatter_values_on_batch_indices(values, batch_indices): shape = shape_list(batch_indices) # broadcast batch dim to shape broad_casted_batch_dims = tf.reshape(tf.broadcast_to(tf.expand_dims(tf.range(shape[0]), axis=-1), shape), [1, -1]) # transform batch_indices to pair_indices pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0)) # scatter values to pair indices return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), shape) def set_tensor_by_indices_to_value(tensor, indices, value): # create value_tensor since tensor value assignment is not possible in TF value_tensor = tf.zeros_like(tensor) + value return tf.where(indices, value_tensor, tensor) def sample_without_replacement(logits, num_samples): """ categorical sampling witouth replacement is currently not implemented the gumbel-max trick will do for now see https://github.com/tensorflow/tensorflow/issues/9260 for more info """ z = -tf.math.log(tf.random.uniform(shape_list(logits), 0, 1)) _, indices = tf.nn.top_k(logits + z, num_samples) return indices def shape_list(x): """Deal with dynamic shape in tensorflow cleanly.""" static = x.shape.as_list() dynamic = tf.shape(x) return [dynamic[i] if s is None else s for i, s in enumerate(static)] class BeamHypotheses(object): def __init__(self, num_beams, max_length, length_penalty, early_stopping): """ Initialize n-best list of hypotheses. """ self.max_length = max_length - 1 # ignoring bos_token self.length_penalty = length_penalty self.early_stopping = early_stopping self.num_beams = num_beams self.beams = [] self.worst_score = 1e9 def __len__(self): """ Number of hypotheses in the list. 
""" return len(self.beams) def add(self, hyp, sum_logprobs): """ Add a new hypothesis to the list. """ score = sum_logprobs / len(hyp) ** self.length_penalty if len(self) < self.num_beams or score > self.worst_score: self.beams.append((score, hyp)) if len(self) > self.num_beams: sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)]) del self.beams[sorted_scores[0][1]] self.worst_score = sorted_scores[1][0] else: self.worst_score = min(score, self.worst_score) def is_done(self, best_sum_logprobs, cur_len): """ If there are enough hypotheses and that none of the hypotheses being generated can become better than the worst one in the heap, then we are done with this sentence. """ if len(self) < self.num_beams: return False elif self.early_stopping: return True else: cur_score = best_sum_logprobs / cur_len ** self.length_penalty ret = self.worst_score >= cur_score return ret
54719
48.836066
246
py
SLT-FAI
SLT-FAI-main/transformers/trainer_tf.py
"""Tensorflow trainer class.""" import datetime import math import os import warnings from typing import Callable, Dict, Optional, Tuple import numpy as np import tensorflow as tf from packaging.version import parse from tensorflow.python.distribute.values import PerReplica from .integrations import is_comet_available, is_wandb_available from .modeling_tf_utils import TFPreTrainedModel from .optimization_tf import GradientAccumulator, create_optimizer from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed from .training_args_tf import TFTrainingArguments from .utils import logging if is_wandb_available(): import wandb if is_comet_available(): import comet_ml logger = logging.get_logger(__name__) class TFTrainer: """ TFTrainer is a simple but feature-complete training and eval loop for TensorFlow, optimized for 🤗 Transformers. Args: model (:class:`~transformers.TFPreTrainedModel`): The model to train, evaluate or use for predictions. args (:class:`~transformers.TFTrainingArguments`): The arguments to tweak training. train_dataset (:class:`~tf.data.Dataset`, `optional`): The dataset to use for training. The dataset should yield tuples of ``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling ``model(features, **labels)``. eval_dataset (:class:`~tf.data.Dataset`, `optional`): The dataset to use for evaluation. The dataset should yield tuples of ``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling ``model(features, **labels)``. compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`): The function that will be used to compute metrics at evaluation. Must take a :class:`~transformers.EvalPrediction` and return a dictionary string to metric values. tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`): Object to write to TensorBoard. optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`): A tuple containing the optimizer and the scheduler to use. The optimizer default to an instance of :class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of :class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of :class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else an instance of :class:`~transformers.WarmUp`. kwargs: Deprecated keyword arguments. 
""" def __init__( self, model: TFPreTrainedModel, args: TFTrainingArguments, train_dataset: Optional[tf.data.Dataset] = None, eval_dataset: Optional[tf.data.Dataset] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, tb_writer: Optional[tf.summary.SummaryWriter] = None, optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = ( None, None, ), **kwargs, ): assert parse(tf.__version__).release >= (2, 2, 0), ( "You need to run the TensorFlow trainer with at least the version 2.2.0, your version is %r " % tf.__version__ ) self.model = model self.args = args self.train_dataset = train_dataset self.eval_dataset = eval_dataset self.compute_metrics = compute_metrics self.optimizer, self.lr_scheduler = optimizers self.gradient_accumulator = GradientAccumulator() self.global_step = 0 self.epoch_logging = 0 if "prediction_loss_only" in kwargs: warnings.warn( "Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a future version. Use `args.prediction_loss_only` instead.", FutureWarning, ) self.args.prediction_loss_only = kwargs.pop("prediction_loss_only") assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}." if tb_writer is not None: self.tb_writer = tb_writer else: self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir) if is_wandb_available(): self.setup_wandb() elif os.environ.get("WANDB_DISABLED") != "true": logger.info( "You are instantiating a Trainer but W&B is not installed. To use wandb logging, " "run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface." ) if is_comet_available(): self.setup_comet() elif os.environ.get("COMET_MODE") != "DISABLED": logger.info( "To use comet_ml logging, run `pip/conda install comet_ml` " "see https://www.comet.ml/docs/python-sdk/huggingface/" ) set_seed(self.args.seed) def get_train_tfdataset(self) -> tf.data.Dataset: """ Returns the training :class:`~tf.data.Dataset`. Subclass and override this method if you want to inject some custom behavior. """ if self.train_dataset is None: raise ValueError("Trainer: training requires a train_dataset.") self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy() if self.num_train_examples < 0: raise ValueError("The training dataset must have an asserted cardinality") ds = ( self.train_dataset.repeat() .shuffle(self.num_train_examples, seed=self.args.seed) .batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last) .prefetch(tf.data.experimental.AUTOTUNE) ) return self.args.strategy.experimental_distribute_dataset(ds) def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset: """ Returns the evaluation :class:`~tf.data.Dataset`. Args: eval_dataset (:class:`~tf.data.Dataset`, `optional`): If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling ``model(features, **labels)``. Subclass and override this method if you want to inject some custom behavior. 
""" if eval_dataset is None and self.eval_dataset is None: raise ValueError("Trainer: evaluation requires an eval_dataset.") eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset num_examples = tf.data.experimental.cardinality(eval_dataset).numpy() if num_examples < 0: raise ValueError("The training dataset must have an asserted cardinality") approx = math.floor if self.args.dataloader_drop_last else math.ceil steps = approx(num_examples / self.args.eval_batch_size) ds = ( eval_dataset.repeat() .batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last) .prefetch(tf.data.experimental.AUTOTUNE) ) return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset: """ Returns a test :class:`~tf.data.Dataset`. Args: test_dataset (:class:`~tf.data.Dataset`): The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling ``model(features, **labels)``. Subclass and override this method if you want to inject some custom behavior. """ num_examples = tf.data.experimental.cardinality(test_dataset).numpy() if num_examples < 0: raise ValueError("The training dataset must have an asserted cardinality") approx = math.floor if self.args.dataloader_drop_last else math.ceil steps = approx(num_examples / self.args.eval_batch_size) ds = ( test_dataset.repeat() .batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last) .prefetch(tf.data.experimental.AUTOTUNE) ) return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples def create_optimizer_and_scheduler(self, num_training_steps: int): """ Setup the optimizer and the learning rate scheduler. We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the TFTrainer's init through :obj:`optimizers`, or subclass and override this method. """ if not self.optimizer and not self.lr_scheduler: self.optimizer, self.lr_scheduler = create_optimizer( self.args.learning_rate, num_training_steps, self.args.warmup_steps, adam_beta1=self.args.adam_beta1, adam_beta2=self.args.adam_beta2, adam_epsilon=self.args.adam_epsilon, weight_decay_rate=self.args.weight_decay, power=self.args.poly_power, ) def setup_wandb(self): """ Setup the optional Weights & Biases (`wandb`) integration. One can subclass and override this method to customize the setup if needed. Find more information `here <https://docs.wandb.com/huggingface>`__. 
You can also override the following environment variables: Environment: WANDB_PROJECT: (Optional): str - "huggingface" by default, set this to a custom string to store results in a different project WANDB_DISABLED: (Optional): boolean - defaults to false, set to "true" to disable wandb entirely """ if hasattr(self, "_setup_wandb"): warnings.warn( "The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.", FutureWarning, ) return self._setup_wandb() logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"') combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()} wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name) def setup_comet(self): """ Setup the optional Comet.ml integration. Environment: COMET_MODE: (Optional): str - "OFFLINE", "ONLINE", or "DISABLED" COMET_PROJECT_NAME: (Optional): str - Comet.ml project name for experiments COMET_OFFLINE_DIRECTORY: (Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE" For a number of configurable items in the environment, see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__ """ comet_mode = os.getenv("COMET_MODE", "ONLINE").upper() args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")} experiment = None if comet_mode == "ONLINE": experiment = comet_ml.Experiment(**args) logger.info("Automatic Comet.ml online logging enabled") elif comet_mode == "OFFLINE": args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./") experiment = comet_ml.OfflineExperiment(**args) logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished") if experiment is not None: experiment._set_model_graph(self.model, framework="transformers") experiment._log_parameters(self.args, prefix="args/", framework="transformers") experiment._log_parameters(self.model.config, prefix="config/", framework="transformers") def prediction_loop( self, dataset: tf.data.Dataset, steps: int, num_examples: int, description: str, prediction_loss_only: Optional[bool] = None, ) -> PredictionOutput: """ Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and :func:`~transformers.TFTrainer.predict`. Works both with or without labels. """ if hasattr(self, "_prediction_loop"): warnings.warn( "The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.", FutureWarning, ) return self._prediction_loop( dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only ) prediction_loss_only = ( prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only ) logger.info("***** Running %s *****", description) logger.info(" Num examples = %d", num_examples) logger.info(" Batch size = %d", self.args.eval_batch_size) label_ids: np.ndarray = None preds: np.ndarray = None self.eval_loss = tf.keras.metrics.Sum() # Reset the past mems state at the beginning of the evaluation if necessary. 
if self.args.past_index >= 0: self._past = None for step, batch in enumerate(dataset): logits = self.distributed_prediction_steps(batch) _, labels = batch if not prediction_loss_only: if isinstance(logits, tuple): logits = logits[0] if isinstance(labels, tuple): labels = labels[0] if self.args.n_replicas > 1: for val in logits.values: if preds is None: preds = val.numpy() else: preds = np.append(preds, val.numpy(), axis=0) for val in labels.values: if label_ids is None: label_ids = val.numpy() else: label_ids = np.append(label_ids, val.numpy(), axis=0) else: if preds is None: preds = logits.numpy() else: preds = np.append(preds, logits.numpy(), axis=0) if label_ids is None: label_ids = labels.numpy() else: label_ids = np.append(label_ids, labels.numpy(), axis=0) if step == steps: break if self.compute_metrics is not None and preds is not None and label_ids is not None: metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) else: metrics = {} metrics["eval_loss"] = self.eval_loss.result().numpy() / steps for key in list(metrics.keys()): if not key.startswith("eval_"): metrics[f"eval_{key}"] = metrics.pop(key) if self.args.past_index and hasattr(self, "_past"): # Clean the state at the end of training delattr(self, "_past") return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) def log(self, logs: Dict[str, float]) -> None: """ Log :obj:`logs` on the various objects watching training. Subclass and override this method to inject custom behavior. Args: logs (:obj:`Dict[str, float]`): The values to log. """ if hasattr(self, "_log"): warnings.warn( "The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.", FutureWarning, ) return self._log(logs) logs["epoch"] = self.epoch_logging if self.tb_writer: with self.tb_writer.as_default(): for k, v in logs.items(): tf.summary.scalar(k, v, step=self.global_step) self.tb_writer.flush() if is_wandb_available(): wandb.log(logs, step=self.global_step) if is_comet_available(): experiment = comet_ml.config.get_global_experiment() if experiment is not None: experiment._log_metrics( logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers" ) output = {**logs, **{"step": self.global_step}} logger.info(output) def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]: """ Run evaluation and returns metrics. The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init :obj:`compute_metrics` argument). Args: eval_dataset (:class:`~tf.data.Dataset`, `optional`): Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of ``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling ``model(features, **labels)``. Returns: A dictionary containing the evaluation loss and the potential metrics computed from the predictions. 
""" eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset) output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation") logs = {**output.metrics} logs["epoch"] = self.epoch_logging self.log(logs) return output.metrics def prediction_step( self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor ) -> tf.Tensor: """ Compute the prediction on features and update the loss with labels. Subclass and override to inject some custom behavior. """ per_example_loss, logits = self.run_model(features, labels, False) scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype) self.eval_loss.update_state(scaled_loss) return logits @tf.function def distributed_prediction_steps(self, batch): nb_instances_in_batch = self._compute_nb_instances(batch) inputs = self._get_step_inputs(batch, nb_instances_in_batch) logits = self.args.strategy.run(self.prediction_step, inputs) return logits def train(self) -> None: """ Train method to train the model. """ train_ds = self.get_train_tfdataset() if self.args.debug: tf.summary.trace_on(graph=True, profiler=True) self.gradient_accumulator.reset() num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size # In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because # the dataset is repeated before being batched. # It has the effect only when TPU is used which requires explicit tensor shape in order to make # the gradient accumulation implementation work. approx = math.floor if self.args.dataloader_drop_last else math.ceil num_update_steps_per_epoch = approx(num_update_steps_per_epoch) # At least one update for each epoch. num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) self.steps_per_epoch = num_update_steps_per_epoch if self.args.max_steps > 0: t_total = self.args.max_steps epochs = (self.args.max_steps // self.steps_per_epoch) + int( self.args.max_steps % self.steps_per_epoch > 0 ) else: t_total = self.steps_per_epoch * self.args.num_train_epochs epochs = self.args.num_train_epochs # Since ``self.args.num_train_epochs`` can be `float`, we make ``epochs`` be a `float` always. 
        epochs = float(epochs)

        with self.args.strategy.scope():
            self.create_optimizer_and_scheduler(num_training_steps=t_total)
            folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
            ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
            self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)

            iterations = self.optimizer.iterations
            epochs_trained = 0
            steps_trained_in_current_epoch = 0

            if self.model.ckpt_manager.latest_checkpoint:
                logger.info(
                    "Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
                )
                ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()

                self.global_step = iterations.numpy()
                epochs_trained = self.global_step // self.steps_per_epoch
                steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch

                logger.info(" Continuing training from checkpoint, will skip to saved global_step")
                logger.info(" Continuing training from epoch %d", epochs_trained)
                logger.info(" Continuing training from global step %d", self.global_step)
                logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)

            tf.summary.experimental.set_step(self.global_step)

            with self.tb_writer.as_default():
                tf.summary.text("args", self.args.to_json_string())

            self.tb_writer.flush()

            logger.info("***** Running training *****")
            logger.info(" Num examples = %d", self.num_train_examples)
            # TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ?
            logger.info(" Num Epochs = %d", epochs)
            logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
            logger.info(
                " Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
            )
            logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
            logger.info(" Steps per epoch = %d", self.steps_per_epoch)
            logger.info(" Total optimization steps = %d", t_total)

            self.train_loss = tf.keras.metrics.Sum()
            start_time = datetime.datetime.now()

            for epoch_iter in range(epochs_trained, int(epochs)):
                # Reset the past mems state at the beginning of each epoch if necessary.
                if self.args.past_index >= 0:
                    self._past = None

                for step, batch in enumerate(train_ds):
                    # Skip past any already trained steps if resuming training
                    if steps_trained_in_current_epoch > 0:
                        steps_trained_in_current_epoch -= 1
                        continue

                    self.distributed_training_steps(batch)

                    self.global_step = iterations.numpy()
                    self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch

                    training_loss = self.train_loss.result() / (step + 1)

                    if self.args.debug:
                        logs = {}
                        logs["loss"] = training_loss.numpy()
                        logs["epoch"] = self.epoch_logging

                        self.log(logs)

                    if self.global_step == 1 and self.args.debug:
                        with self.tb_writer.as_default():
                            tf.summary.trace_export(
                                name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
                            )

                    if (
                        self.args.eval_steps > 0
                        and self.args.evaluate_during_training
                        and self.global_step % self.args.eval_steps == 0
                    ):
                        self.evaluate()

                    if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
                        self.global_step == 1 and self.args.logging_first_step
                    ):
                        logs = {}
                        logs["loss"] = training_loss.numpy()
                        logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
                        logs["epoch"] = self.epoch_logging

                        self.log(logs)

                    if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
                        ckpt_save_path = self.model.ckpt_manager.save()

                        logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))

                    if self.args.max_steps > 0 and self.global_step >= t_total:
                        break

                    if self.global_step % self.steps_per_epoch == 0:
                        break

                self.train_loss.reset_states()

                if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
                    break

            end_time = datetime.datetime.now()

            logger.info("Training took: {}".format(str(end_time - start_time)))

        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of training
            delattr(self, "_past")

    def training_step(self, features, labels, nb_instances_in_global_batch):
        """
        Perform a training step on features and labels.

        Subclass and override to inject some custom behavior.
        """
        per_example_loss, _ = self.run_model(features, labels, True)
        scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
        gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
        gradients = [
            g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
        ]

        if self.args.gradient_accumulation_steps > 1:
            self.gradient_accumulator(gradients)

        self.train_loss.update_state(scaled_loss)

        if self.args.gradient_accumulation_steps == 1:
            return gradients

    def apply_gradients(self, features, labels, nb_instances_in_global_batch):
        if self.args.gradient_accumulation_steps == 1:
            gradients = self.training_step(features, labels, nb_instances_in_global_batch)

            self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
        else:
            for _ in tf.range(self.args.gradient_accumulation_steps):
                reduced_features = {
                    k: ft[: self.args.train_batch_size // self.args.n_replicas] for k, ft in features.items()
                }
                reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas]

                self.training_step(reduced_features, reduced_labels, nb_instances_in_global_batch)

                features = {
                    k: tf.concat(
                        [ft[self.args.train_batch_size // self.args.n_replicas :], reduced_features[k]],
                        axis=0,
                    )
                    for k, ft in features.items()
                }

                labels = tf.concat(
                    [labels[self.args.train_batch_size // self.args.n_replicas :], reduced_labels], axis=0
                )

            gradients = self.gradient_accumulator.gradients
            gradients = [
                (tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients
            ]

            self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
            self.gradient_accumulator.reset()

    @tf.function
    def distributed_training_steps(self, batch):
        with self.args.strategy.scope():
            nb_instances_in_batch = self._compute_nb_instances(batch)
            inputs = self._get_step_inputs(batch, nb_instances_in_batch)

            self.args.strategy.run(self.apply_gradients, inputs)

    @staticmethod
    def _compute_nb_instances(batch):
        labels = batch[-1]
        if isinstance(labels, PerReplica):
            labels = tf.concat(labels.values, axis=0)

        nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32))

        return nb_instances

    @staticmethod
    def _get_step_inputs(batch, nb_instances):
        features, labels = batch

        if isinstance(labels, PerReplica):
            # need to make a `PerReplica` objects for ``nb_instances``
            nb_instances = PerReplica([nb_instances] * len(labels.values))

        step_inputs = (features, labels, nb_instances)

        return step_inputs

    def run_model(self, features, labels, training):
        """
        Computes the loss of the given features and labels pair.

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            features (:obj:`tf.Tensor`): A batch of input features.
            labels (:obj:`tf.Tensor`): A batch of labels.
            training (:obj:`bool`): Whether or not to run the model in training mode.

        Returns:
            A tuple of two :obj:`tf.Tensor`: The loss and logits.
        """
        if hasattr(self, "_run_model"):
            warnings.warn(
                "The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.",
                FutureWarning,
            )
            return self._run_model(features, labels, training)

        if self.args.past_index >= 0 and getattr(self, "_past", None) is not None:
            features["mems"] = self._past

        if isinstance(labels, (dict)):
            outputs = self.model(features, training=training, **labels)[:2]
        else:
            outputs = self.model(features, labels=labels, training=training)[:2]

        loss, logits = outputs[:2]

        if self.args.past_index >= 0:
            self._past = outputs[self.args.past_index]

        return loss, logits

    def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
        """
        Run prediction and returns predictions and potential metrics.

        Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
        will also return metrics, like in :obj:`evaluate()`.

        Args:
            test_dataset (:class:`~tf.data.Dataset`):
                Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
                ``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the
                loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a
                dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
                calculated by calling ``model(features, **labels)``.

        Returns:
            `NamedTuple`:
            predictions (:obj:`np.ndarray`):
                The predictions on :obj:`test_dataset`.
            label_ids (:obj:`np.ndarray`, `optional`):
                The labels (if the dataset contained some).
            metrics (:obj:`Dict[str, float]`, `optional`):
                The potential dictionary of metrics (if the dataset contained labels).
        """
        test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)

        return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")

    def save_model(self, output_dir: Optional[str] = None):
        """
        Will save the model, so you can reload it using :obj:`from_pretrained()`.
        """
        output_dir = output_dir if output_dir is not None else self.args.output_dir

        logger.info("Saving model in {}".format(output_dir))

        if not isinstance(self.model, TFPreTrainedModel):
            raise ValueError("Trainer.model appears to not be a PreTrainedModel")

        self.model.save_pretrained(output_dir)
34,714
42.887484
169
py
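For completeness, here is a hedged usage sketch of the predict and save_model contracts documented in the trainer file above. It assumes the surrounding class is the library's TFTrainer, uses a public checkpoint name purely as an example, and writes to a placeholder output directory; treat it as a sketch under those assumptions rather than a reference invocation.

import tensorflow as tf
from transformers import (
    AutoTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")

# ``predict`` expects a tf.data.Dataset yielding (features_dict, labels) pairs.
texts = ["a harmless example", "another harmless example"]
labels = [0, 1]
encodings = tokenizer(texts, padding=True, truncation=True, return_tensors="tf")
test_ds = tf.data.Dataset.from_tensor_slices((dict(encodings), labels))

trainer = TFTrainer(model=model, args=TFTrainingArguments(output_dir="./out"))
output = trainer.predict(test_ds)
print(output.predictions.shape, output.metrics)

trainer.save_model("./out")  # reloadable later with from_pretrained("./out")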
SLT-FAI
SLT-FAI-main/transformers/configuration_longformer.py
# coding=utf-8
# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Longformer configuration """

from typing import List, Union

from .configuration_roberta import RobertaConfig
from .utils import logging


logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://s3.amazonaws.com/models.huggingface.co/bert/allenai/longformer-base-4096/config.json",
    "allenai/longformer-large-4096": "https://s3.amazonaws.com/models.huggingface.co/bert/allenai/longformer-large-4096/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": "https://s3.amazonaws.com/models.huggingface.co/bert/allenai/longformer-large-4096-finetuned-triviaqa/config.json",
    "allenai/longformer-base-4096-extra.pos.embd.only": "https://s3.amazonaws.com/models.huggingface.co/bert/allenai/longformer-base-4096-extra.pos.embd.only/config.json",
    "allenai/longformer-large-4096-extra.pos.embd.only": "https://s3.amazonaws.com/models.huggingface.co/bert/allenai/longformer-large-4096-extra.pos.embd.only/config.json",
}


class LongformerConfig(RobertaConfig):
    r"""
    This is the configuration class to store the configuration of a :class:`~transformers.LongformerModel` or a
    :class:`~transformers.TFLongformerModel`. It is used to instantiate a Longformer model according to the specified
    arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar
    configuration to that of the RoBERTa `roberta-base <https://huggingface.co/roberta-base>`__ architecture with a
    sequence length 4,096.

    The :class:`~transformers.LongformerConfig` class directly inherits :class:`~transformers.RobertaConfig`. It
    reuses the same defaults. Please check the parent class for more information.

    Args:
        attention_window (:obj:`int` or :obj:`List[int]`, `optional`, defaults to 512):
            Size of an attention window around each token. If an :obj:`int`, use the same size for all layers. To
            specify a different window size for each layer, use a :obj:`List[int]` where
            ``len(attention_window) == num_hidden_layers``.

    Example::

        >>> from transformers import LongformerConfig, LongformerModel

        >>> # Initializing a Longformer configuration
        >>> configuration = LongformerConfig()

        >>> # Initializing a model from the configuration
        >>> model = LongformerModel(configuration)

        >>> # Accessing the model configuration
        >>> configuration = model.config
    """

    model_type = "longformer"

    def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, **kwargs):
        super().__init__(sep_token_id=sep_token_id, **kwargs)
        self.attention_window = attention_window
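One detail the docstring example above does not show is the per-layer form of attention_window. The short sketch below illustrates it; the window sizes and layer count are arbitrary assumptions, not recommended values.

from transformers import LongformerConfig, LongformerModel

# One (even) window size per hidden layer; the list length must equal num_hidden_layers.
config = LongformerConfig(attention_window=[32] * 6 + [64] * 6, num_hidden_layers=12)
assert len(config.attention_window) == config.num_hidden_layers

model = LongformerModel(config)  # randomly initialized, for illustration only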
3,626
49.375
173
py
SLT-FAI
SLT-FAI-main/transformers/modeling_auto.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Auto Model class. """ import warnings from collections import OrderedDict from .configuration_auto import ( AlbertConfig, AutoConfig, BartConfig, BertConfig, BertGenerationConfig, BlenderbotConfig, CamembertConfig, CTRLConfig, DebertaConfig, DistilBertConfig, DPRConfig, ElectraConfig, EncoderDecoderConfig, FlaubertConfig, FSMTConfig, FunnelConfig, GPT2Config, LayoutLMConfig, LongformerConfig, LxmertConfig, MBartConfig, MobileBertConfig, OpenAIGPTConfig, PegasusConfig, ProphetNetConfig, ReformerConfig, RetriBertConfig, RobertaConfig, SqueezeBertConfig, T5Config, TransfoXLConfig, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLNetConfig, replace_list_option_in_docstrings, ) from .configuration_marian import MarianConfig from .configuration_utils import PretrainedConfig from .file_utils import add_start_docstrings from .modeling_albert import ( AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from .modeling_bart import ( BartForConditionalGeneration, BartForQuestionAnswering, BartForSequenceClassification, BartModel, ) from .modeling_bert import ( BertForMaskedLM, BertForMultipleChoice, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLMHeadModel, BertModel, ) from .modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder from .modeling_blenderbot import BlenderbotForConditionalGeneration from .modeling_camembert import ( CamembertForCausalLM, CamembertForMaskedLM, CamembertForMultipleChoice, CamembertForQuestionAnswering, CamembertForSequenceClassification, CamembertForTokenClassification, CamembertModel, ) from .modeling_ctrl import CTRLLMHeadModel, CTRLModel from .modeling_deberta import DebertaForSequenceClassification, DebertaModel from .modeling_distilbert import ( DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) from .modeling_dpr import DPRQuestionEncoder from .modeling_electra import ( ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ) from .modeling_encoder_decoder import EncoderDecoderModel from .modeling_flaubert import ( FlaubertForMultipleChoice, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel from .modeling_funnel import ( FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, ) from .modeling_gpt2 import GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model from 
.modeling_layoutlm import LayoutLMForMaskedLM, LayoutLMForTokenClassification, LayoutLMModel from .modeling_longformer import ( LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, ) from .modeling_lxmert import LxmertForPreTraining, LxmertModel from .modeling_marian import MarianMTModel from .modeling_mbart import MBartForConditionalGeneration from .modeling_mobilebert import ( MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) from .modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel from .modeling_pegasus import PegasusForConditionalGeneration from .modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel from .modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function RagModel, RagSequenceForGeneration, RagTokenForGeneration, ) from .modeling_reformer import ( ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerModel, ReformerModelWithLMHead, ) from .modeling_retribert import RetriBertModel from .modeling_roberta import ( RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, ) from .modeling_squeezebert import ( SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) from .modeling_t5 import T5ForConditionalGeneration, T5Model from .modeling_transfo_xl import TransfoXLLMHeadModel, TransfoXLModel from .modeling_xlm import ( XLMForMultipleChoice, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from .modeling_xlm_prophetnet import ( XLMProphetNetForCausalLM, XLMProphetNetForConditionalGeneration, XLMProphetNetModel, ) from .modeling_xlm_roberta import ( XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, ) from .modeling_xlnet import ( XLNetForMultipleChoice, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, ) from .utils import logging logger = logging.get_logger(__name__) MODEL_MAPPING = OrderedDict( [ (RetriBertConfig, RetriBertModel), (T5Config, T5Model), (DistilBertConfig, DistilBertModel), (AlbertConfig, AlbertModel), (CamembertConfig, CamembertModel), (XLMRobertaConfig, XLMRobertaModel), (BartConfig, BartModel), (LongformerConfig, LongformerModel), (RobertaConfig, RobertaModel), (LayoutLMConfig, LayoutLMModel), (SqueezeBertConfig, SqueezeBertModel), (BertConfig, BertModel), (OpenAIGPTConfig, OpenAIGPTModel), (GPT2Config, GPT2Model), (MobileBertConfig, MobileBertModel), (TransfoXLConfig, TransfoXLModel), (XLNetConfig, XLNetModel), (FlaubertConfig, FlaubertModel), (FSMTConfig, FSMTModel), (XLMConfig, XLMModel), (CTRLConfig, CTRLModel), (ElectraConfig, ElectraModel), (ReformerConfig, ReformerModel), (FunnelConfig, FunnelModel), (LxmertConfig, LxmertModel), (BertGenerationConfig, BertGenerationEncoder), (DebertaConfig, DebertaModel), (DPRConfig, DPRQuestionEncoder), 
(XLMProphetNetConfig, XLMProphetNetModel), (ProphetNetConfig, ProphetNetModel), ] ) MODEL_FOR_PRETRAINING_MAPPING = OrderedDict( [ (LayoutLMConfig, LayoutLMForMaskedLM), (RetriBertConfig, RetriBertModel), (T5Config, T5ForConditionalGeneration), (DistilBertConfig, DistilBertForMaskedLM), (AlbertConfig, AlbertForPreTraining), (CamembertConfig, CamembertForMaskedLM), (XLMRobertaConfig, XLMRobertaForMaskedLM), (BartConfig, BartForConditionalGeneration), (FSMTConfig, FSMTForConditionalGeneration), (LongformerConfig, LongformerForMaskedLM), (RobertaConfig, RobertaForMaskedLM), (SqueezeBertConfig, SqueezeBertForMaskedLM), (BertConfig, BertForPreTraining), (OpenAIGPTConfig, OpenAIGPTLMHeadModel), (GPT2Config, GPT2LMHeadModel), (MobileBertConfig, MobileBertForPreTraining), (TransfoXLConfig, TransfoXLLMHeadModel), (XLNetConfig, XLNetLMHeadModel), (FlaubertConfig, FlaubertWithLMHeadModel), (XLMConfig, XLMWithLMHeadModel), (CTRLConfig, CTRLLMHeadModel), (ElectraConfig, ElectraForPreTraining), (LxmertConfig, LxmertForPreTraining), ] ) MODEL_WITH_LM_HEAD_MAPPING = OrderedDict( [ (LayoutLMConfig, LayoutLMForMaskedLM), (T5Config, T5ForConditionalGeneration), (DistilBertConfig, DistilBertForMaskedLM), (AlbertConfig, AlbertForMaskedLM), (CamembertConfig, CamembertForMaskedLM), (XLMRobertaConfig, XLMRobertaForMaskedLM), (MarianConfig, MarianMTModel), (FSMTConfig, FSMTForConditionalGeneration), (BartConfig, BartForConditionalGeneration), (LongformerConfig, LongformerForMaskedLM), (RobertaConfig, RobertaForMaskedLM), (SqueezeBertConfig, SqueezeBertForMaskedLM), (BertConfig, BertForMaskedLM), (OpenAIGPTConfig, OpenAIGPTLMHeadModel), (GPT2Config, GPT2LMHeadModel), (MobileBertConfig, MobileBertForMaskedLM), (TransfoXLConfig, TransfoXLLMHeadModel), (XLNetConfig, XLNetLMHeadModel), (FlaubertConfig, FlaubertWithLMHeadModel), (XLMConfig, XLMWithLMHeadModel), (CTRLConfig, CTRLLMHeadModel), (ElectraConfig, ElectraForMaskedLM), (EncoderDecoderConfig, EncoderDecoderModel), (ReformerConfig, ReformerModelWithLMHead), (FunnelConfig, FunnelForMaskedLM), ] ) MODEL_FOR_CAUSAL_LM_MAPPING = OrderedDict( [ (CamembertConfig, CamembertForCausalLM), (XLMRobertaConfig, XLMRobertaForCausalLM), (RobertaConfig, RobertaForCausalLM), (BertConfig, BertLMHeadModel), (OpenAIGPTConfig, OpenAIGPTLMHeadModel), (GPT2Config, GPT2LMHeadModel), (TransfoXLConfig, TransfoXLLMHeadModel), (XLNetConfig, XLNetLMHeadModel), ( XLMConfig, XLMWithLMHeadModel, ), # XLM can be MLM and CLM => model should be split similar to BERT; leave here for now (CTRLConfig, CTRLLMHeadModel), (ReformerConfig, ReformerModelWithLMHead), (BertGenerationConfig, BertGenerationDecoder), (ProphetNetConfig, XLMProphetNetForCausalLM), (ProphetNetConfig, ProphetNetForCausalLM), ] ) MODEL_FOR_MASKED_LM_MAPPING = OrderedDict( [ (LayoutLMConfig, LayoutLMForMaskedLM), (DistilBertConfig, DistilBertForMaskedLM), (AlbertConfig, AlbertForMaskedLM), (BartConfig, BartForConditionalGeneration), (CamembertConfig, CamembertForMaskedLM), (XLMRobertaConfig, XLMRobertaForMaskedLM), (LongformerConfig, LongformerForMaskedLM), (RobertaConfig, RobertaForMaskedLM), (SqueezeBertConfig, SqueezeBertForMaskedLM), (BertConfig, BertForMaskedLM), (MobileBertConfig, MobileBertForMaskedLM), (FlaubertConfig, FlaubertWithLMHeadModel), (XLMConfig, XLMWithLMHeadModel), (ElectraConfig, ElectraForMaskedLM), (ReformerConfig, ReformerForMaskedLM), (FunnelConfig, FunnelForMaskedLM), ] ) MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = OrderedDict( [ (T5Config, T5ForConditionalGeneration), (PegasusConfig, 
PegasusForConditionalGeneration), (MarianConfig, MarianMTModel), (MBartConfig, MBartForConditionalGeneration), (BlenderbotConfig, BlenderbotForConditionalGeneration), (BartConfig, BartForConditionalGeneration), (FSMTConfig, FSMTForConditionalGeneration), (EncoderDecoderConfig, EncoderDecoderModel), (XLMProphetNetConfig, XLMProphetNetForConditionalGeneration), (ProphetNetConfig, ProphetNetForConditionalGeneration), ] ) MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = OrderedDict( [ (DistilBertConfig, DistilBertForSequenceClassification), (AlbertConfig, AlbertForSequenceClassification), (CamembertConfig, CamembertForSequenceClassification), (XLMRobertaConfig, XLMRobertaForSequenceClassification), (BartConfig, BartForSequenceClassification), (LongformerConfig, LongformerForSequenceClassification), (RobertaConfig, RobertaForSequenceClassification), (SqueezeBertConfig, SqueezeBertForSequenceClassification), (BertConfig, BertForSequenceClassification), (XLNetConfig, XLNetForSequenceClassification), (MobileBertConfig, MobileBertForSequenceClassification), (FlaubertConfig, FlaubertForSequenceClassification), (XLMConfig, XLMForSequenceClassification), (ElectraConfig, ElectraForSequenceClassification), (FunnelConfig, FunnelForSequenceClassification), (DebertaConfig, DebertaForSequenceClassification), (GPT2Config, GPT2ForSequenceClassification), (OpenAIGPTConfig, OpenAIGPTForSequenceClassification), ] ) MODEL_FOR_QUESTION_ANSWERING_MAPPING = OrderedDict( [ (DistilBertConfig, DistilBertForQuestionAnswering), (AlbertConfig, AlbertForQuestionAnswering), (CamembertConfig, CamembertForQuestionAnswering), (BartConfig, BartForQuestionAnswering), (LongformerConfig, LongformerForQuestionAnswering), (XLMRobertaConfig, XLMRobertaForQuestionAnswering), (RobertaConfig, RobertaForQuestionAnswering), (SqueezeBertConfig, SqueezeBertForQuestionAnswering), (BertConfig, BertForQuestionAnswering), (XLNetConfig, XLNetForQuestionAnsweringSimple), (FlaubertConfig, FlaubertForQuestionAnsweringSimple), (MobileBertConfig, MobileBertForQuestionAnswering), (XLMConfig, XLMForQuestionAnsweringSimple), (ElectraConfig, ElectraForQuestionAnswering), (ReformerConfig, ReformerForQuestionAnswering), (FunnelConfig, FunnelForQuestionAnswering), ] ) MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = OrderedDict( [ (LayoutLMConfig, LayoutLMForTokenClassification), (DistilBertConfig, DistilBertForTokenClassification), (CamembertConfig, CamembertForTokenClassification), (FlaubertConfig, FlaubertForTokenClassification), (XLMConfig, XLMForTokenClassification), (XLMRobertaConfig, XLMRobertaForTokenClassification), (LongformerConfig, LongformerForTokenClassification), (RobertaConfig, RobertaForTokenClassification), (SqueezeBertConfig, SqueezeBertForTokenClassification), (BertConfig, BertForTokenClassification), (MobileBertConfig, MobileBertForTokenClassification), (XLNetConfig, XLNetForTokenClassification), (AlbertConfig, AlbertForTokenClassification), (ElectraConfig, ElectraForTokenClassification), (FlaubertConfig, FlaubertForTokenClassification), (FunnelConfig, FunnelForTokenClassification), ] ) MODEL_FOR_MULTIPLE_CHOICE_MAPPING = OrderedDict( [ (CamembertConfig, CamembertForMultipleChoice), (ElectraConfig, ElectraForMultipleChoice), (XLMRobertaConfig, XLMRobertaForMultipleChoice), (LongformerConfig, LongformerForMultipleChoice), (RobertaConfig, RobertaForMultipleChoice), (SqueezeBertConfig, SqueezeBertForMultipleChoice), (BertConfig, BertForMultipleChoice), (DistilBertConfig, DistilBertForMultipleChoice), (MobileBertConfig, MobileBertForMultipleChoice), 
(XLNetConfig, XLNetForMultipleChoice), (AlbertConfig, AlbertForMultipleChoice), (XLMConfig, XLMForMultipleChoice), (FlaubertConfig, FlaubertForMultipleChoice), (FunnelConfig, FunnelForMultipleChoice), ] ) AUTO_MODEL_PRETRAINED_DOCSTRING = r""" The model class to instantiate is selected based on the :obj:`model_type` property of the config object (either passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's missing, by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`: List options The model is set in evaluation mode by default using ``model.eval()`` (so for instance, dropout modules are deactivated). To train the model, you should first set it back in training mode with ``model.train()`` Args: pretrained_model_name_or_path: Can be either: - A string with the `shortcut name` of a pretrained model to load from cache or download, e.g., ``bert-base-uncased``. - A string with the `identifier name` of a pretrained model that was user-uploaded to our S3, e.g., ``dbmdz/bert-base-german-cased``. - A path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. model_args (additional positional arguments, `optional`): Will be passed along to the underlying model ``__init__()`` method. config (:class:`~transformers.PretrainedConfig`, `optional`): Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the `shortcut name` string of a pretrained model). - The model was saved using :meth:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - The model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. state_dict (`Dict[str, torch.Tensor]`, `optional`): A state dictionary to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. cache_dir (:obj:`str`, `optional`): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`): Load the model weights from a TensorFlow checkpoint save file (see docstring of ``pretrained_model_name_or_path`` argument). force_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. 
proxies (:obj:`Dict[str, str], `optional`): A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`): Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to only look at local files (e.g., not try doanloading the model). use_cdn(:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to use Cloudfront (a Content Delivery Network, or CDN) when searching for the model on our S3 (faster). Should be set to :obj:`False` for checkpoints larger than 20GB. kwargs (additional keyword arguments, `optional`): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., :obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. """ class AutoModel: r""" This is a generic model class that will be instantiated as one of the base model classes of the library when created with the :meth:`~transformers.AutoModel.from_pretrained` class method or the :meth:`~transformers.AutoModel.from_config` class methods. This class cannot be instantiated directly using ``__init__()`` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoModel is designed to be instantiated " "using the `AutoModel.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModel.from_config(config)` methods." ) @classmethod @replace_list_option_in_docstrings(MODEL_MAPPING, use_model_types=False) def from_config(cls, config): r""" Instantiates one of the base model classes of the library from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. Use :meth:`~transformers.AutoModel.from_pretrained` to load the model weights. Args: config (:class:`~transformers.PretrainedConfig`): The model class to instantiate is selected based on the configuration class: List options Examples:: >>> from transformers import AutoConfig, AutoModel >>> # Download configuration from S3 and cache. 
>>> config = AutoConfig.from_pretrained('bert-base-uncased') >>> model = AutoModel.from_config(config) """ if type(config) in MODEL_MAPPING.keys(): return MODEL_MAPPING[type(config)](config) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_MAPPING.keys()) ) ) @classmethod @replace_list_option_in_docstrings(MODEL_MAPPING) @add_start_docstrings( "Instantiate one of the base model classes of the library from a pretrained model.", AUTO_MODEL_PRETRAINED_DOCSTRING, ) def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Examples:: >>> from transformers import AutoConfig, AutoModel >>> # Download model and configuration from S3 and cache. >>> model = AutoModel.from_pretrained('bert-base-uncased') >>> # Update configuration during loading >>> model = AutoModel.from_pretrained('bert-base-uncased', output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop("config", None) if not isinstance(config, PretrainedConfig): config, kwargs = AutoConfig.from_pretrained( pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs ) if type(config) in MODEL_MAPPING.keys(): return MODEL_MAPPING[type(config)].from_pretrained( pretrained_model_name_or_path, *model_args, config=config, **kwargs ) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_MAPPING.keys()) ) ) class AutoModelForPreTraining: r""" This is a generic model class that will be instantiated as one of the model classes of the library---with the architecture used for pretraining this model---when created with the when created with the :meth:`~transformers.AutoModelForPreTraining.from_pretrained` class method or the :meth:`~transformers.AutoModelForPreTraining.from_config` class method. This class cannot be instantiated directly using ``__init__()`` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoModelForPreTraining is designed to be instantiated " "using the `AutoModelForPreTraining.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModelForPreTraining.from_config(config)` methods." ) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_PRETRAINING_MAPPING, use_model_types=False) def from_config(cls, config): r""" Instantiates one of the model classes of the library---with the architecture used for pretraining this model---from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. Use :meth:`~transformers.AutoModelForPreTraining.from_pretrained` to load the model weights. Args: config (:class:`~transformers.PretrainedConfig`): The model class to instantiate is selected based on the configuration class: List options Examples:: >>> from transformers import AutoConfig, AutoModelForPreTraining >>> # Download configuration from S3 and cache. 
>>> config = AutoConfig.from_pretrained('bert-base-uncased') >>> model = AutoModelForPreTraining.from_config(config) """ if type(config) in MODEL_FOR_PRETRAINING_MAPPING.keys(): return MODEL_FOR_PRETRAINING_MAPPING[type(config)](config) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_PRETRAINING_MAPPING.keys()) ) ) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_PRETRAINING_MAPPING) @add_start_docstrings( "Instantiate one of the model classes of the library---with the architecture used for pretraining this ", "model---from a pretrained model.", AUTO_MODEL_PRETRAINED_DOCSTRING, ) def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Examples:: >>> from transformers import AutoConfig, AutoModelForPreTraining >>> # Download model and configuration from S3 and cache. >>> model = AutoModelForPreTraining.from_pretrained('bert-base-uncased') >>> # Update configuration during loading >>> model = AutoModelForPreTraining.from_pretrained('bert-base-uncased', output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> model = AutoModelForPreTraining.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop("config", None) if not isinstance(config, PretrainedConfig): config, kwargs = AutoConfig.from_pretrained( pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs ) if type(config) in MODEL_FOR_PRETRAINING_MAPPING.keys(): return MODEL_FOR_PRETRAINING_MAPPING[type(config)].from_pretrained( pretrained_model_name_or_path, *model_args, config=config, **kwargs ) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_PRETRAINING_MAPPING.keys()) ) ) class AutoModelWithLMHead: r""" This is a generic model class that will be instantiated as one of the model classes of the library---with a language modeling head---when created with the when created with the :meth:`~transformers.AutoModelWithLMHead.from_pretrained` class method or the :meth:`~transformers.AutoModelWithLMHead.from_config` class method. This class cannot be instantiated directly using ``__init__()`` (throws an error). .. warning:: This class is deprecated and will be removed in a future version. Please use :class:`~transformers.AutoModelForCausalLM` for causal language models, :class:`~transformers.AutoModelForMaskedLM` for masked language models and :class:`~transformers.AutoModelForSeq2SeqLM` for encoder-decoder models. """ def __init__(self): raise EnvironmentError( "AutoModelWithLMHead is designed to be instantiated " "using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModelWithLMHead.from_config(config)` methods." ) @classmethod @replace_list_option_in_docstrings(MODEL_WITH_LM_HEAD_MAPPING, use_model_types=False) def from_config(cls, config): r""" Instantiates one of the model classes of the library---with a language modeling head---from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. 
Use :meth:`~transformers.AutoModelWithLMHead.from_pretrained` to load the model weights. Args: config (:class:`~transformers.PretrainedConfig`): The model class to instantiate is selected based on the configuration class: List options Examples:: >>> from transformers import AutoConfig, AutoModelWithLMHead >>> # Download configuration from S3 and cache. >>> config = AutoConfig.from_pretrained('bert-base-uncased') >>> model = AutoModelWithLMHead.from_config(config) """ warnings.warn( "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use " "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and " "`AutoModelForSeq2SeqLM` for encoder-decoder models.", FutureWarning, ) if type(config) in MODEL_WITH_LM_HEAD_MAPPING.keys(): return MODEL_WITH_LM_HEAD_MAPPING[type(config)](config) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys()) ) ) @classmethod @replace_list_option_in_docstrings(MODEL_WITH_LM_HEAD_MAPPING) @add_start_docstrings( "Instantiate one of the model classes of the library---with a language modeling head---from a pretrained ", "model.", AUTO_MODEL_PRETRAINED_DOCSTRING, ) def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Examples:: >>> from transformers import AutoConfig, AutoModelWithLMHead >>> # Download model and configuration from S3 and cache. >>> model = AutoModelWithLMHead.from_pretrained('bert-base-uncased') >>> # Update configuration during loading >>> model = AutoModelWithLMHead.from_pretrained('bert-base-uncased', output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> model = AutoModelWithLMHead.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ warnings.warn( "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use " "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and " "`AutoModelForSeq2SeqLM` for encoder-decoder models.", FutureWarning, ) config = kwargs.pop("config", None) if not isinstance(config, PretrainedConfig): config, kwargs = AutoConfig.from_pretrained( pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs ) if type(config) in MODEL_WITH_LM_HEAD_MAPPING.keys(): return MODEL_WITH_LM_HEAD_MAPPING[type(config)].from_pretrained( pretrained_model_name_or_path, *model_args, config=config, **kwargs ) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys()) ) ) class AutoModelForCausalLM: r""" This is a generic model class that will be instantiated as one of the model classes of the library---with a causal language modeling head---when created with the when created with the :meth:`~transformers.AutoModelForCausalLM.from_pretrained` class method or the :meth:`~transformers.AutoModelForCausalLM.from_config` class method. This class cannot be instantiated directly using ``__init__()`` (throws an error). 
""" def __init__(self): raise EnvironmentError( "AutoModelForCausalLM is designed to be instantiated " "using the `AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModelForCausalLM.from_config(config)` methods." ) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_CAUSAL_LM_MAPPING, use_model_types=False) def from_config(cls, config): r""" Instantiates one of the model classes of the library---with a causal language modeling head---from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. Use :meth:`~transformers.AutoModelForCausalLM.from_pretrained` to load the model weights. Args: config (:class:`~transformers.PretrainedConfig`): The model class to instantiate is selected based on the configuration class: List options Examples:: >>> from transformers import AutoConfig, AutoModelForCausalLM >>> # Download configuration from S3 and cache. >>> config = AutoConfig.from_pretrained('gpt2') >>> model = AutoModelForCausalLM.from_config(config) """ if type(config) in MODEL_FOR_CAUSAL_LM_MAPPING.keys(): return MODEL_FOR_CAUSAL_LM_MAPPING[type(config)](config) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_CAUSAL_LM_MAPPING.keys()) ) ) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_CAUSAL_LM_MAPPING) @add_start_docstrings( "Instantiate one of the model classes of the library---with a causal language modeling head---from a " "pretrained model.", AUTO_MODEL_PRETRAINED_DOCSTRING, ) def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Examples:: >>> from transformers import AutoConfig, AutoModelForCausalLM >>> # Download model and configuration from S3 and cache. >>> model = AutoModelForCausalLM.from_pretrained('gpt2') >>> # Update configuration during loading >>> model = AutoModelForCausalLM.from_pretrained('gpt2', output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> config = AutoConfig.from_json_file('./tf_model/gpt2_tf_model_config.json') >>> model = AutoModelForCausalLM.from_pretrained('./tf_model/gpt2_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop("config", None) if not isinstance(config, PretrainedConfig): config, kwargs = AutoConfig.from_pretrained( pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs ) if type(config) in MODEL_FOR_CAUSAL_LM_MAPPING.keys(): return MODEL_FOR_CAUSAL_LM_MAPPING[type(config)].from_pretrained( pretrained_model_name_or_path, *model_args, config=config, **kwargs ) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_CAUSAL_LM_MAPPING.keys()) ) ) class AutoModelForMaskedLM: r""" This is a generic model class that will be instantiated as one of the model classes of the library---with a masked language modeling head---when created with the when created with the :meth:`~transformers.AutoModelForMaskedLM.from_pretrained` class method or the :meth:`~transformers.AutoModelForMasedLM.from_config` class method. This class cannot be instantiated directly using ``__init__()`` (throws an error). 
""" def __init__(self): raise EnvironmentError( "AutoModelForMaskedLM is designed to be instantiated " "using the `AutoModelForMaskedLM.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModelForMaskedLM.from_config(config)` methods." ) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_MASKED_LM_MAPPING, use_model_types=False) def from_config(cls, config): r""" Instantiates one of the model classes of the library---with a masked language modeling head---from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. Use :meth:`~transformers.AutoModelForMaskedLM.from_pretrained` to load the model weights. Args: config (:class:`~transformers.PretrainedConfig`): The model class to instantiate is selected based on the configuration class: List options Examples:: >>> from transformers import AutoConfig, AutoModelForMaskedLM >>> # Download configuration from S3 and cache. >>> config = AutoConfig.from_pretrained('bert-base-uncased') >>> model = AutoModelForMaskedLM.from_config(config) """ if type(config) in MODEL_FOR_MASKED_LM_MAPPING.keys(): return MODEL_FOR_MASKED_LM_MAPPING[type(config)](config) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_MASKED_LM_MAPPING.keys()) ) ) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_MASKED_LM_MAPPING) @add_start_docstrings( "Instantiate one of the model classes of the library---with a masked language modeling head---from a " "pretrained model.", AUTO_MODEL_PRETRAINED_DOCSTRING, ) def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Examples:: >>> from transformers import AutoConfig, AutoModelForMaskedLM >>> # Download model and configuration from S3 and cache. >>> model = AutoModelForMaskedLM.from_pretrained('bert-base-uncased') >>> # Update configuration during loading >>> model = AutoModelForMaskedLM.from_pretrained('bert-base-uncased', output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> model = AutoModelForMaskedLM.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop("config", None) if not isinstance(config, PretrainedConfig): config, kwargs = AutoConfig.from_pretrained( pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs ) if type(config) in MODEL_FOR_MASKED_LM_MAPPING.keys(): return MODEL_FOR_MASKED_LM_MAPPING[type(config)].from_pretrained( pretrained_model_name_or_path, *model_args, config=config, **kwargs ) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_MASKED_LM_MAPPING.keys()) ) ) class AutoModelForSeq2SeqLM: r""" This is a generic model class that will be instantiated as one of the model classes of the library---with a sequence-to-sequence language modeling head---when created with the when created with the :meth:`~transformers.AutoModelForSeq2SeqLM.from_pretrained` class method or the :meth:`~transformers.AutoModelForSeq2SeqLM.from_config` class method. This class cannot be instantiated directly using ``__init__()`` (throws an error). 
""" def __init__(self): raise EnvironmentError( "AutoModelForSeq2SeqLM is designed to be instantiated " "using the `AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModelForSeq2SeqLM.from_config(config)` methods." ) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, use_model_types=False) def from_config(cls, config): r""" Instantiates one of the model classes of the library---with a sequence-to-sequence language modeling head---from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. Use :meth:`~transformers.AutoModelForSeq2SeqLM.from_pretrained` to load the model weights. Args: config (:class:`~transformers.PretrainedConfig`): The model class to instantiate is selected based on the configuration class: List options Examples:: >>> from transformers import AutoConfig, AutoModelForSeq2SeqLM >>> # Download configuration from S3 and cache. >>> config = AutoConfig.from_pretrained('t5') >>> model = AutoModelForSeq2SeqLM.from_config(config) """ if type(config) in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys(): return MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING[type(config)](config) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()), ) ) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING) @add_start_docstrings( "Instantiate one of the model classes of the library---with a sequence-to-sequence language modeling " "head---from a pretrained model.", AUTO_MODEL_PRETRAINED_DOCSTRING, ) def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Examples:: >>> from transformers import AutoConfig, AutoModelForSeq2SeqLM >>> # Download model and configuration from S3 and cache. 
>>> model = AutoModelForSeq2SeqLM.from_pretrained('t5-base') >>> # Update configuration during loading >>> model = AutoModelForSeq2SeqLM.from_pretrained('t5-base', output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> config = AutoConfig.from_json_file('./tf_model/t5_tf_model_config.json') >>> model = AutoModelForSeq2SeqLM.from_pretrained('./tf_model/t5_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop("config", None) if not isinstance(config, PretrainedConfig): config, kwargs = AutoConfig.from_pretrained( pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs ) if type(config) in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys(): return MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING[type(config)].from_pretrained( pretrained_model_name_or_path, *model_args, config=config, **kwargs ) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()), ) ) class AutoModelForSequenceClassification: r""" This is a generic model class that will be instantiated as one of the model classes of the library---with a sequence classification head---when created with the when created with the :meth:`~transformers.AutoModelForSequenceClassification.from_pretrained` class method or the :meth:`~transformers.AutoModelForSequenceClassification.from_config` class method. This class cannot be instantiated directly using ``__init__()`` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoModelForSequenceClassification is designed to be instantiated " "using the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModelForSequenceClassification.from_config(config)` methods." ) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, use_model_types=False) def from_config(cls, config): r""" Instantiates one of the model classes of the library---with a sequence classification head---from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. Use :meth:`~transformers.AutoModelForSequenceClassification.from_pretrained` to load the model weights. Args: config (:class:`~transformers.PretrainedConfig`): The model class to instantiate is selected based on the configuration class: List options Examples:: >>> from transformers import AutoConfig, AutoModelForSequenceClassification >>> # Download configuration from S3 and cache. 
>>> config = AutoConfig.from_pretrained('bert-base-uncased') >>> model = AutoModelForSequenceClassification.from_config(config) """ if type(config) in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys(): return MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING[type(config)](config) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()), ) ) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING) @add_start_docstrings( "Instantiate one of the model classes of the library---with a sequence classification head---from a " "pretrained model.", AUTO_MODEL_PRETRAINED_DOCSTRING, ) def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Examples:: >>> from transformers import AutoConfig, AutoModelForSequenceClassification >>> # Download model and configuration from S3 and cache. >>> model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased') >>> # Update configuration during loading >>> model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> model = AutoModelForSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop("config", None) if not isinstance(config, PretrainedConfig): config, kwargs = AutoConfig.from_pretrained( pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs ) if type(config) in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys(): return MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING[type(config)].from_pretrained( pretrained_model_name_or_path, *model_args, config=config, **kwargs ) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()), ) ) class AutoModelForQuestionAnswering: r""" This is a generic model class that will be instantiated as one of the model classes of the library---with a question answering head---when created with the when created with the :meth:`~transformers.AutoModeForQuestionAnswering.from_pretrained` class method or the :meth:`~transformers.AutoModelForQuestionAnswering.from_config` class method. This class cannot be instantiated directly using ``__init__()`` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoModelForQuestionAnswering is designed to be instantiated " "using the `AutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModelForQuestionAnswering.from_config(config)` methods." ) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_QUESTION_ANSWERING_MAPPING, use_model_types=False) def from_config(cls, config): r""" Instantiates one of the model classes of the library---with a question answering head---from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. Use :meth:`~transformers.AutoModelForQuestionAnswering.from_pretrained` to load the model weights. 
Args: config (:class:`~transformers.PretrainedConfig`): The model class to instantiate is selected based on the configuration class: List options Examples:: >>> from transformers import AutoConfig, AutoModelForQuestionAnswering >>> # Download configuration from S3 and cache. >>> config = AutoConfig.from_pretrained('bert-base-uncased') >>> model = AutoModelForQuestionAnswering.from_config(config) """ if type(config) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys(): return MODEL_FOR_QUESTION_ANSWERING_MAPPING[type(config)](config) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()), ) ) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_QUESTION_ANSWERING_MAPPING) @add_start_docstrings( "Instantiate one of the model classes of the library---with a question answering head---from a " "pretrained model.", AUTO_MODEL_PRETRAINED_DOCSTRING, ) def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Examples:: >>> from transformers import AutoConfig, AutoModelForQuestionAnswering >>> # Download model and configuration from S3 and cache. >>> model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased') >>> # Update configuration during loading >>> model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased', output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop("config", None) if not isinstance(config, PretrainedConfig): config, kwargs = AutoConfig.from_pretrained( pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs ) if type(config) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys(): return MODEL_FOR_QUESTION_ANSWERING_MAPPING[type(config)].from_pretrained( pretrained_model_name_or_path, *model_args, config=config, **kwargs ) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()), ) ) class AutoModelForTokenClassification: r""" This is a generic model class that will be instantiated as one of the model classes of the library---with a token classification head---when created with the when created with the :meth:`~transformers.AutoModelForTokenClassification.from_pretrained` class method or the :meth:`~transformers.AutoModelForTokenClassification.from_config` class method. This class cannot be instantiated directly using ``__init__()`` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoModelForTokenClassification is designed to be instantiated " "using the `AutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModelForTokenClassification.from_config(config)` methods." ) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, use_model_types=False) def from_config(cls, config): r""" Instantiates one of the model classes of the library---with a token classification head---from a configuration. 
        Note:
            Loading a model from its configuration file does **not** load the model weights. It only affects the
            model's configuration. Use :meth:`~transformers.AutoModelForTokenClassification.from_pretrained` to load
            the model weights.

        Args:
            config (:class:`~transformers.PretrainedConfig`):
                The model class to instantiate is selected based on the configuration class:

                List options

        Examples::

            >>> from transformers import AutoConfig, AutoModelForTokenClassification
            >>> # Download configuration from S3 and cache.
            >>> config = AutoConfig.from_pretrained('bert-base-uncased')
            >>> model = AutoModelForTokenClassification.from_config(config)
        """
        if type(config) in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys():
            return MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING[type(config)](config)

        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()),
            )
        )

    @classmethod
    @replace_list_option_in_docstrings(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING)
    @add_start_docstrings(
        "Instantiate one of the model classes of the library---with a token classification head---from a "
        "pretrained model.",
        AUTO_MODEL_PRETRAINED_DOCSTRING,
    )
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Examples::

            >>> from transformers import AutoConfig, AutoModelForTokenClassification

            >>> # Download model and configuration from S3 and cache.
            >>> model = AutoModelForTokenClassification.from_pretrained('bert-base-uncased')

            >>> # Update configuration during loading
            >>> model = AutoModelForTokenClassification.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> model.config.output_attentions
            True

            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            >>> model = AutoModelForTokenClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        config = kwargs.pop("config", None)
        if not isinstance(config, PretrainedConfig):
            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
            )

        if type(config) in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys():
            return MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING[type(config)].from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **kwargs
            )

        raise ValueError(
            "Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()),
            )
        )


class AutoModelForMultipleChoice:
    r"""
    This is a generic model class that will be instantiated as one of the model classes of the library---with a
    multiple choice classification head---when created with the
    :meth:`~transformers.AutoModelForMultipleChoice.from_pretrained` class method or the
    :meth:`~transformers.AutoModelForMultipleChoice.from_config` class method.

    This class cannot be instantiated directly using ``__init__()`` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoModelForMultipleChoice is designed to be instantiated "
            "using the `AutoModelForMultipleChoice.from_pretrained(pretrained_model_name_or_path)` or "
            "`AutoModelForMultipleChoice.from_config(config)` methods."
) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_MULTIPLE_CHOICE_MAPPING, use_model_types=False) def from_config(cls, config): r""" Instantiates one of the model classes of the library---with a multiple choice classification head---from a configuration. Note: Loading a model from its configuration file does **not** load the model weights. It only affects the model's configuration. Use :meth:`~transformers.AutoModelForMultipleChoice.from_pretrained` to load the model weights. Args: config (:class:`~transformers.PretrainedConfig`): The model class to instantiate is selected based on the configuration class: List options Examples:: >>> from transformers import AutoConfig, AutoModelForMultipleChoice >>> # Download configuration from S3 and cache. >>> config = AutoConfig.from_pretrained('bert-base-uncased') >>> model = AutoModelForMultipleChoice.from_config(config) """ if type(config) in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys(): return MODEL_FOR_MULTIPLE_CHOICE_MAPPING[type(config)](config) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys()), ) ) @classmethod @replace_list_option_in_docstrings(MODEL_FOR_MULTIPLE_CHOICE_MAPPING) @add_start_docstrings( "Instantiate one of the model classes of the library---with a multiple choice classification head---from a " "pretrained model.", AUTO_MODEL_PRETRAINED_DOCSTRING, ) def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Examples:: >>> from transformers import AutoConfig, AutoModelForMultipleChoice >>> # Download model and configuration from S3 and cache. >>> model = AutoModelForMultipleChoice.from_pretrained('bert-base-uncased') >>> # Update configuration during loading >>> model = AutoModelForMultipleChoice.from_pretrained('bert-base-uncased', output_attentions=True) >>> model.config.output_attentions True >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> model = AutoModelForMultipleChoice.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop("config", None) if not isinstance(config, PretrainedConfig): config, kwargs = AutoConfig.from_pretrained( pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs ) if type(config) in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys(): return MODEL_FOR_MULTIPLE_CHOICE_MAPPING[type(config)].from_pretrained( pretrained_model_name_or_path, *model_args, config=config, **kwargs ) raise ValueError( "Unrecognized configuration class {} for this kind of AutoModel: {}.\n" "Model type should be one of {}.".format( config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys()), ) )
67,297
43.129836
147
py
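The AutoModelFor* classes above all follow the same config-driven dispatch: `type(config)` is looked up in an ordered mapping from configuration classes to model classes. Below is a minimal, self-contained sketch of that pattern; the `_ToyConfig` / `_ToyModel` names are placeholders for illustration only, not part of the library.

from collections import OrderedDict


class _ToyConfig:
    """Stand-in for a concrete config class such as BertConfig."""


class _ToyModel:
    """Stand-in for a concrete head class such as BertForSequenceClassification."""

    def __init__(self, config):
        self.config = config


# Mirrors the MODEL_FOR_*_MAPPING dicts: configuration class -> model class.
_TOY_MAPPING = OrderedDict([(_ToyConfig, _ToyModel)])


def toy_from_config(config):
    # Exact-type lookup, as in `type(config) in MAPPING.keys()` in the classes above.
    if type(config) in _TOY_MAPPING:
        return _TOY_MAPPING[type(config)](config)
    raise ValueError("Unrecognized configuration class {}".format(config.__class__))


model = toy_from_config(_ToyConfig())  # returns a _ToyModel wrapping the config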
SLT-FAI
SLT-FAI-main/transformers/convert_bart_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BART checkpoint."""


import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.modeling_bart import _make_linear_from_emb
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak model's weights to our BART structure.
""" if not os.path.exists(checkpoint_path): bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval() else: bart = load_xsum_checkpoint(checkpoint_path) bart.model.upgrade_state_dict(bart.model.state_dict()) if hf_checkpoint_name is None: hf_checkpoint_name = checkpoint_path.replace(".", "-") config = BartConfig.from_pretrained(hf_checkpoint_name) tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0) tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0) assert torch.eq(tokens, tokens2).all() if checkpoint_path == "bart.large.mnli": state_dict = bart.state_dict() remove_ignore_keys_(state_dict) state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(state_dict, src, dest) model = BartForSequenceClassification(config).eval() model.load_state_dict(state_dict) fairseq_output = bart.predict("mnli", tokens, return_logits=True) new_model_outputs = model(tokens)[0] # logits else: # no classification heads to worry about state_dict = bart.model.state_dict() remove_ignore_keys_(state_dict) state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"] fairseq_output = bart.extract_features(tokens) if hf_checkpoint_name == "facebook/bart-large": model = BartModel(config).eval() model.load_state_dict(state_dict) new_model_outputs = model(tokens).model[0] else: model = BartForConditionalGeneration(config).eval() # an existing summarization ckpt model.model.load_state_dict(state_dict) if hasattr(model, "lm_head"): model.lm_head = _make_linear_from_emb(model.model.shared) new_model_outputs = model.model(tokens)[0] # Check results assert fairseq_output.shape == new_model_outputs.shape assert (fairseq_output == new_model_outputs).all().item() Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem." ) parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum" ) args = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
5,484
37.090278
117
py
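At its core, the conversion script above is a state-dict remapping exercise: drop bookkeeping keys, rename the classification-head keys, then load the result into the Hugging Face module. A small stand-alone illustration of that pattern on a toy dictionary (no fairseq checkpoint needed; the key names are only examples):

def _remove_ignore_keys(state_dict, ignore_keys):
    # Drop bookkeeping entries if they are present, as remove_ignore_keys_ does above.
    for k in ignore_keys:
        state_dict.pop(k, None)


def _rename_key(state_dict, old, new):
    # Move an entry under its new name, as rename_key does above.
    state_dict[new] = state_dict.pop(old)


toy_sd = {
    "encoder.version": 2,
    "model.classification_heads.mnli.dense.weight": "W",
    "model.decoder.embed_tokens.weight": "E",
}
_remove_ignore_keys(toy_sd, ["encoder.version"])
_rename_key(toy_sd, "model.classification_heads.mnli.dense.weight", "classification_head.dense.weight")
print(sorted(toy_sd))  # ['classification_head.dense.weight', 'model.decoder.embed_tokens.weight']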
SLT-FAI
SLT-FAI-main/transformers/modeling_bert_generation.py
# coding=utf-8 # Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model specific for generation. """ import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from .configuration_bert_generation import BertGenerationConfig from .file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings, ) from .modeling_bert import BertEncoder from .modeling_outputs import BaseModelOutput, CausalLMOutput from .modeling_utils import PreTrainedModel from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "BertGenerationConfig" _TOKENIZER_FOR_DOC = "BertGenerationTokenizer" def load_tf_weights_in_bert_generation( model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False ): try: import numpy as np import tensorflow.compat.v1 as tf import tensorflow_hub as hub import tensorflow_text # noqa: F401 tf.disable_eager_execution() except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_model = hub.Module(tf_hub_path) init = tf.global_variables_initializer() with tf.Session() as sess: init.run() all_variables = tf_model.variable_map keep_track_variables = all_variables.copy() for key in list(all_variables.keys()): if "global" in key: logger.info(f"Skipping {key}...") continue if not is_encoder: model_pointer = getattr(model, model_class) else: model_pointer = model is_embedding = False logger.info(f"Trying to match {key}...") # remove start_string = "module/bert/" sub_layers = key.split("/")[2:] if is_encoder_named_decoder and sub_layers[0] == "encoder": logger.info(f"Skipping encoder layer {key} for decoder") continue if is_encoder and sub_layers[0] == "decoder": logger.info(f"Skipping decoder layer {key} for encoder") continue for i, sub_layer in enumerate(sub_layers): if sub_layer == "embeddings": is_embedding = True elif sub_layer == "LayerNorm": is_embedding = False if "layer" in sub_layer: model_pointer = model_pointer.layer[int(sub_layer.split("_")[-1])] elif sub_layer in ["kernel", "gamma"]: model_pointer = model_pointer.weight elif sub_layer == "beta": model_pointer = model_pointer.bias elif sub_layer == "encdec": model_pointer = model_pointer.crossattention.self elif sub_layer == "encdec_output": model_pointer = model_pointer.crossattention.output elif is_encoder_named_decoder and sub_layer == "decoder": model_pointer = model_pointer.encoder else: if sub_layer == "attention" and "encdec" in sub_layers[i + 1]: continue try: model_pointer = getattr(model_pointer, sub_layer) except AttributeError: logger.info(f"Skipping to initialize {key} at {sub_layer}...") raise AttributeError array = np.asarray(sess.run(all_variables[key])) if not is_embedding: logger.info("Transposing numpy weight of shape {} for 
{}".format(array.shape, key)) array = np.transpose(array) else: model_pointer = model_pointer.weight try: assert ( model_pointer.shape == array.shape ), f"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (model_pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {key}") model_pointer.data = torch.from_numpy(array.astype(np.float32)) keep_track_variables.pop(key, None) logger.info("Weights not copied to PyTorch model: {}".format(", ".join(keep_track_variables.keys()))) return model class BertGenerationEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward(self, input_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) embeddings = inputs_embeds + position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertGenerationPreTrainedModel(PreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertGenerationConfig base_model_prefix = "bert" authorized_missing_keys = [r"position_ids"] def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() BERT_GENERATION_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BertGenerationConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
    Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""

BERT_GENERATION_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using :class:`~transformers.BertGenerationTokenizer`. See
            :meth:`transformers.PreTrainedTokenizer.__call__` and :meth:`transformers.PreTrainedTokenizer.encode` for
            details.

            `What are input IDs? <../glossary.html#input-ids>`__
        attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
            Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            `What are attention masks? <../glossary.html#attention-mask>`__
        position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
            ``[0, config.max_position_embeddings - 1]``.

            `What are position IDs? <../glossary.html#position-ids>`_
        head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
            representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
            into associated vectors than the model's internal embedding lookup matrix.
        output_attentions (:obj:`bool`, `optional`):
            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
            returned tensors for more detail.
        output_hidden_states (:obj:`bool`, `optional`):
            Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
            for more detail.
        return_dict (:obj:`bool`, `optional`):
            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.",
    BERT_GENERATION_START_DOCSTRING,
)
class BertGenerationEncoder(BertGenerationPreTrainedModel):
    """

    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    This model should be used when leveraging Bert or Roberta checkpoints for the
    :class:`~transformers.EncoderDecoderModel` class as described in `Leveraging Pre-trained Checkpoints for Sequence
    Generation Tasks <https://arxiv.org/abs/1907.12461>`__ by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.

    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both the :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
""" def __init__(self, config): super().__init__(config) self.config = config self.embeddings = BertGenerationEmbeddings(config) self.encoder = BertEncoder(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_callable(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/bert_for_seq_generation_L-24_bbc_encoder", output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class BertGenerationOnlyLMHead(nn.Module): def __init__(self, config): super().__init__() self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): logits = self.decoder(hidden_states) return logits @add_start_docstrings( """BertGeneration Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_GENERATION_START_DOCSTRING, ) class BertGenerationDecoder(BertGenerationPreTrainedModel): def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warn("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True.`") self.bert = BertGenerationEncoder(config) self.lm_head = BertGenerationOnlyLMHead(config) self.init_weights() def get_output_embeddings(self): return self.lm_head.decoder @add_start_docstrings_to_callable(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. 
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` Returns: Example:: >>> from transformers import BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig >>> import torch >>> tokenizer = BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder') >>> config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") >>> config.is_decoder = True >>> model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder', config=config, return_dict=True) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutput( loss=lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) return {"input_ids": input_ids, "attention_mask": attention_mask}
23,694
44.39272
145
py
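One detail of `BertGenerationDecoder.forward` above that is worth isolating is the label shift used for next-token prediction: the logits at position t are scored against the token at position t + 1. A minimal sketch with toy shapes, assuming only that torch is available:

import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len, vocab_size = 2, 5, 11
prediction_scores = torch.randn(batch_size, seq_len, vocab_size)  # stand-in for the lm_head output
labels = torch.randint(0, vocab_size, (batch_size, seq_len))      # stand-in for the target ids

shifted_scores = prediction_scores[:, :-1, :].contiguous()  # drop the last position
shifted_labels = labels[:, 1:].contiguous()                 # drop the first token
loss = CrossEntropyLoss()(shifted_scores.view(-1, vocab_size), shifted_labels.view(-1))
print(float(loss))  # mean cross-entropy over batch_size * (seq_len - 1) positions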
SLT-FAI
SLT-FAI-main/transformers/modeling_funnel.py
# coding=utf-8 # Copyright 2020-present Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Funnel Transformer model. """ import os from dataclasses import dataclass from typing import Optional, Tuple import numpy as np import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from torch.nn import functional as F from .activations import ACT2FN from .configuration_funnel import FunnelConfig from .file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings, ) from .modeling_outputs import ( BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from .modeling_utils import PreTrainedModel from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "FunnelConfig" _TOKENIZER_FOR_DOC = "FunnelTokenizer" FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = [ "funnel-transformer/small", # B4-4-4H768 "funnel-transformer/small-base", # B4-4-4H768, no decoder "funnel-transformer/medium", # B6-3x2-3x2H768 "funnel-transformer/medium-base", # B6-3x2-3x2H768, no decoder "funnel-transformer/intermediate", # B6-6-6H768 "funnel-transformer/intermediate-base", # B6-6-6H768, no decoder "funnel-transformer/large", # B8-8-8H1024 "funnel-transformer/large-base", # B8-8-8H1024, no decoder "funnel-transformer/xlarge-base", # B10-10-10H1024 "funnel-transformer/xlarge", # B10-10-10H1024, no decoder ] INF = 1e6 def load_tf_weights_in_funnel(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) _layer_map = { "k": "k_head", "q": "q_head", "v": "v_head", "o": "post_proj", "layer_1": "linear_1", "layer_2": "linear_2", "rel_attn": "attention", "ff": "ffn", "kernel": "weight", "gamma": "weight", "beta": "bias", "lookup_table": "weight", "word_embedding": "word_embeddings", "input": "embeddings", } for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info("Skipping {}".format("/".join(name))) continue if name[0] == "generator": continue pointer = model skipped = False for m_name in name[1:]: if not isinstance(pointer, FunnelPositionwiseFFN) and re.fullmatch(r"layer_\d+", m_name): layer_index = int(re.search(r"layer_(\d+)", m_name).groups()[0]) if layer_index < config.num_hidden_layers: block_idx = 0 while layer_index >= config.block_sizes[block_idx]: layer_index -= config.block_sizes[block_idx] block_idx += 1 pointer = pointer.blocks[block_idx][layer_index] else: layer_index -= config.num_hidden_layers pointer = pointer.layers[layer_index] elif m_name == "r" and isinstance(pointer, FunnelRelMultiheadAttention): pointer = pointer.r_kernel break elif m_name in _layer_map: pointer = getattr(pointer, _layer_map[m_name]) else: try: pointer = getattr(pointer, m_name) except AttributeError: print("Skipping {}".format("/".join(name)), array.shape) skipped = True break if not skipped: if len(pointer.shape) != len(array.shape): array = array.reshape(pointer.shape) if m_name == "kernel": array = np.transpose(array) pointer.data = torch.from_numpy(array) return model class FunnelEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) def forward(self, input_ids=None, inputs_embeds=None): if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) embeddings = self.layer_norm(inputs_embeds) embeddings = self.dropout(embeddings) return embeddings class FunnelAttentionStructure(nn.Module): """ Contains helpers for `FunnelRelMultiheadAttention `. """ cls_token_type_id: int = 2 def __init__(self, config): super().__init__() self.config = config self.sin_dropout = nn.Dropout(config.hidden_dropout) self.cos_dropout = nn.Dropout(config.hidden_dropout) # Track where we are at in terms of pooling from the original input, e.g., by how much the sequence length was # dividide. self.pooling_mult = None def init_attention_inputs(self, inputs_embeds, attention_mask=None, token_type_ids=None): """ Returns the attention inputs associated to the inputs of the model. 
""" # inputs_embeds has shape batch_size x seq_len x d_model # attention_mask and token_type_ids have shape batch_size x seq_len self.pooling_mult = 1 self.seq_len = seq_len = inputs_embeds.size(1) position_embeds = self.get_position_embeds(seq_len, inputs_embeds.dtype, inputs_embeds.device) token_type_mat = self.token_type_ids_to_mat(token_type_ids) if token_type_ids is not None else None cls_mask = ( F.pad(inputs_embeds.new_ones([seq_len - 1, seq_len - 1]), (1, 0, 1, 0)) if self.config.separate_cls else None ) return (position_embeds, token_type_mat, attention_mask, cls_mask) def token_type_ids_to_mat(self, token_type_ids): """Convert `token_type_ids` to `token_type_mat`.""" token_type_mat = token_type_ids[:, :, None] == token_type_ids[:, None] # Treat <cls> as in the same segment as both A & B cls_ids = token_type_ids == self.cls_token_type_id cls_mat = cls_ids[:, :, None] | cls_ids[:, None] return cls_mat | token_type_mat def get_position_embeds(self, seq_len, dtype, device): """ Create and cache inputs related to relative position encoding. Those are very different depending on whether we are using the factorized or the relative shift attention: For the factorized attention, it returns the matrices (phi, pi, psi, omega) used in the paper, appendix A.2.2, final formula. For the relative shif attention, it returns all possible vectors R used in the paper, appendix A.2.1, final formula. Paper link: https://arxiv.org/abs/2006.03236 """ d_model = self.config.d_model if self.config.attention_type == "factorized": # Notations from the paper, appending A.2.2, final formula. # We need to create and return the matrics phi, psi, pi and omega. pos_seq = torch.arange(0, seq_len, 1.0, dtype=dtype, device=device) freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=dtype, device=device) inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2))) sinusoid = pos_seq[:, None] * inv_freq[None] sin_embed = torch.sin(sinusoid) sin_embed_d = self.sin_dropout(sin_embed) cos_embed = torch.cos(sinusoid) cos_embed_d = self.cos_dropout(cos_embed) # This is different from the formula on the paper... phi = torch.cat([sin_embed_d, sin_embed_d], dim=-1) psi = torch.cat([cos_embed, sin_embed], dim=-1) pi = torch.cat([cos_embed_d, cos_embed_d], dim=-1) omega = torch.cat([-sin_embed, cos_embed], dim=-1) return (phi, pi, psi, omega) else: # Notations from the paper, appending A.2.1, final formula. # We need to create and return all the possible vectors R for all blocks and shifts. freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=dtype, device=device) inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2))) # Maximum relative positions for the first input rel_pos_id = torch.arange(-seq_len * 2, seq_len * 2, 1.0, dtype=dtype, device=device) zero_offset = seq_len * 2 sinusoid = rel_pos_id[:, None] * inv_freq[None] sin_embed = self.sin_dropout(torch.sin(sinusoid)) cos_embed = self.cos_dropout(torch.cos(sinusoid)) pos_embed = torch.cat([sin_embed, cos_embed], dim=-1) pos = torch.arange(0, seq_len, dtype=dtype, device=device) pooled_pos = pos position_embeds_list = [] for block_index in range(0, self.config.num_blocks): # For each block with block_index > 0, we need two types position embeddings: # - Attention(pooled-q, unpooled-kv) # - Attention(pooled-q, pooled-kv) # For block_index = 0 we only need the second one and leave the first one as None. 
# First type if block_index == 0: position_embeds_pooling = None else: pooled_pos = self.stride_pool_pos(pos, block_index) # construct rel_pos_id stride = 2 ** (block_index - 1) rel_pos = self.relative_pos(pos, stride, pooled_pos, shift=2) rel_pos = rel_pos[:, None] + zero_offset rel_pos = rel_pos.expand(rel_pos.size(0), d_model) position_embeds_pooling = torch.gather(pos_embed, 0, rel_pos) # Second type pos = pooled_pos stride = 2 ** block_index rel_pos = self.relative_pos(pos, stride) rel_pos = rel_pos[:, None] + zero_offset rel_pos = rel_pos.expand(rel_pos.size(0), d_model) position_embeds_no_pooling = torch.gather(pos_embed, 0, rel_pos) position_embeds_list.append([position_embeds_no_pooling, position_embeds_pooling]) return position_embeds_list def stride_pool_pos(self, pos_id, block_index): """ Pool `pos_id` while keeping the cls token separate (if `config.separate_cls=True`). """ if self.config.separate_cls: # Under separate <cls>, we treat the <cls> as the first token in # the previous block of the 1st real block. Since the 1st real # block always has position 1, the position of the previous block # will be at `1 - 2 ** block_index`. cls_pos = pos_id.new_tensor([-(2 ** block_index) + 1]) pooled_pos_id = pos_id[1:-1] if self.config.truncate_seq else pos_id[1:] return torch.cat([cls_pos, pooled_pos_id[::2]], 0) else: return pos_id[::2] def relative_pos(self, pos, stride, pooled_pos=None, shift=1): """ Build the relative positional vector between `pos` and `pooled_pos`. """ if pooled_pos is None: pooled_pos = pos ref_point = pooled_pos[0] - pos[0] num_remove = shift * len(pooled_pos) max_dist = ref_point + num_remove * stride min_dist = pooled_pos[0] - pos[-1] return torch.arange(max_dist, min_dist - 1, -stride, dtype=torch.long, device=pos.device) def stride_pool(self, tensor, axis): """ Perform pooling by stride slicing the tensor along the given axis. """ if tensor is None: return None # Do the stride pool recursively if axis is a list or a tuple of ints. if isinstance(axis, (list, tuple)): for ax in axis: tensor = self.stride_pool(tensor, ax) return tensor # Do the stride pool recursively if tensor is a list or tuple of tensors. if isinstance(tensor, (tuple, list)): return type(tensor)(self.stride_pool(x, axis) for x in tensor) # Deal with negative axis axis %= tensor.ndim axis_slice = ( slice(None, -1, 2) if self.config.separate_cls and self.config.truncate_seq else slice(None, None, 2) ) enc_slice = [slice(None)] * axis + [axis_slice] if self.config.separate_cls: cls_slice = [slice(None)] * axis + [slice(None, 1)] tensor = torch.cat([tensor[cls_slice], tensor], axis=axis) return tensor[enc_slice] def pool_tensor(self, tensor, mode="mean", stride=2): """Apply 1D pooling to a tensor of size [B x T (x H)].""" if tensor is None: return None # Do the pool recursively if tensor is a list or tuple of tensors. if isinstance(tensor, (tuple, list)): return type(tensor)(self.pool_tensor(tensor, mode=mode, stride=stride) for x in tensor) if self.config.separate_cls: suffix = tensor[:, :-1] if self.config.truncate_seq else tensor tensor = torch.cat([tensor[:, :1], suffix], dim=1) ndim = tensor.ndim if ndim == 2: tensor = tensor[:, None, :, None] elif ndim == 3: tensor = tensor[:, None, :, :] # Stride is applied on the second-to-last dimension. 
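        # e.g. a [batch_size, seq_len] attention mask was reshaped to [batch_size, 1, seq_len, 1]
        # above, so the (stride, 1) kernel below pools only along the sequence dimension, and
        # ceil_mode=True keeps a final partial window when the length is odd.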
stride = (stride, 1) if mode == "mean": tensor = F.avg_pool2d(tensor, stride, stride=stride, ceil_mode=True) elif mode == "max": tensor = F.max_pool2d(tensor, stride, stride=stride, ceil_mode=True) elif mode == "min": tensor = -F.max_pool2d(-tensor, stride, stride=stride, ceil_mode=True) else: raise NotImplementedError("The supported modes are 'mean', 'max' and 'min'.") if ndim == 2: return tensor[:, 0, :, 0] elif ndim == 3: return tensor[:, 0] return tensor def pre_attention_pooling(self, output, attention_inputs): """ Pool `output` and the proper parts of `attention_inputs` before the attention layer. """ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs if self.config.pool_q_only: if self.config.attention_type == "factorized": position_embeds = self.stride_pool(position_embeds[:2], 0) + position_embeds[2:] token_type_mat = self.stride_pool(token_type_mat, 1) cls_mask = self.stride_pool(cls_mask, 0) output = self.pool_tensor(output, mode=self.config.pooling_type) else: self.pooling_mult *= 2 if self.config.attention_type == "factorized": position_embeds = self.stride_pool(position_embeds, 0) token_type_mat = self.stride_pool(token_type_mat, [1, 2]) cls_mask = self.stride_pool(cls_mask, [1, 2]) attention_mask = self.pool_tensor(attention_mask, mode="min") output = self.pool_tensor(output, mode=self.config.pooling_type) attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask) return output, attention_inputs def post_attention_pooling(self, attention_inputs): """ Pool the proper parts of `attention_inputs` after the attention layer. """ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs if self.config.pool_q_only: self.pooling_mult *= 2 if self.config.attention_type == "factorized": position_embeds = position_embeds[:2] + self.stride_pool(position_embeds[2:], 0) token_type_mat = self.stride_pool(token_type_mat, 2) cls_mask = self.stride_pool(cls_mask, 1) attention_mask = self.pool_tensor(attention_mask, mode="min") attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask) return attention_inputs def _relative_shift_gather(positional_attn, context_len, shift): batch_size, n_head, seq_len, max_rel_len = positional_attn.shape # max_rel_len = 2 * context_len + shift -1 is the numbers of possible relative positions i-j # What's next is the same as doing the following gather, which might be clearer code but less efficient. 
# idxs = context_len + torch.arange(0, context_len).unsqueeze(0) - torch.arange(0, seq_len).unsqueeze(1) # # matrix of context_len + i-j # return positional_attn.gather(3, idxs.expand([batch_size, n_head, context_len, context_len])) positional_attn = torch.reshape(positional_attn, [batch_size, n_head, max_rel_len, seq_len]) positional_attn = positional_attn[:, :, shift:, :] positional_attn = torch.reshape(positional_attn, [batch_size, n_head, seq_len, max_rel_len - shift]) positional_attn = positional_attn[..., :context_len] return positional_attn class FunnelRelMultiheadAttention(nn.Module): def __init__(self, config, block_index): super().__init__() self.config = config self.block_index = block_index d_model, n_head, d_head = config.d_model, config.n_head, config.d_head self.hidden_dropout = nn.Dropout(config.hidden_dropout) self.attention_dropout = nn.Dropout(config.attention_dropout) self.q_head = nn.Linear(d_model, n_head * d_head, bias=False) self.k_head = nn.Linear(d_model, n_head * d_head) self.v_head = nn.Linear(d_model, n_head * d_head) self.r_w_bias = nn.Parameter(torch.zeros([n_head, d_head])) self.r_r_bias = nn.Parameter(torch.zeros([n_head, d_head])) self.r_kernel = nn.Parameter(torch.zeros([d_model, n_head, d_head])) self.r_s_bias = nn.Parameter(torch.zeros([n_head, d_head])) self.seg_embed = nn.Parameter(torch.zeros([2, n_head, d_head])) self.post_proj = nn.Linear(n_head * d_head, d_model) self.layer_norm = nn.LayerNorm(d_model, eps=config.layer_norm_eps) self.scale = 1.0 / (d_head ** 0.5) def relative_positional_attention(self, position_embeds, q_head, context_len, cls_mask=None): """ Relative attention score for the positional encodings """ # q_head has shape batch_size x sea_len x n_head x d_head if self.config.attention_type == "factorized": # Notations from the paper, appending A.2.2, final formula (https://arxiv.org/abs/2006.03236) # phi and pi have shape seq_len x d_model, psi and omega have shape context_len x d_model phi, pi, psi, omega = position_embeds # Shape n_head x d_head u = self.r_r_bias * self.scale # Shape d_model x n_head x d_head w_r = self.r_kernel # Shape batch_size x sea_len x n_head x d_model q_r_attention = torch.einsum("binh,dnh->bind", q_head + u, w_r) q_r_attention_1 = q_r_attention * phi[:, None] q_r_attention_2 = q_r_attention * pi[:, None] # Shape batch_size x n_head x seq_len x context_len positional_attn = torch.einsum("bind,jd->bnij", q_r_attention_1, psi) + torch.einsum( "bind,jd->bnij", q_r_attention_2, omega ) else: shift = 2 if q_head.shape[1] != context_len else 1 # Notations from the paper, appending A.2.1, final formula (https://arxiv.org/abs/2006.03236) # Grab the proper positional encoding, shape max_rel_len x d_model r = position_embeds[self.block_index][shift - 1] # Shape n_head x d_head v = self.r_r_bias * self.scale # Shape d_model x n_head x d_head w_r = self.r_kernel # Shape max_rel_len x n_head x d_model r_head = torch.einsum("td,dnh->tnh", r, w_r) # Shape batch_size x n_head x seq_len x max_rel_len positional_attn = torch.einsum("binh,tnh->bnit", q_head + v, r_head) # Shape batch_size x n_head x seq_len x context_len positional_attn = _relative_shift_gather(positional_attn, context_len, shift) if cls_mask is not None: positional_attn *= cls_mask return positional_attn def relative_token_type_attention(self, token_type_mat, q_head, cls_mask=None): """ Relative attention score for the token_type_ids """ if token_type_mat is None: return 0 batch_size, seq_len, context_len = token_type_mat.shape # q_head has shape batch_size x 
seq_len x n_head x d_head # Shape n_head x d_head r_s_bias = self.r_s_bias * self.scale # Shape batch_size x n_head x seq_len x 2 token_type_bias = torch.einsum("bind,snd->bnis", q_head + r_s_bias, self.seg_embed) # Shape batch_size x n_head x seq_len x context_len token_type_mat = token_type_mat[:, None].expand([batch_size, q_head.shape[2], seq_len, context_len]) # Shapes batch_size x n_head x seq_len diff_token_type, same_token_type = torch.split(token_type_bias, 1, dim=-1) # Shape batch_size x n_head x seq_len x context_len token_type_attn = torch.where( token_type_mat, same_token_type.expand(token_type_mat.shape), diff_token_type.expand(token_type_mat.shape) ) if cls_mask is not None: token_type_attn *= cls_mask return token_type_attn def forward(self, query, key, value, attention_inputs, output_attentions=False): # query has shape batch_size x seq_len x d_model # key and value have shapes batch_size x context_len x d_model position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs batch_size, seq_len, _ = query.shape context_len = key.shape[1] n_head, d_head = self.config.n_head, self.config.d_head # Shape batch_size x seq_len x n_head x d_head q_head = self.q_head(query).view(batch_size, seq_len, n_head, d_head) # Shapes batch_size x context_len x n_head x d_head k_head = self.k_head(key).view(batch_size, context_len, n_head, d_head) v_head = self.v_head(value).view(batch_size, context_len, n_head, d_head) q_head = q_head * self.scale # Shape n_head x d_head r_w_bias = self.r_w_bias * self.scale # Shapes batch_size x n_head x seq_len x context_len content_score = torch.einsum("bind,bjnd->bnij", q_head + r_w_bias, k_head) positional_attn = self.relative_positional_attention(position_embeds, q_head, context_len, cls_mask) token_type_attn = self.relative_token_type_attention(token_type_mat, q_head, cls_mask) # merge attention scores attn_score = content_score + positional_attn + token_type_attn # precision safe in case of mixed precision training dtype = attn_score.dtype attn_score = attn_score.float() # perform masking if attention_mask is not None: attn_score = attn_score - INF * (1 - attention_mask[:, None, None].float()) # attention probability attn_prob = torch.softmax(attn_score, dim=-1, dtype=dtype) attn_prob = self.attention_dropout(attn_prob) # attention output, shape batch_size x seq_len x n_head x d_head attn_vec = torch.einsum("bnij,bjnd->bind", attn_prob, v_head) # Shape shape batch_size x seq_len x d_model attn_out = self.post_proj(attn_vec.reshape(batch_size, seq_len, n_head * d_head)) attn_out = self.hidden_dropout(attn_out) output = self.layer_norm(query + attn_out) return (output, attn_prob) if output_attentions else (output,) class FunnelPositionwiseFFN(nn.Module): def __init__(self, config): super().__init__() self.linear_1 = nn.Linear(config.d_model, config.d_inner) self.activation_function = ACT2FN[config.hidden_act] self.activation_dropout = nn.Dropout(config.activation_dropout) self.linear_2 = nn.Linear(config.d_inner, config.d_model) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps) def forward(self, hidden): h = self.linear_1(hidden) h = self.activation_function(h) h = self.activation_dropout(h) h = self.linear_2(h) h = self.dropout(h) return self.layer_norm(hidden + h) class FunnelLayer(nn.Module): def __init__(self, config, block_index): super().__init__() self.attention = FunnelRelMultiheadAttention(config, block_index) self.ffn = FunnelPositionwiseFFN(config) def 
forward(self, query, key, value, attention_inputs, output_attentions=False): attn = self.attention(query, key, value, attention_inputs, output_attentions=output_attentions) output = self.ffn(attn[0]) return (output, attn[1]) if output_attentions else (output,) class FunnelEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.attention_structure = FunnelAttentionStructure(config) self.blocks = nn.ModuleList( [ nn.ModuleList([FunnelLayer(config, block_index) for _ in range(block_size)]) for block_index, block_size in enumerate(config.block_sizes) ] ) def forward( self, inputs_embeds, attention_mask=None, token_type_ids=None, output_attentions=False, output_hidden_states=False, return_dict=False, ): # The pooling is not implemented on long tensors, so we convert this mask. attention_mask = attention_mask.type_as(inputs_embeds) attention_inputs = self.attention_structure.init_attention_inputs( inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids, ) hidden = inputs_embeds all_hidden_states = (inputs_embeds,) if output_hidden_states else None all_attentions = () if output_attentions else None for block_index, block in enumerate(self.blocks): pooling_flag = hidden.size(1) > (2 if self.config.separate_cls else 1) pooling_flag = pooling_flag and block_index > 0 if pooling_flag: pooled_hidden, attention_inputs = self.attention_structure.pre_attention_pooling( hidden, attention_inputs ) for (layer_index, layer) in enumerate(block): for repeat_index in range(self.config.block_repeats[block_index]): do_pooling = (repeat_index == 0) and (layer_index == 0) and pooling_flag if do_pooling: query = pooled_hidden key = value = hidden if self.config.pool_q_only else pooled_hidden else: query = key = value = hidden layer_output = layer(query, key, value, attention_inputs, output_attentions=output_attentions) hidden = layer_output[0] if do_pooling: attention_inputs = self.attention_structure.post_attention_pooling(attention_inputs) if output_attentions: all_attentions = all_attentions + layer_output[1:] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden,) if not return_dict: return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions) def upsample(x, stride, target_len, separate_cls=True, truncate_seq=False): """Upsample tensor `x` to match `target_len` by repeating the tokens `stride` time on the sequence length dimension.""" if stride == 1: return x if separate_cls: cls = x[:, :1] x = x[:, 1:] output = torch.repeat_interleave(x, repeats=stride, dim=1) if separate_cls: if truncate_seq: output = nn.functional.pad(output, (0, 0, 0, stride - 1, 0, 0)) output = output[:, : target_len - 1] output = torch.cat([cls, output], dim=1) else: output = output[:, :target_len] return output class FunnelDecoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.attention_structure = FunnelAttentionStructure(config) self.layers = nn.ModuleList([FunnelLayer(config, 0) for _ in range(config.num_decoder_layers)]) def forward( self, final_hidden, first_block_hidden, attention_mask=None, token_type_ids=None, output_attentions=False, output_hidden_states=False, return_dict=False, ): upsampled_hidden = upsample( final_hidden, stride=2 ** (len(self.config.block_sizes) - 1), target_len=first_block_hidden.shape[1], separate_cls=self.config.separate_cls, 
truncate_seq=self.config.truncate_seq, ) hidden = upsampled_hidden + first_block_hidden all_hidden_states = (hidden,) if output_hidden_states else None all_attentions = () if output_attentions else None attention_inputs = self.attention_structure.init_attention_inputs( hidden, attention_mask=attention_mask, token_type_ids=token_type_ids, ) for layer in self.layers: layer_output = layer(hidden, hidden, hidden, attention_inputs, output_attentions=output_attentions) hidden = layer_output[0] if output_attentions: all_attentions = all_attentions + layer_output[1:] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden,) if not return_dict: return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions) class FunnelDiscriminatorPredictions(nn.Module): """Prediction module for the discriminator, made up of two dense layers.""" def __init__(self, config): super().__init__() self.config = config self.dense = nn.Linear(config.d_model, config.d_model) self.dense_prediction = nn.Linear(config.d_model, 1) def forward(self, discriminator_hidden_states): hidden_states = self.dense(discriminator_hidden_states) hidden_states = ACT2FN[self.config.hidden_act](hidden_states) logits = self.dense_prediction(hidden_states).squeeze() return logits class FunnelPreTrainedModel(PreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = FunnelConfig load_tf_weights = load_tf_weights_in_funnel base_model_prefix = "funnel" def _init_weights(self, module): classname = module.__class__.__name__ if classname.find("Linear") != -1: if getattr(module, "weight", None) is not None: if self.config.initializer_std is None: fan_out, fan_in = module.weight.shape std = np.sqrt(1.0 / float(fan_in + fan_out)) else: std = self.config.initializer_std nn.init.normal_(module.weight, std=std) if getattr(module, "bias", None) is not None: nn.init.constant_(module.bias, 0.0) elif classname == "FunnelRelMultiheadAttention": nn.init.uniform_(module.r_w_bias, b=self.config.initializer_range) nn.init.uniform_(module.r_r_bias, b=self.config.initializer_range) nn.init.uniform_(module.r_kernel, b=self.config.initializer_range) nn.init.uniform_(module.r_s_bias, b=self.config.initializer_range) nn.init.uniform_(module.seg_embed, b=self.config.initializer_range) elif classname == "FunnelEmbeddings": std = 1.0 if self.config.initializer_std is None else self.config.initializer_std nn.init.normal_(module.word_embeddings.weight, std=std) class FunnelClassificationHead(nn.Module): def __init__(self, config, n_labels): super().__init__() self.linear_hidden = nn.Linear(config.d_model, config.d_model) self.dropout = nn.Dropout(config.hidden_dropout) self.linear_out = nn.Linear(config.d_model, n_labels) def forward(self, hidden): hidden = self.linear_hidden(hidden) hidden = torch.tanh(hidden) hidden = self.dropout(hidden) return self.linear_out(hidden) @dataclass class FunnelForPreTrainingOutput(ModelOutput): """ Output type of :class:`~transformers.FunnelForPreTrainingModel`. Args: loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`): Total loss of the ELECTRA-style objective. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`): Prediction scores of the head (scores for each token before SoftMax). 
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None FUNNEL_START_DOCSTRING = r""" The Funnel Transformer model was proposed in `Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing <https://arxiv.org/abs/2006.03236>`__ by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le. This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.FunnelConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ FUNNEL_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BertTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`_ inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ @add_start_docstrings( """ The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called decoder) or any task-specific head on top.""", FUNNEL_START_DOCSTRING, ) class FunnelBaseModel(FunnelPreTrainedModel): def __init__(self, config): super().__init__(config) self.embeddings = FunnelEmbeddings(config) self.encoder = FunnelEncoder(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings): self.embeddings.word_embeddings = new_embeddings @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small-base", output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # TODO: deal with head_mask if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids) encoder_outputs = self.encoder( inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return encoder_outputs @add_start_docstrings( "The bare Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top.", FUNNEL_START_DOCSTRING, ) class FunnelModel(FunnelPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = FunnelEmbeddings(config) self.encoder = FunnelEncoder(config) self.decoder = FunnelDecoder(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings): self.embeddings.word_embeddings = new_embeddings @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, 
checkpoint="funnel-transformer/small", output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # TODO: deal with head_mask if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids) encoder_outputs = self.encoder( inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict, ) decoder_outputs = self.decoder( final_hidden=encoder_outputs[0], first_block_hidden=encoder_outputs[1][self.config.block_sizes[0]], attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: idx = 0 outputs = (decoder_outputs[0],) if output_hidden_states: idx += 1 outputs = outputs + (encoder_outputs[1] + decoder_outputs[idx],) if output_attentions: idx += 1 outputs = outputs + (encoder_outputs[2] + decoder_outputs[idx],) return outputs return BaseModelOutput( last_hidden_state=decoder_outputs[0], hidden_states=(encoder_outputs.hidden_states + decoder_outputs.hidden_states) if output_hidden_states else None, attentions=(encoder_outputs.attentions + decoder_outputs.attentions) if output_attentions else None, ) add_start_docstrings( """ Funnel Transformer model with a binary classification head on top as used during pretraining for identifying generated tokens.""", FUNNEL_START_DOCSTRING, ) class FunnelForPreTraining(FunnelPreTrainedModel): def __init__(self, config): super().__init__(config) self.funnel = FunnelModel(config) self.discriminator_predictions = FunnelDiscriminatorPredictions(config) self.init_weights() @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=FunnelForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`): Labels for computing the ELECTRA-style loss. Input should be a sequence of tokens (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``: - 0 indicates the token is an original token, - 1 indicates the token was replaced. 
Returns: Examples:: >>> from transformers import FunnelTokenizer, FunnelForPreTraining >>> import torch >>> tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small') >>> model = FunnelForPreTraining.from_pretrained('funnel-transformer/small', return_dict=True) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors= "pt") >>> logits = model(**inputs).logits """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict discriminator_hidden_states = self.funnel( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) discriminator_sequence_output = discriminator_hidden_states[0] logits = self.discriminator_predictions(discriminator_sequence_output) loss = None if labels is not None: loss_fct = nn.BCEWithLogitsLoss() if attention_mask is not None: active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1 active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss] active_labels = labels[active_loss] loss = loss_fct(active_logits, active_labels.float()) else: loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float()) if not return_dict: output = (logits,) + discriminator_hidden_states[1:] return ((loss,) + output) if loss is not None else output return FunnelForPreTrainingOutput( loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions, ) @add_start_docstrings("""Funnel Transformer Model with a `language modeling` head on top. """, FUNNEL_START_DOCSTRING) class FunnelForMaskedLM(FunnelPreTrainedModel): def __init__(self, config): super().__init__(config) self.funnel = FunnelModel(config) self.lm_head = nn.Linear(config.d_model, config.vocab_size) self.init_weights() def get_output_embeddings(self): return self.lm_head @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small", output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask="<mask>", ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. 
            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
            Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with
            labels in ``[0, ..., config.vocab_size]``
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.funnel(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = outputs[0]
        prediction_logits = self.lm_head(last_hidden_state)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """Funnel Transformer Model with a sequence classification/regression head on top (two linear layers on top of the
    first timestep of the last hidden state) e.g. for GLUE tasks. """,
    FUNNEL_START_DOCSTRING,
)
class FunnelForSequenceClassification(FunnelPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.funnel = FunnelBaseModel(config)
        self.classifier = FunnelClassificationHead(config, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="funnel-transformer/small-base",
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
            If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.funnel( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] pooled_output = last_hidden_state[:, 0] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """Funnel Transformer Model with a multiple choice classification head on top (two linear layer on top of the first timestep of the last hidden state, and a softmax) e.g. for RocStories/SWAG tasks. """, FUNNEL_START_DOCSTRING, ) class FunnelForMultipleChoice(FunnelPreTrainedModel): def __init__(self, config): super().__init__(config) self.funnel = FunnelBaseModel(config) self.classifier = FunnelClassificationHead(config, 1) self.init_weights() @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small-base", output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. 
(See :obj:`input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.funnel( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] pooled_output = last_hidden_state[:, 0] logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """Funnel Transformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, FUNNEL_START_DOCSTRING, ) class FunnelForTokenClassification(FunnelPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.funnel = FunnelModel(config) self.dropout = nn.Dropout(config.hidden_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small", output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.funnel( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] last_hidden_state = self.dropout(last_hidden_state) logits = self.classifier(last_hidden_state) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """Funnel Transformer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).""", FUNNEL_START_DOCSTRING, ) class FunnelForQuestionAnswering(FunnelPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.funnel = FunnelModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small", output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.funnel( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] logits = self.qa_outputs(last_hidden_state) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
65,709
41.284427
168
py
SLT-FAI
SLT-FAI-main/transformers/tokenization_herbert.py
# coding=utf-8
# Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from .tokenization_bert import BasicTokenizer
from .tokenization_xlm import XLMTokenizer
from .utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"allegro/herbert-base-cased": "https://cdn.huggingface.co/allegro/herbert-base-cased/vocab.json"},
    "merges_file": {"allegro/herbert-base-cased": "https://cdn.huggingface.co/allegro/herbert-base-cased/merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizer(XLMTokenizer):
    """
    Construct a BPE tokenizer for HerBERT.

    Peculiarities:

    - uses BERT's pre-tokenizer: BasicTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of
      a punctuation character will be treated separately.

    - Such pretokenized input is BPE subtokenized

    This tokenizer inherits from :class:`~transformers.XLMTokenizer` which contains most of the methods. Users should
    refer to the superclass for more information regarding methods.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, **kwargs):

        kwargs["cls_token"] = "<s>"
        kwargs["unk_token"] = "<unk>"
        kwargs["pad_token"] = "<pad>"
        kwargs["mask_token"] = "<mask>"
        kwargs["sep_token"] = "</s>"
        kwargs["do_lowercase_and_remove_accent"] = False
        kwargs["additional_special_tokens"] = []

        super().__init__(**kwargs)
        self.bert_pre_tokenizer = BasicTokenizer(
            do_lower_case=False, never_split=self.all_special_tokens, tokenize_chinese_chars=False, strip_accents=False
        )

    def _tokenize(self, text):

        pre_tokens = self.bert_pre_tokenizer.tokenize(text)

        split_tokens = []
        for token in pre_tokens:
            if token:
                split_tokens.extend([t for t in self.bpe(token).split(" ")])

        return split_tokens
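

if __name__ == "__main__":
    # Editor's usage sketch, not part of the original module: a minimal run of the HerbertTokenizer
    # defined above. It assumes the "allegro/herbert-base-cased" vocab/merges files listed in
    # PRETRAINED_VOCAB_FILES_MAP can be downloaded; the sentence is ordinary Polish text.
    tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-base-cased")

    text = "Płock leży nad Wisłą."
    # BERT-style pre-tokenization first, then BPE sub-tokenization (see `_tokenize` above).
    print(tokenizer.tokenize(text))
    # __call__ additionally adds the <s> ... </s> special tokens and builds the attention mask.
    print(tokenizer(text)["input_ids"])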
2,898
34.353659
119
py
SLT-FAI
SLT-FAI-main/transformers/tokenization_reformer_fast.py
# coding=utf-8 # Copyright 2020 The Trax Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for model Reformer.""" import os from shutil import copyfile from typing import Optional, Tuple from .file_utils import is_sentencepiece_available from .tokenization_utils_fast import PreTrainedTokenizerFast from .utils import logging if is_sentencepiece_available(): from .tokenization_reformer import ReformerTokenizer else: ReformerTokenizer = None logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" #################################################### # Mapping from the keyword arguments names of Tokenizer `__init__` # to file names for serializing Tokenizer instances #################################################### VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} #################################################### # Mapping from the keyword arguments names of Tokenizer `__init__` # to pretrained vocabulary URL for all the model shortcut names. #################################################### PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "google/reformer-crime-and-punishment": "https://cdn.huggingface.co/google/reformer-crime-and-punishment/spiece.model" }, "tokenizer_file": { "google/reformer-crime-and-punishment": "https://cdn.huggingface.co/google/reformer-crime-and-punishment/tokenizer.json" }, } #################################################### # Mapping from model shortcut names to max length of inputs #################################################### PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/reformer-crime-and-punishment": 524288, } class ReformerTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" Reformer tokenizer (backed by HuggingFace's `tokenizers` library). Based on `SentencePiece <https://github.com/google/sentencepiece>`__ . This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that contains the vocabulary necessary to instantiate a tokenizer. eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`): The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the :obj:`sep_token`. unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`): The token used for padding, for example when batching sequences of different lengths. additional_special_tokens (:obj:`List[str]`, `optional`): Additional special tokens used by the tokenizer. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["attention_mask"] slow_tokenizer_class = ReformerTokenizer def __init__( self, vocab_file, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", additional_special_tokens=[], **kwargs ): super().__init__( vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, additional_special_tokens=additional_special_tokens, **kwargs, ) self.vocab_file = vocab_file def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
5,158
37.789474
128
py
SLT-FAI
SLT-FAI-main/transformers/modeling_t5.py
# coding=utf-8 # Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch T5 model. """ import copy import math import os import warnings import torch import torch.nn.functional as F from torch import nn from torch.nn import CrossEntropyLoss from .configuration_t5 import T5Config from .file_utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings, ) from .modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, Seq2SeqLMOutput, Seq2SeqModelOutput from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "T5Config" _TOKENIZER_FOR_DOC = "T5Tokenizer" #################################################### # This dict contrains shortcut names and associated url # for the pretrained weights provided with the models #################################################### T5_PRETRAINED_MODEL_ARCHIVE_LIST = [ "t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", # See all T5 models at https://huggingface.co/models?filter=t5 ] #################################################### # This is a conversion method from TF 1.0 to PyTorch # More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28 #################################################### def load_tf_weights_in_t5(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] tf_weights = {} for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) tf_weights[name] = array for txt_name in names: name = txt_name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info("Skipping {}".format("/".join(name))) tf_weights.pop(txt_name, None) continue if "_slot_" in name[-1]: logger.info("Skipping {}".format("/".join(name))) tf_weights.pop(txt_name, None) continue pointer = model array = tf_weights[txt_name] for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] in ["kernel", "scale", "embedding"]: pointer = getattr(pointer, "weight") # elif scope_names[0] == 'scale': # pointer = getattr(pointer, 'weight') # elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta': # pointer = getattr(pointer, 'bias') # elif scope_names[0] == 'squad': # pointer = getattr(pointer, 'classifier') else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info("Skipping {}".format("/".join(name))) continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if scope_names[0] not in ["kernel", "scale", "embedding"]: pointer = getattr(pointer, "weight") if scope_names[0] != "embedding": logger.info("Transposing numpy weight of shape {} for {}".format(array.shape, name)) array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array.astype(np.float32)) tf_weights.pop(txt_name, None) logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys()))) # logger.info("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys()))) return model #################################################### # PyTorch Models are constructed by sub-classing # - torch.nn.Module for the layers and # - PreTrainedModel for the models (it-self a sub-class of torch.nn.Module) #################################################### class T5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """Construct a layernorm module in the T5 style No bias and no substraction of mean. 
""" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, x): # layer norm should always be calculated in float32 variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True) x = x / torch.sqrt(variance + self.variance_epsilon) if self.weight.dtype == torch.float16: x = x.to(torch.float16) return self.weight * x class T5DenseReluDense(nn.Module): def __init__(self, config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): h = self.wi(hidden_states) h = F.relu(h) h = self.dropout(h) h = self.wo(h) return h class T5LayerFF(nn.Module): def __init__(self, config): super().__init__() self.DenseReluDense = T5DenseReluDense(config) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): norm_x = self.layer_norm(hidden_states) y = self.DenseReluDense(norm_x) layer_output = hidden_states + self.dropout(y) return layer_output class T5Attention(nn.Module): def __init__(self, config: T5Config, has_relative_attention_bias=False, is_bidirectional=False): super().__init__() self.is_bidirectional = is_bidirectional self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.d_model = config.d_model self.d_kv = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.d_kv # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.d_kv, self.pruned_heads) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.d_kv * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on. Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ ret = 0 n = -relative_position if bidirectional: num_buckets //= 2 ret += (n < 0).to(torch.long) * num_buckets # mtf.to_int32(mtf.less(n, 0)) * num_buckets n = torch.abs(n) else: n = torch.max(n, torch.zeros_like(n)) # now n is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = n < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret def compute_bias(self, qlen, klen): """ Compute binned relative position bias """ context_position = torch.arange(qlen, dtype=torch.long)[:, None] memory_position = torch.arange(klen, dtype=torch.long)[None, :] relative_position = memory_position - context_position # shape (qlen, klen) rp_bucket = self._relative_position_bucket( relative_position, # shape (qlen, klen) bidirectional=self.is_bidirectional, num_buckets=self.relative_attention_num_buckets, ) rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device) values = self.relative_attention_bias(rp_bucket) # shape (qlen, klen, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, qlen, klen) return values def forward( self, input, mask=None, kv=None, position_bias=None, past_key_value=None, head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if kv is None) or attention over source sentence (provided by kv). """ # Input is (bs, qlen, dim) # Mask is (bs, klen) (non-causal) or (bs, klen, klen) # past_key_value[0] is (bs, n_heads, q_len - 1, dim_per_head) bs, qlen, dim = input.size() if past_key_value is not None: assert self.is_decoder is True, "Encoder cannot cache past key value states" assert ( len(past_key_value) == 2 ), "past_key_value should have 2 past states: keys and values. 
Got {} past states".format( len(past_key_value) ) real_qlen = qlen + past_key_value[0].shape[2] if query_length is None else query_length else: real_qlen = qlen if kv is None: klen = real_qlen else: klen = kv.size(1) def shape(x): """ projection """ return x.view(bs, -1, self.n_heads, self.d_kv).transpose(1, 2) def unshape(x): """ compute context """ return x.transpose(1, 2).contiguous().view(bs, -1, self.inner_dim) q = shape(self.q(input)) # (bs, n_heads, qlen, dim_per_head) if kv is None: k = shape(self.k(input)) # (bs, n_heads, qlen, dim_per_head) v = shape(self.v(input)) # (bs, n_heads, qlen, dim_per_head) elif past_key_value is None: k = v = kv k = shape(self.k(k)) # (bs, n_heads, qlen, dim_per_head) v = shape(self.v(v)) # (bs, n_heads, qlen, dim_per_head) if past_key_value is not None: if kv is None: k_, v_ = past_key_value k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head) v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head) else: k, v = past_key_value if self.is_decoder and use_cache is True: present_key_value_state = ((k, v),) else: present_key_value_state = (None,) # (bs, n_heads, qlen, klen) scores = torch.matmul( q, k.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", q, k), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: raise ValueError("No position_bias provided and no weights to compute position_bias") position_bias = self.compute_bias(real_qlen, klen) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -qlen:, :] if mask is not None: position_bias = position_bias + mask # (bs, n_heads, qlen, klen) scores += position_bias weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen) weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen) # Mask heads if we want to if head_mask is not None: weights = weights * head_mask context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head) context = unshape(context) # (bs, qlen, dim) context = self.o(context) outputs = (context,) + present_key_value_state if output_attentions: outputs = outputs + (weights,) if self.has_relative_attention_bias: outputs = outputs + (position_bias,) return outputs class T5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = T5Attention( config, has_relative_attention_bias=has_relative_attention_bias, is_bidirectional=not config.is_decoder ) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): norm_x = self.layer_norm(hidden_states) attention_output = self.SelfAttention( norm_x, mask=attention_mask, position_bias=position_bias, head_mask=head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) y = attention_output[0] layer_output = hidden_states + self.dropout(y) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class T5LayerCrossAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.EncDecAttention = T5Attention( config, has_relative_attention_bias=has_relative_attention_bias, 
is_bidirectional=True ) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, kv, attention_mask=None, position_bias=None, head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): norm_x = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( norm_x, mask=attention_mask, kv=kv, position_bias=position_bias, head_mask=head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) y = attention_output[0] layer_output = hidden_states + self.dropout(y) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class T5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(T5LayerCrossAttention(config, has_relative_attention_bias=has_relative_attention_bias)) self.layer.append(T5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): if past_key_value is not None: assert self.is_decoder, "Only decoder can use `past_key_values`" expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 error_message = "There should be {} past states. 2 (past / key) for self attention.{} Got {} past key / value states".format( expected_num_past_key_values, "2 (past / key) for cross attention" if expected_num_past_key_values == 4 else "", len(past_key_value), ) assert len(past_key_value) == expected_num_past_key_values, error_message self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, head_mask=head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights if self.is_decoder and encoder_hidden_states is not None: # the actual query length is unknown for cross attention # if using past key value states. 
Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, kv=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, head_mask=head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) outputs = (hidden_states,) # Add attentions if we output them outputs = outputs + (present_key_value_state,) + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias) class T5PreTrainedModel(PreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = T5Config load_tf_weights = load_tf_weights_in_t5 base_model_prefix = "transformer" @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """ Initialize the weights """ factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, T5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (T5Model, T5ForConditionalGeneration)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, T5DenseReluDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, T5Attention): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model d_kv = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * d_kv) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * d_kv) ** -0.5)) if module.has_relative_attention_bias: 
module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert ( decoder_start_token_id is not None ), "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information" # shift inputs to the right shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values" return shifted_input_ids class T5Stack(T5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.block = nn.ModuleList( [T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) self.init_weights() def get_input_embeddings(self): return self.embed_tokens def get_output_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds") if inputs_embeds is None: assert self.embed_tokens is not None, "You have to intialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if use_cache is True: assert self.is_decoder, ":obj:`use_cache` can only be set to `True` if {} is used as a decoder".format( self ) if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: 
encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device) if self.is_decoder and encoder_attention_mask is not None: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, head_mask=head_mask[i], past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias) hidden_states, present_key_value_state = layer_outputs[:2] if i == 0: # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias) position_bias = layer_outputs[3 if output_attentions else 2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[5 if output_attentions else 3] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[2],) # We keep only self-attention weights for now hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, present_key_value_states, all_hidden_states, all_attentions] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, ) T5_START_DOCSTRING = r""" The T5 model was proposed in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer <https://arxiv.org/abs/1910.10683>`__ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text denoising generative setting. This model inherits from :class:`~transformers.PreTrainedModel`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ T5_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using :class:`~transformers.T5Tokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for detail. To know more on how to prepare :obj:`input_ids` for pretraining take a look a `T5 Training <./t5.html#training>`__. attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Provide for sequence to sequence training. T5 uses the :obj:`pad_token_id` as the starting token for :obj:`decoder_input_ids` generation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see :obj:`past_key_values`). To know more on how to prepare :obj:`decoder_input_ids` for pretraining take a look at `T5 Training <./t5.html#training>`__. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_input_ids` takes the value of :obj:`input_ids`. decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`): Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will also be used by default. encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`): Tuple consists of (:obj:`last_hidden_state`, :obj:`optional`: `hidden_states`, :obj:`optional`: `attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds` have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert :obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds` takes the value of :obj:`inputs_embeds`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ @add_start_docstrings( "The bare T5 Model transformer outputting raw hidden-states" "without any specific head on top.", T5_START_DOCSTRING, ) class T5Model(T5PreTrainedModel): def __init__(self, config: T5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack(decoder_config, self.shared) self.init_weights() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): """Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_callable(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, past_key_values=None, head_mask=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): r""" Returns: Example:: >>> from transformers import T5Tokenizer, T5Model >>> tokenizer = T5Tokenizer.from_pretrained('t5-small') >>> model = T5Model.from_pretrained('t5-small') >>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids, return_dict=True) >>> last_hidden_states = outputs.last_hidden_state """ if "decoder_past_key_value_states" in kwargs: warnings.warn( "The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) past_key_values = kwargs.pop("decoder_past_key_value_states") if "decoder_past_key_values" in kwargs: warnings.warn( "The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) past_key_values = kwargs.pop("decoder_past_key_values") assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}." 
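        # Run the encoder only if `encoder_outputs` was not passed in; its last hidden
        # state is then handed to the decoder stack as the source for cross-attention.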
use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings("""T5 Model with a `language modeling` head on top. """, T5_START_DOCSTRING) class T5ForConditionalGeneration(T5PreTrainedModel): authorized_missing_keys = [r"encoder\.embed_tokens\.weight", r"decoder\.embed_tokens\.weight", r"lm_head\.weight"] def __init__(self, config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) self.init_weights() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_callable(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, past_key_values=None, head_mask=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. 
Indices should be in :obj:`[-100, 0, ..., config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]`` kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`): Used to hide legacy arguments that have been deprecated. Returns: Examples:: >>> from transformers import T5Tokenizer, T5ForConditionalGeneration >>> tokenizer = T5Tokenizer.from_pretrained('t5-small') >>> model = T5ForConditionalGeneration.from_pretrained('t5-small', return_dict=True) >>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2> </s>', return_tensors='pt').input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> input_ids = tokenizer("summarize: studies have shown that owning a dog is good for you ", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model.generate(input_ids) """ if "lm_labels" in kwargs: warnings.warn( "The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.", FutureWarning, ) labels = kwargs.pop("lm_labels") if "decoder_past_key_value_states" in kwargs: warnings.warn( "The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) past_key_values = kwargs.pop("decoder_past_key_value_states") if "decoder_past_key_values" in kwargs: warnings.warn( "The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) past_key_values = kwargs.pop("decoder_past_key_values") assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}." use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # If decoding with past key value states, only the last tokens # should be given as an input if past_key_values is not None: assert labels is None, "Decoder should not use cached key value states when training." 
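            # When past key/value states are supplied, only the newest decoder token is
            # needed as input; all earlier positions are already cached in `past_key_values`.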
if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids[:, -1:] if decoder_inputs_embeds is not None: decoder_inputs_embeds = decoder_inputs_embeds[:, -1:] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim ** -0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, past, attention_mask, use_cache, encoder_outputs, **kwargs): # cut decoder_input_ids if past is used if past is not None: input_ids = input_ids[:, -1:] return { "decoder_input_ids": input_ids, "past_key_values": past, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "use_cache": use_cache, } def _reorder_cache(self, past, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past reordered_decoder_past = () for layer_past_states in past: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past
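# Illustrative sketch (not part of the upstream module): how `_shift_right` turns
# `labels` into `decoder_input_ids`. The token ids below are made up, and the example
# assumes the usual T5 setting where `decoder_start_token_id == pad_token_id == 0`.
#
#     import torch
#     labels  = torch.tensor([[37, 423, 1, -100, -100]])
#     shifted = labels.new_zeros(labels.shape)
#     shifted[..., 1:] = labels[..., :-1].clone()
#     shifted[..., 0]  = 0                       # decoder_start_token_id
#     shifted.masked_fill_(shifted == -100, 0)   # ignored label positions -> pad_token_id
#     # shifted -> tensor([[  0,  37, 423,   1,   0]])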
56,316
43.032056
213
py
SLT-FAI
SLT-FAI-main/transformers/modeling_tf_electra.py
import warnings from dataclasses import dataclass from typing import Optional, Tuple import tensorflow as tf from .activations_tf import get_tf_activation from .configuration_electra import ElectraConfig from .file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings, ) from .modeling_tf_outputs import ( TFBaseModelOutput, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from .modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSequenceSummary, TFTokenClassificationLoss, get_initializer, keras_serializable, shape_list, ) from .tokenization_utils import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "ElectraConfig" _TOKENIZER_FOR_DOC = "ElectraTokenizer" TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/electra-small-generator", "google/electra-base-generator", "google/electra-large-generator", "google/electra-small-discriminator", "google/electra-base-discriminator", "google/electra-large-discriminator", # See all ELECTRA models at https://huggingface.co/models?filter=electra ] # Copied from transformers.modeling_tf_bert.TFBertSelfAttention class TFElectraSelfAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_attention_heads = config.num_attention_heads assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x, batch_size): x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = tf.matmul( query_layer, key_layer, transpose_b=True ) # (batch size, num_heads, seq_len_q, seq_len_k) dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFBertModel call() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = tf.nn.softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape( context_layer, (batch_size, -1, self.all_head_size) ) # (batch_size, seq_len_q, all_head_size) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.modeling_tf_bert.TFBertSelfOutput class TFElectraSelfOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from from transformers.modeling_tf_bert.TFBertAttention with Bert->Electra class TFElectraAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.self_attention = TFElectraSelfAttention(config, name="self") self.dense_output = TFElectraSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False): self_outputs = self.self_attention( input_tensor, attention_mask, head_mask, output_attentions, training=training ) attention_output = self.dense_output(self_outputs[0], input_tensor, training=training) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.modeling_tf_bert.TFBertIntermediate class TFElectraIntermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.modeling_tf_bert.TFBertOutput class TFElectraOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, 
kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.modeling_tf_bert.TFBertLayer with Bert->Electra class TFElectraLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.attention = TFElectraAttention(config, name="attention") self.intermediate = TFElectraIntermediate(config, name="intermediate") self.bert_output = TFElectraOutput(config, name="output") def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions, training=training ) attention_output = attention_outputs[0] intermediate_output = self.intermediate(attention_output) layer_output = self.bert_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.modeling_tf_bert.TFBertEncoder with Bert->Electra class TFElectraEncoder(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.layer = [TFElectraLayer(config, name="layer_._{}".format(i)) for i in range(config.num_hidden_layers)] def call( self, hidden_states, attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], output_attentions, training=training ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) # Copied from transformers.modeling_tf_bert.TFBertPooler class TFElectraPooler(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) def call(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) return pooled_output class TFElectraEmbeddings(tf.keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.embedding_size = config.embedding_size self.initializer_range = config.initializer_range self.position_embeddings = tf.keras.layers.Embedding( config.max_position_embeddings, config.embedding_size, embeddings_initializer=get_initializer(self.initializer_range), name="position_embeddings", ) self.token_type_embeddings = tf.keras.layers.Embedding( config.type_vocab_size, config.embedding_size, embeddings_initializer=get_initializer(self.initializer_range), name="token_type_embeddings", ) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def build(self, input_shape): """Build shared word embedding layer """ with tf.name_scope("word_embeddings"): # Create and initialize weights. The random normal initializer was chosen # arbitrarily, and works well. self.word_embeddings = self.add_weight( "weight", shape=[self.vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range), ) super().build(input_shape) # Copied from transformers.modeling_tf_bert.TFBertEmbeddings.call def call( self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, mode="embedding", training=False, ): """Get token embeddings of inputs. Args: inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids) mode: string, a valid value is one of "embedding" and "linear". Returns: outputs: (1) If mode == "embedding", output embedding tensor, float32 with shape [batch_size, length, embedding_size]; (2) mode == "linear", output linear tensor, float32 with shape [batch_size, length, vocab_size]. Raises: ValueError: if mode is not valid. 
Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ if mode == "embedding": return self._embedding(input_ids, position_ids, token_type_ids, inputs_embeds, training=training) elif mode == "linear": return self._linear(input_ids) else: raise ValueError("mode {} is not valid.".format(mode)) # Copied from transformers.modeling_tf_bert.TFBertEmbeddings._embedding def _embedding(self, input_ids, position_ids, token_type_ids, inputs_embeds, training=False): """Applies embedding based on inputs tensor.""" assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: input_shape = shape_list(input_ids) else: input_shape = shape_list(inputs_embeds)[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :] if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) if inputs_embeds is None: inputs_embeds = tf.gather(self.word_embeddings, input_ids) position_embeddings = tf.cast(self.position_embeddings(position_ids), inputs_embeds.dtype) token_type_embeddings = tf.cast(self.token_type_embeddings(token_type_ids), inputs_embeds.dtype) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings, training=training) return embeddings def _linear(self, inputs): """Computes logits by running inputs through a linear layer. Args: inputs: A float32 tensor with shape [batch_size, length, hidden_size] Returns: float32 tensor with shape [batch_size, length, vocab_size]. """ batch_size = shape_list(inputs)[0] length = shape_list(inputs)[1] x = tf.reshape(inputs, [-1, self.embedding_size]) logits = tf.matmul(x, self.word_embeddings, transpose_b=True) return tf.reshape(logits, [batch_size, length, self.vocab_size]) class TFElectraDiscriminatorPredictions(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense(config.hidden_size, name="dense") self.dense_prediction = tf.keras.layers.Dense(1, name="dense_prediction") self.config = config def call(self, discriminator_hidden_states, training=False): hidden_states = self.dense(discriminator_hidden_states) hidden_states = get_tf_activation(self.config.hidden_act)(hidden_states) logits = tf.squeeze(self.dense_prediction(hidden_states)) return logits class TFElectraGeneratorPredictions(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dense = tf.keras.layers.Dense(config.embedding_size, name="dense") def call(self, generator_hidden_states, training=False): hidden_states = self.dense(generator_hidden_states) hidden_states = get_tf_activation("gelu")(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class TFElectraPreTrainedModel(TFPreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = ElectraConfig base_model_prefix = "electra" @keras_serializable class TFElectraMainLayer(tf.keras.layers.Layer): config_class = ElectraConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.embeddings = TFElectraEmbeddings(config, name="embeddings") if config.embedding_size != config.hidden_size: self.embeddings_project = tf.keras.layers.Dense(config.hidden_size, name="embeddings_project") self.encoder = TFElectraEncoder(config, name="encoder") self.config = config def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value self.embeddings.vocab_size = value.shape[0] def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError def _prune_heads(self, heads_to_prune): """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError def get_extended_attention_mask(self, attention_mask, input_shape, dtype): if attention_mask is None: attention_mask = tf.fill(input_shape, 1) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def get_head_mask(self, head_mask): if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers return head_mask def call( self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds output_attentions = inputs[6] if len(inputs) > 6 else output_attentions output_hidden_states = inputs[7] if len(inputs) > 7 else output_hidden_states return_dict = inputs[8] if len(inputs) > 8 else return_dict assert len(inputs) <= 9, "Too many inputs." 
elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") attention_mask = inputs.get("attention_mask", attention_mask) token_type_ids = inputs.get("token_type_ids", token_type_ids) position_ids = inputs.get("position_ids", position_ids) head_mask = inputs.get("head_mask", head_mask) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) return_dict = inputs.get("return_dict", return_dict) assert len(inputs) <= 9, "Too many inputs." else: input_ids = inputs output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) hidden_states = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, hidden_states.dtype) head_mask = self.get_head_mask(head_mask) if hasattr(self, "embeddings_project"): hidden_states = self.embeddings_project(hidden_states, training=training) hidden_states = self.encoder( hidden_states, extended_attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=training, ) return hidden_states @dataclass class TFElectraForPreTrainingOutput(ModelOutput): """ Output type of :class:`~transformers.TFElectraForPreTrainingModel`. Args: loss (`optional`, returned when ``labels`` is provided, ``tf.Tensor`` of shape :obj:`(1,)`): Total loss of the ELECTRA objective. logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`): Prediction scores of the head (scores for each token before SoftMax). hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None ELECTRA_START_DOCSTRING = r""" This model inherits from :class:`~transformers.TFPreTrainedModel`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. note:: TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all the tensors in the first argument of the model call function: :obj:`model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: :obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Parameters: config (:class:`~transformers.ElectraConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ ELECTRA_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.ElectraTokenizer`. See :func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`__ head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. training (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to " "the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the " "hidden size and embedding size are different." "" "Both the generator and discriminator checkpoints may be loaded into this model.", ELECTRA_START_DOCSTRING, ) class TFElectraModel(TFElectraPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.electra = TFElectraMainLayer(config, name="electra") @add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/electra-small-discriminator", output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def call(self, inputs, **kwargs): outputs = self.electra(inputs, **kwargs) return outputs @add_start_docstrings( """Electra model with a binary classification head on top as used during pre-training for identifying generated tokens. Even though both the discriminator and generator may be loaded into this model, the discriminator is the only model of the two to have the correct classification head to be used for this model.""", ELECTRA_START_DOCSTRING, ) class TFElectraForPreTraining(TFElectraPreTrainedModel): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.electra = TFElectraMainLayer(config, name="electra") self.discriminator_predictions = TFElectraDiscriminatorPredictions(config, name="discriminator_predictions") @add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call( self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs, ): r""" Returns: Examples:: >>> import tensorflow as tf >>> from transformers import ElectraTokenizer, TFElectraForPreTraining >>> tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') >>> model = TFElectraForPreTraining.from_pretrained('google/electra-small-discriminator') >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 >>> outputs = model(input_ids) >>> scores = outputs[0] """ return_dict = return_dict if return_dict is not None else self.electra.config.return_dict if inputs is None and "input_ids" in kwargs and isinstance(kwargs["input_ids"], (dict, BatchEncoding)): warnings.warn( "Using `input_ids` as a dictionary keyword argument is deprecated. Please use `inputs` instead." 
) inputs = kwargs["input_ids"] discriminator_hidden_states = self.electra( inputs, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) discriminator_sequence_output = discriminator_hidden_states[0] logits = self.discriminator_predictions(discriminator_sequence_output) if not return_dict: return (logits,) + discriminator_hidden_states[1:] return TFElectraForPreTrainingOutput( logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions, ) class TFElectraMaskedLMHead(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.input_embeddings = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def call(self, hidden_states, training=False): hidden_states = self.input_embeddings(hidden_states, mode="linear") hidden_states = hidden_states + self.bias return hidden_states @add_start_docstrings( """Electra model with a language modeling head on top. Even though both the discriminator and generator may be loaded into this model, the generator is the only model of the two to have been trained for the masked language modeling task.""", ELECTRA_START_DOCSTRING, ) class TFElectraForMaskedLM(TFElectraPreTrainedModel, TFMaskedLanguageModelingLoss): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.vocab_size = config.vocab_size self.electra = TFElectraMainLayer(config, name="electra") self.generator_predictions = TFElectraGeneratorPredictions(config, name="generator_predictions") if isinstance(config.hidden_act, str): self.activation = get_tf_activation(config.hidden_act) else: self.activation = config.hidden_act self.generator_lm_head = TFElectraMaskedLMHead(config, self.electra.embeddings, name="generator_lm_head") def get_output_embeddings(self): return self.generator_lm_head @add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/electra-small-generator", output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` """ return_dict = return_dict if return_dict is not None else self.electra.config.return_dict if inputs is None and "input_ids" in kwargs and isinstance(kwargs["input_ids"], (dict, BatchEncoding)): warnings.warn( "Using `input_ids` as a dictionary keyword argument is deprecated. Please use `inputs` instead." 
) inputs = kwargs["input_ids"] if isinstance(inputs, (tuple, list)): labels = inputs[9] if len(inputs) > 9 else labels if len(inputs) > 9: inputs = inputs[:9] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) generator_hidden_states = self.electra( inputs, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) generator_sequence_output = generator_hidden_states[0] prediction_scores = self.generator_predictions(generator_sequence_output, training=training) prediction_scores = self.generator_lm_head(prediction_scores, training=training) loss = None if labels is None else self.compute_loss(labels, prediction_scores) if not return_dict: output = (prediction_scores,) + generator_hidden_states[1:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=generator_hidden_states.hidden_states, attentions=generator_hidden_states.attentions, ) class TFElectraClassificationHead(tf.keras.layers.Layer): """Head for sentence-level classification tasks.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) self.out_proj = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj" ) def call(self, inputs, **kwargs): x = inputs[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = get_tf_activation("gelu")(x) # although BERT uses tanh here, it seems Electra authors used gelu here x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ELECTRA_START_DOCSTRING, ) class TFElectraForSequenceClassification(TFElectraPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.electra = TFElectraMainLayer(config, name="electra") self.classifier = TFElectraClassificationHead(config, name="classifier") @add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/electra-small-discriminator", output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.electra.config.return_dict if inputs is None and "input_ids" in kwargs and isinstance(kwargs["input_ids"], (dict, BatchEncoding)): warnings.warn( "Using `input_ids` as a dictionary keyword argument is deprecated. Please use `inputs` instead." ) inputs = kwargs["input_ids"] if isinstance(inputs, (tuple, list)): labels = inputs[9] if len(inputs) > 9 else labels if len(inputs) > 9: inputs = inputs[:9] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) outputs = self.electra( inputs, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) logits = self.classifier(outputs[0]) loss = None if labels is None else self.compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, ELECTRA_START_DOCSTRING, ) class TFElectraForMultipleChoice(TFElectraPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.electra = TFElectraMainLayer(config, name="electra") self.sequence_summary = TFSequenceSummary( config, initializer_range=config.initializer_range, name="sequence_summary" ) self.classifier = tf.keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @property def dummy_inputs(self): """Dummy inputs to build the network. Returns: tf.Tensor with dummy inputs """ return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)} @add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/electra-small-discriminator", output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See :obj:`input_ids` above) """ if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds output_attentions = inputs[6] if len(inputs) > 6 else output_attentions output_hidden_states = inputs[7] if len(inputs) > 7 else output_hidden_states return_dict = inputs[8] if len(inputs) > 8 else return_dict labels = inputs[9] if len(inputs) > 9 else labels assert len(inputs) <= 10, "Too many inputs." 
elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") attention_mask = inputs.get("attention_mask", attention_mask) token_type_ids = inputs.get("token_type_ids", token_type_ids) position_ids = inputs.get("position_ids", position_ids) head_mask = inputs.get("head_mask", head_mask) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) return_dict = inputs.get("return_dict", return_dict) labels = inputs.get("labels", labels) assert len(inputs) <= 10, "Too many inputs." else: input_ids = inputs return_dict = return_dict if return_dict is not None else self.electra.config.return_dict if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None flat_inputs_embeds = ( tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) outputs = self.electra( flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, flat_inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) logits = self.sequence_summary(outputs[0]) logits = self.classifier(logits) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """Electra model with a token classification head on top. Both the discriminator and generator may be loaded into this model.""", ELECTRA_START_DOCSTRING, ) class TFElectraForTokenClassification(TFElectraPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.electra = TFElectraMainLayer(config, name="electra") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/electra-small-discriminator", output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. 
""" return_dict = return_dict if return_dict is not None else self.electra.config.return_dict if isinstance(inputs, (tuple, list)): labels = inputs[9] if len(inputs) > 9 else labels if len(inputs) > 9: inputs = inputs[:9] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) discriminator_hidden_states = self.electra( inputs, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) discriminator_sequence_output = discriminator_hidden_states[0] discriminator_sequence_output = self.dropout(discriminator_sequence_output) logits = self.classifier(discriminator_sequence_output) loss = None if labels is None else self.compute_loss(labels, logits) if not return_dict: output = (logits,) + discriminator_hidden_states[1:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions, ) @add_start_docstrings( """Electra Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, ELECTRA_START_DOCSTRING, ) class TFElectraForQuestionAnswering(TFElectraPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.electra = TFElectraMainLayer(config, name="electra") self.qa_outputs = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) @add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/electra-small-discriminator", output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, start_positions=None, end_positions=None, training=False, ): r""" start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.electra.config.return_dict if isinstance(inputs, (tuple, list)): start_positions = inputs[9] if len(inputs) > 9 else start_positions end_positions = inputs[10] if len(inputs) > 10 else end_positions if len(inputs) > 9: inputs = inputs[:9] elif isinstance(inputs, (dict, BatchEncoding)): start_positions = inputs.pop("start_positions", start_positions) end_positions = inputs.pop("end_positions", start_positions) discriminator_hidden_states = self.electra( inputs, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) discriminator_sequence_output = discriminator_hidden_states[0] logits = self.qa_outputs(discriminator_sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = ( start_logits, end_logits, ) + discriminator_hidden_states[1:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions, )
56,073
40.752792
160
py
SLT-FAI
SLT-FAI-main/transformers/modeling_tf_xlnet.py
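# Hedged usage sketch for the TF 2.0 XLNet implementation in the file below
# (illustration only). It assumes the public `transformers` package exposes the
# same classes and that the "xlnet-base-cased" checkpoint is available; note that
# with the released config the `mems` cache may be disabled (mem_len unset).
if __name__ == "__main__":
    from transformers import XLNetTokenizer, TFXLNetModel

    tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    model = TFXLNetModel.from_pretrained("xlnet-base-cased")

    encoded = tokenizer("XLNet models the sequence with permutation language modeling.", return_tensors="tf")
    outputs = model(encoded, return_dict=True)
    # last_hidden_state has shape (batch_size, sequence_length, d_model)
    print(outputs.last_hidden_state.shape)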
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 XLNet model. """ from dataclasses import dataclass from typing import List, Optional, Tuple import tensorflow as tf from .activations_tf import get_tf_activation from .configuration_xlnet import XLNetConfig from .file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings, ) from .modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSequenceSummary, TFSharedEmbeddings, TFTokenClassificationLoss, get_initializer, keras_serializable, shape_list, ) from .tokenization_utils import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "XLNetConfig" _TOKENIZER_FOR_DOC = "XLNetTokenizer" TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ "xlnet-base-cased", "xlnet-large-cased", # See all XLNet models at https://huggingface.co/models?filter=xlnet ] class TFXLNetRelativeAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.d_model % config.n_head != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.d_model, config.n_head) ) self.n_head = config.n_head self.d_head = config.d_head self.d_model = config.d_model self.scale = 1 / (config.d_head ** 0.5) self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.dropout = tf.keras.layers.Dropout(config.dropout) def build(self, input_shape): initializer = get_initializer(self.initializer_range) self.q = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="q" ) self.k = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="k" ) self.v = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="v" ) self.o = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="o" ) self.r = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="r" ) self.r_r_bias = self.add_weight( shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias" ) self.r_s_bias = self.add_weight( shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_s_bias" ) self.r_w_bias = self.add_weight( shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias" ) self.seg_embed = self.add_weight( shape=(2, 
self.n_head, self.d_head), initializer=initializer, trainable=True, name="seg_embed" ) super().build(input_shape) def prune_heads(self, heads): raise NotImplementedError def rel_shift(self, x, klen=-1): """perform relative shift to form the relative attention score.""" x_size = shape_list(x) x = tf.reshape(x, (x_size[1], x_size[0], x_size[2], x_size[3])) x = x[1:, ...] x = tf.reshape(x, (x_size[0], x_size[1] - 1, x_size[2], x_size[3])) x = x[:, 0:klen, :, :] # x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long)) return x def rel_attn_core( self, q_head, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask, head_mask, output_attentions, training=False ): """Core relative positional attention operations.""" # content based attention score ac = tf.einsum("ibnd,jbnd->ijbn", q_head + self.r_w_bias, k_head_h) # position based attention score bd = tf.einsum("ibnd,jbnd->ijbn", q_head + self.r_r_bias, k_head_r) bd = self.rel_shift(bd, klen=shape_list(ac)[1]) # segment based attention score if seg_mat is None: ef = 0 else: ef = tf.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed) ef = tf.einsum("ijbs,ibns->ijbn", seg_mat, ef) # merge attention scores and perform masking attn_score = (ac + bd + ef) * self.scale if attn_mask is not None: # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask if attn_mask.dtype == tf.float16: attn_score = attn_score - 65500 * attn_mask else: attn_score = attn_score - 1e30 * attn_mask # attention probability attn_prob = tf.nn.softmax(attn_score, axis=1) attn_prob = self.dropout(attn_prob, training=training) # Mask heads if we want to if head_mask is not None: attn_prob = attn_prob * head_mask # attention output attn_vec = tf.einsum("ijbn,jbnd->ibnd", attn_prob, v_head_h) if output_attentions: return attn_vec, attn_prob return attn_vec def post_attention(self, h, attn_vec, residual=True, training=False): """Post-attention processing.""" # post-attention projection (back to `d_model`) attn_out = tf.einsum("ibnd,hnd->ibh", attn_vec, self.o) attn_out = self.dropout(attn_out, training=training) if residual: attn_out = attn_out + h output = self.layer_norm(attn_out) return output def call( self, h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems, target_mapping, head_mask, output_attentions, training=False, ): if g is not None: # Two-stream attention with relative positional encoding. 
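            # Both streams share the same projection weights: the content stream (h) may
            # attend to the token it represents, while the query stream (g) only sees the
            # surrounding context, which is what lets XLNet score a target position without
            # leaking its identity during permutation language modeling.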
# content based attention score if mems is not None and len(shape_list(mems)) > 1: cat = tf.concat([mems, h], axis=0) else: cat = h # content-based key head k_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.k) # content-based value head v_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.v) # position-based key head k_head_r = tf.einsum("ibh,hnd->ibnd", r, self.r) # h-stream # content-stream query head q_head_h = tf.einsum("ibh,hnd->ibnd", h, self.q) # core attention ops attn_vec_h = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_h, head_mask, output_attentions, training=training, ) if output_attentions: attn_vec_h, attn_prob_h = attn_vec_h # post processing output_h = self.post_attention(h, attn_vec_h, training=training) # g-stream # query-stream query head q_head_g = tf.einsum("ibh,hnd->ibnd", g, self.q) # core attention ops if target_mapping is not None: q_head_g = tf.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping) attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_g, head_mask, output_attentions, training=training, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g attn_vec_g = tf.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping) else: attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_g, head_mask, output_attentions, training=training, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g # post processing output_g = self.post_attention(g, attn_vec_g, training=training) if output_attentions: attn_prob = attn_prob_h, attn_prob_g else: # Multi-head attention with relative positional encoding if mems is not None and len(shape_list(mems)) > 1: cat = tf.concat([mems, h], axis=0) else: cat = h # content heads q_head_h = tf.einsum("ibh,hnd->ibnd", h, self.q) k_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.k) v_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.v) # positional heads k_head_r = tf.einsum("ibh,hnd->ibnd", r, self.r) # core attention ops attn_vec = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_h, head_mask, output_attentions, training=training, ) if output_attentions: attn_vec, attn_prob = attn_vec # post processing output_h = self.post_attention(h, attn_vec, training=training) output_g = None outputs = (output_h, output_g) if output_attentions: outputs = outputs + (attn_prob,) return outputs class TFXLNetFeedForward(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.layer_1 = tf.keras.layers.Dense( config.d_inner, kernel_initializer=get_initializer(config.initializer_range), name="layer_1" ) self.layer_2 = tf.keras.layers.Dense( config.d_model, kernel_initializer=get_initializer(config.initializer_range), name="layer_2" ) self.dropout = tf.keras.layers.Dropout(config.dropout) if isinstance(config.ff_activation, str): self.activation_function = get_tf_activation(config.ff_activation) else: self.activation_function = config.ff_activation def call(self, inp, training=False): output = inp output = self.layer_1(output) output = self.activation_function(output) output = self.dropout(output, training=training) output = self.layer_2(output) output = self.dropout(output, training=training) output = self.layer_norm(output + inp) return output class TFXLNetLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.rel_attn = 
TFXLNetRelativeAttention(config, name="rel_attn") self.ff = TFXLNetFeedForward(config, name="ff") self.dropout = tf.keras.layers.Dropout(config.dropout) def call( self, output_h, output_g, non_tgt_mask, attn_mask, pos_emb, seg_mat, mems, target_mapping, head_mask, output_attentions, training=False, ): outputs = self.rel_attn( output_h, output_g, non_tgt_mask, attn_mask, pos_emb, seg_mat, mems, target_mapping, head_mask, output_attentions, training=training, ) output_h, output_g = outputs[:2] if output_g is not None: output_g = self.ff(output_g, training=training) output_h = self.ff(output_h, training=training) outputs = (output_h, output_g) + outputs[2:] # Add again attentions if there are there return outputs class TFXLNetLMHead(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.input_embeddings = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def call(self, hidden_states): hidden_states = self.input_embeddings(hidden_states, mode="linear") hidden_states = hidden_states + self.bias return hidden_states @keras_serializable class TFXLNetMainLayer(tf.keras.layers.Layer): config_class = XLNetConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.return_dict = config.return_dict self.mem_len = config.mem_len self.reuse_len = config.reuse_len self.d_model = config.d_model self.same_length = config.same_length self.attn_type = config.attn_type self.bi_data = config.bi_data self.clamp_len = config.clamp_len self.n_layer = config.n_layer self.use_bfloat16 = config.use_bfloat16 self.initializer_range = config.initializer_range self.word_embedding = TFSharedEmbeddings( config.vocab_size, config.d_model, initializer_range=config.initializer_range, name="word_embedding" ) self.layer = [TFXLNetLayer(config, name="layer_._{}".format(i)) for i in range(config.n_layer)] self.dropout = tf.keras.layers.Dropout(config.dropout) def get_input_embeddings(self): return self.word_embedding def set_input_embeddings(self, value): self.word_embedding.weight = value self.word_embedding.vocab_size = value.shape[0] def build(self, input_shape): initializer = get_initializer(self.initializer_range) self.mask_emb = self.add_weight( shape=(1, 1, self.d_model), initializer=initializer, trainable=True, name="mask_emb" ) def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError def _prune_heads(self, heads_to_prune): raise NotImplementedError def create_mask(self, qlen, mlen, dtype=tf.float32): """ Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked. 
Args: qlen: TODO Lysandre didn't fill mlen: TODO Lysandre didn't fill :: same_length=False: same_length=True: <mlen > < qlen > <mlen > < qlen > ^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1] qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1] [0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1] v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0] """ attn_mask = tf.ones([qlen, qlen], dtype=dtype) mask_u = tf.matrix_band_part(attn_mask, 0, -1) mask_dia = tf.matrix_band_part(attn_mask, 0, 0) attn_mask_pad = tf.zeros([qlen, mlen], dtype=dtype) ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1) if self.same_length: mask_l = tf.matrix_band_part(attn_mask, -1, 0) ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1) return ret def cache_mem(self, curr_out, prev_mem): """cache hidden states into memory.""" if self.reuse_len is not None and self.reuse_len > 0: curr_out = curr_out[: self.reuse_len] if prev_mem is None: new_mem = curr_out[-self.mem_len :] else: new_mem = tf.concat([prev_mem, curr_out], 0)[-self.mem_len :] return tf.stop_gradient(new_mem) @staticmethod def positional_embedding(pos_seq, inv_freq, bsz=None): sinusoid_inp = tf.einsum("i,d->id", pos_seq, inv_freq) pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], axis=-1) pos_emb = pos_emb[:, None, :] if bsz is not None: pos_emb = tf.tile(pos_emb, [1, bsz, 1]) return pos_emb def relative_positional_encoding(self, qlen, klen, bsz=None, dtype=None): """create relative positional encoding.""" freq_seq = tf.range(0, self.d_model, 2.0) if dtype is not None and dtype != tf.float32: freq_seq = tf.cast(freq_seq, dtype=dtype) inv_freq = 1 / (10000 ** (freq_seq / self.d_model)) if self.attn_type == "bi": # beg, end = klen - 1, -qlen beg, end = klen, -qlen elif self.attn_type == "uni": # beg, end = klen - 1, -1 beg, end = klen, -1 else: raise ValueError("Unknown `attn_type` {}.".format(self.attn_type)) if self.bi_data: fwd_pos_seq = tf.range(beg, end, -1.0) bwd_pos_seq = tf.range(-beg, -end, 1.0) if dtype is not None and dtype != tf.float32: fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype) bwd_pos_seq = tf.cast(bwd_pos_seq, dtype=dtype) if self.clamp_len > 0: fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -self.clamp_len, self.clamp_len) bwd_pos_seq = tf.clip_by_value(bwd_pos_seq, -self.clamp_len, self.clamp_len) if bsz is not None: assert bsz % 2 == 0, f"With bi_data, the batch size {bsz} should be divisible by 2" fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2) else: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq) pos_emb = tf.concat([fwd_pos_emb, bwd_pos_emb], axis=1) else: fwd_pos_seq = tf.range(beg, end, -1.0) if dtype is not None and dtype != tf.float32: fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype) if self.clamp_len > 0: fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -self.clamp_len, self.clamp_len) pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz) return pos_emb def call( self, inputs, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, use_cache=True, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask mems = inputs[2] if len(inputs) > 2 else mems perm_mask = inputs[3] 
if len(inputs) > 3 else perm_mask target_mapping = inputs[4] if len(inputs) > 4 else target_mapping token_type_ids = inputs[5] if len(inputs) > 5 else token_type_ids input_mask = inputs[6] if len(inputs) > 6 else input_mask head_mask = inputs[7] if len(inputs) > 7 else head_mask inputs_embeds = inputs[8] if len(inputs) > 8 else inputs_embeds use_cache = inputs[9] if len(inputs) > 9 else use_cache output_attentions = inputs[10] if len(inputs) > 10 else output_attentions output_hidden_states = inputs[11] if len(inputs) > 11 else output_hidden_states return_dict = inputs[12] if len(inputs) > 12 else return_dict assert len(inputs) <= 13, "Too many inputs." elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") attention_mask = inputs.get("attention_mask", attention_mask) mems = inputs.get("mems", mems) perm_mask = inputs.get("perm_mask", perm_mask) target_mapping = inputs.get("target_mapping", target_mapping) token_type_ids = inputs.get("token_type_ids", token_type_ids) input_mask = inputs.get("input_mask", input_mask) head_mask = inputs.get("head_mask", head_mask) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) use_cache = inputs.get("use_cache", use_cache) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) return_dict = inputs.get("return_dict", return_dict) assert len(inputs) <= 13, "Too many inputs." else: input_ids = inputs output_attentions = output_attentions if output_attentions is not None else self.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states return_dict = return_dict if return_dict is not None else self.return_dict # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end # but we want a unified interface in the library with the batch size on the first dimension # so we move here the first dimension (batch) to the end if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_ids = tf.transpose(input_ids, perm=(1, 0)) qlen, bsz = shape_list(input_ids)[:2] elif inputs_embeds is not None: inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2)) qlen, bsz = shape_list(inputs_embeds)[:2] else: raise ValueError("You have to specify either input_ids or inputs_embeds") token_type_ids = tf.transpose(token_type_ids, perm=(1, 0)) if token_type_ids is not None else None input_mask = tf.transpose(input_mask, perm=(1, 0)) if input_mask is not None else None attention_mask = tf.transpose(attention_mask, perm=(1, 0)) if attention_mask is not None else None perm_mask = tf.transpose(perm_mask, perm=(1, 2, 0)) if perm_mask is not None else None target_mapping = tf.transpose(target_mapping, perm=(1, 2, 0)) if target_mapping is not None else None mlen = shape_list(mems[0])[0] if mems is not None and mems[0] is not None else 0 klen = mlen + qlen dtype_float = tf.bfloat16 if self.use_bfloat16 else tf.float32 # Attention mask # causal attention mask if self.attn_type == "uni": attn_mask = self.create_mask(qlen, mlen) attn_mask = attn_mask[:, :, None, None] elif self.attn_type == "bi": attn_mask = None else: raise ValueError("Unsupported attention type: {}".format(self.attn_type)) # data mask: input mask & perm mask assert input_mask is None or attention_mask is None, ( "You can only use one of input_mask (uses 1 for padding) " "or 
attention_mask (uses 0 for padding, added for compatbility with BERT). Please choose one." ) if input_mask is None and attention_mask is not None: input_mask = 1.0 - tf.cast(attention_mask, dtype=dtype_float) if input_mask is not None and perm_mask is not None: data_mask = input_mask[None] + perm_mask elif input_mask is not None and perm_mask is None: data_mask = input_mask[None] elif input_mask is None and perm_mask is not None: data_mask = perm_mask else: data_mask = None if data_mask is not None: # all mems can be attended to if mlen > 0: mems_mask = tf.zeros([shape_list(data_mask)[0], mlen, bsz], dtype=dtype_float) data_mask = tf.concat([mems_mask, data_mask], axis=1) if attn_mask is None: attn_mask = data_mask[:, :, :, None] else: attn_mask += data_mask[:, :, :, None] if attn_mask is not None: attn_mask = tf.cast(attn_mask > 0, dtype=dtype_float) if attn_mask is not None: non_tgt_mask = -tf.eye(qlen, dtype=dtype_float) if mlen > 0: non_tgt_mask = tf.concat([tf.zeros([qlen, mlen], dtype=dtype_float), non_tgt_mask], axis=-1) non_tgt_mask = tf.cast((attn_mask + non_tgt_mask[:, :, None, None]) > 0, dtype=dtype_float) else: non_tgt_mask = None # Word embeddings and prepare h & g hidden states if inputs_embeds is not None: word_emb_k = inputs_embeds else: word_emb_k = self.word_embedding(input_ids) output_h = self.dropout(word_emb_k, training=training) if target_mapping is not None: word_emb_q = tf.tile(self.mask_emb, [shape_list(target_mapping)[0], bsz, 1]) # else: # We removed the inp_q input which was same as target mapping # inp_q_ext = inp_q[:, :, None] # word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k output_g = self.dropout(word_emb_q, training=training) else: output_g = None # Segment embedding if token_type_ids is not None: # Convert `token_type_ids` to one-hot `seg_mat` if mlen > 0: mem_pad = tf.zeros([mlen, bsz], dtype=tf.int32) cat_ids = tf.concat([mem_pad, token_type_ids], 0) else: cat_ids = token_type_ids # `1` indicates not in the same segment [qlen x klen x bsz] seg_mat = tf.cast(tf.logical_not(tf.equal(token_type_ids[:, None], cat_ids[None, :])), tf.int32) seg_mat = tf.one_hot(seg_mat, 2, dtype=dtype_float) else: seg_mat = None # Positional encoding pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz, dtype=dtype_float) pos_emb = self.dropout(pos_emb, training=training) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer) # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.n_layer new_mems = () if mems is None: mems = [None] * len(self.layer) attentions = [] if output_attentions else None hidden_states = [] if output_hidden_states else None for i, layer_module in enumerate(self.layer): # cache new mems if self.mem_len is not None and self.mem_len > 0 and use_cache: new_mems = new_mems + (self.cache_mem(output_h, mems[i]),) if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) outputs = layer_module( output_h, output_g, non_tgt_mask, attn_mask, pos_emb, seg_mat, mems[i], target_mapping, head_mask[i], output_attentions, training=training, ) output_h, output_g = outputs[:2] if output_attentions: attentions.append(outputs[2]) # Add last hidden state if output_hidden_states: 
hidden_states.append((output_h, output_g) if output_g is not None else output_h) output = self.dropout(output_g if output_g is not None else output_h, training=training) # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method) output = tf.transpose(output, perm=(1, 0, 2)) if not (self.mem_len is not None and self.mem_len > 0 and use_cache): new_mems = None if output_hidden_states: if output_g is not None: hidden_states = tuple(tf.transpose(h, perm=(1, 0, 2)) for hs in hidden_states for h in hs) else: hidden_states = tuple(tf.transpose(hs, perm=(1, 0, 2)) for hs in hidden_states) if output_attentions: attentions = tuple(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions) if not return_dict: return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None) return TFXLNetModelOutput( last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions ) class TFXLNetPreTrainedModel(TFPreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = XLNetConfig base_model_prefix = "transformer" @dataclass class TFXLNetModelOutput(ModelOutput): """ Output type of :class:`~transformers.TFXLNetModel`. Args: last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last layer of the model. ``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then ``num_predict`` corresponds to ``sequence_length``. mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: tf.Tensor = None mems: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None @dataclass class TFXLNetLMHeadModelOutput(ModelOutput): """ Output type of :class:`~transformers.TFXLNetLMHeadModel`. Args: loss (:obj:`tf.Tensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided) Language modeling loss (for next-token prediction). logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). ``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then ``num_predict`` corresponds to ``sequence_length``. 
mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[tf.Tensor] = None logits: tf.Tensor = None mems: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None @dataclass class TFXLNetForSequenceClassificationOutput(ModelOutput): """ Output type of :class:`~transformers.TFXLNetForSequenceClassification`. Args: loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided): Classification (or regression if config.num_labels==1) loss. logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[tf.Tensor] = None logits: tf.Tensor = None mems: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None @dataclass class TFXLNetForTokenClassificationOutput(ModelOutput): """ Output type of :class:`~transformers.TFXLNetForTokenClassificationOutput`. Args: loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) : Classification loss. logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. 
Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[tf.Tensor] = None logits: tf.Tensor = None mems: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None @dataclass class TFXLNetForMultipleChoiceOutput(ModelOutput): """ Output type of :class:`~transformers.TFXLNetForMultipleChoice`. Args: loss (:obj:`tf.Tensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided): Classification loss. logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`): `num_choices` is the second dimension of the input tensors. (see `input_ids` above). Classification scores (before SoftMax). mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[tf.Tensor] = None logits: tf.Tensor = None mems: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None @dataclass class TFXLNetForQuestionAnsweringSimpleOutput(ModelOutput): """ Output type of :class:`~transformers.TFXLNetForQuestionAnsweringSimple`. Args: loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length,)`): Span-start scores (before SoftMax). end_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length,)`): Span-end scores (before SoftMax). 
mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[tf.Tensor] = None start_logits: tf.Tensor = None end_logits: tf.Tensor = None mems: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None XLNET_START_DOCSTRING = r""" This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. note:: TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all the tensors in the first argument of the model call function: :obj:`model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: :obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Parameters: config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ XLNET_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BertTokenizer`. See :func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for details. `What are input IDs? 
<../glossary.html#input-ids>`__ attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states (see :obj:`mems` output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. :obj::obj:`use_cache` has to be set to :obj:`True` to make use of :obj:`mems`. perm_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, sequence_length)`, `optional`): Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``: - if ``perm_mask[k, i, j] = 0``, i attend to j in batch k; - if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k. If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation). target_mapping (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, num_predict, sequence_length)`, `optional`): Mask to indicate the output tokens to use. If ``target_mapping[k, i, j] = 1``, the i-th predict in batch k is on the j-th token. token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`__ input_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Negative of :obj:`attention_mask`, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base. Mask values selected in ``[0, 1]``: - 1 for tokens that are **masked**, - 0 for tokens that are **not maked**. You can only uses one of :obj:`input_mask` and :obj:`attention_mask`. head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. 
training (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare XLNet Model transformer outputing raw hidden-states without any specific head on top.", XLNET_START_DOCSTRING, ) class TFXLNetModel(TFXLNetPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLNetMainLayer(config, name="transformer") @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased", output_type=TFXLNetModelOutput, config_class=_CONFIG_FOR_DOC, ) def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) return outputs @add_start_docstrings( """XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings). """, XLNET_START_DOCSTRING, ) class TFXLNetLMHeadModel(TFXLNetPreTrainedModel, TFCausalLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLNetMainLayer(config, name="transformer") self.lm_loss = TFXLNetLMHead(config, self.transformer.word_embedding, name="lm_loss") def get_output_embeddings(self): return self.lm_loss.input_embeddings def prepare_inputs_for_generation(self, inputs, past, **kwargs): # Add dummy token at the end (no attention on this one) # At every pass, the attention values for the new token and the two last generated tokens # are computed, the rest is reloaded from the `past` cache. A purely auto-regressive model would have # offset = 1; offset = 2 seems to have slightly better computation. 
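        # In short: a dummy token is appended as the position to predict, `perm_mask`
        # hides that position from every token, `target_mapping` restricts prediction to
        # it, and the cached `mems` (when present) are trimmed by the same offset below.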
offset = 2 effective_batch_size = inputs.shape[0] dummy_token = tf.zeros((effective_batch_size, 1), dtype=tf.int32) if past: inputs = tf.concat([inputs[:, -offset:], dummy_token], axis=1) else: inputs = tf.concat([inputs, dummy_token], axis=1) # Build permutation mask so that previous tokens don't see last token sequence_length = inputs.shape[1] perm_mask = tf.zeros((effective_batch_size, sequence_length, sequence_length - 1), dtype=tf.float32) perm_mask_seq_end = tf.ones((effective_batch_size, sequence_length, 1), dtype=tf.float32) perm_mask = tf.concat([perm_mask, perm_mask_seq_end], axis=-1) # We'll only predict the last token target_mapping = tf.zeros((effective_batch_size, 1, sequence_length - 1), dtype=tf.float32) target_mapping_seq_end = tf.ones((effective_batch_size, 1, 1), dtype=tf.float32) target_mapping = tf.concat([target_mapping, target_mapping_seq_end], axis=-1) inputs = { "inputs": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "use_cache": kwargs["use_cache"], } # if past is defined in model kwargs then use it for faster decoding if past: inputs["mems"] = tuple(layer_past[:-offset, :, :] for layer_past in past) return inputs @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFXLNetLMHeadModelOutput, config_class=_CONFIG_FOR_DOC) def call( self, inputs, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, use_cache=True, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the cross entropy classification loss. Indices should be in ``[0, ..., config.vocab_size - 1]``. Return: Examples:: >>> import tensorflow as tf >>> import numpy as np >>> from transformers import XLNetTokenizer, TFXLNetLMHeadModel >>> tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased') >>> model = TFXLNetLMHeadModel.from_pretrained('xlnet-large-cased') >>> # We show how to setup inputs to predict a next token using a bi-directional context. 
>>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=True))[None, :] # We will predict the masked token >>> perm_mask = np.zeros((1, input_ids.shape[1], input_ids.shape[1])) >>> perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token >>> target_mapping = np.zeros((1, 1, input_ids.shape[1])) # Shape [1, 1, seq_length] => let's predict one token >>> target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token) >>> outputs = model(input_ids, perm_mask=tf.constant(perm_mask, dtype=tf.float32), target_mapping=tf.constant(target_mapping, dtype=tf.float32)) >>> next_token_logits = outputs[0] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size] """ return_dict = return_dict if return_dict is not None else self.transformer.return_dict if isinstance(inputs, (tuple, list)): labels = inputs[13] if len(inputs) > 13 else labels if len(inputs) > 13: inputs = inputs[:13] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) transformer_outputs = self.transformer( inputs, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, use_cache=True, output_attentions=None, output_hidden_states=None, return_dict=return_dict, training=training, ) hidden_state = transformer_outputs[0] logits = self.lm_loss(hidden_state, training=training) loss = None if labels is not None: # shift labels to the left and cut last logit token logits = logits[:, :-1] labels = labels[:, 1:] loss = self.compute_loss(labels, logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFXLNetLMHeadModelOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, XLNET_START_DOCSTRING, ) class TFXLNetForSequenceClassification(TFXLNetPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFXLNetMainLayer(config, name="transformer") self.sequence_summary = TFSequenceSummary( config, initializer_range=config.initializer_range, name="sequence_summary" ) self.logits_proj = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj" ) @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased", output_type=TFXLNetForSequenceClassificationOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, use_cache=True, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. 
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.transformer.return_dict if isinstance(inputs, (tuple, list)): labels = inputs[13] if len(inputs) > 13 else labels if len(inputs) > 13: inputs = inputs[:13] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) transformer_outputs = self.transformer( inputs, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) output = transformer_outputs[0] output = self.sequence_summary(output) logits = self.logits_proj(output) loss = None if labels is None else self.compute_loss(labels, logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFXLNetForSequenceClassificationOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """XLNET Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, XLNET_START_DOCSTRING, ) class TFXLNetForMultipleChoice(TFXLNetPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLNetMainLayer(config, name="transformer") self.sequence_summary = TFSequenceSummary( config, initializer_range=config.initializer_range, name="sequence_summary" ) self.logits_proj = tf.keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj" ) @property def dummy_inputs(self): """Dummy inputs to build the network. Returns: tf.Tensor with dummy inputs """ return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)} @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased", output_type=TFXLNetForMultipleChoiceOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, token_type_ids=None, input_mask=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, head_mask=None, inputs_embeds=None, use_cache=True, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. 
(See :obj:`input_ids` above) """ if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask mems = inputs[2] if len(inputs) > 2 else mems perm_mask = inputs[3] if len(inputs) > 3 else perm_mask target_mapping = inputs[4] if len(inputs) > 4 else target_mapping token_type_ids = inputs[5] if len(inputs) > 5 else token_type_ids input_mask = inputs[6] if len(inputs) > 6 else input_mask head_mask = inputs[7] if len(inputs) > 7 else head_mask inputs_embeds = inputs[8] if len(inputs) > 8 else inputs_embeds use_cache = inputs[9] if len(inputs) > 9 else use_cache output_attentions = inputs[10] if len(inputs) > 10 else output_attentions output_hidden_states = inputs[11] if len(inputs) > 11 else output_hidden_states return_dict = inputs[12] if len(inputs) > 12 else return_dict labels = inputs[13] if len(inputs) > 13 else labels assert len(inputs) <= 14, "Too many inputs." elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") attention_mask = inputs.get("attention_mask", attention_mask) mems = inputs.get("mems", mems) perm_mask = inputs.get("perm_mask", perm_mask) target_mapping = inputs.get("target_mapping", target_mapping) token_type_ids = inputs.get("token_type_ids", token_type_ids) input_mask = inputs.get("input_mask", input_mask) head_mask = inputs.get("head_mask", head_mask) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) use_cache = inputs.get("use_cache", use_cache) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) return_dict = inputs.get("return_dict", return_dict) labels = inputs.get("labels", labels) assert len(inputs) <= 14, "Too many inputs." else: input_ids = inputs return_dict = return_dict if return_dict is not None else self.transformer.return_dict if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_input_mask = tf.reshape(input_mask, (-1, seq_length)) if input_mask is not None else None flat_inputs_embeds = ( tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) transformer_outputs = self.transformer( flat_input_ids, flat_attention_mask, mems, perm_mask, target_mapping, flat_token_type_ids, flat_input_mask, head_mask, flat_inputs_embeds, use_cache, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) output = transformer_outputs[0] logits = self.sequence_summary(output) logits = self.logits_proj(logits) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFXLNetForMultipleChoiceOutput( loss=loss, logits=reshaped_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """XLNet Model with a token classification head on top (a 
linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, XLNET_START_DOCSTRING, ) class TFXLNetForTokenClassification(TFXLNetPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFXLNetMainLayer(config, name="transformer") self.classifier = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased", output_type=TFXLNetForTokenClassificationOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, use_cache=True, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. """ return_dict = return_dict if return_dict is not None else self.transformer.return_dict if isinstance(inputs, (tuple, list)): labels = inputs[13] if len(inputs) > 13 else labels if len(inputs) > 13: inputs = inputs[:13] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) transformer_outputs = self.transformer( inputs, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) output = transformer_outputs[0] logits = self.classifier(output) loss = None if labels is None else self.compute_loss(labels, logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFXLNetForTokenClassificationOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", XLNET_START_DOCSTRING, ) class TFXLNetForQuestionAnsweringSimple(TFXLNetPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLNetMainLayer(config, name="transformer") self.qa_outputs = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) @add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased", output_type=TFXLNetForQuestionAnsweringSimpleOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, use_cache=True, output_attentions=None, output_hidden_states=None, return_dict=None, start_positions=None, end_positions=None, training=False, ): r""" start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.transformer.return_dict if isinstance(inputs, (tuple, list)): start_positions = inputs[13] if len(inputs) > 13 else start_positions end_positions = inputs[14] if len(inputs) > 14 else end_positions if len(inputs) > 13: inputs = inputs[:13] elif isinstance(inputs, (dict, BatchEncoding)): start_positions = inputs.pop("start_positions", start_positions) end_positions = inputs.pop("end_positions", start_positions) transformer_outputs = self.transformer( inputs, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = transformer_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFXLNetForQuestionAnsweringSimpleOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) # @add_start_docstrings("""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers 
on top of # the hidden-states output to compute `span start logits` and `span end logits`). """, # XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING) # class TFXLNetForQuestionAnswering(TFXLNetPreTrainedModel): # r""" # Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: # **start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) # ``tf.Tensor`` of shape ``(batch_size, config.start_n_top)`` # Log probabilities for the top config.start_n_top start token possibilities (beam-search). # **start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) # ``tf.Tensor`` of shape ``(batch_size, config.start_n_top)`` # Indices for the top config.start_n_top start token possibilities (beam-search). # **end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) # ``tf.Tensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)`` # Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). # **end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) # ``tf.Tensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)`` # Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). # **cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) # ``tf.Tensor`` of shape ``(batch_size,)`` # Log probabilities for the ``is_impossible`` label of the answers. # **mems**: # list of ``tf.Tensor`` (one for each layer): # that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model # if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context. # See details in the docstring of the `mems` input above. # **hidden_states**: (`optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``) # list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) # of shape ``(batch_size, sequence_length, hidden_size)``: # Hidden-states of the model at the output of each layer plus the initial embedding outputs. # **attentions**: (`optional`, returned when ``output_attentions=True``) # list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: # Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. # Examples:: # # For example purposes. Not runnable. 
# tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') # model = XLMForQuestionAnswering.from_pretrained('xlnet-large-cased') # input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1 # start_positions = tf.constant([1]) # end_positions = tf.constant([3]) # outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) # loss, start_scores, end_scores = outputs[:2] # """ # def __init__(self, config, *inputs, **kwargs): # super().__init__(config, *inputs, **kwargs) # self.start_n_top = config.start_n_top # self.end_n_top = config.end_n_top # self.transformer = TFXLNetMainLayer(config, name='transformer') # self.start_logits = TFPoolerStartLogits(config, name='start_logits') # self.end_logits = TFPoolerEndLogits(config, name='end_logits') # self.answer_class = TFPoolerAnswerClass(config, name='answer_class') # def call(self, inputs, training=False): # transformer_outputs = self.transformer(inputs, training=training) # hidden_states = transformer_outputs[0] # start_logits = self.start_logits(hidden_states, p_mask=p_mask) # outputs = transformer_outputs[1:] # Keep mems, hidden states, attentions if there are in it # if start_positions is not None and end_positions is not None: # # If we are on multi-GPU, let's remove the dimension added by batch splitting # for x in (start_positions, end_positions, cls_index, is_impossible): # if x is not None and x.dim() > 1: # x.squeeze_(-1) # # during training, compute the end logits based on the ground truth of the start position # end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) # loss_fct = CrossEntropyLoss() # start_loss = loss_fct(start_logits, start_positions) # end_loss = loss_fct(end_logits, end_positions) # total_loss = (start_loss + end_loss) / 2 # if cls_index is not None and is_impossible is not None: # # Predict answerability from the representation of CLS and START # cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) # loss_fct_cls = nn.BCEWithLogitsLoss() # cls_loss = loss_fct_cls(cls_logits, is_impossible) # # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss # total_loss += cls_loss * 0.5 # outputs = (total_loss,) + outputs # else: # # during inference, compute the end logits based on beam search # bsz, slen, hsz = hidden_states.size() # start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen) # start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1) # shape (bsz, start_n_top) # start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) # start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) # start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) # hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) # shape (bsz, slen, start_n_top, hsz) # p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None # end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) # end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) # end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1) # shape (bsz, end_n_top, start_n_top) # end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) # 
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) # start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs) # get the representation of START as weighted sum of hidden states # cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) # Shape (batch size,): one single `cls_logits` for each sample # outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs # # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits # # or (if labels are provided) (total_loss,) # return outputs
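
# A minimal usage sketch for TFXLNetForQuestionAnsweringSimple defined above. This is an illustration only,
# not part of the original module; the checkpoint name and the question/context strings are placeholders
# (a QA fine-tuned XLNet checkpoint would be needed for meaningful answer spans).
#
#     from transformers import XLNetTokenizer
#
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     model = TFXLNetForQuestionAnsweringSimple.from_pretrained("xlnet-base-cased")
#
#     # Encode a (question, context) pair as TF tensors.
#     encoding = tokenizer("Who proposed XLNet?", "XLNet was proposed by Yang et al. in 2019.", return_tensors="tf")
#     outputs = model(encoding, return_dict=True)
#
#     # Take the most likely start/end positions and decode the predicted span.
#     start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
#     end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
#     answer_ids = encoding["input_ids"][0, start_index : end_index + 1].numpy().tolist()
#     print(tokenizer.decode(answer_ids))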
82,670
43.978781
171
py
SLT-FAI
SLT-FAI-main/transformers/modeling_tf_lxmert.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team, and the # Lxmert Authors. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 LXMERT model. """ from dataclasses import dataclass from typing import Dict, Optional, Tuple import tensorflow as tf from .activations_tf import get_tf_activation from .configuration_lxmert import LxmertConfig from .file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings, ) from .modeling_tf_utils import TFPreTrainedModel, get_initializer, keras_serializable, shape_list from .tokenization_utils_base import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LxmertConfig" _TOKENIZER_FOR_DOC = "LxmertTokenizer" TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "unc-nlp/lxmert-base-uncased", ] @dataclass class TFLxmertModelOutput(ModelOutput): """ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilites for the language, visual, and, cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship" encoder") Args: language_output (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the language encoder. vision_output (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the visual encoder. pooled_output (:obj:`tf.Tensor` of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed by a Linear layer and a Tanh activation function. The Linear language_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. vision_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. language_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
vision_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ language_output: Optional[tf.Tensor] = None vision_output: Optional[tf.Tensor] = None pooled_output: Optional[tf.Tensor] = None language_hidden_states: Optional[Tuple[tf.Tensor]] = None vision_hidden_states: Optional[Tuple[tf.Tensor]] = None language_attentions: Optional[Tuple[tf.Tensor]] = None vision_attentions: Optional[Tuple[tf.Tensor]] = None cross_encoder_attentions: Optional[Tuple[tf.Tensor]] = None @dataclass class TFLxmertForPreTrainingOutput(ModelOutput): """ Output type of :class:`~transformers.LxmertForPreTrainingModel`. Args: loss (`optional`, returned when ``labels`` is provided, ``tf.Tensor`` of shape :obj:`(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cross_relationship_score: (:obj:`tf.Tensor` of shape :obj:`(batch_size, 2)`): Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax). question_answering_score: (:obj:`tf.Tensor` of shape :obj:`(batch_size, n_qa_answers)`): Prediction scores of question answering objective (classification). language_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. vision_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. language_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. vision_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
cross_encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: [tf.Tensor] = None prediction_logits: Optional[tf.Tensor] = None cross_relationship_score: Optional[tf.Tensor] = None question_answering_score: Optional[tf.Tensor] = None language_hidden_states: Optional[Tuple[tf.Tensor]] = None vision_hidden_states: Optional[Tuple[tf.Tensor]] = None language_attentions: Optional[Tuple[tf.Tensor]] = None vision_attentions: Optional[Tuple[tf.Tensor]] = None cross_encoder_attentions: Optional[Tuple[tf.Tensor]] = None class TFLxmertVisualFeatureEncoder(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) # Object feature encoding self.visn_fc = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="visn_fc", ) self.visn_layer_norm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="visn_layer_norm" ) # Box position encoding self.box_fc = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="box_fc", ) self.box_layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="box_layer_norm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, visn_input, training=False): feats, boxes = visn_input x = self.visn_fc(feats) x = self.visn_layer_norm(x) y = self.box_fc(boxes) y = self.box_layer_norm(y) output = (x + y) / 2 output = self.dropout(output, training=training) return output class TFLxmertEmbeddings(tf.keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.hidden_size = config.hidden_size self.initializer_range = config.initializer_range self.position_embeddings = tf.keras.layers.Embedding( config.max_position_embeddings, config.hidden_size, embeddings_initializer=get_initializer(self.initializer_range), name="position_embeddings", ) self.token_type_embeddings = tf.keras.layers.Embedding( config.type_vocab_size, config.hidden_size, embeddings_initializer=get_initializer(self.initializer_range), name="token_type_embeddings", ) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def build(self, input_shape): """Build shared word embedding layer """ with tf.name_scope("word_embeddings"): # Create and initialize weights. The random normal initializer was chosen # arbitrarily, and works well. self.word_embeddings = self.add_weight( "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) super().build(input_shape) def call(self, inputs, mode="embedding", training=False): """Get token embeddings of inputs. Args: inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids) mode: string, a valid value is one of "embedding" and "linear". 
Returns: outputs: (1) If mode == "embedding", output embedding tensor, float32 with shape [batch_size, length, embedding_size]; (2) mode == "linear", output linear tensor, float32 with shape [batch_size, length, vocab_size]. Raises: ValueError: if mode is not valid. Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ if mode == "embedding": return self._embedding(inputs, training=training) elif mode == "linear": return self._linear(inputs) else: raise ValueError("mode {} is not valid.".format(mode)) def _embedding(self, inputs, training=False): """Applies embedding based on inputs tensor.""" input_ids, token_type_ids, inputs_embeds = inputs if input_ids is not None: input_shape = shape_list(input_ids) else: input_shape = shape_list(inputs_embeds)[:-1] seq_length = input_shape[1] position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :] if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) if inputs_embeds is None: inputs_embeds = tf.gather(self.word_embeddings, input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings, training=training) return embeddings def _linear(self, inputs): """Computes logits by running inputs through a linear layer. Args: inputs: A float32 tensor with shape [batch_size, length, hidden_size] Returns: float32 tensor with shape [batch_size, length, vocab_size]. """ batch_size = shape_list(inputs)[0] length = shape_list(inputs)[1] x = tf.reshape(inputs, [-1, self.hidden_size]) logits = tf.matmul(x, self.word_embeddings, transpose_b=True) return tf.reshape(logits, [batch_size, length, self.vocab_size]) class TFLxmertAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_attention_heads = config.num_attention_heads assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query", ) self.key = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key", ) self.value = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value", ) self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x, batch_size): x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, hidden_states, context, attention_mask, output_attentions, training=False): batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(context) mixed_value_layer = self.value(context) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) 
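        # transpose_for_scores reshapes each projection to (batch_size, num_attention_heads, seq_len,
        # attention_head_size) so the scaled dot-product below runs per attention head.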
value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = tf.matmul( query_layer, key_layer, transpose_b=True ) # (batch size, num_heads, seq_len_q, seq_len_k) dk = tf.cast(shape_list(key_layer)[-1], tf.float32) # scale attention_scores attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFBertModel call() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = tf.nn.softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape( context_layer, (batch_size, -1, self.all_head_size) ) # (batch_size, seq_len_q, all_head_size) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class TFLxmertIntermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class TFLxmertOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TFLxmertAttentionOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TFLxmertSelfAttentionLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.self = TFLxmertAttention(config, name="self") self.attention_output = TFLxmertAttentionOutput(config, name="output") def call(self, input_tensor, attention_mask, output_attentions, training=False): # Self attention attends to itself, thus keys and querys are the same (input_tensor). 
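        # TFLxmertAttention.call takes (hidden_states, context, ...), so input_tensor is passed twice here.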
self_output = self.self(input_tensor, input_tensor, attention_mask, output_attentions) if output_attentions: attention_probs = self_output[1] attention_output = self.attention_output(self_output[0], input_tensor) return (attention_output, attention_probs) if output_attentions else (attention_output,) class TFLxmertCrossAttentionLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.att = TFLxmertAttention(config, name="att") self.attention_output = TFLxmertAttentionOutput(config, name="output") def call( self, input_tensor, ctx_tensor, ctx_att_mask, output_attentions=False, training=False, ): output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions, training=training) if output_attentions: attention_probs = output[1] attention_output = self.attention_output(output[0], input_tensor, training=training) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs class TFLxmertLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.attention = TFLxmertSelfAttentionLayer(config, name="attention") self.intermediate = TFLxmertIntermediate(config, name="intermediate") self.transformer_output = TFLxmertOutput(config, name="output") def call(self, hidden_states, attention_mask, output_attentions, training=False): attention_outputs = self.attention(hidden_states, attention_mask, output_attentions, training=training) attention_output = attention_outputs[0] intermediate_output = self.intermediate(attention_output) layer_output = self.transformer_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs class TFLxmertXLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.visual_attention = TFLxmertCrossAttentionLayer(config, name="visual_attention") # Self-attention Layers self.lang_self_att = TFLxmertSelfAttentionLayer(config, name="lang_self_att") self.visn_self_att = TFLxmertSelfAttentionLayer(config, name="visn_self_att") # Intermediate and Output Layers (FFNs) self.lang_inter = TFLxmertIntermediate(config, name="lang_inter") self.lang_output = TFLxmertOutput(config, name="lang_output") self.visn_inter = TFLxmertIntermediate(config, name="visn_inter") self.visn_output = TFLxmertOutput(config, name="visn_output") def cross_att( self, lang_input, lang_attention_mask, visn_input, visn_attention_mask, output_attentions, training=False, ): # Cross Attention # Keras saving and loading model *does not work* with the same inputs for two layers. 
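        # tf.identity gives the shared visual_attention layer distinct input tensors for the two directions
        # (language attending to vision, and vision attending to language), which keeps Keras model
        # saving/loading working (see the note above).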
lang_attention_lang_input = tf.identity(lang_input) visn_attention_lang_input = tf.identity(lang_input) lang_attention_visn_input = tf.identity(visn_input) visn_attention_visn_input = tf.identity(visn_input) lang_att_output = self.visual_attention( lang_attention_lang_input, lang_attention_visn_input, visn_attention_mask, output_attentions=output_attentions, training=training, ) visn_att_output = self.visual_attention( visn_attention_visn_input, visn_attention_lang_input, lang_attention_mask, output_attentions=output_attentions, training=training, ) return lang_att_output, visn_att_output def self_att( self, lang_input, lang_attention_mask, visn_input, visn_attention_mask, training=False, ): # Self Attention output_attentions = False lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions, training=training) visn_att_output = self.visn_self_att(visn_input, visn_attention_mask, output_attentions, training=training) return lang_att_output[0], visn_att_output[0] def output_fc(self, lang_input, visn_input, training=False): # FC layers lang_inter_output = self.lang_inter(lang_input) visn_inter_output = self.visn_inter(visn_input) # Layer output lang_output = self.lang_output(lang_inter_output, lang_input, training) visn_output = self.visn_output(visn_inter_output, visn_input, training) return lang_output, visn_output def call( self, lang_feats, lang_attention_mask, visn_feats, visn_attention_mask, output_attentions, training=False, ): lang_att_output = lang_feats visn_att_output = visn_feats lang_att_output, visn_att_output = self.cross_att( lang_att_output, lang_attention_mask, visn_att_output, visn_attention_mask, output_attentions, training=training, ) attention_probs = lang_att_output[1:] lang_att_output, visn_att_output = self.self_att( lang_att_output[0], lang_attention_mask, visn_att_output[0], visn_attention_mask, training=training, ) lang_output, visn_output = self.output_fc(lang_att_output, visn_att_output, training=training) return (lang_output, visn_output, attention_probs[0]) if output_attentions else (lang_output, visn_output) class TFLxmertEncoder(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.visn_fc = TFLxmertVisualFeatureEncoder(config, name="visn_fc") # Number of layers self.num_l_layers = config.l_layers self.num_x_layers = config.x_layers self.num_r_layers = config.r_layers # Layers # Using self.layer instead of self.l_layer to support loading BERT weights. 
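        # self.layer holds the language-only layers, self.x_layers the cross-modality layers,
        # and self.r_layers the object-relationship (vision) layers.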
self.layer = [TFLxmertLayer(config, name="layer_._{}".format(i)) for i in range(self.num_l_layers)] self.x_layers = [TFLxmertXLayer(config, name="x_layers_._{}".format(i)) for i in range(self.num_x_layers)] self.r_layers = [TFLxmertLayer(config, name="r_layers_._{}".format(i)) for i in range(self.num_r_layers)] self.config = config def call( self, lang_feats=None, lang_attention_mask=None, visual_feats=None, visual_pos=None, visual_attention_mask=None, output_attentions=None, training=False, ): vision_hidden_states = () language_hidden_states = () vision_attentions = () if output_attentions or self.config.output_attentions else None language_attentions = () if output_attentions or self.config.output_attentions else None cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None visual_feats = self.visn_fc([visual_feats, visual_pos], training=training) # Run language layers for layer_module in self.layer: l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions, training=training) lang_feats = l_outputs[0] language_hidden_states = language_hidden_states + (lang_feats,) if language_attentions is not None: language_attentions = language_attentions + (l_outputs[1],) # Run relational layers for layer_module in self.r_layers: v_outputs = layer_module( visual_feats, visual_attention_mask, output_attentions, training=training, ) visual_feats = v_outputs[0] vision_hidden_states = vision_hidden_states + (visual_feats,) if vision_attentions is not None: vision_attentions = vision_attentions + (v_outputs[1],) # Run cross-modality layers for layer_module in self.x_layers: x_outputs = layer_module( lang_feats, lang_attention_mask, visual_feats, visual_attention_mask, output_attentions, training=training, ) lang_feats, visual_feats = x_outputs[:2] vision_hidden_states = vision_hidden_states + (visual_feats,) language_hidden_states = language_hidden_states + (lang_feats,) if cross_encoder_attentions is not None: cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],) visual_encoder_outputs = ( vision_hidden_states, vision_attentions if output_attentions else None, ) lang_encoder_outputs = ( language_hidden_states, language_attentions if output_attentions else None, ) return ( visual_encoder_outputs, lang_encoder_outputs, cross_encoder_attentions if output_attentions else None, ) @keras_serializable class TFLxmertMainLayer(tf.keras.layers.Layer): config_class = LxmertConfig @property def dummy_inputs(self): """Dummy inputs to build the network. 
Returns: tf.Tensor with dummy inputs """ batch_size = 2 num_visual_features = 10 input_ids = tf.constant([[3, 5, 6], [2, 3, 4]]) visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) return { "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": visual_pos, } def __init__(self, config, **kwargs): super().__init__(**kwargs) self.num_l_layers = config.l_layers self.num_x_layers = config.x_layers self.num_r_layers = config.r_layers self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFLxmertEmbeddings(config, name="embeddings") self.encoder = TFLxmertEncoder(config, name="encoder") self.pooler = TFLxmertPooler(config, name="pooler") self.config = config def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value self.embeddings.vocab_size = value.shape[0] def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError def _prune_heads(self, heads_to_prune): raise NotImplementedError def call( self, inputs, visual_feats=None, visual_pos=None, attention_mask=None, visual_attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] visual_feats = inputs[1] if len(inputs) > 1 else visual_feats visual_pos = inputs[2] if len(inputs) > 2 else visual_pos attention_mask = inputs[3] if len(inputs) > 3 else attention_mask visual_attention_mask = inputs[4] if len(inputs) > 4 else visual_attention_mask token_type_ids = inputs[5] if len(inputs) > 5 else token_type_ids inputs_embeds = inputs[6] if len(inputs) > 6 else inputs_embeds output_attentions = inputs[7] if len(inputs) > 7 else output_attentions output_hidden_states = inputs[8] if len(inputs) > 8 else output_hidden_states return_dict = inputs[9] if len(inputs) > 9 else return_dict assert len(inputs) <= 10, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get("input_ids") visual_feats = inputs.get("visual_feats", visual_feats) visual_pos = inputs.get("visual_pos", visual_pos) attention_mask = inputs.get("attention_mask", attention_mask) visual_attention_mask = inputs.get("visual_attention_mask", visual_attention_mask) token_type_ids = inputs.get("token_type_ids", token_type_ids) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) return_dict = inputs.get("return_dict", return_dict) assert len(inputs) <= 10, "Too many inputs." 
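        # A bare tensor (or array) is interpreted as input_ids; every other argument keeps its keyword value.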
else: input_ids = inputs output_attentions = output_attentions if output_attentions is not None else self.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states return_dict = return_dict if return_dict is not None else self.return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if visual_pos is None or visual_feats is None: raise ValueError("visual_feats and visual_pos cannot be `None` in LXMERT's `call` method.") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, tf.float32) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 if visual_attention_mask is not None: extended_visual_attention_mask = visual_attention_mask[:, tf.newaxis, tf.newaxis, :] extended_visual_attention_mask = tf.cast(extended_visual_attention_mask, tf.float32) extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * -10000.0 else: extended_visual_attention_mask = None # Positional Word Embeddings embedding_output = self.embeddings([input_ids, token_type_ids, inputs_embeds], training=training) # Run Lxmert encoder encoder_outputs = self.encoder( embedding_output, extended_attention_mask, visual_feats, visual_pos, extended_visual_attention_mask, output_attentions=output_attentions, training=training, ) visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2] vision_hidden_states = visual_encoder_outputs[0] language_hidden_states = lang_encoder_outputs[0] all_attentions = () if output_attentions: language_attentions = lang_encoder_outputs[1] vision_attentions = visual_encoder_outputs[1] cross_encoder_attentions = encoder_outputs[2] all_attentions = ( language_attentions, vision_attentions, cross_encoder_attentions, ) hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else () visual_output = vision_hidden_states[-1] lang_output = language_hidden_states[-1] pooled_output = self.pooler(lang_output) if not return_dict: return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions return TFLxmertModelOutput( pooled_output=pooled_output, language_output=lang_output, vision_output=visual_output, language_hidden_states=language_hidden_states if output_hidden_states else None, vision_hidden_states=vision_hidden_states if output_hidden_states else None, 
language_attentions=language_attentions if output_attentions else None, vision_attentions=vision_attentions if output_attentions else None, cross_encoder_attentions=cross_encoder_attentions if output_attentions else None, ) class TFLxmertPreTrainedModel(TFPreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LxmertConfig base_model_prefix = "lxmert" @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: return getattr(self, self.base_model_prefix).dummy_inputs LXMERT_START_DOCSTRING = r""" The LXMERT model was proposed in `LXMERT: Learning Cross-Modality Encoder Representations from Transformers <https://arxiv.org/abs/1908.07490>`__ by Hao Tan and Mohit Bansal. It's a vision and language transformer model, pre-trained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag predicition. This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. note:: TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all the tensors in the first argument of the model call function: :obj:`model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: :obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Parameters: config (:class:`~transformers.LxmertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ LXMERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.LxmertTokenizer`. See :func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for details. `What are input IDs? <../glossary.html#input-ids>`__ visual_feats: (:obj:`tf.Tensor` of shape :obj:՝(batch_size, num_visual_features, visual_feat_dim)՝): This input represents visual features. They ROI pooled object features from bounding boxes using a faster-RCNN model) These are currently not provided by the transformers library. 
visual_pos: (:obj:`tf.Tensor` of shape :obj:՝(batch_size, num_visual_features, visual_feat_dim)՝): This input represents spacial features corresponding to their relative (via index) visual features. The pre-trained LXMERT model expects these spacial features to be normalized bounding boxes on a scale of 0 to 1. These are currently not provided by the transformers library. attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ visual_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): MMask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`__ inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. training (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare Lxmert Model transformer outputing raw hidden-states without any specific head on top.", LXMERT_START_DOCSTRING, ) class TFLxmertModel(TFLxmertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.lxmert = TFLxmertMainLayer(config, name="lxmert") @add_start_docstrings_to_callable(LXMERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="unc-nlp/lxmert-base-uncased", output_type=TFLxmertModelOutput, config_class=_CONFIG_FOR_DOC, ) def call(self, inputs, *args, **kwargs): outputs = self.lxmert(inputs, *args, **kwargs) return outputs class TFLxmertPooler(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) def call(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
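        # (the [CLS]-style token); it is then projected through a dense layer with a tanh activation.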
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) return pooled_output class TFLxmertPredictionHeadTransform(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class TFLxmertLMPredictionHead(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.transform = TFLxmertPredictionHeadTransform(config, name="transform") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.input_embeddings = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def call(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.input_embeddings(hidden_states, mode="linear") hidden_states = hidden_states + self.bias return hidden_states class TFLxmertMLMHead(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions") def call(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class TFLxmertPreTrainingHeads(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions") self.seq_relationship = tf.keras.layers.Dense( 2, kernel_initializer=get_initializer(config.initializer_range), name="seq_relationship", ) def call(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class TFLxmertVisualAnswerHead(tf.keras.layers.Layer): def __init__(self, config, num_labels, **kwargs): super().__init__(**kwargs) hid_dim = config.hidden_size self.dense = tf.keras.layers.Dense( hid_dim * 2, kernel_initializer=get_initializer(config.initializer_range), name="logit_fc_._0", ) self.activation = get_tf_activation("gelu") self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="logit_fc_._2") self.dense_1 = tf.keras.layers.Dense( num_labels, kernel_initializer=get_initializer(config.initializer_range), name="logit_fc_._3", ) def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.dense_1(hidden_states) return hidden_states class TFLxmertVisualObjHead(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.transform = TFLxmertPredictionHeadTransform(config, name="transform") # Decide the use of visual losses visual_losses = {} if 
config.visual_obj_loss: visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels} if config.visual_attr_loss: visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels} if config.visual_feat_loss: visual_losses["feat"] = {"shape": (-1, 2048), "num": config.visual_feat_dim} self.visual_losses = visual_losses # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder_dict = { key: tf.keras.layers.Dense( self.visual_losses[key]["num"], kernel_initializer=get_initializer(config.initializer_range), name=f"decoder_dict.{key}", ) for key in self.visual_losses } def call(self, hidden_states): hidden_states = self.transform(hidden_states) output = {} for key in self.visual_losses: output[key] = self.decoder_dict[key](hidden_states) return output @add_start_docstrings("""Lxmert Model with a `language modeling` head on top. """, LXMERT_START_DOCSTRING) class TFLxmertForPreTraining(TFLxmertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config self.num_qa_labels = config.num_qa_labels self.visual_loss_normalizer = config.visual_loss_normalizer # Use of pre-training tasks self.task_mask_lm = config.task_mask_lm self.task_obj_predict = config.task_obj_predict self.task_matched = config.task_matched self.task_qa = config.task_qa # Lxmert backbone self.lxmert = TFLxmertMainLayer(config, name="lxmert") # Pre-training heads self.cls = TFLxmertPreTrainingHeads(config, self.lxmert.embeddings, name="cls") if self.task_obj_predict: self.obj_predict_head = TFLxmertVisualObjHead(config, name="obj_predict_head") if self.task_qa: self.answer_head = TFLxmertVisualAnswerHead(config, self.num_qa_labels, name="answer_head") # Loss functions self.loss_fcts = { "l2": tf.keras.losses.Huber(delta=1.0, name="huber_loss"), "visn_ce": tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), "ce": tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), } visual_losses = {} if config.visual_obj_loss: visual_losses["obj"] = { "shape": (-1,), "num": config.num_object_labels, "loss": "visn_ce", } if config.visual_attr_loss: visual_losses["attr"] = { "shape": (-1,), "num": config.num_attr_labels, "loss": "visn_ce", } if config.visual_feat_loss: visual_losses["feat"] = { "shape": (-1, config.visual_feat_dim), "num": config.visual_feat_dim, "loss": "l2", } self.visual_losses = visual_losses @property def dummy_inputs(self): """Dummy inputs to build the network.
Returns: tf.Tensor with dummy inputs """ batch_size = 2 num_visual_features = 10 input_ids = tf.constant([[3, 5, 6], [2, 3, 4]]) visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) if self.config.task_obj_predict: obj_labels = {} if self.config.visual_attr_loss and self.config.task_obj_predict: obj_labels["attr"] = ( tf.ones([batch_size, num_visual_features]), tf.ones([batch_size, num_visual_features]), ) if self.config.visual_feat_loss and self.config.task_obj_predict: obj_labels["feat"] = ( tf.ones([batch_size, num_visual_features, self.config.visual_feat_dim]), tf.ones([batch_size, num_visual_features]), ) if self.config.visual_obj_loss and self.config.task_obj_predict: obj_labels["obj"] = ( tf.ones([batch_size, num_visual_features]), tf.ones([batch_size, num_visual_features]), ) return { **{ "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": visual_pos, }, **({"obj_labels": obj_labels} if self.config.task_obj_predict else {}), } @add_start_docstrings_to_callable(LXMERT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFLxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call( self, inputs=None, visual_feats=None, visual_pos=None, attention_mask=None, visual_attention_mask=None, token_type_ids=None, inputs_embeds=None, masked_lm_labels=None, obj_labels=None, matched_label=None, ans=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" masked_lm_labels (``tf.Tensor`` of shape ``(batch_size, sequence_length)``, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` obj_labels: (``Dict[Str: Tuple[tf.Tensor, tf.Tensor]]``, `optional`, defaults to :obj: `None`): each key is named after each one of the visual losses and each element of the tuple is of the shape ``(batch_size, num_features)`` and ``(batch_size, num_features, visual_feature_dim)`` for each the label id and the label score respectively matched_label (``tf.Tensor`` of shape ``(batch_size,)``, `optional`): Labels for computing the whether or not the text input matches the image (classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``: - 0 indicates that the sentence does not match the image, - 1 indicates that the sentence does match the image. 
ans: (``Torch.Tensor`` of shape ``(batch_size)``, `optional`, defaults to :obj: `None`): a one hot representation hof the correct answer `optional` Returns: """ if isinstance(inputs, (tuple, list)): masked_lm_labels = inputs[7] if len(inputs) > 7 else masked_lm_labels obj_labels = inputs[8] if len(inputs) > 8 else obj_labels matched_label = inputs[9] if len(inputs) > 9 else matched_label ans = inputs[10] if len(inputs) > 10 else ans if len(inputs) > 10: inputs = inputs[:10] elif isinstance(inputs, (dict, BatchEncoding)): masked_lm_labels = inputs.pop("masked_lm_labels", masked_lm_labels) obj_labels = inputs.pop("obj_labels", obj_labels) matched_label = inputs.pop("matched_label", matched_label) ans = inputs.pop("ans", ans) lxmert_output = self.lxmert( inputs, visual_feats=visual_feats, visual_pos=visual_pos, attention_mask=attention_mask, visual_attention_mask=visual_attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) lang_output, visual_output, pooled_output = ( lxmert_output[0], lxmert_output[1], lxmert_output[2], ) lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output) if self.task_qa: answer_score = self.answer_head(pooled_output) else: answer_score = pooled_output[0][0] total_loss = ( None if (masked_lm_labels is None and matched_label is None and obj_labels is None and ans is None) else tf.constant(0.0) ) losses = () if masked_lm_labels is not None and self.task_mask_lm: masked_lm_loss = self.loss_fcts["ce"]( tf.reshape(masked_lm_labels, [-1]), tf.reshape(lang_prediction_scores, [-1, self.config.vocab_size]), ) total_loss += masked_lm_loss losses += (masked_lm_loss,) if matched_label is not None and self.task_matched: matched_loss = self.loss_fcts["ce"]( tf.reshape(matched_label, [-1]), tf.reshape(cross_relationship_score, [-1, 2]), ) total_loss += matched_loss losses += (matched_loss,) if obj_labels is not None and self.task_obj_predict: total_visn_loss = 0.0 visn_prediction_scores_dict = self.obj_predict_head(visual_output) for key, key_info in self.visual_losses.items(): label, mask_conf = obj_labels[key] output_dim = key_info["num"] loss_fct_name = key_info["loss"] label_shape = key_info["shape"] weight = self.visual_loss_normalizer visn_loss_fct = self.loss_fcts[loss_fct_name] visn_prediction_scores = visn_prediction_scores_dict[key] visn_loss = visn_loss_fct( tf.reshape(label, label_shape), tf.reshape(visn_prediction_scores, [-1, output_dim]), ) if visn_loss.ndim > 1: # Regression Losses visn_loss = tf.reduce_mean(visn_loss) visn_loss = tf.reduce_mean(visn_loss * tf.cast(tf.reshape(mask_conf, [-1]), visn_loss.dtype)) * weight total_visn_loss += visn_loss losses += (visn_loss,) total_loss += total_visn_loss if ans is not None and self.task_qa: answer_loss = self.loss_fcts["ce"]( tf.reshape(ans, [-1]), tf.reshape(answer_score, [-1, self.num_qa_labels]) ) # exclude "*2" here to match the effect of QA losses. # Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs. 
(Used 10 instead of 6 in EMNLP paper) # Now : (loss *1) for 12 epochs # # * 2 # Multiply by 2 because > half of the data will not have label total_loss += answer_loss losses += (answer_loss,) # return total_loss, tf.stack(losses)[tf.new_axis, ...], answer_score.detach() if not return_dict: output = ( lang_prediction_scores, cross_relationship_score, answer_score, ) + lxmert_output[3:] return ((total_loss,) + output) if total_loss is not None else output return TFLxmertForPreTrainingOutput( loss=total_loss, prediction_logits=lang_prediction_scores, cross_relationship_score=cross_relationship_score, question_answering_score=answer_score, language_hidden_states=lxmert_output.language_hidden_states, vision_hidden_states=lxmert_output.vision_hidden_states, language_attentions=lxmert_output.language_attentions, vision_attentions=lxmert_output.vision_attentions, cross_encoder_attentions=lxmert_output.cross_encoder_attentions, )
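A minimal usage sketch for the TF LXMERT classes above (not part of the original file). It assumes the `unc-nlp/lxmert-base-uncased` checkpoint referenced in the docstrings, and it fakes the ROI-pooled visual features and normalized box coordinates that a Faster R-CNN detector would normally supply, since those are not shipped with the library:

import tensorflow as tf
from transformers import LxmertTokenizer, TFLxmertModel

tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
model = TFLxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")

encoded = tokenizer("A man is riding a horse.", return_tensors="tf")
visual_feats = tf.random.uniform((1, 10, 2048))  # stand-in for ROI-pooled detector features (2048 = default visual_feat_dim)
visual_pos = tf.random.uniform((1, 10, 4))       # stand-in for normalized bounding boxes in [0, 1]

outputs = model(
    encoded["input_ids"],
    visual_feats=visual_feats,
    visual_pos=visual_pos,
    attention_mask=encoded["attention_mask"],
)
print(outputs[0].shape)  # language output: (batch_size, sequence_length, hidden_size)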
61,940
44.048
169
py
SLT-FAI
SLT-FAI-main/transformers/convert_slow_tokenizer.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities to convert slow tokenizers in their fast tokenizers counterparts. All the conversions are grouped here to gather SentencePiece dependencies outside of the fast tokenizers files and allow to make our dependency on SentencePiece optional. """ from typing import Dict, List, Tuple from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers, processors from tokenizers.models import BPE, Unigram, WordPiece # from transformers.tokenization_openai import OpenAIGPTTokenizer from transformers.utils import sentencepiece_model_pb2 as model from .file_utils import requires_sentencepiece class SentencePieceExtractor: """ Extractor implementation for SentencePiece trained models. https://github.com/google/sentencepiece """ def __init__(self, model: str): requires_sentencepiece(self) from sentencepiece import SentencePieceProcessor self.sp = SentencePieceProcessor() self.sp.Load(model) def extract(self) -> Tuple[Dict[str, int], List[Tuple]]: sp = self.sp vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())} # Merges merges = [] for piece_l in vocab.keys(): for piece_r in vocab.keys(): merge = f"{piece_l}{piece_r}" piece_id = vocab.get(merge, None) if piece_id: merges += [(piece_l, piece_r, piece_id)] merges = sorted(merges, key=lambda val: val[2]) merges = [(val[0], val[1]) for val in merges] return vocab, merges def check_number_comma(piece: str) -> bool: return len(piece) < 2 or piece[-1] != "," or not piece[-2].isdigit() def get_proto(filename: str): m = model.ModelProto() m.ParseFromString(open(filename, "rb").read()) return m class Converter: def __init__(self, original_tokenizer): self.original_tokenizer = original_tokenizer def converted(self) -> Tokenizer: raise NotImplementedError() class BertConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) # # Let the tokenizer know about special tokens if they are part of the vocab # if tokenizer.token_to_id(str(self.original_tokenizer.unk_token)) is not None: # tokenizer.add_special_tokens([str(self.original_tokenizer.unk_token)]) # if tokenizer.token_to_id(str(self.original_tokenizer.sep_token)) is not None: # tokenizer.add_special_tokens([str(self.original_tokenizer.sep_token)]) # if tokenizer.token_to_id(str(self.original_tokenizer.cls_token)) is not None: # tokenizer.add_special_tokens([str(self.original_tokenizer.cls_token)]) # if tokenizer.token_to_id(str(self.original_tokenizer.pad_token)) is not None: # tokenizer.add_special_tokens([str(self.original_tokenizer.pad_token)]) # if tokenizer.token_to_id(str(self.original_tokenizer.mask_token)) is not None: # tokenizer.add_special_tokens([str(self.original_tokenizer.mask_token)]) tokenize_chinese_chars = False strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = 
self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:0 $A:0 {sep}:0", pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1", special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer class FunnelConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) # # Let the tokenizer know about special tokens if they are part of the vocab # if tokenizer.token_to_id(str(self.original_tokenizer.unk_token)) is not None: # tokenizer.add_special_tokens([str(self.original_tokenizer.unk_token)]) # if tokenizer.token_to_id(str(self.original_tokenizer.sep_token)) is not None: # tokenizer.add_special_tokens([str(self.original_tokenizer.sep_token)]) # if tokenizer.token_to_id(str(self.original_tokenizer.cls_token)) is not None: # tokenizer.add_special_tokens([str(self.original_tokenizer.cls_token)]) # if tokenizer.token_to_id(str(self.original_tokenizer.pad_token)) is not None: # tokenizer.add_special_tokens([str(self.original_tokenizer.pad_token)]) # if tokenizer.token_to_id(str(self.original_tokenizer.mask_token)) is not None: # tokenizer.add_special_tokens([str(self.original_tokenizer.mask_token)]) tokenize_chinese_chars = False strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:2 $A:0 {sep}:0", # token_type_id is 2 for Funnel transformer pair=f"{cls}:2 $A:0 {sep}:0 $B:1 {sep}:1", special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer class OpenAIGPTConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.encoder merges = list(self.original_tokenizer.bpe_ranks.keys()) unk_token = self.original_tokenizer.unk_token tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, unk_token=str(unk_token), end_of_word_suffix="</w>", fuse_unk=False, ) ) if tokenizer.token_to_id(str(unk_token)) is not None: tokenizer.add_special_tokens([str(unk_token)]) tokenizer.normalizer = normalizers.BertNormalizer(lowercase=True) 
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() tokenizer.decoder = decoders.BPEDecoder(suffix="</w>") return tokenizer class GPT2Converter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.encoder merges = list(self.original_tokenizer.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, ) ) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=self.original_tokenizer.add_prefix_space) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.ByteLevel(trim_offsets=False) return tokenizer class HerbertConverter(Converter): def converted(self) -> Tokenizer: tokenizer_info_str = "#version:" token_suffix = "</w>" vocab = self.original_tokenizer.encoder merges = list(self.original_tokenizer.bpe_ranks.keys()) if tokenizer_info_str in merges[0][0]: merges = merges[1:] tokenizer = Tokenizer( BPE( vocab, merges, dropout=None, unk_token=self.original_tokenizer.unk_token, end_of_word_suffix=token_suffix, ) ) tokenizer.normalizer = normalizers.BertNormalizer(lowercase=False, strip_accents=False) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() tokenizer.decoder = decoders.BPEDecoder(suffix=token_suffix) tokenizer.post_processor = processors.BertProcessing( sep=(self.original_tokenizer.sep_token, self.original_tokenizer.sep_token_id), cls=(self.original_tokenizer.cls_token, self.original_tokenizer.cls_token_id), ) return tokenizer class RobertaConverter(Converter): def converted(self) -> Tokenizer: ot = self.original_tokenizer vocab = ot.encoder merges = list(ot.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, ) ) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.RobertaProcessing( sep=(ot.sep_token, ot.sep_token_id), cls=(ot.cls_token, ot.cls_token_id), add_prefix_space=ot.add_prefix_space, trim_offsets=True, # True by default on Roberta (historical) ) return tokenizer class SpmConverter(Converter): def __init__(self, *args): super().__init__(*args) self.proto = get_proto(self.original_tokenizer.vocab_file) def vocab(self, proto): return [(piece.piece, piece.score) for piece in proto.pieces] def unk_id(self, proto): return proto.trainer_spec.unk_id def tokenizer(self, proto): model_type = proto.trainer_spec.model_type vocab = self.vocab(proto) unk_id = self.unk_id(proto) if model_type == 1: tokenizer = Tokenizer(Unigram(vocab, unk_id)) elif model_type == 2: vocab, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract() tokenizer = Tokenizer( BPE( vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True, ) ) else: raise Exception( "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" ) return tokenizer def normalizer(self, proto): precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap return normalizers.Precompiled(precompiled_charsmap) def post_processor(self): return None def converted(self) -> Tokenizer: tokenizer = self.tokenizer(self.proto) # Tokenizer assemble tokenizer.normalizer = self.normalizer(self.proto) replacement = "▁" add_prefix_space = True tokenizer.pre_tokenizer = pre_tokenizers.Sequence( [ pre_tokenizers.WhitespaceSplit(), pre_tokenizers.Metaspace(replacement=replacement, 
add_prefix_space=add_prefix_space), ] ) tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) post_processor = self.post_processor() if post_processor: tokenizer.post_processor = post_processor return tokenizer class AlbertConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): list_normalizers = [normalizers.Replace("``", '"'), normalizers.Replace("''", '"')] if not self.original_tokenizer.keep_accents: list_normalizers.append(normalizers.NFKD()) list_normalizers.append(normalizers.StripAccents()) if self.original_tokenizer.do_lower_case: list_normalizers.append(normalizers.Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) return normalizers.Sequence(list_normalizers) def post_processor(self): return processors.TemplateProcessing( single="[CLS]:0 $A:0 [SEP]:0", pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", special_tokens=[ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), ], ) class CamembertConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>NOTUSED", 0.0), ("<pad>", 0.0), ("</s>NOTUSED", 0.0), ("<unk>", 0.0), ] # We down-grade the original SentencePiece by -100 to avoid using it and use our added token instead vocab += [(piece.piece, piece.score if i != 0 else piece.score - 100) for i, piece in enumerate(proto.pieces)] vocab += [("<mask>", 0.0)] return vocab def unk_id(self, proto): # See vocab unk position return 3 def post_processor(self): return processors.TemplateProcessing( single="<s> $A </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class MBartConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [ ("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ] vocab += [("<mask>", 0.0)] return vocab def unk_id(self, proto): return 3 def post_processor(self): return processors.TemplateProcessing( single="$A </s> en_XX", pair="$A $B </s> en_XX", special_tokens=[ ("en_XX", self.original_tokenizer.convert_tokens_to_ids("en_XX")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class XLMRobertaConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [("<mask>", 0.0)] return vocab def unk_id(self, proto): unk_id = 3 return unk_id def post_processor(self): return processors.TemplateProcessing( single="<s> $A </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class 
XLNetConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): list_normalizers = [normalizers.Replace("``", '"'), normalizers.Replace("''", '"')] if not self.original_tokenizer.keep_accents: list_normalizers.append(normalizers.NFKD()) list_normalizers.append(normalizers.StripAccents()) if self.original_tokenizer.do_lower_case: list_normalizers.append(normalizers.Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) return normalizers.Sequence(list_normalizers) def post_processor(self): return processors.TemplateProcessing( single="$A:0 <sep>:0 <cls>:2", pair="$A:0 <sep>:0 $B:1 <sep>:1 <cls>:2", special_tokens=[ ("<sep>", self.original_tokenizer.convert_tokens_to_ids("<sep>")), ("<cls>", self.original_tokenizer.convert_tokens_to_ids("<cls>")), ], ) class ReformerConverter(SpmConverter): pass class BertGenerationConverter(SpmConverter): pass class PegasusConverter(SpmConverter): def vocab(self, proto): vocab = [ (self.original_tokenizer.pad_token, 0), (self.original_tokenizer.eos_token, 0), ] vocab += [(f"unk_{i}", -100) for i in range(2, 2 + self.original_tokenizer.offset)] vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]] return vocab def unk_id(self, proto): return proto.trainer_spec.unk_id + self.original_tokenizer.offset def post_processor(self): eos = self.original_tokenizer.eos_token return processors.TemplateProcessing( single=["$A", eos], pair=["$A", "$B", eos], special_tokens=[ (eos, self.original_tokenizer.eos_token_id), ], ) class T5Converter(SpmConverter): def vocab(self, proto): num_extra_ids = self.original_tokenizer._extra_ids vocab = [(piece.piece, piece.score) for piece in proto.pieces] vocab += [("<extra_id_{}>".format(i), 0.0) for i in range(num_extra_ids - 1, -1, -1)] return vocab def post_processor(self): return processors.TemplateProcessing( single=["$A", "</s>"], pair=["$A", "</s>", "$B", "</s>"], special_tokens=[ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) SLOW_TO_FAST_CONVERTERS = { "AlbertTokenizer": AlbertConverter, "BartTokenizer": RobertaConverter, "BertTokenizer": BertConverter, "CamembertTokenizer": CamembertConverter, "DistilBertTokenizer": BertConverter, "DPRReaderTokenizer": BertConverter, "DPRQuestionEncoderTokenizer": BertConverter, "DPRContextEncoderTokenizer": BertConverter, "ElectraTokenizer": BertConverter, "FunnelTokenizer": FunnelConverter, "GPT2Tokenizer": GPT2Converter, "HerbertTokenizer": HerbertConverter, "LayoutLMTokenizer": BertConverter, "LongformerTokenizer": RobertaConverter, "LxmertTokenizer": BertConverter, "MBartTokenizer": MBartConverter, "MobileBertTokenizer": BertConverter, "OpenAIGPTTokenizer": OpenAIGPTConverter, "PegasusTokenizer": PegasusConverter, "ReformerTokenizer": ReformerConverter, "RetriBertTokenizer": BertConverter, "RobertaTokenizer": RobertaConverter, "SqueezeBertTokenizer": BertConverter, "T5Tokenizer": T5Converter, "XLMRobertaTokenizer": XLMRobertaConverter, "XLNetTokenizer": XLNetConverter, } def convert_slow_tokenizer(transformer_tokenizer) -> Tokenizer: """Utilities to convert a slow tokenizer instance in a fast tokenizer instance. 
Args: transformer_tokenizer (:class:`~transformers.tokenization_utils_base.PreTrainedTokenizer`): Instance of a slow tokenizer to convert in the backend tokenizer for :class:`~transformers.tokenization_utils_base.PreTrainedTokenizerFast`. Return: A instance of :class:`~tokenizers.Tokenizer` to be used as the backend tokenizer of a :class:`~transformers.tokenization_utils_base.PreTrainedTokenizerFast` """ tokenizer_class_name = transformer_tokenizer.__class__.__name__ if tokenizer_class_name not in SLOW_TO_FAST_CONVERTERS: raise ValueError( f"An instance of tokenizer class {tokenizer_class_name} cannot be converted in a Fast tokenizer instance. " f"No converter was found. Currently available slow->fast convertors: {list(SLOW_TO_FAST_CONVERTERS.keys())}" ) converter_class = SLOW_TO_FAST_CONVERTERS[tokenizer_class_name] return converter_class(transformer_tokenizer).converted()
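A minimal sketch of how this converter is typically used (not part of the original module); the checkpoint name and output path are placeholders:

from transformers import BertTokenizer
from transformers.convert_slow_tokenizer import convert_slow_tokenizer

slow_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
fast_backend = convert_slow_tokenizer(slow_tokenizer)  # a tokenizers.Tokenizer instance

# The converted backend can be serialized and later wrapped by a PreTrainedTokenizerFast.
fast_backend.save("bert-base-uncased.tokenizer.json")
print(fast_backend.encode("Hello world!").tokens)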
22,945
35.538217
120
py
SLT-FAI
SLT-FAI-main/transformers/convert_funnel_original_tf_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Funnel checkpoint.""" import argparse import logging import torch from transformers import FunnelConfig, FunnelForPreTraining, load_tf_weights_in_funnel logging.basicConfig(level=logging.INFO) def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path): # Initialise PyTorch model config = FunnelConfig.from_json_file(config_file) print("Building PyTorch model from configuration: {}".format(str(config))) model = FunnelForPreTraining(config) # Load weights from tf checkpoint load_tf_weights_in_funnel(model, config, tf_checkpoint_path) # Save pytorch-model print("Save PyTorch model to {}".format(pytorch_dump_path)) torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained model. \n" "This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
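A minimal sketch showing the conversion entry point called directly from Python instead of via the CLI (not part of the original script); the checkpoint and output paths are placeholders:

from transformers.convert_funnel_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="funnel_tf/model.ckpt",    # TensorFlow checkpoint to convert
    config_file="funnel_tf/config.json",          # matching FunnelConfig json
    pytorch_dump_path="funnel_pt/pytorch_model.bin",
)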
2,128
33.33871
117
py
SLT-FAI
SLT-FAI-main/transformers/convert_marian_to_pytorch.py
import argparse import json import os import socket import time import warnings from pathlib import Path from typing import Dict, List, Union from zipfile import ZipFile import numpy as np import torch from tqdm import tqdm from transformers import MarianConfig, MarianMTModel, MarianTokenizer from transformers.hf_api import HfApi def remove_suffix(text: str, suffix: str): if text.endswith(suffix): return text[: -len(suffix)] return text # or whatever def remove_prefix(text: str, prefix: str): if text.startswith(prefix): return text[len(prefix) :] return text # or whatever def convert_encoder_layer(opus_dict, layer_prefix: str, converter: dict): sd = {} for k in opus_dict: if not k.startswith(layer_prefix): continue stripped = remove_prefix(k, layer_prefix) v = opus_dict[k].T # besides embeddings, everything must be transposed. sd[converter[stripped]] = torch.tensor(v).squeeze() return sd def load_layers_(layer_lst: torch.nn.ModuleList, opus_state: dict, converter, is_decoder=False): for i, layer in enumerate(layer_lst): layer_tag = f"decoder_l{i + 1}_" if is_decoder else f"encoder_l{i + 1}_" sd = convert_encoder_layer(opus_state, layer_tag, converter) layer.load_state_dict(sd, strict=True) def find_pretrained_model(src_lang: str, tgt_lang: str) -> List[str]: """Find models that can accept src_lang as input and return tgt_lang as output.""" prefix = "Helsinki-NLP/opus-mt-" api = HfApi() model_list = api.model_list() model_ids = [x.modelId for x in model_list if x.modelId.startswith("Helsinki-NLP")] src_and_targ = [ remove_prefix(m, prefix).lower().split("-") for m in model_ids if "+" not in m ] # + cant be loaded. matching = [f"{prefix}{a}-{b}" for (a, b) in src_and_targ if src_lang in a and tgt_lang in b] return matching def add_emb_entries(wemb, final_bias, n_special_tokens=1): vsize, d_model = wemb.shape embs_to_add = np.zeros((n_special_tokens, d_model)) new_embs = np.concatenate([wemb, embs_to_add]) bias_to_add = np.zeros((n_special_tokens, 1)) new_bias = np.concatenate((final_bias, bias_to_add), axis=1) return new_embs, new_bias def _cast_yaml_str(v): bool_dct = {"true": True, "false": False} if not isinstance(v, str): return v elif v in bool_dct: return bool_dct[v] try: return int(v) except (TypeError, ValueError): return v def cast_marian_config(raw_cfg: Dict[str, str]) -> Dict: return {k: _cast_yaml_str(v) for k, v in raw_cfg.items()} CONFIG_KEY = "special:model.yml" def load_config_from_state_dict(opus_dict): import yaml cfg_str = "".join([chr(x) for x in opus_dict[CONFIG_KEY]]) yaml_cfg = yaml.load(cfg_str[:-1], Loader=yaml.BaseLoader) return cast_marian_config(yaml_cfg) def find_model_file(dest_dir): # this one better model_files = list(Path(dest_dir).glob("*.npz")) assert len(model_files) == 1, model_files model_file = model_files[0] return model_file # Group Names Logic: change long opus model names to something shorter, like opus-mt-en-ROMANCE ROM_GROUP = ( "fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO+es_EC+es_ES+es_GT" "+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR+pt_PT+gl+lad+an+mwl+it+it_IT+co" "+nap+scn+vec+sc+ro+la" ) GROUPS = [ ("cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", "ZH"), (ROM_GROUP, "ROMANCE"), ("de+nl+fy+af+da+fo+is+no+nb+nn+sv", "NORTH_EU"), ("da+fo+is+no+nb+nn+sv", "SCANDINAVIA"), ("se+sma+smj+smn+sms", "SAMI"), ("nb_NO+nb+nn_NO+nn+nog+no_nb+no", "NORWAY"), ("ga+cy+br+gd+kw+gv", "CELTIC"), # https://en.wikipedia.org/wiki/Insular_Celtic_languages ] GROUP_TO_OPUS_NAME = { 
"opus-mt-ZH-de": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-de", "opus-mt-ZH-fi": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi", "opus-mt-ZH-sv": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-sv", "opus-mt-SCANDINAVIA-SCANDINAVIA": "da+fo+is+no+nb+nn+sv-da+fo+is+no+nb+nn+sv", "opus-mt-NORTH_EU-NORTH_EU": "de+nl+fy+af+da+fo+is+no+nb+nn+sv-de+nl+fy+af+da+fo+is+no+nb+nn+sv", "opus-mt-de-ZH": "de-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", "opus-mt-en_el_es_fi-en_el_es_fi": "en+el+es+fi-en+el+es+fi", "opus-mt-en-ROMANCE": "en-fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO" "+es_EC+es_ES+es_GT+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR" "+pt_PT+gl+lad+an+mwl+it+it_IT+co+nap+scn+vec+sc+ro+la", "opus-mt-en-CELTIC": "en-ga+cy+br+gd+kw+gv", "opus-mt-es-NORWAY": "es-nb_NO+nb+nn_NO+nn+nog+no_nb+no", "opus-mt-fi_nb_no_nn_ru_sv_en-SAMI": "fi+nb+no+nn+ru+sv+en-se+sma+smj+smn+sms", "opus-mt-fi-ZH": "fi-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", "opus-mt-fi-NORWAY": "fi-nb_NO+nb+nn_NO+nn+nog+no_nb+no", "opus-mt-ROMANCE-en": "fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO" "+es_EC+es_ES+es_GT+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR" "+pt_PT+gl+lad+an+mwl+it+it_IT+co+nap+scn+vec+sc+ro+la-en", "opus-mt-CELTIC-en": "ga+cy+br+gd+kw+gv-en", "opus-mt-sv-ZH": "sv-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", "opus-mt-sv-NORWAY": "sv-nb_NO+nb+nn_NO+nn+nog+no_nb+no", } OPUS_GITHUB_URL = "https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/" ORG_NAME = "Helsinki-NLP/" def convert_opus_name_to_hf_name(x): """For OPUS-MT-Train/ DEPRECATED""" for substr, grp_name in GROUPS: x = x.replace(substr, grp_name) return x.replace("+", "_") def convert_hf_name_to_opus_name(hf_model_name): """Relies on the assumption that there are no language codes like pt_br in models that are not in GROUP_TO_OPUS_NAME.""" hf_model_name = remove_prefix(hf_model_name, ORG_NAME) if hf_model_name in GROUP_TO_OPUS_NAME: opus_w_prefix = GROUP_TO_OPUS_NAME[hf_model_name] else: opus_w_prefix = hf_model_name.replace("_", "+") return remove_prefix(opus_w_prefix, "opus-mt-") def get_system_metadata(repo_root): import git return dict( helsinki_git_sha=git.Repo(path=repo_root, search_parent_directories=True).head.object.hexsha, transformers_git_sha=git.Repo(path=".", search_parent_directories=True).head.object.hexsha, port_machine=socket.gethostname(), port_time=time.strftime("%Y-%m-%d-%H:%M"), ) FRONT_MATTER_TEMPLATE = """--- language: {} tags: - translation license: apache-2.0 --- """ DEFAULT_REPO = "Tatoeba-Challenge" DEFAULT_MODEL_DIR = os.path.join(DEFAULT_REPO, "models") def write_model_card( hf_model_name: str, repo_root=DEFAULT_REPO, save_dir=Path("marian_converted"), dry_run=False, extra_metadata={}, ) -> str: """Copy the most recent model's readme section from opus, and add metadata. 
upload command: aws s3 sync model_card_dir s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun """ import pandas as pd hf_model_name = remove_prefix(hf_model_name, ORG_NAME) opus_name: str = convert_hf_name_to_opus_name(hf_model_name) assert repo_root in ("OPUS-MT-train", "Tatoeba-Challenge") opus_readme_path = Path(repo_root).joinpath("models", opus_name, "README.md") assert opus_readme_path.exists(), f"Readme file {opus_readme_path} not found" opus_src, opus_tgt = [x.split("+") for x in opus_name.split("-")] readme_url = f"https://github.com/Helsinki-NLP/{repo_root}/tree/master/models/{opus_name}/README.md" s, t = ",".join(opus_src), ",".join(opus_tgt) metadata = { "hf_name": hf_model_name, "source_languages": s, "target_languages": t, "opus_readme_url": readme_url, "original_repo": repo_root, "tags": ["translation"], } metadata.update(extra_metadata) metadata.update(get_system_metadata(repo_root)) # combine with opus markdown extra_markdown = ( f"### {hf_model_name}\n\n* source group: {metadata['src_name']} \n* target group: " f"{metadata['tgt_name']} \n* OPUS readme: [{opus_name}]({readme_url})\n" ) content = opus_readme_path.open().read() content = content.split("\n# ")[-1] # Get the lowest level 1 header in the README -- the most recent model. splat = content.split("*")[2:] print(splat[3]) content = "*".join(splat) content = ( FRONT_MATTER_TEMPLATE.format(metadata["src_alpha2"]) + extra_markdown + "\n* " + content.replace("download", "download original weights") ) items = "\n\n".join([f"- {k}: {v}" for k, v in metadata.items()]) sec3 = "\n### System Info: \n" + items content += sec3 if dry_run: return content, metadata sub_dir = save_dir / f"opus-mt-{hf_model_name}" sub_dir.mkdir(exist_ok=True) dest = sub_dir / "README.md" dest.open("w").write(content) pd.Series(metadata).to_json(sub_dir / "metadata.json") # if dry_run: return content, metadata def make_registry(repo_path="Opus-MT-train/models"): if not (Path(repo_path) / "fr-en" / "README.md").exists(): raise ValueError( f"repo_path:{repo_path} does not exist: " "You must run: git clone [email protected]:Helsinki-NLP/Opus-MT-train.git before calling." ) results = {} for p in Path(repo_path).iterdir(): n_dash = p.name.count("-") if n_dash == 0: continue else: lns = list(open(p / "README.md").readlines()) results[p.name] = _parse_readme(lns) return [(k, v["pre-processing"], v["download"], v["download"][:-4] + ".test.txt") for k, v in results.items()] def convert_all_sentencepiece_models(model_list=None, repo_path=None, dest_dir=Path("marian_converted")): """Requires 300GB""" save_dir = Path("marian_ckpt") dest_dir = Path(dest_dir) dest_dir.mkdir(exist_ok=True) save_paths = [] if model_list is None: model_list: list = make_registry(repo_path=repo_path) for k, prepro, download, test_set_url in tqdm(model_list): if "SentencePiece" not in prepro: # dont convert BPE models. 
continue if not os.path.exists(save_dir / k): download_and_unzip(download, save_dir / k) pair_name = convert_opus_name_to_hf_name(k) convert(save_dir / k, dest_dir / f"opus-mt-{pair_name}") save_paths.append(dest_dir / f"opus-mt-{pair_name}") return save_paths def lmap(f, x) -> List: return list(map(f, x)) def fetch_test_set(test_set_url): import wget fname = wget.download(test_set_url, "opus_test.txt") lns = Path(fname).open().readlines() src = lmap(str.strip, lns[::4]) gold = lmap(str.strip, lns[1::4]) mar_model = lmap(str.strip, lns[2::4]) assert ( len(gold) == len(mar_model) == len(src) ), f"Gold, marian and source lengths {len(gold)}, {len(mar_model)}, {len(src)} mismatched" os.remove(fname) return src, mar_model, gold def convert_whole_dir(path=Path("marian_ckpt/")): for subdir in tqdm(list(path.ls())): dest_dir = f"marian_converted/{subdir.name}" if (dest_dir / "pytorch_model.bin").exists(): continue convert(source_dir, dest_dir) def _parse_readme(lns): """Get link and metadata from opus model card equivalent.""" subres = {} for ln in [x.strip() for x in lns]: if not ln.startswith("*"): continue ln = ln[1:].strip() for k in ["download", "dataset", "models", "model", "pre-processing"]: if ln.startswith(k): break else: continue if k in ["dataset", "model", "pre-processing"]: splat = ln.split(":") _, v = splat subres[k] = v elif k == "download": v = ln.split("(")[-1][:-1] subres[k] = v return subres def save_tokenizer_config(dest_dir: Path): dname = dest_dir.name.split("-") dct = dict(target_lang=dname[-1], source_lang="-".join(dname[:-1])) save_json(dct, dest_dir / "tokenizer_config.json") def add_to_vocab_(vocab: Dict[str, int], special_tokens: List[str]): start = max(vocab.values()) + 1 added = 0 for tok in special_tokens: if tok in vocab: continue vocab[tok] = start + added added += 1 return added def find_vocab_file(model_dir): return list(model_dir.glob("*vocab.yml"))[0] def add_special_tokens_to_vocab(model_dir: Path) -> None: vocab = load_yaml(find_vocab_file(model_dir)) vocab = {k: int(v) for k, v in vocab.items()} num_added = add_to_vocab_(vocab, ["<pad>"]) print(f"added {num_added} tokens to vocab") save_json(vocab, model_dir / "vocab.json") save_tokenizer_config(model_dir) def check_equal(marian_cfg, k1, k2): v1, v2 = marian_cfg[k1], marian_cfg[k2] assert v1 == v2, f"hparams {k1},{k2} differ: {v1} != {v2}" def check_marian_cfg_assumptions(marian_cfg): assumed_settings = { "tied-embeddings-all": True, "layer-normalization": False, "right-left": False, "transformer-ffn-depth": 2, "transformer-aan-depth": 2, "transformer-no-projection": False, "transformer-postprocess-emb": "d", "transformer-postprocess": "dan", # Dropout, add, normalize "transformer-preprocess": "", "type": "transformer", "ulr-dim-emb": 0, "dec-cell-base-depth": 2, "dec-cell-high-depth": 1, "transformer-aan-nogate": False, } for k, v in assumed_settings.items(): actual = marian_cfg[k] assert actual == v, f"Unexpected config value for {k} expected {v} got {actual}" check_equal(marian_cfg, "transformer-ffn-activation", "transformer-aan-activation") check_equal(marian_cfg, "transformer-ffn-depth", "transformer-aan-depth") check_equal(marian_cfg, "transformer-dim-ffn", "transformer-dim-aan") BIAS_KEY = "decoder_ff_logit_out_b" BART_CONVERTER = { # for each encoder and decoder layer "self_Wq": "self_attn.q_proj.weight", "self_Wk": "self_attn.k_proj.weight", "self_Wv": "self_attn.v_proj.weight", "self_Wo": "self_attn.out_proj.weight", "self_bq": "self_attn.q_proj.bias", "self_bk": "self_attn.k_proj.bias", 
"self_bv": "self_attn.v_proj.bias", "self_bo": "self_attn.out_proj.bias", "self_Wo_ln_scale": "self_attn_layer_norm.weight", "self_Wo_ln_bias": "self_attn_layer_norm.bias", "ffn_W1": "fc1.weight", "ffn_b1": "fc1.bias", "ffn_W2": "fc2.weight", "ffn_b2": "fc2.bias", "ffn_ffn_ln_scale": "final_layer_norm.weight", "ffn_ffn_ln_bias": "final_layer_norm.bias", # Decoder Cross Attention "context_Wk": "encoder_attn.k_proj.weight", "context_Wo": "encoder_attn.out_proj.weight", "context_Wq": "encoder_attn.q_proj.weight", "context_Wv": "encoder_attn.v_proj.weight", "context_bk": "encoder_attn.k_proj.bias", "context_bo": "encoder_attn.out_proj.bias", "context_bq": "encoder_attn.q_proj.bias", "context_bv": "encoder_attn.v_proj.bias", "context_Wo_ln_scale": "encoder_attn_layer_norm.weight", "context_Wo_ln_bias": "encoder_attn_layer_norm.bias", } class OpusState: def __init__(self, source_dir): npz_path = find_model_file(source_dir) self.state_dict = np.load(npz_path) cfg = load_config_from_state_dict(self.state_dict) assert cfg["dim-vocabs"][0] == cfg["dim-vocabs"][1] assert "Wpos" not in self.state_dict, "Wpos key in state dictionary" self.state_dict = dict(self.state_dict) self.wemb, self.final_bias = add_emb_entries(self.state_dict["Wemb"], self.state_dict[BIAS_KEY], 1) self.pad_token_id = self.wemb.shape[0] - 1 cfg["vocab_size"] = self.pad_token_id + 1 # self.state_dict['Wemb'].sha self.state_keys = list(self.state_dict.keys()) assert "Wtype" not in self.state_dict, "Wtype key in state dictionary" self._check_layer_entries() self.source_dir = source_dir self.cfg = cfg hidden_size, intermediate_shape = self.state_dict["encoder_l1_ffn_W1"].shape assert ( hidden_size == cfg["dim-emb"] == 512 ), f"Hidden size {hidden_size} and configured size {cfg['dim_emb']} mismatched or not 512" # Process decoder.yml decoder_yml = cast_marian_config(load_yaml(source_dir / "decoder.yml")) check_marian_cfg_assumptions(cfg) self.hf_config = MarianConfig( vocab_size=cfg["vocab_size"], decoder_layers=cfg["dec-depth"], encoder_layers=cfg["enc-depth"], decoder_attention_heads=cfg["transformer-heads"], encoder_attention_heads=cfg["transformer-heads"], decoder_ffn_dim=cfg["transformer-dim-ffn"], encoder_ffn_dim=cfg["transformer-dim-ffn"], d_model=cfg["dim-emb"], activation_function=cfg["transformer-aan-activation"], pad_token_id=self.pad_token_id, eos_token_id=0, bos_token_id=0, max_position_embeddings=cfg["dim-emb"], scale_embedding=True, normalize_embedding="n" in cfg["transformer-preprocess"], static_position_embeddings=not cfg["transformer-train-position-embeddings"], dropout=0.1, # see opus-mt-train repo/transformer-dropout param. 
# default: add_final_layer_norm=False, num_beams=decoder_yml["beam-size"], decoder_start_token_id=self.pad_token_id, bad_words_ids=[[self.pad_token_id]], max_length=512, ) def _check_layer_entries(self): self.encoder_l1 = self.sub_keys("encoder_l1") self.decoder_l1 = self.sub_keys("decoder_l1") self.decoder_l2 = self.sub_keys("decoder_l2") if len(self.encoder_l1) != 16: warnings.warn(f"Expected 16 keys for each encoder layer, got {len(self.encoder_l1)}") if len(self.decoder_l1) != 26: warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l1)}") if len(self.decoder_l2) != 26: warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l1)}") @property def extra_keys(self): extra = [] for k in self.state_keys: if ( k.startswith("encoder_l") or k.startswith("decoder_l") or k in [CONFIG_KEY, "Wemb", "Wpos", "decoder_ff_logit_out_b"] ): continue else: extra.append(k) return extra def sub_keys(self, layer_prefix): return [remove_prefix(k, layer_prefix) for k in self.state_dict if k.startswith(layer_prefix)] def load_marian_model(self) -> MarianMTModel: state_dict, cfg = self.state_dict, self.hf_config assert cfg.static_position_embeddings, "config.static_position_embeddings should be True" model = MarianMTModel(cfg) assert "hidden_size" not in cfg.to_dict() load_layers_( model.model.encoder.layers, state_dict, BART_CONVERTER, ) load_layers_(model.model.decoder.layers, state_dict, BART_CONVERTER, is_decoder=True) # handle tensors not associated with layers wemb_tensor = torch.nn.Parameter(torch.FloatTensor(self.wemb)) bias_tensor = torch.nn.Parameter(torch.FloatTensor(self.final_bias)) model.model.shared.weight = wemb_tensor model.model.encoder.embed_tokens = model.model.decoder.embed_tokens = model.model.shared model.final_logits_bias = bias_tensor if "Wpos" in state_dict: print("Unexpected: got Wpos") wpos_tensor = torch.tensor(state_dict["Wpos"]) model.model.encoder.embed_positions.weight = wpos_tensor model.model.decoder.embed_positions.weight = wpos_tensor if cfg.normalize_embedding: assert "encoder_emb_ln_scale_pre" in state_dict raise NotImplementedError("Need to convert layernorm_embedding") assert not self.extra_keys, f"Failed to convert {self.extra_keys}" assert ( model.model.shared.padding_idx == self.pad_token_id ), f"Padding tokens {model.model.shared.padding_idx} and {self.pad_token_id} mismatched" return model def download_and_unzip(url, dest_dir): try: import wget except ImportError: raise ImportError("you must pip install wget") filename = wget.download(url) unzip(filename, dest_dir) os.remove(filename) def convert(source_dir: Path, dest_dir): dest_dir = Path(dest_dir) dest_dir.mkdir(exist_ok=True) add_special_tokens_to_vocab(source_dir) tokenizer = MarianTokenizer.from_pretrained(str(source_dir)) tokenizer.save_pretrained(dest_dir) opus_state = OpusState(source_dir) assert opus_state.cfg["vocab_size"] == len( tokenizer.encoder ), f"Original vocab size {opus_state.cfg['vocab_size']} and new vocab size {len(tokenizer.encoder)} mismatched" # save_json(opus_state.cfg, dest_dir / "marian_original_config.json") # ^^ Uncomment to save human readable marian config for debugging model = opus_state.load_marian_model() model = model.half() model.save_pretrained(dest_dir) model.from_pretrained(dest_dir) # sanity check def load_yaml(path): import yaml with open(path) as f: return yaml.load(f, Loader=yaml.BaseLoader) def save_json(content: Union[Dict, List], path: str) -> None: with open(path, "w") as f: json.dump(content, f) def unzip(zip_path: 
str, dest_dir: str) -> None: with ZipFile(zip_path, "r") as zipObj: zipObj.extractall(dest_dir) if __name__ == "__main__": """ Tatoeba conversion instructions in scripts/tatoeba/README.md """ parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--src", type=str, help="path to marian model sub dir", default="en-de") parser.add_argument("--dest", type=str, default=None, help="Path to the output PyTorch model.") args = parser.parse_args() source_dir = Path(args.src) assert source_dir.exists(), f"Source directory {source_dir} not found" dest_dir = f"converted-{source_dir.name}" if args.dest is None else args.dest convert(source_dir, dest_dir)
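A minimal sketch of a single-model conversion using the helpers above (not part of the original script). The source directory is a placeholder and is assumed to contain the OPUS-MT artifacts the code expects (the *.npz weights, vocab.yml, decoder.yml and SentencePiece files):

from pathlib import Path

from transformers.convert_marian_to_pytorch import convert

source_dir = Path("marian_ckpt/en-de")  # a downloaded and unzipped OPUS-MT model directory
convert(source_dir, "marian_converted/opus-mt-en-de")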
22,823
35.991896
116
py
SLT-FAI
SLT-FAI-main/transformers/convert_marian_tatoeba_to_pytorch.py
import argparse import os from pathlib import Path from typing import List, Tuple from transformers.convert_marian_to_pytorch import ( FRONT_MATTER_TEMPLATE, _parse_readme, convert_all_sentencepiece_models, get_system_metadata, remove_prefix, remove_suffix, ) try: import pandas as pd except ImportError: pass DEFAULT_REPO = "Tatoeba-Challenge" DEFAULT_MODEL_DIR = os.path.join(DEFAULT_REPO, "models") LANG_CODE_URL = "https://datahub.io/core/language-codes/r/language-codes-3b2.csv" ISO_URL = "https://cdn-datasets.huggingface.co/language_codes/iso-639-3.csv" ISO_PATH = "lang_code_data/iso-639-3.csv" LANG_CODE_PATH = "lang_code_data/language-codes-3b2.csv" class TatoebaConverter: """Convert Tatoeba-Challenge models to huggingface format. Steps: (1) convert numpy state dict to hf format (same code as OPUS-MT-Train conversion). (2) rename opus model to huggingface format. This means replace each alpha3 code with an alpha2 code if a unique one existes. e.g. aav-eng -> aav-en, heb-eng -> he-en (3) write a model card containing the original Tatoeba-Challenge/README.md and extra info about alpha3 group members. """ def __init__(self, save_dir="marian_converted"): assert Path(DEFAULT_REPO).exists(), "need git clone [email protected]:Helsinki-NLP/Tatoeba-Challenge.git" reg = self.make_tatoeba_registry() self.download_metadata() self.registry = reg reg_df = pd.DataFrame(reg, columns=["id", "prepro", "url_model", "url_test_set"]) assert reg_df.id.value_counts().max() == 1 reg_df = reg_df.set_index("id") reg_df["src"] = reg_df.reset_index().id.apply(lambda x: x.split("-")[0]).values reg_df["tgt"] = reg_df.reset_index().id.apply(lambda x: x.split("-")[1]).values released_cols = [ "url_base", "pair", # (ISO639-3/ISO639-5 codes), "short_pair", # (reduced codes), "chrF2_score", "bleu", "brevity_penalty", "ref_len", "src_name", "tgt_name", ] released = pd.read_csv("Tatoeba-Challenge/models/released-models.txt", sep="\t", header=None).iloc[:-1] released.columns = released_cols released["fname"] = released["url_base"].apply( lambda x: remove_suffix(remove_prefix(x, "https://object.pouta.csc.fi/Tatoeba-Challenge/opus"), ".zip") ) released["2m"] = released.fname.str.startswith("2m") released["date"] = pd.to_datetime( released["fname"].apply(lambda x: remove_prefix(remove_prefix(x, "2m-"), "-")) ) released["base_ext"] = released.url_base.apply(lambda x: Path(x).name) reg_df["base_ext"] = reg_df.url_model.apply(lambda x: Path(x).name) metadata_new = reg_df.reset_index().merge(released.rename(columns={"pair": "id"}), on=["base_ext", "id"]) metadata_renamer = {"src": "src_alpha3", "tgt": "tgt_alpha3", "id": "long_pair", "date": "train_date"} metadata_new = metadata_new.rename(columns=metadata_renamer) metadata_new["src_alpha2"] = metadata_new.short_pair.apply(lambda x: x.split("-")[0]) metadata_new["tgt_alpha2"] = metadata_new.short_pair.apply(lambda x: x.split("-")[1]) DROP_COLS_BOTH = ["url_base", "base_ext", "fname"] metadata_new = metadata_new.drop(DROP_COLS_BOTH, 1) metadata_new["prefer_old"] = metadata_new.long_pair.isin([]) self.metadata = metadata_new assert self.metadata.short_pair.value_counts().max() == 1, "Multiple metadata entries for a short pair" self.metadata = self.metadata.set_index("short_pair") # wget.download(LANG_CODE_URL) mapper = pd.read_csv(LANG_CODE_PATH) mapper.columns = ["a3", "a2", "ref"] self.iso_table = pd.read_csv(ISO_PATH, sep="\t").rename(columns=lambda x: x.lower()) more_3_to_2 = self.iso_table.set_index("id").part1.dropna().to_dict() 
more_3_to_2.update(mapper.set_index("a3").a2.to_dict()) self.alpha3_to_alpha2 = more_3_to_2 self.model_card_dir = Path(save_dir) self.constituents = GROUP_MEMBERS def convert_models(self, tatoeba_ids, dry_run=False): entries_to_convert = [x for x in self.registry if x[0] in tatoeba_ids] converted_paths = convert_all_sentencepiece_models(entries_to_convert, dest_dir=self.model_card_dir) for path in converted_paths: long_pair = remove_prefix(path.name, "opus-mt-").split("-") # eg. heb-eng assert len(long_pair) == 2 new_p_src = self.get_two_letter_code(long_pair[0]) new_p_tgt = self.get_two_letter_code(long_pair[1]) hf_model_id = f"opus-mt-{new_p_src}-{new_p_tgt}" new_path = path.parent.joinpath(hf_model_id) # opus-mt-he-en os.rename(str(path), str(new_path)) self.write_model_card(hf_model_id, dry_run=dry_run) def get_two_letter_code(self, three_letter_code): return self.alpha3_to_alpha2.get(three_letter_code, three_letter_code) def expand_group_to_two_letter_codes(self, grp_name): return [self.get_two_letter_code(x) for x in self.constituents[grp_name]] def get_tags(self, code, ref_name): if len(code) == 2: assert "languages" not in ref_name, f"{code}: {ref_name}" return [code], False elif "languages" in ref_name or len(self.constituents.get(code, [])) > 1: group = self.expand_group_to_two_letter_codes(code) group.append(code) return group, True else: # zho-> zh print(f"Three letter monolingual code: {code}") return [code], False def resolve_lang_code(self, r) -> Tuple[List[str], str, str]: """R is a row in ported""" short_pair = r.short_pair src, tgt = short_pair.split("-") src_tags, src_multilingual = self.get_tags(src, r.src_name) assert isinstance(src_tags, list) tgt_tags, tgt_multilingual = self.get_tags(tgt, r.tgt_name) assert isinstance(tgt_tags, list) return dedup(src_tags + tgt_tags), src_multilingual, tgt_multilingual def write_model_card( self, hf_model_id: str, repo_root=DEFAULT_REPO, dry_run=False, ) -> str: """Copy the most recent model's readme section from opus, and add metadata. 
upload command: aws s3 sync model_card_dir s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun """ short_pair = remove_prefix(hf_model_id, "opus-mt-") extra_metadata = self.metadata.loc[short_pair].drop("2m") extra_metadata["short_pair"] = short_pair lang_tags, src_multilingual, tgt_multilingual = self.resolve_lang_code(extra_metadata) opus_name = f"{extra_metadata.src_alpha3}-{extra_metadata.tgt_alpha3}" # opus_name: str = self.convert_hf_name_to_opus_name(hf_model_name) assert repo_root in ("OPUS-MT-train", "Tatoeba-Challenge") opus_readme_path = Path(repo_root).joinpath("models", opus_name, "README.md") assert opus_readme_path.exists(), f"Readme file {opus_readme_path} not found" opus_src, opus_tgt = [x.split("+") for x in opus_name.split("-")] readme_url = f"https://github.com/Helsinki-NLP/{repo_root}/tree/master/models/{opus_name}/README.md" s, t = ",".join(opus_src), ",".join(opus_tgt) metadata = { "hf_name": short_pair, "source_languages": s, "target_languages": t, "opus_readme_url": readme_url, "original_repo": repo_root, "tags": ["translation"], "languages": lang_tags, } lang_tags = l2front_matter(lang_tags) metadata["src_constituents"] = self.constituents[s] metadata["tgt_constituents"] = self.constituents[t] metadata["src_multilingual"] = src_multilingual metadata["tgt_multilingual"] = tgt_multilingual metadata.update(extra_metadata) metadata.update(get_system_metadata(repo_root)) # combine with Tatoeba markdown extra_markdown = f"### {short_pair}\n\n* source group: {metadata['src_name']} \n* target group: {metadata['tgt_name']} \n* OPUS readme: [{opus_name}]({readme_url})\n" content = opus_readme_path.open().read() content = content.split("\n# ")[-1] # Get the lowest level 1 header in the README -- the most recent model. splat = content.split("*")[2:] content = "*".join(splat) # BETTER FRONT MATTER LOGIC content = ( FRONT_MATTER_TEMPLATE.format(lang_tags) + extra_markdown + "\n* " + content.replace("download", "download original " "weights") ) items = "\n\n".join([f"- {k}: {v}" for k, v in metadata.items()]) sec3 = "\n### System Info: \n" + items content += sec3 if dry_run: return content, metadata sub_dir = self.model_card_dir / hf_model_id sub_dir.mkdir(exist_ok=True) dest = sub_dir / "README.md" dest.open("w").write(content) pd.Series(metadata).to_json(sub_dir / "metadata.json") return content, metadata def download_metadata(self): Path(LANG_CODE_PATH).parent.mkdir(exist_ok=True) import wget if not os.path.exists(ISO_PATH): wget.download(ISO_URL, ISO_PATH) if not os.path.exists(LANG_CODE_PATH): wget.download(LANG_CODE_URL, LANG_CODE_PATH) @staticmethod def make_tatoeba_registry(repo_path=DEFAULT_MODEL_DIR): if not (Path(repo_path) / "zho-eng" / "README.md").exists(): raise ValueError( f"repo_path:{repo_path} does not exist: " "You must run: git clone [email protected]:Helsinki-NLP/Tatoeba-Challenge.git before calling." ) results = {} for p in Path(repo_path).iterdir(): if len(p.name) != 7: continue lns = list(open(p / "README.md").readlines()) results[p.name] = _parse_readme(lns) return [(k, v["pre-processing"], v["download"], v["download"][:-4] + ".test.txt") for k, v in results.items()] GROUP_MEMBERS = { # three letter code -> (group/language name, {constituents...} # if this language is on the target side the constituents can be used as target language codes. # if the language is on the source side they are supported natively without special codes. 
"aav": ("Austro-Asiatic languages", {"hoc", "hoc_Latn", "kha", "khm", "khm_Latn", "mnw", "vie", "vie_Hani"}), "afa": ( "Afro-Asiatic languages", { "acm", "afb", "amh", "apc", "ara", "arq", "ary", "arz", "hau_Latn", "heb", "kab", "mlt", "rif_Latn", "shy_Latn", "som", "thv", "tir", }, ), "afr": ("Afrikaans", {"afr"}), "alv": ( "Atlantic-Congo languages", { "ewe", "fuc", "fuv", "ibo", "kin", "lin", "lug", "nya", "run", "sag", "sna", "swh", "toi_Latn", "tso", "umb", "wol", "xho", "yor", "zul", }, ), "ara": ("Arabic", {"afb", "apc", "apc_Latn", "ara", "ara_Latn", "arq", "arq_Latn", "arz"}), "art": ( "Artificial languages", { "afh_Latn", "avk_Latn", "dws_Latn", "epo", "ido", "ido_Latn", "ile_Latn", "ina_Latn", "jbo", "jbo_Cyrl", "jbo_Latn", "ldn_Latn", "lfn_Cyrl", "lfn_Latn", "nov_Latn", "qya", "qya_Latn", "sjn_Latn", "tlh_Latn", "tzl", "tzl_Latn", "vol_Latn", }, ), "aze": ("Azerbaijani", {"aze_Latn"}), "bat": ("Baltic languages", {"lit", "lav", "prg_Latn", "ltg", "sgs"}), "bel": ("Belarusian", {"bel", "bel_Latn"}), "ben": ("Bengali", {"ben"}), "bnt": ( "Bantu languages", {"kin", "lin", "lug", "nya", "run", "sna", "swh", "toi_Latn", "tso", "umb", "xho", "zul"}, ), "bul": ("Bulgarian", {"bul", "bul_Latn"}), "cat": ("Catalan", {"cat"}), "cau": ("Caucasian languages", {"abk", "kat", "che", "ady"}), "ccs": ("South Caucasian languages", {"kat"}), "ceb": ("Cebuano", {"ceb"}), "cel": ("Celtic languages", {"gla", "gle", "bre", "cor", "glv", "cym"}), "ces": ("Czech", {"ces"}), "cpf": ("Creoles and pidgins, French‑based", {"gcf_Latn", "hat", "mfe"}), "cpp": ( "Creoles and pidgins, Portuguese-based", {"zsm_Latn", "ind", "pap", "min", "tmw_Latn", "max_Latn", "zlm_Latn"}, ), "cus": ("Cushitic languages", {"som"}), "dan": ("Danish", {"dan"}), "deu": ("German", {"deu"}), "dra": ("Dravidian languages", {"tam", "kan", "mal", "tel"}), "ell": ("Modern Greek (1453-)", {"ell"}), "eng": ("English", {"eng"}), "epo": ("Esperanto", {"epo"}), "est": ("Estonian", {"est"}), "euq": ("Basque (family)", {"eus"}), "eus": ("Basque", {"eus"}), "fin": ("Finnish", {"fin"}), "fiu": ( "Finno-Ugrian languages", { "est", "fin", "fkv_Latn", "hun", "izh", "kpv", "krl", "liv_Latn", "mdf", "mhr", "myv", "sma", "sme", "udm", "vep", "vro", }, ), "fra": ("French", {"fra"}), "gem": ( "Germanic languages", { "afr", "ang_Latn", "dan", "deu", "eng", "enm_Latn", "fao", "frr", "fry", "gos", "got_Goth", "gsw", "isl", "ksh", "ltz", "nds", "nld", "nno", "nob", "nob_Hebr", "non_Latn", "pdc", "sco", "stq", "swe", "swg", "yid", }, ), "gle": ("Irish", {"gle"}), "glg": ("Galician", {"glg"}), "gmq": ("North Germanic languages", {"dan", "nob", "nob_Hebr", "swe", "isl", "nno", "non_Latn", "fao"}), "gmw": ( "West Germanic languages", { "afr", "ang_Latn", "deu", "eng", "enm_Latn", "frr", "fry", "gos", "gsw", "ksh", "ltz", "nds", "nld", "pdc", "sco", "stq", "swg", "yid", }, ), "grk": ("Greek languages", {"grc_Grek", "ell"}), "hbs": ("Serbo-Croatian", {"hrv", "srp_Cyrl", "bos_Latn", "srp_Latn"}), "heb": ("Hebrew", {"heb"}), "hin": ("Hindi", {"hin"}), "hun": ("Hungarian", {"hun"}), "hye": ("Armenian", {"hye", "hye_Latn"}), "iir": ( "Indo-Iranian languages", { "asm", "awa", "ben", "bho", "gom", "guj", "hif_Latn", "hin", "jdt_Cyrl", "kur_Arab", "kur_Latn", "mai", "mar", "npi", "ori", "oss", "pan_Guru", "pes", "pes_Latn", "pes_Thaa", "pnb", "pus", "rom", "san_Deva", "sin", "snd_Arab", "tgk_Cyrl", "tly_Latn", "urd", "zza", }, ), "ilo": ("Iloko", {"ilo"}), "inc": ( "Indic languages", { "asm", "awa", "ben", "bho", "gom", "guj", "hif_Latn", "hin", "mai", "mar", "npi", 
"ori", "pan_Guru", "pnb", "rom", "san_Deva", "sin", "snd_Arab", "urd", }, ), "ine": ( "Indo-European languages", { "afr", "afr_Arab", "aln", "ang_Latn", "arg", "asm", "ast", "awa", "bel", "bel_Latn", "ben", "bho", "bjn", "bos_Latn", "bre", "bul", "bul_Latn", "cat", "ces", "cor", "cos", "csb_Latn", "cym", "dan", "deu", "dsb", "egl", "ell", "eng", "enm_Latn", "ext", "fao", "fra", "frm_Latn", "frr", "fry", "gcf_Latn", "gla", "gle", "glg", "glv", "gom", "gos", "got_Goth", "grc_Grek", "gsw", "guj", "hat", "hif_Latn", "hin", "hrv", "hsb", "hye", "hye_Latn", "ind", "isl", "ita", "jdt_Cyrl", "ksh", "kur_Arab", "kur_Latn", "lad", "lad_Latn", "lat_Grek", "lat_Latn", "lav", "lij", "lit", "lld_Latn", "lmo", "ltg", "ltz", "mai", "mar", "max_Latn", "mfe", "min", "mkd", "mwl", "nds", "nld", "nno", "nob", "nob_Hebr", "non_Latn", "npi", "oci", "ori", "orv_Cyrl", "oss", "pan_Guru", "pap", "pcd", "pdc", "pes", "pes_Latn", "pes_Thaa", "pms", "pnb", "pol", "por", "prg_Latn", "pus", "roh", "rom", "ron", "rue", "rus", "rus_Latn", "san_Deva", "scn", "sco", "sgs", "sin", "slv", "snd_Arab", "spa", "sqi", "srd", "srp_Cyrl", "srp_Latn", "stq", "swe", "swg", "tgk_Cyrl", "tly_Latn", "tmw_Latn", "ukr", "urd", "vec", "wln", "yid", "zlm_Latn", "zsm_Latn", "zza", }, ), "isl": ("Icelandic", {"isl"}), "ita": ("Italian", {"ita"}), "itc": ( "Italic languages", { "arg", "ast", "bjn", "cat", "cos", "egl", "ext", "fra", "frm_Latn", "gcf_Latn", "glg", "hat", "ind", "ita", "lad", "lad_Latn", "lat_Grek", "lat_Latn", "lij", "lld_Latn", "lmo", "max_Latn", "mfe", "min", "mwl", "oci", "pap", "pcd", "pms", "por", "roh", "ron", "scn", "spa", "srd", "tmw_Latn", "vec", "wln", "zlm_Latn", "zsm_Latn", }, ), "jpn": ("Japanese", {"jpn", "jpn_Bopo", "jpn_Hang", "jpn_Hani", "jpn_Hira", "jpn_Kana", "jpn_Latn", "jpn_Yiii"}), "jpx": ("Japanese (family)", {"jpn"}), "kat": ("Georgian", {"kat"}), "kor": ("Korean", {"kor_Hani", "kor_Hang", "kor_Latn", "kor"}), "lav": ("Latvian", {"lav"}), "lit": ("Lithuanian", {"lit"}), "mkd": ("Macedonian", {"mkd"}), "mkh": ("Mon-Khmer languages", {"vie_Hani", "mnw", "vie", "kha", "khm_Latn", "khm"}), "msa": ("Malay (macrolanguage)", {"zsm_Latn", "ind", "max_Latn", "zlm_Latn", "min"}), "mul": ( "Multiple languages", { "abk", "acm", "ady", "afb", "afh_Latn", "afr", "akl_Latn", "aln", "amh", "ang_Latn", "apc", "ara", "arg", "arq", "ary", "arz", "asm", "ast", "avk_Latn", "awa", "aze_Latn", "bak", "bam_Latn", "bel", "bel_Latn", "ben", "bho", "bod", "bos_Latn", "bre", "brx", "brx_Latn", "bul", "bul_Latn", "cat", "ceb", "ces", "cha", "che", "chr", "chv", "cjy_Hans", "cjy_Hant", "cmn", "cmn_Hans", "cmn_Hant", "cor", "cos", "crh", "crh_Latn", "csb_Latn", "cym", "dan", "deu", "dsb", "dtp", "dws_Latn", "egl", "ell", "enm_Latn", "epo", "est", "eus", "ewe", "ext", "fao", "fij", "fin", "fkv_Latn", "fra", "frm_Latn", "frr", "fry", "fuc", "fuv", "gan", "gcf_Latn", "gil", "gla", "gle", "glg", "glv", "gom", "gos", "got_Goth", "grc_Grek", "grn", "gsw", "guj", "hat", "hau_Latn", "haw", "heb", "hif_Latn", "hil", "hin", "hnj_Latn", "hoc", "hoc_Latn", "hrv", "hsb", "hun", "hye", "iba", "ibo", "ido", "ido_Latn", "ike_Latn", "ile_Latn", "ilo", "ina_Latn", "ind", "isl", "ita", "izh", "jav", "jav_Java", "jbo", "jbo_Cyrl", "jbo_Latn", "jdt_Cyrl", "jpn", "kab", "kal", "kan", "kat", "kaz_Cyrl", "kaz_Latn", "kek_Latn", "kha", "khm", "khm_Latn", "kin", "kir_Cyrl", "kjh", "kpv", "krl", "ksh", "kum", "kur_Arab", "kur_Latn", "lad", "lad_Latn", "lao", "lat_Latn", "lav", "ldn_Latn", "lfn_Cyrl", "lfn_Latn", "lij", "lin", "lit", "liv_Latn", "lkt", 
"lld_Latn", "lmo", "ltg", "ltz", "lug", "lzh", "lzh_Hans", "mad", "mah", "mai", "mal", "mar", "max_Latn", "mdf", "mfe", "mhr", "mic", "min", "mkd", "mlg", "mlt", "mnw", "moh", "mon", "mri", "mwl", "mww", "mya", "myv", "nan", "nau", "nav", "nds", "niu", "nld", "nno", "nob", "nob_Hebr", "nog", "non_Latn", "nov_Latn", "npi", "nya", "oci", "ori", "orv_Cyrl", "oss", "ota_Arab", "ota_Latn", "pag", "pan_Guru", "pap", "pau", "pdc", "pes", "pes_Latn", "pes_Thaa", "pms", "pnb", "pol", "por", "ppl_Latn", "prg_Latn", "pus", "quc", "qya", "qya_Latn", "rap", "rif_Latn", "roh", "rom", "ron", "rue", "run", "rus", "sag", "sah", "san_Deva", "scn", "sco", "sgs", "shs_Latn", "shy_Latn", "sin", "sjn_Latn", "slv", "sma", "sme", "smo", "sna", "snd_Arab", "som", "spa", "sqi", "srp_Cyrl", "srp_Latn", "stq", "sun", "swe", "swg", "swh", "tah", "tam", "tat", "tat_Arab", "tat_Latn", "tel", "tet", "tgk_Cyrl", "tha", "tir", "tlh_Latn", "tly_Latn", "tmw_Latn", "toi_Latn", "ton", "tpw_Latn", "tso", "tuk", "tuk_Latn", "tur", "tvl", "tyv", "tzl", "tzl_Latn", "udm", "uig_Arab", "uig_Cyrl", "ukr", "umb", "urd", "uzb_Cyrl", "uzb_Latn", "vec", "vie", "vie_Hani", "vol_Latn", "vro", "war", "wln", "wol", "wuu", "xal", "xho", "yid", "yor", "yue", "yue_Hans", "yue_Hant", "zho", "zho_Hans", "zho_Hant", "zlm_Latn", "zsm_Latn", "zul", "zza", }, ), "nic": ( "Niger-Kordofanian languages", { "bam_Latn", "ewe", "fuc", "fuv", "ibo", "kin", "lin", "lug", "nya", "run", "sag", "sna", "swh", "toi_Latn", "tso", "umb", "wol", "xho", "yor", "zul", }, ), "nld": ("Dutch", {"nld"}), "nor": ("Norwegian", {"nob", "nno"}), "phi": ("Philippine languages", {"ilo", "akl_Latn", "war", "hil", "pag", "ceb"}), "pol": ("Polish", {"pol"}), "por": ("Portuguese", {"por"}), "pqe": ( "Eastern Malayo-Polynesian languages", {"fij", "gil", "haw", "mah", "mri", "nau", "niu", "rap", "smo", "tah", "ton", "tvl"}, ), "roa": ( "Romance languages", { "arg", "ast", "cat", "cos", "egl", "ext", "fra", "frm_Latn", "gcf_Latn", "glg", "hat", "ind", "ita", "lad", "lad_Latn", "lij", "lld_Latn", "lmo", "max_Latn", "mfe", "min", "mwl", "oci", "pap", "pms", "por", "roh", "ron", "scn", "spa", "tmw_Latn", "vec", "wln", "zlm_Latn", "zsm_Latn", }, ), "ron": ("Romanian", {"ron"}), "run": ("Rundi", {"run"}), "rus": ("Russian", {"rus"}), "sal": ("Salishan languages", {"shs_Latn"}), "sem": ("Semitic languages", {"acm", "afb", "amh", "apc", "ara", "arq", "ary", "arz", "heb", "mlt", "tir"}), "sla": ( "Slavic languages", { "bel", "bel_Latn", "bos_Latn", "bul", "bul_Latn", "ces", "csb_Latn", "dsb", "hrv", "hsb", "mkd", "orv_Cyrl", "pol", "rue", "rus", "slv", "srp_Cyrl", "srp_Latn", "ukr", }, ), "slv": ("Slovenian", {"slv"}), "spa": ("Spanish", {"spa"}), "swe": ("Swedish", {"swe"}), "taw": ("Tai", {"lao", "tha"}), "tgl": ("Tagalog", {"tgl_Latn"}), "tha": ("Thai", {"tha"}), "trk": ( "Turkic languages", { "aze_Latn", "bak", "chv", "crh", "crh_Latn", "kaz_Cyrl", "kaz_Latn", "kir_Cyrl", "kjh", "kum", "ota_Arab", "ota_Latn", "sah", "tat", "tat_Arab", "tat_Latn", "tuk", "tuk_Latn", "tur", "tyv", "uig_Arab", "uig_Cyrl", "uzb_Cyrl", "uzb_Latn", }, ), "tur": ("Turkish", {"tur"}), "ukr": ("Ukrainian", {"ukr"}), "urd": ("Urdu", {"urd"}), "urj": ( "Uralic languages", { "est", "fin", "fkv_Latn", "hun", "izh", "kpv", "krl", "liv_Latn", "mdf", "mhr", "myv", "sma", "sme", "udm", "vep", "vro", }, ), "vie": ("Vietnamese", {"vie", "vie_Hani"}), "war": ("Waray (Philippines)", {"war"}), "zho": ( "Chinese", { "cjy_Hans", "cjy_Hant", "cmn", "cmn_Bopo", "cmn_Hang", "cmn_Hani", "cmn_Hans", "cmn_Hant", "cmn_Hira", 
"cmn_Kana", "cmn_Latn", "cmn_Yiii", "gan", "hak_Hani", "lzh", "lzh_Bopo", "lzh_Hang", "lzh_Hani", "lzh_Hans", "lzh_Hira", "lzh_Kana", "lzh_Yiii", "nan", "nan_Hani", "wuu", "wuu_Bopo", "wuu_Hani", "wuu_Latn", "yue", "yue_Bopo", "yue_Hang", "yue_Hani", "yue_Hans", "yue_Hant", "yue_Hira", "yue_Kana", "zho", "zho_Hans", "zho_Hant", }, ), "zle": ("East Slavic languages", {"bel", "orv_Cyrl", "bel_Latn", "rus", "ukr", "rue"}), "zls": ("South Slavic languages", {"bos_Latn", "bul", "bul_Latn", "hrv", "mkd", "slv", "srp_Cyrl", "srp_Latn"}), "zlw": ("West Slavic languages", {"csb_Latn", "dsb", "hsb", "pol", "ces"}), } def l2front_matter(langs): return "".join(f"- {l}\n" for l in langs) def dedup(lst): """Preservers order""" new_lst = [] for item in lst: if not item: continue elif item in new_lst: continue else: new_lst.append(item) return new_lst if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "-m", "--models", action="append", help="<Required> Set flag", required=True, nargs="+", dest="models" ) parser.add_argument("-save_dir", "--save_dir", default="marian_converted", help="where to save converted models") args = parser.parse_args() resolver = TatoebaConverter(save_dir=args.save_dir) resolver.convert_models(args.models[0])
33,227
25.5824
175
py
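A minimal usage sketch for the Tatoeba converter above, assuming the Tatoeba-Challenge repository has already been cloned into the working directory (as the constructor asserts), that pandas and wget are installed, and that the module import path matches this file's location; the "heb-eng" pair is the example given in the class docstring.

# Sketch only: mirrors the __main__ block of the converter script above.
# Prerequisite: git clone git@github.com:Helsinki-NLP/Tatoeba-Challenge.git in the working directory.
from transformers.convert_marian_tatoeba_to_pytorch import TatoebaConverter  # import path assumed from file location

converter = TatoebaConverter(save_dir="marian_converted")

# Convert the Hebrew->English model; the alpha3 pair "heb-eng" is renamed
# to the alpha2 form "opus-mt-he-en" before the model card is written.
converter.convert_models(["heb-eng"])

The same conversion can be run from the command line through the argparse block at the end of the script, e.g. python -m transformers.convert_marian_tatoeba_to_pytorch -m heb-eng --save_dir marian_converted.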
SLT-FAI
SLT-FAI-main/transformers/convert_openai_original_tf_checkpoint_to_pytorch.py
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""


import argparse

import torch

from transformers import CONFIG_NAME, WEIGHTS_NAME, OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import logging


logging.set_verbosity_info()


def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print("Save configuration file to {}".format(pytorch_config_dump_path))
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help="An optional config json file corresponding to the pre-trained OpenAI model. \n"
        "This specifies the model architecture.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
2,654
34.878378
118
py
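The conversion entry point above can also be driven programmatically rather than through its argparse CLI; a small sketch follows, with hypothetical placeholder paths (everything else matches the function signature shown above).

# Sketch only; the paths below are hypothetical placeholders.
from transformers.convert_openai_original_tf_checkpoint_to_pytorch import (  # import path assumed from file location
    convert_openai_checkpoint_to_pytorch,
)

convert_openai_checkpoint_to_pytorch(
    openai_checkpoint_folder_path="/path/to/openai_tf_checkpoint",  # folder with the original TF weights
    openai_config_file="",  # empty string -> default OpenAIGPTConfig, as in the script
    pytorch_dump_folder_path="/path/to/pytorch_dump",  # receives the WEIGHTS_NAME and CONFIG_NAME files
)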
SLT-FAI
SLT-FAI-main/transformers/training_args.py
import dataclasses import json import os import warnings from dataclasses import dataclass, field from enum import Enum from typing import Any, Dict, List, Optional, Tuple from .file_utils import cached_property, is_torch_available, is_torch_tpu_available, torch_required from .trainer_utils import EvaluationStrategy from .utils import logging if is_torch_available(): import torch if is_torch_tpu_available(): import torch_xla.core.xla_model as xm logger = logging.get_logger(__name__) def default_logdir() -> str: """ Same default as PyTorch """ import socket from datetime import datetime current_time = datetime.now().strftime("%b%d_%H-%M-%S") return os.path.join("runs", current_time + "_" + socket.gethostname()) @dataclass class TrainingArguments: """ TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop itself**. Using :class:`~transformers.HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. Parameters: output_dir (:obj:`str`): The output directory where the model predictions and checkpoints will be written. overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`): If :obj:`True`, overwrite the content of the output directory. Use this to continue training if :obj:`output_dir` points to a checkpoint directory. do_train (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to run training or not. do_eval (:obj:`bool`, `optional`): Whether to run evaluation on the dev set or not. Will default to :obj:`evaluation_strategy` different from :obj:`"no"`. do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to run predictions on the test set or not. evaluation_strategy (:obj:`str` or :class:`~transformers.trainer_utils.EvaluationStrategy`, `optional`, defaults to :obj:`"no"`): The evaluation strategy to adopt during training. Possible values are: * :obj:`"no"`: No evaluation is done during training. * :obj:`"steps"`: Evaluation is done (and logged) every :obj:`eval_steps`. * :obj:`"epoch"`: Evaluation is done at the end of each epoch. prediction_loss_only (:obj:`bool`, `optional`, defaults to `False`): When performing evaluation and predictions, only returns the loss. per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8): The batch size per GPU/TPU core/CPU for training. per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8): The batch size per GPU/TPU core/CPU for evaluation. gradient_accumulation_steps (:obj:`int`, `optional`, defaults to 1): Number of updates steps to accumulate the gradients for, before performing a backward/update pass. .. warning:: When using gradient accumulation, one step is counted as one step with backward pass. Therefore, logging, evaluation, save will be conducted every ``gradient_accumulation_steps * xxx_step`` training examples. eval_accumulation_steps (:obj:`int`, `optional`): Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. If left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but requires more memory). learning_rate (:obj:`float`, `optional`, defaults to 5e-5): The initial learning rate for Adam. weight_decay (:obj:`float`, `optional`, defaults to 0): The weight decay to apply (if not zero). adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8): Epsilon for the Adam optimizer. 
max_grad_norm (:obj:`float`, `optional`, defaults to 1.0): Maximum gradient norm (for gradient clipping). num_train_epochs(:obj:`float`, `optional`, defaults to 3.0): Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training). max_steps (:obj:`int`, `optional`, defaults to -1): If set to a positive number, the total number of training steps to perform. Overrides :obj:`num_train_epochs`. warmup_steps (:obj:`int`, `optional`, defaults to 0): Number of steps used for a linear warmup from 0 to :obj:`learning_rate`. logging_dir (:obj:`str`, `optional`): Tensorboard log directory. Will default to `runs/**CURRENT_DATETIME_HOSTNAME**`. logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to log and evaluate the first :obj:`global_step` or not. logging_steps (:obj:`int`, `optional`, defaults to 500): Number of update steps between two logs. save_steps (:obj:`int`, `optional`, defaults to 500): Number of updates steps before two checkpoint saves. save_total_limit (:obj:`int`, `optional`): If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in :obj:`output_dir`. no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to not use CUDA even when it is available or not. seed (:obj:`int`, `optional`, defaults to 42): Random seed for initialization. fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to use 16-bit (mixed) precision training (through NVIDIA apex) instead of 32-bit training. fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'): For :obj:`fp16` training, apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details on the `apex documentation <https://nvidia.github.io/apex/amp.html>`__. local_rank (:obj:`int`, `optional`, defaults to -1): During distributed training, the rank of the process. tpu_num_cores (:obj:`int`, `optional`): When training on TPU, the number of TPU cores (automatically passed by launcher script). debug (:obj:`bool`, `optional`, defaults to :obj:`False`): When training on TPU, whether to print debug metrics or not. dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size) or not. eval_steps (:obj:`int`, `optional`): Number of update steps between two evaluations if :obj:`evaluation_strategy="steps"`. Will default to the same value as :obj:`logging_steps` if not set. dataloader_num_workers (:obj:`int`, `optional`, defaults to 0): Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process. past_index (:obj:`int`, `optional`, defaults to -1): Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc`XLNet <../model_doc/xlnet>` can make use of the past hidden states for their predictions. If this argument is set to a positive int, the ``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the model at the next training step under the keyword argument ``mems``. run_name (:obj:`str`, `optional`): A descriptor for the run. Notably used for wandb logging. disable_tqdm (:obj:`bool`, `optional`): Whether or not to disable the tqdm progress bars. Will default to :obj:`True` if the logging level is set to warn or lower (default), :obj:`False` otherwise. 
remove_unused_columns (:obj:`bool`, `optional`, defaults to :obj:`True`): If using `nlp.Dataset` datasets, whether or not to automatically remove the columns unused by the model forward method. (Note: this behavior is not implemented for :class:`~transformers.TFTrainer` yet.) label_names (:obj:`List[str]`, `optional`): The list of keys in your dictionary of inputs that correspond to the labels. Will eventually default to :obj:`["labels"]` except if the model used is one of the :obj:`XxxForQuestionAnswering` in which case it will default to :obj:`["start_positions", "end_positions"]`. load_best_model_at_end (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to load the best model found during training at the end of training. .. note:: When set to :obj:`True`, the parameters :obj:`save_steps` will be ignored and the model will be saved after each evaluation. metric_for_best_model (:obj:`str`, `optional`) Use in conjunction with :obj:`load_best_model_at_end` to specify the metric to use to compare two different models. Must be the name of a metric returned by the evaluation with or without the prefix :obj:`"eval_"`. Will default to :obj:`"loss"` if unspecified and :obj:`load_best_model_at_end=True` (to use the evaluation loss). If you set this value, :obj:`greater_is_better` will default to :obj:`True`. Don't forget to set it to :obj:`False` if your metric is better when lower. greater_is_better (:obj:`bool`, `optional`) Use in conjunction with :obj:`load_best_model_at_end` and :obj:`metric_for_best_model` to specify if better models should have a greater metric or not. Will default to: - :obj:`True` if :obj:`metric_for_best_model` is set to a value that isn't :obj:`"loss"` or :obj:`"eval_loss"`. - :obj:`False` if :obj:`metric_for_best_model` is not set, or set to :obj:`"loss"` or :obj:`"eval_loss"`. """ output_dir: str = field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."} ) overwrite_output_dir: bool = field( default=False, metadata={ "help": ( "Overwrite the content of the output directory." "Use this to continue training if output_dir points to a checkpoint directory." ) }, ) do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) do_eval: bool = field(default=None, metadata={"help": "Whether to run eval on the dev set."}) do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."}) evaluate_during_training: bool = field( default=False, metadata={"help": "Run evaluation during training at each logging step."}, ) evaluation_strategy: EvaluationStrategy = field( default="no", metadata={"help": "Run evaluation during training at each logging step."}, ) prediction_loss_only: bool = field( default=False, metadata={"help": "When performing evaluation and predictions, only returns the loss."}, ) per_device_train_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."} ) per_device_eval_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."} ) per_gpu_train_batch_size: Optional[int] = field( default=None, metadata={ "help": "Deprecated, the use of `--per_device_train_batch_size` is preferred. " "Batch size per GPU/TPU core/CPU for training." }, ) per_gpu_eval_batch_size: Optional[int] = field( default=None, metadata={ "help": "Deprecated, the use of `--per_device_eval_batch_size` is preferred." "Batch size per GPU/TPU core/CPU for evaluation." 
}, ) gradient_accumulation_steps: int = field( default=1, metadata={"help": "Number of updates steps to accumulate before performing a backward/update pass."}, ) eval_accumulation_steps: Optional[int] = field( default=None, metadata={"help": "Number of predictions steps to accumulate before moving the tensors to the CPU."}, ) learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for Adam."}) weight_decay: float = field(default=0.0, metadata={"help": "Weight decay if we apply some."}) adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for Adam optimizer"}) adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for Adam optimizer"}) adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for Adam optimizer."}) max_grad_norm: float = field(default=1.0, metadata={"help": "Max gradient norm."}) num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."}) max_steps: int = field( default=-1, metadata={"help": "If > 0: set total number of training steps to perform. Override num_train_epochs."}, ) warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={"help": "Tensorboard log dir."}) logging_first_step: bool = field(default=False, metadata={"help": "Log and eval the first global_step"}) logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."}) save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."}) save_total_limit: Optional[int] = field( default=None, metadata={ "help": ( "Limit the total amount of checkpoints." "Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints" ) }, ) no_cuda: bool = field(default=False, metadata={"help": "Do not use CUDA even when it is available"}) seed: int = field(default=42, metadata={"help": "random seed for initialization"}) fp16: bool = field( default=False, metadata={"help": "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"}, ) fp16_opt_level: str = field( default="O1", metadata={ "help": ( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ) }, ) local_rank: int = field(default=-1, metadata={"help": "For distributed training: local_rank"}) tpu_num_cores: Optional[int] = field( default=None, metadata={"help": "TPU: Number of TPU cores (automatically passed by launcher script)"} ) tpu_metrics_debug: bool = field( default=False, metadata={"help": "Deprecated, the use of `--debug` is preferred. TPU: Whether to print debug metrics"}, ) debug: bool = field(default=False, metadata={"help": "Whether to print debug metrics on TPU"}) dataloader_drop_last: bool = field( default=False, metadata={"help": "Drop the last incomplete batch if it is not divisible by the batch size."} ) eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."}) dataloader_num_workers: int = field( default=0, metadata={ "help": "Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process." }, ) past_index: int = field( default=-1, metadata={"help": "If >=0, uses the corresponding part of the output as the past state for next step."}, ) run_name: Optional[str] = field( default=None, metadata={"help": "An optional descriptor for the run. 
Notably used for wandb logging."} ) disable_tqdm: Optional[bool] = field( default=None, metadata={"help": "Whether or not to disable the tqdm progress bars."} ) remove_unused_columns: Optional[bool] = field( default=True, metadata={"help": "Remove columns not required by the model when using an nlp.Dataset."} ) label_names: Optional[List[str]] = field( default=None, metadata={"help": "The list of keys in your dictionary of inputs that correspond to the labels."} ) load_best_model_at_end: Optional[bool] = field( default=False, metadata={"help": "Whether or not to load the best model found during training at the end of training."}, ) metric_for_best_model: Optional[str] = field( default=None, metadata={"help": "The metric to use to compare two different models."} ) greater_is_better: Optional[bool] = field( default=None, metadata={"help": "Whether the `metric_for_best_model` should be maximized or not."} ) def __post_init__(self): if self.disable_tqdm is None: self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN if self.evaluate_during_training is True: self.evaluation_strategy = EvaluationStrategy.STEPS warnings.warn( "The `evaluate_during_training` argument is deprecated in favor of `evaluation_strategy` (which has more options)", FutureWarning, ) self.evaluation_strategy = EvaluationStrategy(self.evaluation_strategy) if self.do_eval is False and self.evaluation_strategy != EvaluationStrategy.NO: self.do_eval = True if self.eval_steps is None: self.eval_steps = self.logging_steps if self.load_best_model_at_end and self.metric_for_best_model is None: self.metric_for_best_model = "loss" if self.greater_is_better is None and self.metric_for_best_model is not None: self.greater_is_better = self.metric_for_best_model not in ["loss", "eval_loss"] if self.run_name is None: self.run_name = self.output_dir if self.device.type != "cuda" and self.fp16: raise ValueError("AMP (`--fp16`) can only be used on CUDA devices.") @property def train_batch_size(self) -> int: """ The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training). """ if self.per_gpu_train_batch_size: logger.warning( "Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future " "version. Using `--per_device_train_batch_size` is preferred." ) per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size return per_device_batch_size * max(1, self.n_gpu) @property def eval_batch_size(self) -> int: """ The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training). """ if self.per_gpu_eval_batch_size: logger.warning( "Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future " "version. Using `--per_device_eval_batch_size` is preferred." ) per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size return per_device_batch_size * max(1, self.n_gpu) @cached_property @torch_required def _setup_devices(self) -> Tuple["torch.device", int]: logger.info("PyTorch: setting up devices") if self.no_cuda: device = torch.device("cpu") n_gpu = 0 elif is_torch_tpu_available(): device = xm.xla_device() n_gpu = 0 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. 
Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() else: # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend="nccl") device = torch.device("cuda", self.local_rank) n_gpu = 1 if device.type == "cuda": torch.cuda.set_device(device) return device, n_gpu @property @torch_required def device(self) -> "torch.device": """ The device used by this process. """ return self._setup_devices[0] @property @torch_required def n_gpu(self): """ The number of GPUs used by this process. Note: This will only be greater than one when you have multiple GPUs available but are not using distributed training. For distributed training, it will always be 1. """ return self._setup_devices[1] def to_dict(self): """ Serializes this instance while replace `Enum` by their values (for JSON serialization support). """ d = dataclasses.asdict(self) for k, v in d.items(): if isinstance(v, Enum): d[k] = v.value return d def to_json_string(self): """ Serializes this instance to a JSON string. """ return json.dumps(self.to_dict(), indent=2) def to_sanitized_dict(self) -> Dict[str, Any]: """ Sanitized serialization to use with TensorBoard’s hparams """ d = self.to_dict() d = {**d, **{"train_batch_size": self.train_batch_size, "eval_batch_size": self.eval_batch_size}} valid_types = [bool, int, float, str] if is_torch_available(): valid_types.append(torch.Tensor) return {k: v if type(v) in valid_types else str(v) for k, v in d.items()}
22,646
48.019481
142
py
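A short sketch of constructing the TrainingArguments dataclass above and reading its derived properties. The values are illustrative only, and PyTorch must be installed because __post_init__ and the batch-size properties touch device setup.

from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="./outputs",
    do_train=True,
    evaluation_strategy="steps",   # coerced to EvaluationStrategy.STEPS in __post_init__; do_eval then defaults to True
    eval_steps=200,
    per_device_train_batch_size=8,
    gradient_accumulation_steps=2,
    learning_rate=5e-5,
    num_train_epochs=3.0,
    logging_steps=100,
    load_best_model_at_end=True,   # metric_for_best_model then defaults to "loss"
)

print(args.train_batch_size)   # per_device_train_batch_size * max(1, n_gpu)
print(args.to_json_string())   # Enum fields are replaced by their values via to_dict()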
SLT-FAI
SLT-FAI-main/transformers/tokenization_openai_fast.py
# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Tokenization classes for OpenAI GPT."""


from typing import Optional, Tuple

from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_utils_fast import PreTrainedTokenizerFast
from .utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json"},
    "merges_file": {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt"},
    "tokenizer_file": {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-tokenizer.json"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openai-gpt": 512,
}


class OpenAIGPTTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" GPT Tokenizer (backed by HuggingFace's `tokenizers` library). Based on Byte-Pair-Encoding with
    the following peculiarities:

    - lower case all inputs
    - uses BERT's BasicTokenizer for pre-BPE tokenization

    This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
    methods. Users should refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (:obj:`str`):
            Path to the vocabulary file.
        merges_file (:obj:`str`):
            Path to the merges file.
        unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["attention_mask"]
    slow_tokenizer_class = OpenAIGPTTokenizer

    def __init__(self, vocab_file, merges_file, tokenizer_file=None, unk_token="<unk>", **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, **kwargs)

    @property
    def do_lower_case(self):
        return True

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
3,106
39.350649
119
py
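A brief sketch of using the fast GPT tokenizer defined above, assuming it is exported at the package top level (as in upstream transformers) and that the tokenizers backend plus the pretrained vocab/merges files are reachable.

import os

from transformers import OpenAIGPTTokenizerFast  # assumed top-level export

tokenizer = OpenAIGPTTokenizerFast.from_pretrained("openai-gpt")

# Input is lower-cased: do_lower_case is hard-wired to True above.
enc = tokenizer("Hello World")
print(enc["input_ids"], enc["attention_mask"])  # model_input_names includes attention_mask

# save_vocabulary() delegates to the backing tokenizers model and returns the written file paths.
os.makedirs("./gpt_tokenizer", exist_ok=True)
print(tokenizer.save_vocabulary("./gpt_tokenizer"))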
SLT-FAI
SLT-FAI-main/transformers/generation_utils.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Iterable, List, Optional, Tuple import torch from torch import Tensor from torch.nn import functional as F from .file_utils import ModelOutput from .utils import logging logger = logging.get_logger(__name__) class GenerationMixin: """ A class contraining all of the functions supporting generation, to be used as a mixin in :class:`~transfomers.PreTrainedModel`. """ def prepare_inputs_for_generation(self, input_ids, **kwargs): """ Implement in subclasses of :class:`~transfomers.PreTrainedModel` for custom behavior to prepare inputs in the generate method. """ return {"input_ids": input_ids} def adjust_logits_during_generation(self, logits, **kwargs): """ Implement in subclasses of :class:`~transfomers.PreTrainedModel` for custom behavior to adjust the logits in the generate method. """ return logits def enforce_repetition_penalty_(self, lprobs, batch_size, num_beams, prev_output_tokens, repetition_penalty): """ Enforce the repetition penalty (from the `CTRL paper <https://arxiv.org/abs/1909.05858>`__). """ for i in range(batch_size * num_beams): for previous_token in set(prev_output_tokens[i].tolist()): # if score < 0 then repetition penalty has to multiplied to reduce the previous token probability if lprobs[i, previous_token] < 0: lprobs[i, previous_token] *= repetition_penalty else: lprobs[i, previous_token] /= repetition_penalty def postprocess_next_token_scores( self, scores, input_ids, no_repeat_ngram_size, bad_words_ids, cur_len, min_length, max_length, eos_token_id, repetition_penalty, batch_size, num_beams, ): # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858) if repetition_penalty != 1.0: self.enforce_repetition_penalty_( scores, batch_size, num_beams, input_ids, repetition_penalty, ) # set eos token prob to zero if min_length is not reached if eos_token_id is not None and cur_len < min_length: scores[:, eos_token_id] = -float("inf") if no_repeat_ngram_size > 0: # calculate a list of banned tokens to prevent repetitively generating the same ngrams num_batch_hypotheses = batch_size * num_beams # from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345 banned_batch_tokens = calc_banned_ngram_tokens( input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len ) for i, banned_tokens in enumerate(banned_batch_tokens): scores[i, banned_tokens] = -float("inf") if bad_words_ids is not None: # Exclude EOS token (already processed) bad_words_ids = list(filter(lambda bad_token_seq: bad_token_seq != [eos_token_id], bad_words_ids)) # calculate a list of banned tokens according to bad words banned_tokens = calc_banned_bad_words_ids(input_ids.tolist(), bad_words_ids) # Modify the scores in place by setting the banned tokens logits to `-inf` 
set_scores_to_inf_for_banned_tokens(scores, banned_tokens) return scores @torch.no_grad() def generate( self, input_ids: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, max_length: Optional[int] = None, min_length: Optional[int] = None, do_sample: Optional[bool] = None, early_stopping: Optional[bool] = None, num_beams: Optional[int] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, repetition_penalty: Optional[float] = None, bad_words_ids: Optional[Iterable[int]] = None, bos_token_id: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, length_penalty: Optional[float] = None, no_repeat_ngram_size: Optional[int] = None, num_return_sequences: Optional[int] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_start_token_id: Optional[int] = None, use_cache: Optional[bool] = None, **model_kwargs ) -> torch.LongTensor: r""" Generates sequences for models with a language modeling head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling. Adapted in part from `Facebook's XLM beam search code <https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529>`__. Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values indicated are the default values of those config. Most of these parameters are explained in more detail in `this blog post <https://huggingface.co/blog/how-to-generate>`__. Parameters: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty :obj:`torch.LongTensor` of shape :obj:`(1,)`. decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): initial input_ids for the decoder of encoder-decoder type models. If :obj:`None` then only decoder_start_token_id is passed as the first token to the decoder. max_length (:obj:`int`, `optional`, defaults to 20): The maximum length of the sequence to be generated. min_length (:obj:`int`, `optional`, defaults to 10): The minimum length of the sequence to be generated. do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use sampling ; use greedy decoding otherwise. early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not. num_beams (:obj:`int`, `optional`, defaults to 1): Number of beams for beam search. 1 means no beam search. temperature (:obj:`float`, `optional`, defaults tp 1.0): The value used to module the next token probabilities. top_k (:obj:`int`, `optional`, defaults to 50): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (:obj:`float`, `optional`, defaults to 1.0): If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or higher are kept for generation. repetition_penalty (:obj:`float`, `optional`, defaults to 1.0): The parameter for repetition penalty. 1.0 means no penalty. See `this paper <https://arxiv.org/pdf/1909.05858.pdf>`__ for more details. 
pad_token_id (:obj:`int`, `optional`): The id of the `padding` token. bos_token_id (:obj:`int`, `optional`): The id of the `beginning-of-sequence` token. eos_token_id (:obj:`int`, `optional`): The id of the `end-of-sequence` token. length_penalty (:obj:`float`, `optional`, defaults to 1.0): Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer sequences. no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0): If set to int > 0, all ngrams of that size can only occur once. bad_words_ids(:obj:`List[int]`, `optional`): List of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`. num_return_sequences(:obj:`int`, `optional`, defaults to 1): The number of independently computed returned sequences for each element in the batch. attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape as :obj:`input_ids` that masks the pad token. `What are attention masks? <../glossary.html#attention-mask>`__ decoder_start_token_id (:obj:`int`, `optional`): If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token. use_cache: (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding. model_kwargs: Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. Return: :obj:`torch.LongTensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. Examples:: tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache. outputs = model.generate(max_length=40) # do greedy decoding print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache. input_context = 'The dog' input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog' for i in range(3): # 3 output sequences were generated print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache. 
input_context = 'The dog' input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # generate 3 candidates using sampling for i in range(3): # 3 output sequences were generated print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache. input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache. input_context = 'My cute dog' # "Legal" is one of the control codes for ctrl bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']] input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated """ # We cannot generate if the model does not have a LM head if self.get_output_embeddings() is None: raise AttributeError( "You tried to generate sequences with a model that does not have a LM Head." "Please use another model class (e.g. 
`OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )" ) max_length = max_length if max_length is not None else self.config.max_length min_length = min_length if min_length is not None else self.config.min_length do_sample = do_sample if do_sample is not None else self.config.do_sample early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping use_cache = use_cache if use_cache is not None else self.config.use_cache num_beams = num_beams if num_beams is not None else self.config.num_beams temperature = temperature if temperature is not None else self.config.temperature top_k = top_k if top_k is not None else self.config.top_k top_p = top_p if top_p is not None else self.config.top_p repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty no_repeat_ngram_size = ( no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size ) bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id ) if input_ids is not None: batch_size = input_ids.shape[0] # overriden by the input batch_size else: batch_size = 1 assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer." assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer." assert isinstance(do_sample, bool), "`do_sample` should be a boolean." assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean." assert isinstance(use_cache, bool), "`use_cache` should be a boolean." assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer." assert temperature > 0, "`temperature` should be strictly positive." assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer." assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1." assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1." assert input_ids is not None or ( isinstance(bos_token_id, int) and bos_token_id >= 0 ), "If input_ids is not defined, `bos_token_id` should be a positive integer." assert pad_token_id is None or ( isinstance(pad_token_id, int) and (pad_token_id >= 0) ), "`pad_token_id` should be a positive integer." assert (eos_token_id is None) or ( isinstance(eos_token_id, int) and (eos_token_id >= 0) ), "`eos_token_id` should be a positive integer." assert length_penalty > 0, "`length_penalty` should be strictly positive." assert ( isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0 ), "`no_repeat_ngram_size` should be a positive integer." assert ( isinstance(num_return_sequences, int) and num_return_sequences > 0 ), "`num_return_sequences` should be a strictly positive integer." 
assert ( bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list) ), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated" if input_ids is None: assert isinstance(bos_token_id, int) and bos_token_id >= 0, ( "you should either supply a context to complete as `input_ids` input " "or a `bos_token_id` (integer >= 0) as a first token to start the generation." ) input_ids = torch.full( (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device, ) else: assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)." # not allow to duplicate outputs when greedy decoding if do_sample is False: if num_beams == 1: # no_beam_search greedy generation conditions assert ( num_return_sequences == 1 ), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1" else: # beam_search greedy generation conditions assert ( num_beams >= num_return_sequences ), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences" # create attention mask if necessary # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140 if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids): attention_mask = input_ids.ne(pad_token_id).long() elif attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) # set pad_token_id to eos_token_id if not set. Important that this is done after # attention_mask is created if pad_token_id is None and eos_token_id is not None: logger.warning( "Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id) ) pad_token_id = eos_token_id # vocab size if hasattr(self.config, "vocab_size"): vocab_size = self.config.vocab_size elif ( self.config.is_encoder_decoder and hasattr(self.config, "decoder") and hasattr(self.config.decoder, "vocab_size") ): vocab_size = self.config.decoder.vocab_size else: raise ValueError("either self.config.vocab_size or self.config.decoder.vocab_size needs to be defined") # set effective batch size and effective batch multiplier according to do_sample if do_sample: effective_batch_size = batch_size * num_return_sequences effective_batch_mult = num_return_sequences else: effective_batch_size = batch_size effective_batch_mult = 1 if self.config.is_encoder_decoder: if decoder_start_token_id is None: # see if BOS token can be used for decoder_start_token_id if bos_token_id is not None: decoder_start_token_id = bos_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "bos_token_id") and self.config.decoder.bos_token_id is not None ): decoder_start_token_id = self.config.decoder.bos_token_id else: raise ValueError( "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation" ) assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self) assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder) # get encoder and store encoder outputs encoder = self.get_encoder() encoder_outputs: ModelOutput = encoder(input_ids, attention_mask=attention_mask, return_dict=True) # Expand input ids if num_beams > 1 or num_return_sequences > 1 if num_return_sequences > 1 or num_beams > 1: input_ids_len = input_ids.shape[-1] input_ids = input_ids.unsqueeze(1).expand(batch_size, 
effective_batch_mult * num_beams, input_ids_len) attention_mask = attention_mask.unsqueeze(1).expand( batch_size, effective_batch_mult * num_beams, input_ids_len ) input_ids = input_ids.contiguous().view( effective_batch_size * num_beams, input_ids_len ) # shape: (batch_size * num_return_sequences * num_beams, cur_len) attention_mask = attention_mask.contiguous().view( effective_batch_size * num_beams, input_ids_len ) # shape: (batch_size * num_return_sequences * num_beams, cur_len) if self.config.is_encoder_decoder: device = next(self.parameters()).device if decoder_input_ids is not None: # give initial decoder input ids input_ids = decoder_input_ids.repeat(effective_batch_size * num_beams, 1).to(device) else: # create empty decoder input_ids input_ids = torch.full( (effective_batch_size * num_beams, 1), decoder_start_token_id, dtype=torch.long, device=device, ) cur_len = input_ids.shape[-1] assert ( batch_size == encoder_outputs.last_hidden_state.shape[0] ), f"expected encoder_outputs.last_hidden_state to have 1st dimension bs={batch_size}, got {encoder_outputs.last_hidden_state.shape[0]} " # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1) expanded_batch_idxs = ( torch.arange(batch_size) .view(-1, 1) .repeat(1, num_beams * effective_batch_mult) .view(-1) .to(input_ids.device) ) # expand encoder_outputs encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.index_select( 0, expanded_batch_idxs ) # save encoder_outputs in `model_kwargs` model_kwargs["encoder_outputs"] = encoder_outputs else: cur_len = input_ids.shape[-1] assert ( cur_len < max_length ), f"The context has {cur_len} number of tokens, but `max_length` is only {max_length}. Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`" if num_beams > 1: output = self._generate_beam_search( input_ids, cur_len=cur_len, max_length=max_length, min_length=min_length, do_sample=do_sample, early_stopping=early_stopping, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, pad_token_id=pad_token_id, eos_token_id=eos_token_id, batch_size=effective_batch_size, num_return_sequences=num_return_sequences, length_penalty=length_penalty, num_beams=num_beams, vocab_size=vocab_size, attention_mask=attention_mask, use_cache=use_cache, model_kwargs=model_kwargs, ) else: output = self._generate_no_beam_search( input_ids, cur_len=cur_len, max_length=max_length, min_length=min_length, do_sample=do_sample, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, pad_token_id=pad_token_id, eos_token_id=eos_token_id, batch_size=effective_batch_size, attention_mask=attention_mask, use_cache=use_cache, model_kwargs=model_kwargs, ) return output def _generate_no_beam_search( self, input_ids, cur_len, max_length, min_length, do_sample, temperature, top_k, top_p, repetition_penalty, no_repeat_ngram_size, bad_words_ids, pad_token_id, eos_token_id, batch_size, attention_mask, use_cache, model_kwargs, ): """Generate sequences for each example without beam search (num_beams == 1). All returned sequence are generated independantly. 
""" # length of generated sentences / unfinished sentences unfinished_sents = input_ids.new(batch_size).fill_(1) sent_lengths = input_ids.new(batch_size).fill_(max_length) past = None while cur_len < max_length: model_inputs = self.prepare_inputs_for_generation( input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_kwargs ) outputs = self(**model_inputs, return_dict=True) next_token_logits = outputs.logits[:, -1, :] scores = self.postprocess_next_token_scores( scores=next_token_logits, input_ids=input_ids, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, cur_len=cur_len, min_length=min_length, max_length=max_length, eos_token_id=eos_token_id, repetition_penalty=repetition_penalty, batch_size=batch_size, num_beams=1, ) # if model has past, then set the past variable to speed up decoding if "past_key_values" in outputs: past = outputs.past_key_values elif "mems" in outputs: past = outputs.mems if do_sample: # Temperature (higher temperature => more likely to sample low probability tokens) if temperature != 1.0: scores = scores / temperature # Top-p/top-k filtering next_token_logscores = top_k_top_p_filtering(scores, top_k=top_k, top_p=top_p) # Sample probs = F.softmax(next_token_logscores, dim=-1) next_token = torch.multinomial(probs, num_samples=1).squeeze(1) else: # Greedy decoding next_token = torch.argmax(next_token_logits, dim=-1) # update generations and finished sentences if eos_token_id is not None: # pad finished sentences if eos_token_id exist tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents) else: tokens_to_add = next_token # add token and increase length by one input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1) cur_len = cur_len + 1 if eos_token_id is not None: eos_in_sents = tokens_to_add == eos_token_id # if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool() sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len) # unfinished_sents is set to zero if eos in sentence unfinished_sents.mul_((~eos_in_sents).long()) # stop when there is a </s> in each sentence, or if we exceed the maximul length if unfinished_sents.max() == 0: break # extend attention_mask for new generated input if only decoder if self.config.is_encoder_decoder is False: attention_mask = torch.cat( [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 ) return input_ids def _generate_beam_search( self, input_ids, cur_len, max_length, min_length, do_sample, early_stopping, temperature, top_k, top_p, repetition_penalty, no_repeat_ngram_size, bad_words_ids, pad_token_id, eos_token_id, batch_size, num_return_sequences, length_penalty, num_beams, vocab_size, attention_mask, use_cache, model_kwargs, ): """Generate sequences for each example with beam search.""" # generated hypotheses generated_hyps = [ BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping) for _ in range(batch_size) ] # scores for each sentence in the beam beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) # for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times if do_sample is False: beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,) # cache compute states past = 
None # done sentences done = [False for _ in range(batch_size)] while cur_len < max_length: model_inputs = self.prepare_inputs_for_generation( input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_kwargs ) outputs = self(**model_inputs, return_dict=True) # (batch_size * num_beams, cur_len, vocab_size) next_token_logits = outputs.logits[:, -1, :] # (batch_size * num_beams, vocab_size) # if model has past, then set the past variable to speed up decoding if "past_key_values" in outputs: past = outputs.past_key_values elif "mems" in outputs: past = outputs.mems if self.config.is_encoder_decoder and do_sample is False: # TODO (PVP) still a bit hacky here - there might be a better solution next_token_logits = self.adjust_logits_during_generation( next_token_logits, cur_len=cur_len, max_length=max_length ) scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size) scores = self.postprocess_next_token_scores( scores=scores, input_ids=input_ids, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, cur_len=cur_len, min_length=min_length, max_length=max_length, eos_token_id=eos_token_id, repetition_penalty=repetition_penalty, batch_size=batch_size, num_beams=num_beams, ) assert scores.shape == (batch_size * num_beams, vocab_size), "Shapes of scores: {} != {}".format( scores.shape, (batch_size * num_beams, vocab_size) ) if do_sample: _scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size) # Temperature if temperature != 1.0: _scores = _scores / temperature # Top-p/top-k filtering _scores = top_k_top_p_filtering( _scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2 ) # (batch_size * num_beams, vocab_size) # re-organize to group the beam together to sample from all beam_idxs _scores = _scores.contiguous().view( batch_size, num_beams * vocab_size ) # (batch_size, num_beams * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search) probs = F.softmax(_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2) # Compute next scores next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2) # sort the sampled vector to make sure that the first num_beams samples are the best next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1) next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2) else: next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size) # re-organize to group the beam together (we are keeping top hypothesis accross beams) next_scores = next_scores.view( batch_size, num_beams * vocab_size ) # (batch_size, num_beams * vocab_size) next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True) assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams) # next batch beam content next_batch_beam = [] # for each sentence for batch_idx in range(batch_size): # if we are done with this sentence, add a pad token if done[batch_idx]: assert ( len(generated_hyps[batch_idx]) >= num_beams ), "Batch can only be done if at least {} beams have been generated".format(num_beams) assert ( eos_token_id is not None and pad_token_id is not None ), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined" next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch 
continue # next sentence beam content, this will get added to next_batch_beam next_sent_beam = [] # next tokens for this sentence for beam_token_rank, (beam_token_id, beam_token_score) in enumerate( zip(next_tokens[batch_idx], next_scores[batch_idx]) ): # get beam and token IDs beam_id = beam_token_id // vocab_size token_id = beam_token_id % vocab_size effective_beam_id = batch_idx * num_beams + beam_id # add to generated hypotheses if end of sentence if (eos_token_id is not None) and (token_id.item() == eos_token_id): # if beam_token does not belong to top num_beams tokens, it should not be added is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams if is_beam_token_worse_than_top_num_beams: continue generated_hyps[batch_idx].add( input_ids[effective_beam_id].clone(), beam_token_score.item(), ) else: # add next predicted token since it is not eos_token next_sent_beam.append((beam_token_score, token_id, effective_beam_id)) # once the beam for next step is full, don't add more tokens to it. if len(next_sent_beam) == num_beams: break # Check if we are done so that we can save a pad step if all(done) done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done( next_scores[batch_idx].max().item(), cur_len ) # update next beam content assert len(next_sent_beam) == num_beams, "Beam should always be full" next_batch_beam.extend(next_sent_beam) assert len(next_batch_beam) == num_beams * (batch_idx + 1), "We should have added num_beams each step" # stop when we are done with each sentence if all(done): break # sanity check / prepare next batch assert len(next_batch_beam) == batch_size * num_beams beam_scores = beam_scores.new([x[0] for x in next_batch_beam]) beam_tokens = input_ids.new([x[1] for x in next_batch_beam]) beam_idx = input_ids.new([x[2] for x in next_batch_beam]) # re-order batch and update current length input_ids = input_ids[beam_idx, :] input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1) cur_len = cur_len + 1 # re-order internal states if past is not None: past = self._reorder_cache(past, beam_idx) # extend attention_mask for new generated input if only decoder if self.config.is_encoder_decoder is False: attention_mask = torch.cat( [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 ) # finalize all open beam hypotheses and add to generated hypotheses for batch_idx in range(batch_size): if done[batch_idx]: continue # test that beam scores match previously calculated scores if not eos and batch_idx not done if eos_token_id is not None and all( (token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx] ): assert torch.all( next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx] ), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format( next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx], ) # need to add best num_beams hypotheses to generated hyps for beam_id in range(num_beams): effective_beam_id = batch_idx * num_beams + beam_id final_score = beam_scores[effective_beam_id].item() final_tokens = input_ids[effective_beam_id] generated_hyps[batch_idx].add(final_tokens, final_score) # depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch output_batch_size = batch_size if do_sample else batch_size * num_return_sequences output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences 
# select the best hypotheses sent_lengths = input_ids.new(output_batch_size) best = [] # retrieve best hypotheses for i, hypotheses in enumerate(generated_hyps): sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0]) for j in range(output_num_return_sequences_per_batch): effective_batch_idx = output_num_return_sequences_per_batch * i + j best_hyp = sorted_hyps.pop()[1] sent_lengths[effective_batch_idx] = len(best_hyp) best.append(best_hyp) # prepare for adding eos sent_max_len = min(sent_lengths.max().item() + 1, max_length) decoded = input_ids.new(output_batch_size, sent_max_len) # shorter batches are padded if needed if sent_lengths.min().item() != sent_lengths.max().item(): assert pad_token_id is not None, "`pad_token_id` has to be defined" decoded.fill_(pad_token_id) # fill with hypotheses and eos_token_id if the latter fits in for i, hypo in enumerate(best): decoded[i, : sent_lengths[i]] = hypo if sent_lengths[i] < max_length: decoded[i, sent_lengths[i]] = eos_token_id return decoded @staticmethod def _reorder_cache(past: Tuple, beam_idx: Tensor) -> Tuple[Tensor]: return tuple(layer_past.index_select(1, beam_idx) for layer_past in past) def calc_banned_ngram_tokens(prev_input_ids: Tensor, num_hypos: int, no_repeat_ngram_size: int, cur_len: int) -> None: """Copied from fairseq for no_repeat_ngram in beam_search""" if cur_len + 1 < no_repeat_ngram_size: # return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet return [[] for _ in range(num_hypos)] generated_ngrams = [{} for _ in range(num_hypos)] for idx in range(num_hypos): gen_tokens = prev_input_ids[idx].tolist() generated_ngram = generated_ngrams[idx] for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]): prev_ngram_tuple = tuple(ngram[:-1]) generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] def _get_generated_ngrams(hypo_idx): # Before decoding the next token, prevent decoding of ngrams that have already appeared start_idx = cur_len + 1 - no_repeat_ngram_size ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].tolist()) return generated_ngrams[hypo_idx].get(ngram_idx, []) banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)] return banned_tokens def calc_banned_bad_words_ids(prev_input_ids: Iterable[int], bad_words_ids: Iterable[int]) -> Iterable[int]: banned_tokens = [] def _tokens_match(prev_tokens, tokens): if len(tokens) == 0: # if bad word tokens is just one token always ban it return True if len(tokens) > len(prev_tokens): # if bad word tokens are longer than prev tokens they can't be equal return False if prev_tokens[-len(tokens) :] == tokens: # if tokens match return True else: return False for prev_input_ids_slice in prev_input_ids: banned_tokens_slice = [] for banned_token_seq in bad_words_ids: assert len(banned_token_seq) > 0, "Banned words token sequences {} cannot have an empty list".format( bad_words_ids ) if _tokens_match(prev_input_ids_slice, banned_token_seq[:-1]) is False: # if tokens do not match continue continue banned_tokens_slice.append(banned_token_seq[-1]) banned_tokens.append(banned_tokens_slice) return banned_tokens def set_scores_to_inf_for_banned_tokens(scores: torch.Tensor, banned_tokens: List[List[int]]) -> None: """Modifies the scores in place by setting the banned token positions to `-inf`. Banned token is expected to be a list of list of banned tokens to ban in the format [[batch index, vocabulary position],...] 
Args: scores: logits distribution of shape (batch size, vocabulary size) banned_tokens: list of list of tokens to ban of length (batch_size) """ banned_mask_list = [] for idx, batch_banned_tokens in enumerate(banned_tokens): for token in batch_banned_tokens: banned_mask_list.append([idx, token]) if not banned_mask_list: return banned_mask = torch.LongTensor(banned_mask_list) indices = torch.ones(len(banned_mask)) # A sparse tensor is generated from a list of coordinates: [[0, 1], [0, 2], [2, 0]]. A conversion to dense tensor generates: # [ 0 1 1 ] # [ 0 0 0 ] # [ 1 0 0 ] banned_mask = torch.sparse.LongTensor(banned_mask.t(), indices, scores.size()).to(scores.device).to_dense().bool() scores.masked_fill_(banned_mask, -float("inf")) def top_k_top_p_filtering( logits: Tensor, top_k: int = 0, top_p: float = 1.0, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1, ) -> Tensor: """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) if top_k > 0: keep only top k tokens with highest probability (top-k filtering). if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) Make sure we keep at least min_tokens_to_keep per batch example in the output From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ if top_k > 0: top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] logits[indices_to_remove] = filter_value if top_p < 1.0: sorted_logits, sorted_indices = torch.sort(logits, descending=True) cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) # Remove tokens with cumulative probability above the threshold (token with 0 are kept) sorted_indices_to_remove = cumulative_probs > top_p if min_tokens_to_keep > 1: # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) sorted_indices_to_remove[..., :min_tokens_to_keep] = 0 # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 # scatter sorted tensors to original indexing indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) logits[indices_to_remove] = filter_value return logits class BeamHypotheses(object): def __init__(self, num_beams, max_length, length_penalty, early_stopping): """ Initialize n-best list of hypotheses. """ self.max_length = max_length - 1 # ignoring bos_token self.length_penalty = length_penalty self.early_stopping = early_stopping self.num_beams = num_beams self.beams = [] self.worst_score = 1e9 def __len__(self): """ Number of hypotheses in the list. """ return len(self.beams) def add(self, hyp, sum_logprobs): """ Add a new hypothesis to the list. 
""" score = sum_logprobs / len(hyp) ** self.length_penalty if len(self) < self.num_beams or score > self.worst_score: self.beams.append((score, hyp)) if len(self) > self.num_beams: sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)]) del self.beams[sorted_scores[0][1]] self.worst_score = sorted_scores[1][0] else: self.worst_score = min(score, self.worst_score) def is_done(self, best_sum_logprobs, cur_len): """ If there are enough hypotheses and that none of the hypotheses being generated can become better than the worst one in the heap, then we are done with this sentence. """ if len(self) < self.num_beams: return False elif self.early_stopping: return True else: cur_score = best_sum_logprobs / cur_len ** self.length_penalty ret = self.worst_score >= cur_score return ret
51,388
48.035305
246
py
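A minimal, hedged usage sketch of the beam-search path implemented by `generate()` above: it exercises `num_beams`, `no_repeat_ngram_size` and `num_return_sequences`, which route through `_generate_beam_search()`, `calc_banned_ngram_tokens()` and `BeamHypotheses`. The `gpt2` checkpoint and `AutoModelWithLMHead` follow the docstring examples in this file; running it downloads pretrained weights, and the generated text is not guaranteed.

from transformers import AutoModelWithLMHead, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('gpt2')                    # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('gpt2')                  # Download model and configuration and cache them
input_ids = tokenizer.encode('The dog', return_tensors='pt')         # encode input context
outputs = model.generate(
    input_ids=input_ids,
    max_length=40,
    num_beams=5,                 # beam search -> _generate_beam_search()
    no_repeat_ngram_size=3,      # block repeated trigrams via calc_banned_ngram_tokens()
    num_return_sequences=3,      # must be <= num_beams when do_sample=False
    early_stopping=True,
)
for i in range(3):               # 3 beams were returned
    print('Beam {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))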
SLT-FAI
SLT-FAI-main/transformers/tokenization_mobilebert_fast.py
# coding=utf-8 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for MobileBERT.""" from .tokenization_bert_fast import BertTokenizerFast from .tokenization_mobilebert import MobileBertTokenizer from .utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "mobilebert-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/google/mobilebert-uncased/vocab.txt" }, "tokenizer_file": { "mobilebert-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/google/mobilebert-uncased/tokenizer.json" }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512} PRETRAINED_INIT_CONFIGURATION = {} class MobileBertTokenizerFast(BertTokenizerFast): r""" Construct a "fast" MobileBERT tokenizer (backed by HuggingFace's `tokenizers` library). :class:`~transformers.MobileBertTokenizerFast` is identical to :class:`~transformers.BertTokenizerFast` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION slow_tokenizer_class = MobileBertTokenizer
2,070
35.982143
124
py
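A short usage sketch for `MobileBertTokenizerFast`, which tokenizes exactly like `BertTokenizerFast`. The `'google/mobilebert-uncased'` identifier is an assumption based on the vocabulary URLs above (the shortcut key in this file is `'mobilebert-uncased'`); loading it fetches the vocabulary files over the network.

from transformers import MobileBertTokenizerFast

tokenizer = MobileBertTokenizerFast.from_pretrained('google/mobilebert-uncased')  # downloads vocab.txt / tokenizer.json
encoded = tokenizer('Hello world!', return_tensors='pt')
print(encoded['input_ids'])                                                       # wordpiece ids incl. [CLS] / [SEP]
print(tokenizer.convert_ids_to_tokens(encoded['input_ids'][0].tolist()))          # back to wordpiece strings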
SLT-FAI
SLT-FAI-main/transformers/modeling_xlm_roberta.py
# coding=utf-8 # Copyright 2019 Facebook AI Research and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch XLM-RoBERTa model. """ from .configuration_xlm_roberta import XLMRobertaConfig from .file_utils import add_start_docstrings from .modeling_roberta import ( RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, ) from .utils import logging logger = logging.get_logger(__name__) XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [ "xlm-roberta-base", "xlm-roberta-large", "xlm-roberta-large-finetuned-conll02-dutch", "xlm-roberta-large-finetuned-conll02-spanish", "xlm-roberta-large-finetuned-conll03-english", "xlm-roberta-large-finetuned-conll03-german", # See all XLM-RoBERTa models at https://huggingface.co/models?filter=xlm-roberta ] XLM_ROBERTA_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.XLMRobertaConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ @add_start_docstrings( "The bare XLM-RoBERTa Model transformer outputting raw hidden-states without any specific head on top.", XLM_ROBERTA_START_DOCSTRING, ) class XLMRobertaModel(RobertaModel): """ This class overrides :class:`~transformers.RobertaModel`. Please check the superclass for the appropriate documentation alongside usage examples. """ config_class = XLMRobertaConfig @add_start_docstrings( "XLM-RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.", XLM_ROBERTA_START_DOCSTRING, ) class XLMRobertaForCausalLM(RobertaForCausalLM): """ This class overrides :class:`~transformers.RobertaForCausalLM`. Please check the superclass for the appropriate documentation alongside usage examples. """ config_class = XLMRobertaConfig @add_start_docstrings( """XLM-RoBERTa Model with a `language modeling` head on top. """, XLM_ROBERTA_START_DOCSTRING, ) class XLMRobertaForMaskedLM(RobertaForMaskedLM): """ This class overrides :class:`~transformers.RobertaForMaskedLM`. Please check the superclass for the appropriate documentation alongside usage examples. 
""" config_class = XLMRobertaConfig @add_start_docstrings( """XLM-RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, XLM_ROBERTA_START_DOCSTRING, ) class XLMRobertaForSequenceClassification(RobertaForSequenceClassification): """ This class overrides :class:`~transformers.RobertaForSequenceClassification`. Please check the superclass for the appropriate documentation alongside usage examples. """ config_class = XLMRobertaConfig @add_start_docstrings( """XLM-RoBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, XLM_ROBERTA_START_DOCSTRING, ) class XLMRobertaForMultipleChoice(RobertaForMultipleChoice): """ This class overrides :class:`~transformers.RobertaForMultipleChoice`. Please check the superclass for the appropriate documentation alongside usage examples. """ config_class = XLMRobertaConfig @add_start_docstrings( """XLM-RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, XLM_ROBERTA_START_DOCSTRING, ) class XLMRobertaForTokenClassification(RobertaForTokenClassification): """ This class overrides :class:`~transformers.RobertaForTokenClassification`. Please check the superclass for the appropriate documentation alongside usage examples. """ config_class = XLMRobertaConfig @add_start_docstrings( """XLM-RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""", XLM_ROBERTA_START_DOCSTRING, ) class XLMRobertaForQuestionAnswering(RobertaForQuestionAnswering): """ This class overrides :class:`~transformers.RobertaForQuestionAnswering`. Please check the superclass for the appropriate documentation alongside usage examples. """ config_class = XLMRobertaConfig
5,815
36.522581
127
py
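Since every class in this module is a thin `config_class` override of its RoBERTa counterpart, usage is identical to RoBERTa. A hedged sketch with the `'xlm-roberta-base'` checkpoint from the archive list above; it assumes `sentencepiece` is installed and that the weights can be downloaded.

import torch
from transformers import XLMRobertaModel, XLMRobertaTokenizer

tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')
model = XLMRobertaModel.from_pretrained('xlm-roberta-base')

inputs = tokenizer('Bonjour, le monde !', return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs, return_dict=True)
print(outputs.last_hidden_state.shape)   # (1, sequence_length, hidden_size)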
SLT-FAI
SLT-FAI-main/transformers/modeling_camembert.py
# coding=utf-8 # Copyright 2019 Inria, Facebook AI Research and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch CamemBERT model. """ from .configuration_camembert import CamembertConfig from .file_utils import add_start_docstrings from .modeling_roberta import ( RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, ) from .utils import logging logger = logging.get_logger(__name__) _TOKENIZER_FOR_DOC = "CamembertTokenizer" CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "camembert-base", "Musixmatch/umberto-commoncrawl-cased-v1", "Musixmatch/umberto-wikipedia-uncased-v1", # See all CamemBERT models at https://huggingface.co/models?filter=camembert ] CAMEMBERT_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.CamembertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ @add_start_docstrings( "The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.", CAMEMBERT_START_DOCSTRING, ) class CamembertModel(RobertaModel): """ This class overrides :class:`~transformers.RobertaModel`. Please check the superclass for the appropriate documentation alongside usage examples. """ config_class = CamembertConfig @add_start_docstrings( """CamemBERT Model with a `language modeling` head on top. """, CAMEMBERT_START_DOCSTRING, ) class CamembertForMaskedLM(RobertaForMaskedLM): """ This class overrides :class:`~transformers.RobertaForMaskedLM`. Please check the superclass for the appropriate documentation alongside usage examples. """ config_class = CamembertConfig @add_start_docstrings( """CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, CAMEMBERT_START_DOCSTRING, ) class CamembertForSequenceClassification(RobertaForSequenceClassification): """ This class overrides :class:`~transformers.RobertaForSequenceClassification`. Please check the superclass for the appropriate documentation alongside usage examples. 
""" config_class = CamembertConfig @add_start_docstrings( """CamemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, CAMEMBERT_START_DOCSTRING, ) class CamembertForMultipleChoice(RobertaForMultipleChoice): """ This class overrides :class:`~transformers.RobertaForMultipleChoice`. Please check the superclass for the appropriate documentation alongside usage examples. """ config_class = CamembertConfig @add_start_docstrings( """CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, CAMEMBERT_START_DOCSTRING, ) class CamembertForTokenClassification(RobertaForTokenClassification): """ This class overrides :class:`~transformers.RobertaForTokenClassification`. Please check the superclass for the appropriate documentation alongside usage examples. """ config_class = CamembertConfig @add_start_docstrings( """CamemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits` """, CAMEMBERT_START_DOCSTRING, ) class CamembertForQuestionAnswering(RobertaForQuestionAnswering): """ This class overrides :class:`~transformers.RobertaForQuestionAnswering`. Please check the superclass for the appropriate documentation alongside usage examples. """ config_class = CamembertConfig @add_start_docstrings( """CamemBERT Model with a `language modeling` head on top for CLM fine-tuning. """, CAMEMBERT_START_DOCSTRING ) class CamembertForCausalLM(RobertaForCausalLM): """ This class overrides :class:`~transformers.RobertaForCausalLM`. Please check the superclass for the appropriate documentation alongside usage examples. """ config_class = CamembertConfig
5,685
36.163399
120
py
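A hedged sketch of masked-language-model inference with the `'camembert-base'` checkpoint from the archive list above. The `<mask>` token and the `.logits` field of the returned output are standard for this model family; downloading the weights and having `sentencepiece` installed are assumed.

import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
model = CamembertForMaskedLM.from_pretrained('camembert-base')

inputs = tokenizer('Le camembert est <mask> !', return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs, return_dict=True).logits

# Locate the <mask> position(s) and take the highest-scoring vocabulary entry for each.
mask_positions = (inputs['input_ids'][0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids.tolist()))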
SLT-FAI
SLT-FAI-main/transformers/tokenization_xlnet.py
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization classes for XLNet model.""" import os import unicodedata from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from .file_utils import SPIECE_UNDERLINE from .tokenization_utils import PreTrainedTokenizer from .utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "xlnet-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-spiece.model", "xlnet-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-spiece.model", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "xlnet-base-cased": None, "xlnet-large-cased": None, } # Segments (not really needed) SEG_ID_A = 0 SEG_ID_B = 1 SEG_ID_CLS = 2 SEG_ID_SEP = 3 SEG_ID_PAD = 4 class XLNetTokenizer(PreTrainedTokenizer): """ Construct an XLNet tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__. This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a .spm extension) that contains the vocabulary necessary to instantiate a tokenizer. do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to lowercase the input when tokenizing. remove_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to strip the text when tokenizing (removing excess spaces before and after the string). keep_accents (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to keep accents when tokenizing. bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. .. note:: When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the :obj:`cls_token`. eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`): The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the :obj:`sep_token`. unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (:obj:`str`, `optional`, defaults to :obj:`"<sep>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. 
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`): The token used for padding, for example when batching sequences of different lengths. cls_token (:obj:`str`, `optional`, defaults to :obj:`"<cls>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (:obj:`str`, `optional`, defaults to :obj:`"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. additional_special_tokens (:obj:`List[str]`, `optional`, defaults to :obj:`["<eop>", "<eod>"]`): Additional special tokens used by the tokenizer. Attributes: sp_model (:obj:`SentencePieceProcessor`): The `SentencePiece` processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES padding_side = "left" def __init__( self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs ): super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, ) self._pad_token_type_id = 3 self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(vocab_file) @property def vocab_size(self): return len(self.sp_model) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file) def preprocess_text(self, inputs): if self.remove_space: outputs = " ".join(inputs.strip().split()) else: outputs = inputs outputs = outputs.replace("``", '"').replace("''", '"') if not self.keep_accents: outputs = unicodedata.normalize("NFKD", outputs) outputs = "".join([c for c in outputs if not unicodedata.combining(c)]) if self.do_lower_case: outputs = outputs.lower() return outputs def _tokenize(self, text, sample=False): """ Tokenize a string. """ text = self.preprocess_text(text) if not sample: pieces = self.sp_model.EncodeAsPieces(text) else: pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1) new_pieces = [] for piece in pieces: if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit(): cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, "")) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: cur_pieces = cur_pieces[1:] else: cur_pieces[0] = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(cur_pieces) else: new_pieces.append(piece) return new_pieces def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. 
""" return self.sp_model.PieceToId(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.sp_model.IdToPiece(index) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLNet sequence has the following format: - single sequence: ``X <sep> <cls>`` - pair of sequences: ``A <sep> B <sep> <cls>`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return token_ids_0 + sep + cls return token_ids_0 + sep + token_ids_1 + sep + cls def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` method. Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the token list is already formatted with special tokens for the model. Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formated with special tokens for the model." ) return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) if token_ids_1 is not None: return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1] return ([0] * len(token_ids_0)) + [1, 1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given sequence(s). 
""" sep = [self.sep_token_id] cls_segment_id = [2] if token_ids_1 is None: return len(token_ids_0 + sep) * [0] + cls_segment_id return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
13,149
39.712074
119
py
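A small sketch illustrating the special-token layout documented in `build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences` above: `X <sep> <cls>` for a single sequence, `A <sep> B <sep> <cls>` for a pair, with segment id 2 on the trailing `<cls>`. It assumes the `'xlnet-base-cased'` SentencePiece model can be downloaded.

from transformers import XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')

single = tokenizer('Hello world')
pair = tokenizer('Hello world', 'How are you?')

print(tokenizer.convert_ids_to_tokens(single['input_ids']))   # ... '<sep>', '<cls>' appended at the end
print(pair['token_type_ids'])                                 # 0s for A + <sep>, 1s for B + <sep>, 2 for <cls>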
SLT-FAI
SLT-FAI-main/transformers/modeling_flax_roberta.py
# coding=utf-8 # Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Dict import numpy as np import flax.linen as nn import jax import jax.numpy as jnp from flax.linen import compact from .configuration_roberta import RobertaConfig from .file_utils import add_start_docstrings from .modeling_flax_utils import FlaxPreTrainedModel, gelu from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "RobertaConfig" _TOKENIZER_FOR_DOC = "RobertaTokenizer" ROBERTA_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ ROBERTA_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.RobertaTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **maked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ # Copied from transformers.modeling_flax_bert.FlaxBertLayerNorm with Bert->Roberta class FlaxRobertaLayerNorm(nn.Module): """Layer normalization (https://arxiv.org/abs/1607.06450). Operates on the last axis of the input data. """ epsilon: float = 1e-6 dtype: jnp.dtype = jnp.float32 bias: bool = True scale: bool = True bias_init: jnp.ndarray = nn.initializers.zeros scale_init: jnp.ndarray = nn.initializers.ones @compact def __call__(self, x): """Applies layer normalization on the input. It normalizes the activations of the layer for each given example in a batch independently, rather than across a batch like Batch Normalization. i.e. applies a transformation that maintains the mean activation within each example close to 0 and the activation standard deviation close to 1. Args: x: the inputs epsilon: A small float added to variance to avoid dividing by zero. dtype: the dtype of the computation (default: float32). bias: If True, bias (beta) is added. scale: If True, multiply by scale (gamma). When the next layer is linear (also e.g. nn.relu), this can be disabled since the scaling will be done by the next layer. bias_init: Initializer for bias, by default, zero. scale_init: Initializer for scale, by default, one. Returns: Normalized inputs (the same shape as inputs). 
""" features = x.shape[-1] mean = jnp.mean(x, axis=-1, keepdims=True) mean2 = jnp.mean(jax.lax.square(x), axis=-1, keepdims=True) var = mean2 - jax.lax.square(mean) mul = jax.lax.rsqrt(var + self.epsilon) if self.scale: mul = mul * jnp.asarray(self.param("gamma", self.scale_init, (features,)), self.dtype) y = (x - mean) * mul if self.bias: y = y + jnp.asarray(self.param("beta", self.bias_init, (features,)), self.dtype) return y # Copied from transformers.modeling_flax_bert.FlaxBertEmbedding with Bert->Roberta class FlaxRobertaEmbedding(nn.Module): """ Specify a new class for doing the embedding stuff as Flax's one use 'embedding' for the parameter name and PyTorch use 'weight' """ vocab_size: int hidden_size: int emb_init: Callable[..., np.ndarray] = nn.initializers.normal(stddev=0.1) @compact def __call__(self, inputs): embedding = self.param("weight", self.emb_init, (self.vocab_size, self.hidden_size)) return jnp.take(embedding, inputs, axis=0) # Copied from transformers.modeling_flax_bert.FlaxBertEmbeddings with Bert->Roberta class FlaxRobertaEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" vocab_size: int hidden_size: int type_vocab_size: int max_length: int @compact def __call__(self, input_ids, token_type_ids, position_ids, attention_mask): # Embed w_emb = FlaxRobertaEmbedding(self.vocab_size, self.hidden_size, name="word_embeddings")( jnp.atleast_2d(input_ids.astype("i4")) ) p_emb = FlaxRobertaEmbedding(self.max_length, self.hidden_size, name="position_embeddings")( jnp.atleast_2d(position_ids.astype("i4")) ) t_emb = FlaxRobertaEmbedding(self.type_vocab_size, self.hidden_size, name="token_type_embeddings")( jnp.atleast_2d(token_type_ids.astype("i4")) ) # Sum all embeddings summed_emb = w_emb + jnp.broadcast_to(p_emb, w_emb.shape) + t_emb # Layer Norm layer_norm = FlaxRobertaLayerNorm(name="layer_norm")(summed_emb) return layer_norm # Copied from transformers.modeling_flax_bert.FlaxBertAttention with Bert->Roberta class FlaxRobertaAttention(nn.Module): num_heads: int head_size: int @compact def __call__(self, hidden_state, attention_mask): self_att = nn.attention.SelfAttention(num_heads=self.num_heads, qkv_features=self.head_size, name="self")( hidden_state, attention_mask ) layer_norm = FlaxRobertaLayerNorm(name="layer_norm")(self_att + hidden_state) return layer_norm # Copied from transformers.modeling_flax_bert.FlaxBertIntermediate with Bert->Roberta class FlaxRobertaIntermediate(nn.Module): output_size: int @compact def __call__(self, hidden_state): # TODO: Add ACT2FN reference to change activation function dense = nn.Dense(features=self.output_size, name="dense")(hidden_state) return gelu(dense) # Copied from transformers.modeling_flax_bert.FlaxBertOutput with Bert->Roberta class FlaxRobertaOutput(nn.Module): @compact def __call__(self, intermediate_output, attention_output): hidden_state = nn.Dense(attention_output.shape[-1], name="dense")(intermediate_output) hidden_state = FlaxRobertaLayerNorm(name="layer_norm")(hidden_state + attention_output) return hidden_state class FlaxRobertaLayer(nn.Module): num_heads: int head_size: int intermediate_size: int @compact def __call__(self, hidden_state, attention_mask): attention = FlaxRobertaAttention(self.num_heads, self.head_size, name="attention")( hidden_state, attention_mask ) intermediate = FlaxRobertaIntermediate(self.intermediate_size, name="intermediate")(attention) output = FlaxRobertaOutput(name="output")(intermediate, attention) return output # Copied from 
transformers.modeling_flax_bert.FlaxBertLayerCollection with Bert->Roberta class FlaxRobertaLayerCollection(nn.Module): """ Stores N RobertaLayer(s) """ num_layers: int num_heads: int head_size: int intermediate_size: int @compact def __call__(self, inputs, attention_mask): assert self.num_layers > 0, f"num_layers should be >= 1, got ({self.num_layers})" # Initialize input / output input_i = inputs # Forward over all encoders for i in range(self.num_layers): layer = FlaxRobertaLayer(self.num_heads, self.head_size, self.intermediate_size, name=f"{i}") input_i = layer(input_i, attention_mask) return input_i # Copied from transformers.modeling_flax_bert.FlaxBertEncoder with Bert->Roberta class FlaxRobertaEncoder(nn.Module): num_layers: int num_heads: int head_size: int intermediate_size: int @compact def __call__(self, hidden_state, attention_mask): layer = FlaxRobertaLayerCollection( self.num_layers, self.num_heads, self.head_size, self.intermediate_size, name="layer" )(hidden_state, attention_mask) return layer # Copied from transformers.modeling_flax_bert.FlaxBertPooler with Bert->Roberta class FlaxRobertaPooler(nn.Module): @compact def __call__(self, hidden_state): cls_token = hidden_state[:, 0] out = nn.Dense(hidden_state.shape[-1], name="dense")(cls_token) return jax.lax.tanh(out) # Copied from transformers.modeling_flax_bert.FlaxBertModule with Bert->Roberta class FlaxRobertaModule(nn.Module): vocab_size: int hidden_size: int type_vocab_size: int max_length: int num_encoder_layers: int num_heads: int head_size: int intermediate_size: int @compact def __call__(self, input_ids, token_type_ids, position_ids, attention_mask): # Embedding embeddings = FlaxRobertaEmbeddings( self.vocab_size, self.hidden_size, self.type_vocab_size, self.max_length, name="embeddings" )(input_ids, token_type_ids, position_ids, attention_mask) # N stacked encoding layers encoder = FlaxRobertaEncoder( self.num_encoder_layers, self.num_heads, self.head_size, self.intermediate_size, name="encoder" )(embeddings, attention_mask) pooled = FlaxRobertaPooler(name="pooler")(encoder) return encoder, pooled @add_start_docstrings( "The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.", ROBERTA_START_DOCSTRING, ) class FlaxRobertaModel(FlaxPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. 
""" model_class = FlaxRobertaModule config_class = RobertaConfig base_model_prefix = "roberta" @staticmethod def convert_from_pytorch(pt_state: Dict, config: RobertaConfig) -> Dict: jax_state = dict(pt_state) # Need to change some parameters name to match Flax names so that we don't have to fork any layer for key, tensor in pt_state.items(): # Key parts key_parts = set(key.split(".")) # Every dense layer has "kernel" parameters instead of "weight" if "dense.weight" in key: del jax_state[key] key = key.replace("weight", "kernel") jax_state[key] = tensor # SelfAttention needs also to replace "weight" by "kernel" if {"query", "key", "value"} & key_parts: # Flax SelfAttention decomposes the heads (num_head, size // num_heads) if "bias" in key: jax_state[key] = tensor.reshape((config.num_attention_heads, -1)) elif "weight": del jax_state[key] key = key.replace("weight", "kernel") tensor = tensor.reshape((config.num_attention_heads, -1, config.hidden_size)).transpose((2, 0, 1)) jax_state[key] = tensor # SelfAttention output is not a separate layer, remove one nesting if "attention.output.dense" in key: del jax_state[key] key = key.replace("attention.output.dense", "attention.self.out") jax_state[key] = tensor # SelfAttention output is not a separate layer, remove nesting on layer norm if "attention.output.LayerNorm" in key: del jax_state[key] key = key.replace("attention.output.LayerNorm", "attention.LayerNorm") jax_state[key] = tensor # There are some transposed parameters w.r.t their PyTorch counterpart if "intermediate.dense.kernel" in key or "output.dense.kernel" in key: jax_state[key] = tensor.T # Self Attention output projection needs to be transposed if "out.kernel" in key: jax_state[key] = tensor.reshape((config.hidden_size, config.num_attention_heads, -1)).transpose( 1, 2, 0 ) # Pooler needs to transpose its kernel if "pooler.dense.kernel" in key: jax_state[key] = tensor.T # Handle LayerNorm conversion if "LayerNorm" in key: del jax_state[key] # Replace LayerNorm by layer_norm new_key = key.replace("LayerNorm", "layer_norm") if "weight" in key: new_key = new_key.replace("weight", "gamma") elif "bias" in key: new_key = new_key.replace("bias", "beta") jax_state[new_key] = tensor return jax_state def __init__(self, config: RobertaConfig, state: dict, seed: int = 0, **kwargs): model = FlaxRobertaModule( vocab_size=config.vocab_size, hidden_size=config.hidden_size, type_vocab_size=config.type_vocab_size, max_length=config.max_position_embeddings, num_encoder_layers=config.num_hidden_layers, num_heads=config.num_attention_heads, head_size=config.hidden_size, intermediate_size=config.intermediate_size, ) super().__init__(config, model, state, seed) @property def module(self) -> nn.Module: return self._module def __call__(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None): if token_type_ids is None: token_type_ids = jnp.ones_like(input_ids) if position_ids is None: position_ids = np.arange( self.config.pad_token_id + 1, np.atleast_2d(input_ids).shape[-1] + self.config.pad_token_id + 1 ) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) return self.model.apply( {"params": self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(token_type_ids, dtype="i4"), jnp.array(position_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), )
17,979
38.866962
127
py
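The FlaxRobertaLayerNorm module in the Flax RoBERTa code above normalizes over the last axis, computing the variance as E[x^2] - E[x]^2 before applying the learned gamma/beta parameters. A minimal NumPy sketch of that computation follows; the shapes, epsilon, and parameter values are illustrative only, not taken from the file or from a real checkpoint.

import numpy as np

def layer_norm(x, gamma, beta, epsilon=1e-6):
    # Normalize over the last (feature) axis, as FlaxRobertaLayerNorm does.
    mean = x.mean(axis=-1, keepdims=True)
    var = (x ** 2).mean(axis=-1, keepdims=True) - mean ** 2  # E[x^2] - E[x]^2
    y = (x - mean) / np.sqrt(var + epsilon)
    return y * gamma + beta

x = np.random.randn(2, 4, 8)             # (batch, seq_len, hidden), made-up sizes
gamma, beta = np.ones(8), np.zeros(8)    # per-feature scale ("gamma") and bias ("beta")
out = layer_norm(x, gamma, beta)
print(out.mean(axis=-1))                 # close to 0 for every position
print(out.std(axis=-1))                  # close to 1 for every position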
SLT-FAI
SLT-FAI-main/transformers/configuration_deberta.py
# coding=utf-8 # Copyright 2020, Microsoft and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ DeBERTa model configuration """ from .configuration_utils import PretrainedConfig from .utils import logging logger = logging.get_logger(__name__) DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/deberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/microsoft/deberta-base/config.json", "microsoft/deberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/microsoft/deberta-large/config.json", } class DebertaConfig(PretrainedConfig): r""" :class:`~transformers.DebertaConfig` is the configuration class to store the configuration of a :class:`~transformers.DebertaModel`. Arguments: vocab_size (:obj:`int`, `optional`, defaults to 30522): Vocabulary size of the DeBERTa model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.DebertaModel` or :class:`~transformers.TFDebertaModel`. hidden_size (:obj:`int`, `optional`, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (:obj:`int`, `optional`, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (:obj:`int`, `optional`, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (:obj:`int`, `optional`, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"swish"`, :obj:`"gelu"`, :obj:`"tanh"`, :obj:`"gelu_fast"`, :obj:`"mish"`, :obj:`"linear"`, :obj:`"sigmoid"` and :obj:`"gelu_new"` are supported. hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (:obj:`int`, `optional`, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (:obj:`int`, `optional`, defaults to 2): The vocabulary size of the :obj:`token_type_ids` passed when calling :class:`~transformers.DebertaModel` or :class:`~transformers.TFDebertaModel`. initializer_range (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12): The epsilon used by the layer normalization layers. relative_attention (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether use relative position encoding. 
max_relative_positions (:obj:`int`, `optional`, defaults to 1): The range of relative positions :obj:`[-max_position_embeddings, max_position_embeddings]`. Use the same value as :obj:`max_position_embeddings`. pad_token_id (:obj:`int`, `optional`, defaults to 0): The value used to pad input_ids. position_biased_input (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether add absolute position embedding to content embedding. pos_att_type (:obj:`List[str]`, `optional`): The type of relative position attention, it can be a combination of :obj:`["p2c", "c2p", "p2p"]`, e.g. :obj:`["p2c"]`, :obj:`["p2c", "c2p"]`, :obj:`["p2c", "c2p", 'p2p"]`. layer_norm_eps (:obj:`float`, optional, defaults to 1e-12): The epsilon used by the layer normalization layers. """ model_type = "deberta" def __init__( self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.relative_attention = relative_attention self.max_relative_positions = max_relative_positions self.pad_token_id = pad_token_id self.position_biased_input = position_biased_input # Backwards compatibility if type(pos_att_type) == str: pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")] self.pos_att_type = pos_att_type self.vocab_size = vocab_size self.layer_norm_eps = layer_norm_eps self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size) self.pooler_dropout = pooler_dropout self.pooler_hidden_act = pooler_hidden_act
6,651
49.015038
121
py
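The DebertaConfig defined in configuration_deberta.py above accepts pos_att_type either as a list or, for backwards compatibility, as a "|"-separated string. A hedged usage sketch, assuming a transformers build that ships DeBERTa; the values are illustrative rather than the defaults of any released checkpoint:

from transformers import DebertaConfig

config = DebertaConfig(
    vocab_size=50265,
    hidden_size=768,
    num_hidden_layers=12,
    relative_attention=True,
    max_relative_positions=512,   # the docstring suggests using the same value as max_position_embeddings
    pos_att_type="c2p|p2c",       # the string form is lower-cased and split on "|"
)
print(config.pos_att_type)        # ['c2p', 'p2c']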
SLT-FAI
SLT-FAI-main/transformers/tokenization_pegasus_fast.py
# coding=utf-8 # Copyright 2020 Google and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional from .file_utils import add_start_docstrings, is_sentencepiece_available from .tokenization_reformer_fast import ReformerTokenizerFast from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING, BatchEncoding if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: PegasusTokenizer = None SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": {"google/pegasus-xsum": "https://cdn.huggingface.co/google/pegasus-xsum/spiece.model"}, "tokenizer_file": {"google/pegasus-xsum": "https://cdn.huggingface.co/google/pegasus-xsum/tokenizer.json"}, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/pegasus-xsum": 512, } class PegasusTokenizerFast(ReformerTokenizerFast): offset = 103 # entries 2-104 are only used for pretraining vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = PegasusTokenizer # def num_special_tokens_to_add(self, pair=False): # """Just EOS""" # return 1 def _special_token_mask(self, seq): all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special assert all_special_ids == set([0, 1]) return [1 if x in all_special_ids else 0 for x in seq] def get_special_tokens_mask( self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False ) -> List[int]: """Get list where entries are [1] if a token is [eos] or [pad] else 0.""" if already_has_special_tokens: return self._special_token_mask(token_ids_0) elif token_ids_1 is None: return self._special_token_mask(token_ids_0) + [1] else: return self._special_token_mask(token_ids_0 + token_ids_1) + [1] def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]: """ Build model inputs from a sequence by adding eos to the end. no bos token is added to the front. - single sequence: ``X </s>`` - pair of sequences: ``A B </s>`` (not intended use) Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. 
""" if token_ids_1 is None: return token_ids_0 + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_0 + token_ids_1 + [self.eos_token_id] @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING) def prepare_seq2seq_batch( self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, return_tensors: str = "pt", truncation=True, padding="longest", **unused, ) -> BatchEncoding: if "" in src_texts: raise ValueError(f"found empty string in src_texts: {src_texts}") tokenizer_kwargs = dict( add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, truncation=truncation, padding=padding, ) model_inputs: BatchEncoding = self(src_texts, **tokenizer_kwargs) if tgt_texts is None: return model_inputs if max_target_length is not None: tokenizer_kwargs["max_length"] = max_target_length labels: BatchEncoding = self(tgt_texts, **tokenizer_kwargs)["input_ids"] model_inputs["labels"] = labels return model_inputs
4,793
39.285714
117
py
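PegasusTokenizerFast above appends a single </s> (eos) to every sequence and never adds a bos token, whether or not a second segment is given. A pure-Python sketch of that rule; the token ids are made up for illustration, except that the real tokenizer uses pad=0 and eos=1:

EOS_ID = 1

def build_inputs_with_special_tokens(token_ids_0, token_ids_1=None):
    # Single sequence: X </s>; pair (not the intended use): A B </s>
    if token_ids_1 is None:
        return token_ids_0 + [EOS_ID]
    return token_ids_0 + token_ids_1 + [EOS_ID]

print(build_inputs_with_special_tokens([42, 43, 44]))        # [42, 43, 44, 1]
print(build_inputs_with_special_tokens([42, 43], [44, 45]))  # [42, 43, 44, 45, 1]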
SLT-FAI
SLT-FAI-main/transformers/tokenization_reformer.py
# coding=utf-8 # Copyright 2020 The Trax Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for model Reformer.""" import os from shutil import copyfile from typing import Dict, Optional, Tuple import sentencepiece as spm from .tokenization_utils import PreTrainedTokenizer from .utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" #################################################### # Mapping from the keyword arguments names of Tokenizer `__init__` # to file names for serializing Tokenizer instances #################################################### VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} #################################################### # Mapping from the keyword arguments names of Tokenizer `__init__` # to pretrained vocabulary URL for all the model shortcut names. #################################################### PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "google/reformer-crime-and-punishment": "https://cdn.huggingface.co/google/reformer-crime-and-punishment/spiece.model" } } #################################################### # Mapping from model shortcut names to max length of inputs #################################################### PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/reformer-crime-and-punishment": 524288, } class ReformerTokenizer(PreTrainedTokenizer): """ Construct a Reformer tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__ . This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that contains the vocabulary necessary to instantiate a tokenizer. eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`): The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the :obj:`sep_token`. unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`): The token used for padding, for example when batching sequences of different lengths. additional_special_tokens (:obj:`List[str]`, `optional`): Additional special tokens used by the tokenizer. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["attention_mask"] def __init__( self, vocab_file, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", additional_special_tokens=[], **kwargs ): super().__init__( eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, additional_special_tokens=additional_special_tokens, **kwargs, ) self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(vocab_file) @property def vocab_size(self): return self.sp_model.get_piece_size() def get_vocab(self) -> Dict[str, int]: vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file) def _tokenize(self, text, sample=False): """Take as input a string and return a list of strings (tokens) for words/sub-words""" if not sample: pieces = self.sp_model.EncodeAsPieces(text) else: pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1) return pieces def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index < self.sp_model.get_piece_size(): token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ out_string = self.sp_model.decode_pieces(tokens) return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
6,182
36.70122
126
py
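ReformerTokenizer above keeps a SentencePiece processor as an attribute, which is not picklable, so __getstate__ drops it and __setstate__ reloads it from the stored vocab_file path. A small sketch of that pattern, assuming a real SentencePiece model file is available whenever the class is actually instantiated:

import sentencepiece as spm

class SpmHolder:
    def __init__(self, vocab_file):
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None             # drop the non-picklable C++-backed object
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)  # rebuild from the saved path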
SLT-FAI
SLT-FAI-main/transformers/tokenization_electra.py
# coding=utf-8 # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .tokenization_bert import BertTokenizer VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "google/electra-small-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-small-generator/vocab.txt", "google/electra-base-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-base-generator/vocab.txt", "google/electra-large-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-large-generator/vocab.txt", "google/electra-small-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-small-discriminator/vocab.txt", "google/electra-base-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-base-discriminator/vocab.txt", "google/electra-large-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-large-discriminator/vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/electra-small-generator": 512, "google/electra-base-generator": 512, "google/electra-large-generator": 512, "google/electra-small-discriminator": 512, "google/electra-base-discriminator": 512, "google/electra-large-discriminator": 512, } PRETRAINED_INIT_CONFIGURATION = { "google/electra-small-generator": {"do_lower_case": True}, "google/electra-base-generator": {"do_lower_case": True}, "google/electra-large-generator": {"do_lower_case": True}, "google/electra-small-discriminator": {"do_lower_case": True}, "google/electra-base-discriminator": {"do_lower_case": True}, "google/electra-large-discriminator": {"do_lower_case": True}, } class ElectraTokenizer(BertTokenizer): r""" Construct an ELECTRA tokenizer. :class:`~transformers.ElectraTokenizer` is identical to :class:`~transformers.BertTokenizer` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
3,022
44.119403
145
py
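ElectraTokenizer above only swaps in ELECTRA vocabulary URLs and defaults; the tokenization itself is BERT's WordPiece pipeline. A hedged usage sketch; downloading the named checkpoint requires network access and a transformers build that includes ELECTRA:

from transformers import ElectraTokenizer

tok = ElectraTokenizer.from_pretrained("google/electra-small-discriminator")
print(tok.tokenize("Replaced token detection pre-training"))
print(tok.encode("Replaced token detection pre-training")[:5])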
SLT-FAI
SLT-FAI-main/transformers/configuration_mmbt.py
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MMBT configuration """

from .utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig(object):
    """
    This is the configuration class to store the configuration of a :class:`~transformers.MMBTModel`. It is used to
    instantiate a MMBT model according to the specified arguments, defining the model architecture.

    Args:
        config (:class:`~transformers.PreTrainedConfig`):
            Config of the underlying Transformer models. Its values are copied over to use a single config.
        num_labels (:obj:`int`, `optional`):
            Size of final Linear layer for classification.
        modal_hidden_size (:obj:`int`, `optional`, defaults to 2048):
            Embedding dimension of the non-text modality encoder.
    """

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
1,652
37.44186
115
py
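MMBTConfig above does not subclass PretrainedConfig; it simply adopts the wrapped text config's __dict__ and adds the multimodal fields. A hedged sketch of what that means in practice, assuming MMBTConfig and BertConfig are importable from the surrounding transformers package:

from transformers import BertConfig, MMBTConfig

text_config = BertConfig(hidden_size=768)
mmbt_config = MMBTConfig(text_config, num_labels=2, modal_hidden_size=2048)

print(mmbt_config.hidden_size)        # 768, inherited from the wrapped BertConfig
print(mmbt_config.modal_hidden_size)  # 2048
print(mmbt_config.num_labels)         # 2

Because the two objects share one __dict__ rather than copying values field by field, attributes set on the MMBTConfig afterwards also end up in the wrapped config's __dict__.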
SLT-FAI
SLT-FAI-main/transformers/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ProphetNet checkpoint.""" import argparse import torch from transformers import logging from transformers.modeling_prophetnet import ProphetNetForConditionalGeneration from transformers.modeling_xlm_prophetnet import XLMProphetNetForConditionalGeneration # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) logger = logging.get_logger(__name__) logging.set_verbosity_info() def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str): """ Copy/paste/tweak prohpetnet's weights to our prophetnet structure. """ if "xprophetnet" in prophetnet_checkpoint_path: prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path) prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained( prophetnet_checkpoint_path, output_loading_info=True ) else: prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path) prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained( prophetnet_checkpoint_path, output_loading_info=True ) special_keys = ["key_proj", "value_proj", "query_proj"] mapping = { "self_attn": "ngram_self_attn", "cross_attn": "encoder_attn", "cross_attn_layer_norm": "encoder_attn_layer_norm", "feed_forward_layer_norm": "final_layer_norm", "feed_forward": "", "intermediate": "fc1", "output": "fc2", "key_proj": "k_proj", "query_proj": "q_proj", "value_proj": "v_proj", "word_embeddings": "embed_tokens", "embeddings_layer_norm": "emb_layer_norm", "relative_pos_embeddings": "relative_linear", "ngram_embeddings": "ngram_input_embed", "position_embeddings": "embed_positions", } for key in loading_info["missing_keys"]: attributes = key.split(".") if attributes[0] == "lm_head": model = prophet old_model = prophet_old else: model = prophet.prophetnet old_model = prophet_old.model is_key_init = False for attribute in attributes: if attribute in mapping: old_attribute = mapping[attribute] if not hasattr(old_model, old_attribute) and len(old_attribute) > 0: old_attribute = attribute elif hasattr(old_model, attribute): old_attribute = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" model.weight = old_model.weight logger.info(f"{attribute} is initialized.") is_key_init = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
model.bias = old_model.bias logger.info(f"{attribute} is initialized") is_key_init = True break elif attribute in special_keys and hasattr(old_model, "in_proj_weight"): embed_dim = old_model.in_proj_weight.shape[0] // 3 param = getattr(model, attribute) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": model.query_proj.weight = torch.nn.Parameter(old_model.in_proj_weight[:embed_dim, :]) model.query_proj.bias = torch.nn.Parameter(old_model.in_proj_bias[:embed_dim]) elif attribute == "key_proj": model.key_proj.weight = torch.nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :]) model.key_proj.bias = torch.nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim]) elif attribute == "value_proj": model.value_proj.weight = torch.nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :]) model.value_proj.bias = torch.nn.Parameter(old_model.in_proj_bias[2 * embed_dim :]) is_key_init = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." model.position_embeddings.weight = torch.nn.Parameter(old_model.embed_positions.weight[:512, :]) is_key_init = True break if attribute.isdigit(): model = model[int(attribute)] old_model = old_model[int(old_attribute)] else: model = getattr(model, attribute) if old_attribute == "": old_model = old_model else: if not hasattr(old_model, old_attribute): raise ValueError(f"{old_model} does not have {old_attribute}") old_model = getattr(old_model, old_attribute) if not is_key_init: raise ValueError(f"{key} was not correctly initialized!") print(f"Saving model to {pytorch_dump_folder_path}") prophet.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
7,181
43.06135
118
py
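The converter above splits the old fused attention projection (in_proj_weight / in_proj_bias) into separate query/key/value projections by taking consecutive embed_dim-sized slices. A minimal torch sketch of that split; the sizes are illustrative, not taken from a ProphetNet checkpoint:

import torch

embed_dim = 8
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)   # fused [q; k; v] projection
in_proj_bias = torch.randn(3 * embed_dim)

query_w = in_proj_weight[:embed_dim, :]                  # first third  -> query_proj
key_w = in_proj_weight[embed_dim : 2 * embed_dim, :]     # second third -> key_proj
value_w = in_proj_weight[2 * embed_dim :, :]             # last third   -> value_proj
query_b, key_b, value_b = in_proj_bias.chunk(3)

print(query_w.shape, key_w.shape, value_w.shape)         # three (8, 8) matrices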
SLT-FAI
SLT-FAI-main/transformers/modeling_flax_utils.py
# coding=utf-8 # Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from abc import ABC, abstractmethod from pickle import UnpicklingError from typing import Dict import flax.linen as nn import jax import jax.numpy as jnp from flax.serialization import to_bytes from flax.traverse_util import unflatten_dict from jax.random import PRNGKey from .configuration_utils import PretrainedConfig from .file_utils import WEIGHTS_NAME, cached_path, hf_bucket_url, is_remote_url from .utils import logging logger = logging.get_logger(__name__) @jax.jit def gelu(x): r"""Gaussian error linear unit activation function. Computes the element-wise function: .. math:: \mathrm{gelu}(x) = \frac{x}{2} \left(1 + \mathrm{tanh} \left( \sqrt{\frac{2}{\pi}} \left(x + 0.044715 x^3 \right) \right) \right) We explicitly use the approximation rather than the exact formulation for speed. For more information, see `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_, section 2. """ return x * 0.5 * (1.0 + jax.lax.erf(x / jnp.sqrt(2.0))) ACT2FN = { "gelu": nn.gelu, "relu": nn.relu, "swish": nn.swish, "gelu_new": gelu, } class FlaxPreTrainedModel(ABC): config_class = None pretrained_model_archive_map = {} base_model_prefix = "" model_class = None def __init__(self, config: PretrainedConfig, module: nn.Module, params: Dict, seed: int = 0): if config is None: raise ValueError("config cannot be None") if module is None: raise ValueError("module cannot be None") if params is None: raise ValueError("state cannot be None") # Those are private to be exposed as typed property on derived classes. self._config = config self._module = module # Those are public as their type is generic to every derived classes. self.key = PRNGKey(seed) self.params = params self.model = module @property def config(self) -> PretrainedConfig: return self._config @staticmethod @abstractmethod def convert_from_pytorch(pt_state: Dict, config: PretrainedConfig) -> Dict: raise NotImplementedError() @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiate a pretrained Flax model from a pre-trained model configuration. 
""" config = kwargs.pop("config", None) # state_dict = kwargs.pop("state_dict", None) cache_dir = kwargs.pop("cache_dir", None) # from_tf = kwargs.pop("from_tf", False) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) # output_loading_info = kwargs.pop("output_loading_info", False) local_files_only = kwargs.pop("local_files_only", False) use_cdn = kwargs.pop("use_cdn", True) # Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, **kwargs, ) else: model_kwargs = kwargs # Load model if pretrained_model_name_or_path is not None: if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path else: archive_file = hf_bucket_url(pretrained_model_name_or_path, filename=WEIGHTS_NAME, use_cdn=use_cdn) # redirect to the cache, if necessary try: resolved_archive_file = cached_path( archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, ) except EnvironmentError: if pretrained_model_name_or_path in cls.pretrained_model_archive_map: msg = f"Couldn't reach server at '{archive_file}' to download pretrained weights." else: msg = ( f"Model name '{pretrained_model_name_or_path}' " f"was not found in model name list ({', '.join(cls.pretrained_model_archive_map.keys())}). " f"We assumed '{archive_file}' was a path or url to model weight files but " "couldn't find any such file at this path or url." ) raise EnvironmentError(msg) if resolved_archive_file == archive_file: logger.info(f"loading weights file {archive_file}") else: logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}") else: resolved_archive_file = None # Instantiate model. with open(resolved_archive_file, "rb") as state_f: try: from flax.serialization import from_bytes state = from_bytes(cls.model_class, state_f) except TypeError: try: import torch state = torch.load(state_f) state = {k: v.numpy() for k, v in state.items()} state = cls.convert_from_pytorch(state, config) state = unflatten_dict({tuple(k.split(".")[1:]): v for k, v in state.items()}) except UnpicklingError: raise EnvironmentError( f"Unable to convert model {archive_file} to Flax deserializable object. " "Supported format are PyTorch archive or Flax msgpack" ) return cls(config, state, *model_args, **model_kwargs) def save_pretrained(self, folder): folder_abs = os.path.abspath(folder) if not os.path.exists(folder_abs): os.mkdir(folder_abs) with open(os.path.join(folder_abs, f"{self._config.model_type}.flax", "wb")) as f: model_bytes = to_bytes(self.params) f.write(model_bytes)
7,330
36.594872
116
py
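The gelu helper above returns the exact erf-based form, while its docstring quotes the tanh approximation; both formulations are standard and numerically close. A dependency-free sketch comparing the two (plain math instead of jax):

import math

def gelu_erf(x):
    # Exact form, matching the expression the function above returns.
    return x * 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

def gelu_tanh(x):
    # Tanh approximation quoted in the docstring above.
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x ** 3)))

for x in (-2.0, -0.5, 0.0, 0.5, 2.0):
    print(f"{x:+.1f}  erf={gelu_erf(x):+.6f}  tanh={gelu_tanh(x):+.6f}")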
SLT-FAI
SLT-FAI-main/transformers/integrations.py
# Integrations with other Python libraries import math import os # Import 3rd-party integrations first: try: # Comet needs to be imported before any ML frameworks import comet_ml # noqa: F401 # XXX: there should be comet_ml.ensure_configured(), like `wandb`, for now emulate it comet_ml.Experiment(project_name="ensure_configured") _has_comet = True except (ImportError, ValueError): _has_comet = False try: import wandb wandb.ensure_configured() if wandb.api.api_key is None: _has_wandb = False wandb.termwarn("W&B installed but not logged in. Run `wandb login` or set the WANDB_API_KEY env variable.") else: _has_wandb = False if os.getenv("WANDB_DISABLED") else True except (ImportError, AttributeError): _has_wandb = False try: import optuna # noqa: F401 _has_optuna = True except (ImportError): _has_optuna = False try: import ray # noqa: F401 _has_ray = True except (ImportError): _has_ray = False try: from torch.utils.tensorboard import SummaryWriter # noqa: F401 _has_tensorboard = True except ImportError: try: from tensorboardX import SummaryWriter # noqa: F401 _has_tensorboard = True except ImportError: _has_tensorboard = False # No transformer imports above this point from .file_utils import is_torch_tpu_available from .trainer_callback import TrainerCallback from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun from .utils import logging logger = logging.get_logger(__name__) # Integration functions: def is_wandb_available(): return _has_wandb def is_comet_available(): return _has_comet def is_tensorboard_available(): return _has_tensorboard def is_optuna_available(): return _has_optuna def is_ray_available(): return _has_ray def default_hp_search_backend(): if is_optuna_available(): return "optuna" elif is_ray_available(): return "ray" def run_hp_search_optuna(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: def _objective(trial, checkpoint_dir=None): model_path = None if checkpoint_dir: for subdir in os.listdir(checkpoint_dir): if subdir.startswith(PREFIX_CHECKPOINT_DIR): model_path = os.path.join(checkpoint_dir, subdir) trainer.objective = None trainer.train(model_path=model_path, trial=trial) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) return trainer.objective timeout = kwargs.pop("timeout", None) n_jobs = kwargs.pop("n_jobs", 1) study = optuna.create_study(direction=direction, **kwargs) study.optimize(_objective, n_trials=n_trials, timeout=timeout, n_jobs=n_jobs) best_trial = study.best_trial return BestRun(str(best_trial.number), best_trial.value, best_trial.params) def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: def _objective(trial, checkpoint_dir=None): model_path = None if checkpoint_dir: for subdir in os.listdir(checkpoint_dir): if subdir.startswith(PREFIX_CHECKPOINT_DIR): model_path = os.path.join(checkpoint_dir, subdir) trainer.objective = None trainer.train(model_path=model_path, trial=trial) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) trainer._tune_save_checkpoint() ray.tune.report(objective=trainer.objective, **metrics, done=True) # The model and TensorBoard writer do not pickle so we have to remove them (if they exists) # while doing the ray hp search. 
_tb_writer = trainer.pop_callback(TensorBoardCallback) trainer.model = None # Setup default `resources_per_trial` and `reporter`. if "resources_per_trial" not in kwargs and trainer.args.n_gpu > 0: # `args.n_gpu` is considered the total number of GPUs that will be split # among the `n_jobs` n_jobs = int(kwargs.pop("n_jobs", 1)) num_gpus_per_trial = trainer.args.n_gpu if num_gpus_per_trial / n_jobs >= 1: num_gpus_per_trial = int(math.ceil(num_gpus_per_trial / n_jobs)) kwargs["resources_per_trial"] = {"gpu": num_gpus_per_trial} if "progress_reporter" not in kwargs: from ray.tune import CLIReporter kwargs["progress_reporter"] = CLIReporter(metric_columns=["objective"]) if "keep_checkpoints_num" in kwargs and kwargs["keep_checkpoints_num"] > 0: # `keep_checkpoints_num=0` would disabled checkpointing trainer.use_tune_checkpoints = True if kwargs["keep_checkpoints_num"] > 1: logger.warning( "Currently keeping {} checkpoints for each trial. Checkpoints are usually huge, " "consider setting `keep_checkpoints_num=1`." ) if "scheduler" in kwargs: from ray.tune.schedulers import ASHAScheduler, HyperBandForBOHB, MedianStoppingRule, PopulationBasedTraining # Check if checkpointing is enabled for PopulationBasedTraining if isinstance(kwargs["scheduler"], PopulationBasedTraining): if not trainer.use_tune_checkpoints: logger.warning( "You are using PopulationBasedTraining but you haven't enabled checkpointing. " "This means your trials will train from scratch everytime they are exploiting " "new configurations. Consider enabling checkpointing by passing " "`keep_checkpoints_num=1` as an additional argument to `Trainer.hyperparameter_search`." ) # Check for `do_eval` and `eval_during_training` for schedulers that require intermediate reporting. if isinstance( kwargs["scheduler"], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining) ) and (not trainer.args.do_eval or not trainer.args.evaluate_during_training): raise RuntimeError( "You are using {cls} as a scheduler but you haven't enabled evaluation during training. " "This means your trials will not report intermediate results to Ray Tune, and " "can thus not be stopped early or used to exploit other trials parameters. " "If this is what you want, do not use {cls}. If you would like to use {cls}, " "make sure you pass `do_eval=True` and `evaluate_during_training=True` in the " "Trainer `args`.".format(cls=type(kwargs["scheduler"]).__name__) ) analysis = ray.tune.run(_objective, config=trainer.hp_space(None), num_samples=n_trials, **kwargs) best_trial = analysis.get_best_trial(metric="objective", mode=direction[:3]) best_run = BestRun(best_trial.trial_id, best_trial.last_result["objective"], best_trial.config) if _tb_writer is not None: trainer.add_callback(_tb_writer) return best_run class TensorBoardCallback(TrainerCallback): """ A :class:`~transformers.TrainerCallback` that sends the logs to `TensorBoard <https://www.tensorflow.org/tensorboard>`__. Args: tb_writer (:obj:`SummaryWriter`, `optional`): The writer to use. Will instantiate one if not set. """ def __init__(self, tb_writer=None): assert ( _has_tensorboard ), "TensorBoardCallback requires tensorboard to be installed. Either update your PyTorch version or install tensorboardX." 
self.tb_writer = tb_writer def on_init_end(self, args, state, control, **kwargs): if self.tb_writer is None and state.is_world_process_zero: self.tb_writer = SummaryWriter(log_dir=args.logging_dir) def on_train_begin(self, args, state, control, **kwargs): if self.tb_writer is not None: self.tb_writer.add_text("args", args.to_json_string()) self.tb_writer.add_hparams(args.to_sanitized_dict(), metric_dict={}) def on_log(self, args, state, control, logs=None, **kwargs): if self.tb_writer: for k, v in logs.items(): if isinstance(v, (int, float)): self.tb_writer.add_scalar(k, v, state.global_step) else: logger.warning( "Trainer is attempting to log a value of " '"%s" of type %s for key "%s" as a scalar. ' "This invocation of Tensorboard's writer.add_scalar() " "is incorrect so we dropped this attribute.", v, type(v), k, ) self.tb_writer.flush() def on_train_end(self, args, state, control, **kwargs): if self.tb_writer: self.tb_writer.close() class WandbCallback(TrainerCallback): """ A :class:`~transformers.TrainerCallback` that sends the logs to `Weight and Biases <https://www.wandb.com/>`__. """ def __init__(self): assert _has_wandb, "WandbCallback requires wandb to be installed. Run `pip install wandb`." self._initialized = False def setup(self, args, state, model): """ Setup the optional Weights & Biases (`wandb`) integration. One can subclass and override this method to customize the setup if needed. Find more information `here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables: Environment: WANDB_WATCH (:obj:`str`, `optional` defaults to :obj:`"gradients"`): Can be :obj:`"gradients"`, :obj:`"all"` or :obj:`"false"`. Set to :obj:`"false"` to disable gradient logging or :obj:`"all"` to log gradients and parameters. WANDB_PROJECT (:obj:`str`, `optional`, defaults to :obj:`"huggingface"`): Set this to a custom string to store results in a different project. WANDB_DISABLED (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to disable wandb entirely. """ self._initialized = True if state.is_world_process_zero: logger.info( 'Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"' ) combined_dict = {**args.to_sanitized_dict()} if getattr(model, "config", None) is not None: combined_dict = {**model.config.to_dict(), **combined_dict} wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=args.run_name) # keep track of model topology and gradients, unsupported on TPU if not is_torch_tpu_available() and os.getenv("WANDB_WATCH") != "false": wandb.watch(model, log=os.getenv("WANDB_WATCH", "gradients"), log_freq=max(100, args.logging_steps)) def on_train_begin(self, args, state, control, model=None, **kwargs): if not self._initialized: self.setup(args, state, model) def on_log(self, args, state, control, model=None, logs=None, **kwargs): if not self._initialized: self.setup(args, state, model) if state.is_world_process_zero: wandb.log(logs, step=state.global_step) class CometCallback(TrainerCallback): """ A :class:`~transformers.TrainerCallback` that sends the logs to `Comet ML <https://www.comet.ml/site/>`__. """ def __init__(self): assert _has_comet, "CometCallback requires comet-ml to be installed. Run `pip install comet-ml`." self._initialized = False def setup(self, args, state, model): """ Setup the optional Comet.ml integration. 
Environment: COMET_MODE (:obj:`str`, `optional`): "OFFLINE", "ONLINE", or "DISABLED" COMET_PROJECT_NAME (:obj:`str`, `optional`): Comet.ml project name for experiments COMET_OFFLINE_DIRECTORY (:obj:`str`, `optional`): Folder to use for saving offline experiments when :obj:`COMET_MODE` is "OFFLINE" For a number of configurable items in the environment, see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__. """ self._initialized = True if state.is_world_process_zero: comet_mode = os.getenv("COMET_MODE", "ONLINE").upper() args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")} experiment = None if comet_mode == "ONLINE": experiment = comet_ml.Experiment(**args) logger.info("Automatic Comet.ml online logging enabled") elif comet_mode == "OFFLINE": args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./") experiment = comet_ml.OfflineExperiment(**args) logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished") if experiment is not None: experiment._set_model_graph(model, framework="transformers") experiment._log_parameters(args, prefix="args/", framework="transformers") if hasattr(model, "config"): experiment._log_parameters(model.config, prefix="config/", framework="transformers") def on_train_begin(self, args, state, control, model=None, **kwargs): if not self._initialized: self.setup(args, state, model) def on_log(self, args, state, control, model=None, logs=None, **kwargs): if not self._initialized: self.setup(args, state, model) if state.is_world_process_zero: experiment = comet_ml.config.get_global_experiment() if experiment is not None: experiment._log_metrics(logs, step=state.global_step, epoch=state.epoch, framework="transformers")
14,363
40.514451
130
py
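The TensorBoard, Weights & Biases, and Comet integrations above are all built on the same TrainerCallback hooks (on_train_begin, on_log, on_train_end). A hedged sketch of a custom callback following that pattern, assuming a transformers build that exposes TrainerCallback; it would be attached via Trainer(..., callbacks=[PrintLogsCallback()]):

from transformers import TrainerCallback

class PrintLogsCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        # Mirror the integrations above: only the main process reports, and only scalar values.
        if state.is_world_process_zero and logs:
            scalars = {k: v for k, v in logs.items() if isinstance(v, (int, float))}
            print(f"step {state.global_step}: {scalars}")

    def on_train_end(self, args, state, control, **kwargs):
        print(f"training finished after {state.global_step} steps")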
SLT-FAI
SLT-FAI-main/transformers/tokenization_t5_fast.py
# coding=utf-8 # Copyright 2018 T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for model T5.""" import os from shutil import copyfile from typing import List, Optional, Tuple from .file_utils import add_start_docstrings, is_sentencepiece_available from .tokenization_utils import BatchEncoding from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING from .tokenization_utils_fast import PreTrainedTokenizerFast from .utils import logging if is_sentencepiece_available(): from .tokenization_t5 import T5Tokenizer else: T5Tokenizer = None logger = logging.get_logger(__name__) #################################################### # Mapping from the keyword arguments names of Tokenizer `__init__` # to file names for serializing Tokenizer instances #################################################### VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} #################################################### # Mapping from the keyword arguments names of Tokenizer `__init__` # to pretrained vocabulary URL for all the model shortcut names. #################################################### PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "t5-small": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-spiece.model", "t5-base": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-spiece.model", "t5-large": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-spiece.model", "t5-3b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-spiece.model", "t5-11b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-spiece.model", }, "tokenizer_file": { "t5-small": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-tokenizer.json", "t5-base": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-tokenizer.json", "t5-large": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-tokenizer.json", "t5-3b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-tokenizer.json", "t5-11b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-tokenizer.json", }, } #################################################### # Mapping from model shortcut names to max length of inputs #################################################### PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "t5-small": 512, "t5-base": 512, "t5-large": 512, "t5-3b": 512, "t5-11b": 512, } class T5TokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" T5 tokenizer (backed by HuggingFace's `tokenizers` library). Based on `SentencePiece <https://github.com/google/sentencepiece>`__ . This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that contains the vocabulary necessary to instantiate a tokenizer. 
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`): The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the :obj:`sep_token`. unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`): The token used for padding, for example when batching sequences of different lengths. extra_ids (:obj:`int`, `optional`, defaults to 100): Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are indexed from the end of the vocabulary up to beginnning ("<extra_id_0>" is the last token in the vocabulary like in T5 preprocessing see `here <https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117>`__). additional_special_tokens (:obj:`List[str]`, `optional`): Additional special tokens used by the tokenizer. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["attention_mask"] slow_tokenizer_class = T5Tokenizer prefix_tokens: List[int] = [] def __init__( self, vocab_file, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs ): super().__init__( vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, ) if extra_ids > 0: all_extra_tokens = ["<extra_id_{}>".format(i) for i in range(extra_ids)] if all(tok not in self.additional_special_tokens for tok in all_extra_tokens): self.additional_special_tokens = self.additional_special_tokens + [ "<extra_id_{}>".format(i) for i in range(extra_ids) ] self.vocab_file = vocab_file self._extra_ids = extra_ids def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format: - single sequence: ``X </s>`` - pair of sequences: ``A </s> B </s>`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. 
""" token_ids_0 = token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return self.prefix_tokens + token_ids_0 else: token_ids_1 = token_ids_1 + [self.eos_token_id] return self.prefix_tokens + token_ids_0 + token_ids_1 @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING) def prepare_seq2seq_batch( self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = None, truncation: bool = True, **kwargs, ) -> BatchEncoding: if max_length is None: max_length = self.max_len self.prefix_tokens = [] model_inputs = self( src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: max_target_length = max_length # set prefix_tokens for target text self.prefix_tokens = [self.pad_token_id] labels_and_decoder_mask = self( tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, ) model_inputs["labels"] = labels_and_decoder_mask["input_ids"] self.prefix_tokens = [] return model_inputs
9,617
40.27897
164
py
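A brief usage sketch (not part of the original file) for the fast T5 tokenizer above; it assumes the `t5-small` checkpoint listed in PRETRAINED_VOCAB_FILES_MAP can be downloaded and that PyTorch is installed for `return_tensors="pt"`.

# Illustrative only: exercises T5TokenizerFast.prepare_seq2seq_batch() exactly as defined above.
from transformers import T5TokenizerFast

tokenizer = T5TokenizerFast.from_pretrained("t5-small")
batch = tokenizer.prepare_seq2seq_batch(
    src_texts=["translate English to German: How old are you?"],
    tgt_texts=["Wie alt bist du?"],
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["labels"].shape)  # encoded source ids and target ids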
SLT-FAI
SLT-FAI-main/transformers/convert_dpr_original_checkpoint_to_pytorch.py
import argparse import collections from pathlib import Path import torch from torch.serialization import default_restore_location from transformers import BertConfig, DPRConfig, DPRContextEncoder, DPRQuestionEncoder, DPRReader CheckpointState = collections.namedtuple( "CheckpointState", ["model_dict", "optimizer_dict", "scheduler_dict", "offset", "epoch", "encoder_params"] ) def load_states_from_checkpoint(model_file: str) -> CheckpointState: print("Reading saved model from %s", model_file) state_dict = torch.load(model_file, map_location=lambda s, l: default_restore_location(s, "cpu")) return CheckpointState(**state_dict) class DPRState: def __init__(self, src_file: Path): self.src_file = src_file def load_dpr_model(self): raise NotImplementedError @staticmethod def from_type(comp_type: str, *args, **kwargs) -> "DPRState": if comp_type.startswith("c"): return DPRContextEncoderState(*args, **kwargs) if comp_type.startswith("q"): return DPRQuestionEncoderState(*args, **kwargs) if comp_type.startswith("r"): return DPRReaderState(*args, **kwargs) else: raise ValueError("Component type must be either 'ctx_encoder', 'question_encoder' or 'reader'.") class DPRContextEncoderState(DPRState): def load_dpr_model(self): model = DPRContextEncoder(DPRConfig(**BertConfig.get_config_dict("bert-base-uncased")[0])) print("Loading DPR biencoder from {}".format(self.src_file)) saved_state = load_states_from_checkpoint(self.src_file) encoder, prefix = model.ctx_encoder, "ctx_model." # Fix changes from https://github.com/huggingface/transformers/commit/614fef1691edb806de976756d4948ecbcd0c0ca3 state_dict = {"bert_model.embeddings.position_ids": model.ctx_encoder.bert_model.embeddings.position_ids} for key, value in saved_state.model_dict.items(): if key.startswith(prefix): key = key[len(prefix) :] if not key.startswith("encode_proj."): key = "bert_model." + key state_dict[key] = value encoder.load_state_dict(state_dict) return model class DPRQuestionEncoderState(DPRState): def load_dpr_model(self): model = DPRQuestionEncoder(DPRConfig(**BertConfig.get_config_dict("bert-base-uncased")[0])) print("Loading DPR biencoder from {}".format(self.src_file)) saved_state = load_states_from_checkpoint(self.src_file) encoder, prefix = model.question_encoder, "question_model." # Fix changes from https://github.com/huggingface/transformers/commit/614fef1691edb806de976756d4948ecbcd0c0ca3 state_dict = {"bert_model.embeddings.position_ids": model.question_encoder.bert_model.embeddings.position_ids} for key, value in saved_state.model_dict.items(): if key.startswith(prefix): key = key[len(prefix) :] if not key.startswith("encode_proj."): key = "bert_model." + key state_dict[key] = value encoder.load_state_dict(state_dict) return model class DPRReaderState(DPRState): def load_dpr_model(self): model = DPRReader(DPRConfig(**BertConfig.get_config_dict("bert-base-uncased")[0])) print("Loading DPR reader from {}".format(self.src_file)) saved_state = load_states_from_checkpoint(self.src_file) # Fix changes from https://github.com/huggingface/transformers/commit/614fef1691edb806de976756d4948ecbcd0c0ca3 state_dict = { "encoder.bert_model.embeddings.position_ids": model.span_predictor.encoder.bert_model.embeddings.position_ids } for key, value in saved_state.model_dict.items(): if key.startswith("encoder.") and not key.startswith("encoder.encode_proj"): key = "encoder.bert_model." 
+ key[len("encoder.") :] state_dict[key] = value model.span_predictor.load_state_dict(state_dict) return model def convert(comp_type: str, src_file: Path, dest_dir: Path): dest_dir = Path(dest_dir) dest_dir.mkdir(exist_ok=True) dpr_state = DPRState.from_type(comp_type, src_file=src_file) model = dpr_state.load_dpr_model() model.save_pretrained(dest_dir) model.from_pretrained(dest_dir) # sanity check if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--type", type=str, help="Type of the component to convert: 'ctx_encoder', 'question_encoder' or 'reader'." ) parser.add_argument( "--src", type=str, help="Path to the dpr checkpoint file. They can be downloaded from the official DPR repo https://github.com/facebookresearch/DPR. Note that in the official repo, both encoders are stored in the 'retriever' checkpoints.", ) parser.add_argument("--dest", type=str, default=None, help="Path to the output PyTorch model directory.") args = parser.parse_args() src_file = Path(args.src) dest_dir = f"converted-{src_file.name}" if args.dest is None else args.dest dest_dir = Path(dest_dir) assert src_file.exists() assert ( args.type is not None ), "Please specify the component type of the DPR model to convert: 'ctx_encoder', 'question_encoder' or 'reader'." convert(args.type, src_file, dest_dir)
5,460
42.34127
228
py
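A hedged sketch (not in the original script) of driving the DPR converter above from Python instead of the command line; the import path assumes this repo's copy of the script is importable, and the checkpoint filename is a placeholder for a file downloaded from the official DPR repository.

# Illustrative only: equivalent to running the script with --type ctx_encoder --src ... --dest ...
from pathlib import Path

from transformers.convert_dpr_original_checkpoint_to_pytorch import convert  # assumption: repo is on sys.path

convert("ctx_encoder", Path("dpr_biencoder.cp"), Path("converted-ctx_encoder"))  # placeholder paths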
SLT-FAI
SLT-FAI-main/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Note: if you intend to run this script make sure you look under scripts/fsmt/ # to locate the appropriate script to do the work correctly. There is a set of scripts to: # - download and prepare data and run the conversion script # - perform eval to get the best hparam into the config # - generate model_cards - useful if you have multiple models from the same paper import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import WEIGHTS_NAME, logging from transformers.configuration_fsmt import FSMTConfig from transformers.modeling_fsmt import FSMTForConditionalGeneration from transformers.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE logging.set_verbosity_warning() json_indent = 2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model best_score_hparams = { # fairseq: "wmt19-ru-en": {"length_penalty": 1.1}, "wmt19-en-ru": {"length_penalty": 1.15}, "wmt19-en-de": {"length_penalty": 1.0}, "wmt19-de-en": {"length_penalty": 1.1}, # allenai: "wmt16-en-de-dist-12-1": {"length_penalty": 0.6}, "wmt16-en-de-dist-6-1": {"length_penalty": 0.6}, "wmt16-en-de-12-1": {"length_penalty": 0.8}, "wmt19-de-en-6-6-base": {"length_penalty": 0.6}, "wmt19-de-en-6-6-big": {"length_penalty": 0.6}, } # this remaps the different models to their organization names org_names = {} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: org_names[m] = "facebook" for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: org_names[m] = "allenai" def rewrite_dict_keys(d): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()) keep_keys = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del d2[f"{k}</w>"] d2[k] = d[k] # restore return d2 def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path): # prep assert os.path.exists(fsmt_checkpoint_path) os.makedirs(pytorch_dump_folder_path, exist_ok=True) print(f"Writing results to {pytorch_dump_folder_path}") # handle various types of models checkpoint_file = 
basename(fsmt_checkpoint_path) fsmt_folder_path = dirname(fsmt_checkpoint_path) cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel models = cls.hub_models() kwargs = {"bpe": "fastbpe", "tokenizer": "moses"} data_name_or_path = "." # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. # see: upgrade_state_dict(state_dict) in fairseq_model.py print(f"using checkpoint {checkpoint_file}") chkpt = hub_utils.from_pretrained( fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs ) args = dict(vars(chkpt["args"])) src_lang = args["source_lang"] tgt_lang = args["target_lang"] data_root = dirname(pytorch_dump_folder_path) model_dir = basename(pytorch_dump_folder_path) # dicts src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt") tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt") src_dict = Dictionary.load(src_dict_file) src_vocab = rewrite_dict_keys(src_dict.indices) src_vocab_size = len(src_vocab) src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json") print(f"Generating {src_vocab_file}") with open(src_vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent)) tgt_dict = Dictionary.load(tgt_dict_file) tgt_vocab = rewrite_dict_keys(tgt_dict.indices) tgt_vocab_size = len(tgt_vocab) tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json") print(f"Generating {tgt_vocab_file}") with open(tgt_vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent)) # merges_file (bpecodes) merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"]) fsmt_merges_file = os.path.join(fsmt_folder_path, "bpecodes") with open(fsmt_merges_file, encoding="utf-8") as fin: merges = fin.read() merges = re.sub(r" \d+$", "", merges, 0, re.M) # remove frequency number print(f"Generating {merges_file}") with open(merges_file, "w", encoding="utf-8") as fout: fout.write(merges) # model config fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json") # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}" assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}" model_conf = { "architectures": ["FSMTForConditionalGeneration"], "model_type": "fsmt", "activation_dropout": args["activation_dropout"], "activation_function": "relu", "attention_dropout": args["attention_dropout"], "d_model": args["decoder_embed_dim"], "dropout": args["dropout"], "init_std": 0.02, "max_position_embeddings": args["max_source_positions"], "num_hidden_layers": args["encoder_layers"], "src_vocab_size": src_vocab_size, "tgt_vocab_size": tgt_vocab_size, "langs": [src_lang, tgt_lang], "encoder_attention_heads": args["encoder_attention_heads"], "encoder_ffn_dim": args["encoder_ffn_embed_dim"], "encoder_layerdrop": args["encoder_layerdrop"], "encoder_layers": args["encoder_layers"], "decoder_attention_heads": args["decoder_attention_heads"], "decoder_ffn_dim": args["decoder_ffn_embed_dim"], "decoder_layerdrop": args["decoder_layerdrop"], "decoder_layers": args["decoder_layers"], "bos_token_id": 
0, "pad_token_id": 1, "eos_token_id": 2, "is_encoder_decoder": True, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_all_embeddings"], } # good hparam defaults to start with model_conf["num_beams"] = 5 model_conf["early_stopping"] = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"] else: model_conf["length_penalty"] = 1.0 print(f"Generating {fsmt_model_config_file}") with open(fsmt_model_config_file, "w", encoding="utf-8") as f: f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent)) # tokenizer config fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE) tokenizer_conf = { "langs": [src_lang, tgt_lang], "model_max_length": 1024, } print(f"Generating {fsmt_tokenizer_config_file}") with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f: f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent)) # model model = chkpt["models"][0] model_state_dict = model.state_dict() # rename keys to start with 'model.' model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items()) # remove unneeded keys ignore_keys = [ "model.model", "model.encoder.version", "model.decoder.version", "model.encoder_embed_tokens.weight", "model.decoder_embed_tokens.weight", "model.encoder.embed_positions._float_tensor", "model.decoder.embed_positions._float_tensor", ] for k in ignore_keys: model_state_dict.pop(k, None) config = FSMTConfig.from_pretrained(pytorch_dump_folder_path) model_new = FSMTForConditionalGeneration(config) # check that it loads ok model_new.load_state_dict(model_state_dict, strict=False) # save pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME) print(f"Generating {pytorch_weights_dump_path}") torch.save(model_state_dict, pytorch_weights_dump_path) print("Conversion is done!") print("\nLast step is to upload the files to s3") print(f"cd {data_root}") print(f"transformers-cli upload {model_dir}") print( "Note: CDN caches files for up to 24h, so either use a local model path " "or use `from_pretrained(mname, use_cdn=False)` to use the non-cached version." ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--fsmt_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts, bpecodes, etc.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
10,885
39.022059
131
py
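A hedged follow-up sketch (not in the original script): once convert_fsmt_checkpoint_to_pytorch() has written a dump folder, it can be loaded back with the regular transformers API. The folder path below is a placeholder for such a conversion output.

# Illustrative only: loads a converted FSMT dump and runs a quick generation round-trip.
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

model_dir = "data/wmt19-en-ru"  # placeholder: output directory of convert_fsmt_checkpoint_to_pytorch()
tokenizer = FSMTTokenizer.from_pretrained(model_dir)
model = FSMTForConditionalGeneration.from_pretrained(model_dir)
input_ids = tokenizer("Machine learning is great", return_tensors="pt").input_ids
print(tokenizer.decode(model.generate(input_ids)[0], skip_special_tokens=True))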
SLT-FAI
SLT-FAI-main/transformers/tokenization_distilbert.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for DistilBERT.""" from .tokenization_bert import BertTokenizer from .utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "distilbert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", "distilbert-base-uncased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", "distilbert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", "distilbert-base-cased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", "distilbert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-vocab.txt", "distilbert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "distilbert-base-uncased": 512, "distilbert-base-uncased-distilled-squad": 512, "distilbert-base-cased": 512, "distilbert-base-cased-distilled-squad": 512, "distilbert-base-german-cased": 512, "distilbert-base-multilingual-cased": 512, } PRETRAINED_INIT_CONFIGURATION = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class DistilBertTokenizer(BertTokenizer): r""" Construct a DistilBERT tokenizer. :class:`~transformers.DistilBertTokenizer` is identical to :class:`~transformers.BertTokenizer` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION model_input_names = ["attention_mask"]
3,050
41.375
139
py
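A short sketch (not in the original file) of the tokenizer above; since DistilBertTokenizer is identical to BertTokenizer, the uncased checkpoints behave like plain lower-cased WordPiece.

# Illustrative only: the checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP above.
from transformers import DistilBertTokenizer

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
print(tokenizer.tokenize("Hello, World!"))  # e.g. ['hello', ',', 'world', '!']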
SLT-FAI
SLT-FAI-main/transformers/tokenization_deberta.py
# coding=utf-8
# Copyright 2020 Microsoft and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model DeBERTa."""

import os
import pathlib
import random
import unicodedata
from functools import lru_cache
from typing import Optional, Tuple
from zipfile import ZipFile

import requests
from tqdm import tqdm

from .tokenization_utils import PreTrainedTokenizer
from .utils import logging


try:
    import regex as re
except ImportError:
    raise ImportError("Please install regex with: pip install regex")


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "bpe_encoder.bin"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/deberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/microsoft/deberta-base/bpe_encoder.bin",
        "microsoft/deberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/microsoft/deberta-large/bpe_encoder.bin",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/deberta-base": 512,
    "microsoft/deberta-large": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/deberta-base": {"do_lower_case": False},
    "microsoft/deberta-large": {"do_lower_case": False},
}

__all__ = ["DebertaTokenizer"]


@lru_cache()
def bytes_to_unicode():
    """
    Returns a list of utf-8 bytes and a corresponding list of unicode strings. The reversible bpe codes work on
    unicode strings. This means you need a large number of unicode characters in your vocab if you want to avoid
    UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is
    a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8
    bytes and unicode strings. It also avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2 ** 8):
        if b not in bs:
            bs.append(b)
            cs.append(2 ** 8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
""" pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class Encoder: def __init__(self, encoder, bpe_merges, errors="replace"): self.encoder = encoder self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} self.bpe_ranks = dict(zip([tuple(k) for k in bpe_merges], range(len(bpe_merges)))) self.cache = {} self.random = random.Random(0) # Should haved added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except Exception: new_word.extend(word[i:]) break if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word def split_to_words(self, text): return list(re.findall(self.pat, text)) def encode(self, text): bpe_tokens = [] for token in self.split_to_words(text): token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")) return bpe_tokens def decode(self, tokens): text = "".join([self.decoder[token] for token in tokens]) text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) return text def get_encoder(encoder, vocab): return Encoder( encoder=encoder, bpe_merges=vocab, ) def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically contorl characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. 
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False def download_asset(name, tag=None, no_cache=False, cache_dir=None): _tag = tag if _tag is None: _tag = "latest" if not cache_dir: cache_dir = os.path.join(pathlib.Path.home(), f".~DeBERTa/assets/{_tag}/") os.makedirs(cache_dir, exist_ok=True) output = os.path.join(cache_dir, name) if os.path.exists(output) and (not no_cache): return output repo = "https://api.github.com/repos/microsoft/DeBERTa/releases" releases = requests.get(repo).json() if tag and tag != "latest": release = [r for r in releases if r["name"].lower() == tag.lower()] if len(release) != 1: raise Exception(f"{tag} can't be found in the repository.") else: release = releases[0] asset = [s for s in release["assets"] if s["name"].lower() == name.lower()] if len(asset) != 1: raise Exception(f"{name} can't be found in the release.") url = asset[0]["url"] headers = {} headers["Accept"] = "application/octet-stream" resp = requests.get(url, stream=True, headers=headers) if resp.status_code != 200: raise Exception(f"Request for {url} return {resp.status_code}, {resp.text}") try: with open(output, "wb") as fs: progress = tqdm( total=int(resp.headers["Content-Length"]) if "Content-Length" in resp.headers else -1, ncols=80, desc=f"Downloading {name}", ) for c in resp.iter_content(chunk_size=1024 * 1024): fs.write(c) progress.update(len(c)) progress.close() except Exception: os.remove(output) raise return output def load_vocab(name=None, tag=None, no_cache=False, cache_dir=None): import torch if name is None: name = "bpe_encoder" model_path = name if model_path and (not os.path.exists(model_path)) and not (("/" in model_path) or ("\\" in model_path)): _tag = tag if _tag is None: _tag = "latest" if not cache_dir: cache_dir = os.path.join(pathlib.Path.home(), f".~DeBERTa/assets/{_tag}/") os.makedirs(cache_dir, exist_ok=True) out_dir = os.path.join(cache_dir, name) model_path = os.path.join(out_dir, "bpe_encoder.bin") if (not os.path.exists(model_path)) or no_cache: asset = download_asset(name + ".zip", tag=tag, no_cache=no_cache, cache_dir=cache_dir) with ZipFile(asset, "r") as zipf: for zip_info in zipf.infolist(): if zip_info.filename[-1] == "/": continue zip_info.filename = os.path.basename(zip_info.filename) zipf.extract(zip_info, out_dir) elif not model_path: return None, None encoder_state = torch.load(model_path) return encoder_state class GPT2Tokenizer(object): """ A wrapper of GPT2 tokenizer with similar interface as BERT tokenizer Args: vocab_file (:obj:`str`, optional): The local path of vocabulary package or the release name of vocabulary in `DeBERTa GitHub releases <https://github.com/microsoft/DeBERTa/releases>`_, \ e.g. "bpe_encoder", default: `None`. If it's `None`, then it will download the vocabulary in the latest release from GitHub. The vocabulary file is a \ state dictionary with three items, "dict_map", "vocab", "encoder" which correspond to three files used in `RoBERTa`, i.e. `dict.txt`, `vocab.txt` and `encoder.json`. \ The difference between our wrapped GPT2 tokenizer and RoBERTa wrapped tokenizer are, - Special tokens, unlike `RoBERTa` which use `<s>`, `</s>` as the `start` token and `end` token of a sentence. We use `[CLS]` and `[SEP]` as the `start` and `end`\ token of input sentence which is the same as `BERT`. 
- We remapped the token ids in our dictionary with regarding to the new special tokens, `[PAD]` => 0, `[CLS]` => 1, `[SEP]` => 2, `[UNK]` => 3, `[MASK]` => 50264 do_lower_case (:obj:`bool`, optional): Whether to convert inputs to lower case. **Not used in GPT2 tokenizer**. special_tokens (:obj:`list`, optional): List of special tokens to be added to the end of the vocabulary. """ def __init__(self, vocab_file=None, do_lower_case=True, special_tokens=None): self.pad_token = "[PAD]" self.sep_token = "[SEP]" self.unk_token = "[UNK]" self.cls_token = "[CLS]" self.symbols = [] self.count = [] self.indices = {} self.pad_token_id = self.add_symbol(self.pad_token) self.cls_token_id = self.add_symbol(self.cls_token) self.sep_token_id = self.add_symbol(self.sep_token) self.unk_token_id = self.add_symbol(self.unk_token) self.gpt2_encoder = load_vocab(vocab_file) self.bpe = get_encoder(self.gpt2_encoder["encoder"], self.gpt2_encoder["vocab"]) for w, n in self.gpt2_encoder["dict_map"]: self.add_symbol(w, n) self.mask_token = "[MASK]" self.mask_id = self.add_symbol(self.mask_token) self.special_tokens = ["[MASK]", "[SEP]", "[PAD]", "[UNK]", "[CLS]"] if special_tokens is not None: for t in special_tokens: self.add_special_token(t) self.vocab = self.indices self.ids_to_tokens = self.symbols def tokenize(self, text): """Convert an input text to tokens. Args: text (:obj:`str`): input text to be tokenized. Returns: A list of byte tokens where each token represent the byte id in GPT2 byte dictionary Example:: >>> tokenizer = GPT2Tokenizer() >>> text = "Hello world!" >>> tokens = tokenizer.tokenize(text) >>> print(tokens) ['15496', '995', '0'] """ bpe = self._encode(text) return [t for t in bpe.split(" ") if t] def convert_tokens_to_ids(self, tokens): """Convert list of tokens to ids. Args: tokens (:obj:`list<str>`): list of tokens Returns: List of ids """ return [self.vocab[t] for t in tokens] def convert_ids_to_tokens(self, ids): """Convert list of ids to tokens. Args: ids (:obj:`list<int>`): list of ids Returns: List of tokens """ tokens = [] for i in ids: tokens.append(self.ids_to_tokens[i]) return tokens def split_to_words(self, text): return self.bpe.split_to_words(text) def decode(self, tokens): """Decode list of tokens to text strings. Args: tokens (:obj:`list<str>`): list of tokens. Returns: Text string corresponds to the input tokens. Example:: >>> tokenizer = GPT2Tokenizer() >>> text = "Hello world!" >>> tokens = tokenizer.tokenize(text) >>> print(tokens) ['15496', '995', '0'] >>> tokenizer.decode(tokens) 'Hello world!' """ return self.bpe.decode([int(t) for t in tokens if t not in self.special_tokens]) def add_special_token(self, token): """Adds a special token to the dictionary. Args: token (:obj:`str`): Tthe new token/word to be added to the vocabulary. Returns: The id of new token in the vocabulary. """ self.special_tokens.append(token) return self.add_symbol(token) def part_of_whole_word(self, token, is_bos=False): if is_bos: return True s = self._decode(token) if len(s) == 1 and (_is_whitespace(list(s)[0]) or _is_control(list(s)[0]) or _is_punctuation(list(s)[0])): return False return not s.startswith(" ") def sym(self, id): return self.ids_to_tokens[id] def id(self, sym): return self.vocab[sym] def _encode(self, x: str) -> str: return " ".join(map(str, self.bpe.encode(x))) def _decode(self, x: str) -> str: return self.bpe.decode(map(int, x.split())) def add_symbol(self, word, n=1): """Adds a word to the dictionary. Args: word (:obj:`str`): Tthe new token/word to be added to the vocabulary. 
n (int, optional): The frequency of the word. Returns: The id of the new word. """ if word in self.indices: idx = self.indices[word] self.count[idx] = self.count[idx] + n return idx else: idx = len(self.symbols) self.indices[word] = idx self.symbols.append(word) self.count.append(n) return idx def save_pretrained(self, path: str, filename_prefix: str = None): import torch filename = VOCAB_FILES_NAMES[list(VOCAB_FILES_NAMES.keys())[0]] if filename_prefix is not None: filename = filename_prefix + "-" + filename full_path = os.path.join(path, filename) torch.save(self.gpt2_encoder, full_path) return (full_path,) class DebertaTokenizer(PreTrainedTokenizer): r""" Constructs a DeBERTa tokenizer, which runs end-to-end tokenization: punctuation splitting + wordpiece Args: vocab_file (:obj:`str`): File containing the vocabulary. do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to lowercase the input when tokenizing. unk_token (:obj:`str`, `optional`, defaults to :obj:`"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (:obj:`str`, `optional`, defaults to :obj:`"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (:obj:`str`, `optional`, defaults to :obj:`"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=False, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs ): super().__init__( unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, ) if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained " "model use `tokenizer = XxxTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file) ) self.do_lower_case = do_lower_case self.gpt2_tokenizer = GPT2Tokenizer(vocab_file) @property def vocab_size(self): return len(self.vocab) @property def vocab(self): return self.gpt2_tokenizer.vocab def get_vocab(self): vocab = self.vocab.copy() vocab.update(self.get_added_vocab()) return vocab def _tokenize(self, text): """Take as input a string and return a list of strings (tokens) for words/sub-words""" if self.do_lower_case: text = text.lower() return self.gpt2_tokenizer.tokenize(text) def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. 
""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.gpt2_tokenizer.sym(index) if index < self.vocab_size else self.unk_token def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ return self.gpt2_tokenizer.decode(tokens) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: [CLS] X [SEP] - pair of sequences: [CLS] A [SEP] B [SEP] Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods. Args: token_ids_0: list of ids (must not contain special tokens) token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids for sequence pairs already_has_special_tokens: (default False) Set to True if the token list is already formated with special tokens for the model Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formated with special tokens for the model." ) return list( map( lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0, ) ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 | first sequence | second sequence if token_ids_1 is None, only returns the first portion of the mask (0's). ~ Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given sequence(s). 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", False) if is_split_into_words or add_prefix_space: text = " " + text return (text, kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: return self.gpt2_tokenizer.save_pretrained(save_directory, filename_prefix=filename_prefix)
24,305
35.605422
177
py
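A hedged usage sketch (not in the original file) of DebertaTokenizer as defined above; the checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP and the first call downloads bpe_encoder.bin.

# Illustrative only: tokens are GPT-2 byte-level ids rendered as strings (see the GPT2Tokenizer docstring above).
from transformers import DebertaTokenizer

tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
tokens = tokenizer.tokenize("Hello world!")              # e.g. ['15496', '995', '0']
ids = tokenizer.convert_tokens_to_ids(tokens)            # remapped ids from the DeBERTa vocab
print(tokenizer.convert_tokens_to_string(tokens))        # 'Hello world!'
print(tokenizer.build_inputs_with_special_tokens(ids))   # [CLS] ... [SEP] wrapped around the ids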
SLT-FAI
SLT-FAI-main/transformers/convert_xlm_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert XLM checkpoint."""


import argparse
import json

import numpy
import torch

from transformers import CONFIG_NAME, WEIGHTS_NAME
from transformers.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import logging


logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = dict((n, v) for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray)))

    vocab = chkpt["dico_word2id"]
    vocab = dict((s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""), i) for s, i in vocab.items())

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print("Save configuration file to {}".format(pytorch_config_dump_path))
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print("Save vocab file to {}".format(pytorch_vocab_dump_path))
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
2,983
36.3
117
py
SLT-FAI
SLT-FAI-main/transformers/tokenization_funnel.py
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for Funnel Transformer.""" from typing import List, Optional from .tokenization_bert import BertTokenizer from .utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} _model_names = [ "small", "small-base", "medium", "medium-base", "intermediate", "intermediate-base", "large", "large-base", "xlarge", "xlarge-base", ] PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "funnel-transformer/small": "https://s3.amazonaws.com/models.huggingface.co/bert/funnel-transformer/small/vocab.txt", "funnel-transformer/small-base": "https://s3.amazonaws.com/models.huggingface.co/bert/funnel-transformer/small-base/vocab.txt", "funnel-transformer/medium": "https://s3.amazonaws.com/models.huggingface.co/bert/funnel-transformer/medium/vocab.txt", "funnel-transformer/medium-base": "https://s3.amazonaws.com/models.huggingface.co/bert/funnel-transformer/medium-base/vocab.txt", "funnel-transformer/intermediate": "https://s3.amazonaws.com/models.huggingface.co/bert/funnel-transformer/intermediate/vocab.txt", "funnel-transformer/intermediate-base": "https://s3.amazonaws.com/models.huggingface.co/bert/funnel-transformer/intermediate-base/vocab.txt", "funnel-transformer/large": "https://s3.amazonaws.com/models.huggingface.co/bert/funnel-transformer/large/vocab.txt", "funnel-transformer/large-base": "https://s3.amazonaws.com/models.huggingface.co/bert/funnel-transformer/large-base/vocab.txt", "funnel-transformer/xlarge": "https://s3.amazonaws.com/models.huggingface.co/bert/funnel-transformer/xlarge/vocab.txt", "funnel-transformer/xlarge-base": "https://s3.amazonaws.com/models.huggingface.co/bert/funnel-transformer/xlarge-base/vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names} PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names} class FunnelTokenizer(BertTokenizer): r""" Construct a Funnel Transformer tokenizer. :class:`~transformers.FunnelTokenizer` is identical to :class:`~transformers.BertTokenizer` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning parameters. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION cls_token_type_id: int = 2 def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", tokenize_chinese_chars=True, strip_accents=None, **kwargs ): super().__init__( vocab_file, do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Funnel Transformer sequence pair mask has the following format: :: 2 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
5,526
39.050725
149
py
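A small sketch (not in the original file) of the Funnel-specific token type ids implemented above, where the <cls> position is assigned cls_token_type_id = 2 instead of 0; it assumes each word below maps to a single WordPiece token.

# Illustrative only: checkpoint name taken from PRETRAINED_VOCAB_FILES_MAP above.
from transformers import FunnelTokenizer

tokenizer = FunnelTokenizer.from_pretrained("funnel-transformer/small")
ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("how are you"))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("fine thanks"))
print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))
# -> [2, 0, 0, 0, 0, 1, 1, 1]: one 2 for <cls>, zeros for sentence A + <sep>, ones for sentence B + <sep>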
SLT-FAI
SLT-FAI-main/transformers/configuration_encoder_decoder.py
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from .configuration_utils import PretrainedConfig from .utils import logging logger = logging.get_logger(__name__) class EncoderDecoderConfig(PretrainedConfig): r""" :class:`~transformers.EncoderDecoderConfig` is the configuration class to store the configuration of a :class:`~transformers.EncoderDecoderModel`. It is used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder configs. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: kwargs (`optional`): Dictionary of keyword arguments. Notably: - **encoder** (:class:`~transformers.PretrainedConfig`, `optional`) -- An instance of a configuration object that defines the encoder config. - **decoder** (:class:`~transformers.PretrainedConfig`, `optional`) -- An instance of a configuration object that defines the decoder config. Examples:: >>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel >>> # Initializing a BERT bert-base-uncased style configuration >>> config_encoder = BertConfig() >>> config_decoder = BertConfig() >>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) >>> # Initializing a Bert2Bert model from the bert-base-uncased style configurations >>> model = EncoderDecoderModel(config=config) >>> # Accessing the model configuration >>> config_encoder = model.config.encoder >>> config_decoder = model.config.decoder >>> # set decoder config to causal lm >>> config_decoder.is_decoder = True >>> config_decoder.add_cross_attention = True >>> # Saving the model, including its configuration >>> model.save_pretrained('my-model') >>> # loading model and config from pretrained folder >>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained('my-model') >>> model = EncoderDecoderModel.from_pretrained('my-model', config=encoder_decoder_config) """ model_type = "encoder_decoder" def __init__(self, **kwargs): super().__init__(**kwargs) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" encoder_config = kwargs.pop("encoder") encoder_model_type = encoder_config.pop("model_type") decoder_config = kwargs.pop("decoder") decoder_model_type = decoder_config.pop("model_type") from .configuration_auto import AutoConfig self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config) self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config) self.is_encoder_decoder = True @classmethod def from_encoder_decoder_configs( cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs ) -> PretrainedConfig: r""" Instantiate a :class:`~transformers.EncoderDecoderConfig` (or a derived class) from 
        a pre-trained encoder model configuration and decoder model configuration.

        Returns:
            :class:`EncoderDecoderConfig`: An instance of a configuration object
        """
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Overrides the default `to_dict()` from `PretrainedConfig`.

        Returns:
            :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
5,002
41.398305
166
py
SLT-FAI
SLT-FAI-main/transformers/modeling_dpr.py
# coding=utf-8 # Copyright 2018 DPR Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch DPR model for Open Domain Question Answering.""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from torch import Tensor, nn from .configuration_dpr import DPRConfig from .file_utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings from .modeling_bert import BertModel from .modeling_outputs import BaseModelOutputWithPooling from .modeling_utils import PreTrainedModel from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "DPRConfig" DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/dpr-ctx_encoder-single-nq-base", "facebook/dpr-ctx_encoder-multiset-base", ] DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/dpr-question_encoder-single-nq-base", "facebook/dpr-question_encoder-multiset-base", ] DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/dpr-reader-single-nq-base", "facebook/dpr-reader-multiset-base", ] ########## # Outputs ########## @dataclass class DPRContextEncoderOutput(ModelOutput): """ Class for outputs of :class:`~transformers.DPRQuestionEncoder`. Args: pooler_output: (:obj:``torch.FloatTensor`` of shape ``(batch_size, embeddings_size)``): The DPR encoder outputs the `pooler_output` that corresponds to the context representation. Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer. This output is to be used to embed contexts for nearest neighbors queries with questions embeddings. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ pooler_output: torch.FloatTensor hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class DPRQuestionEncoderOutput(ModelOutput): """ Class for outputs of :class:`~transformers.DPRQuestionEncoder`. Args: pooler_output: (:obj:``torch.FloatTensor`` of shape ``(batch_size, embeddings_size)``): The DPR encoder outputs the `pooler_output` that corresponds to the question representation. Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer. 
This output is to be used to embed questions for nearest neighbors queries with context embeddings. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ pooler_output: torch.FloatTensor hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class DPRReaderOutput(ModelOutput): """ Class for outputs of :class:`~transformers.DPRQuestionEncoder`. Args: start_logits: (:obj:``torch.FloatTensor`` of shape ``(n_passages, sequence_length)``): Logits of the start index of the span for each passage. end_logits: (:obj:``torch.FloatTensor`` of shape ``(n_passages, sequence_length)``): Logits of the end index of the span for each passage. relevance_logits: (:obj:`torch.FloatTensor`` of shape ``(n_passages, )``): Outputs of the QA classifier of the DPRReader that corresponds to the scores of each passage to answer the question, compared to all the other passages. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" start_logits: torch.FloatTensor end_logits: torch.FloatTensor = None relevance_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None class DPREncoder(PreTrainedModel): base_model_prefix = "bert_model" def __init__(self, config: DPRConfig): super().__init__(config) self.bert_model = BertModel(config) assert self.bert_model.config.hidden_size > 0, "Encoder hidden_size can't be zero" self.projection_dim = config.projection_dim if self.projection_dim > 0: self.encode_proj = nn.Linear(self.bert_model.config.hidden_size, config.projection_dim) self.init_weights() def forward( self, input_ids: Tensor, attention_mask: Optional[Tensor] = None, token_type_ids: Optional[Tensor] = None, inputs_embeds: Optional[Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = False, ) -> Union[BaseModelOutputWithPooling, Tuple[Tensor, ...]]: outputs = self.bert_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output, pooled_output = outputs[:2] pooled_output = sequence_output[:, 0, :] if self.projection_dim > 0: pooled_output = self.encode_proj(pooled_output) if not return_dict: return (sequence_output, pooled_output) + outputs[2:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @property def embeddings_size(self) -> int: if self.projection_dim > 0: return self.encode_proj.out_features return self.bert_model.config.hidden_size def init_weights(self): self.bert_model.init_weights() if self.projection_dim > 0: self.encode_proj.apply(self.bert_model._init_weights) class DPRSpanPredictor(PreTrainedModel): base_model_prefix = "encoder" def __init__(self, config: DPRConfig): super().__init__(config) self.encoder = DPREncoder(config) self.qa_outputs = nn.Linear(self.encoder.embeddings_size, 2) self.qa_classifier = nn.Linear(self.encoder.embeddings_size, 1) self.init_weights() def forward( self, input_ids: Tensor, attention_mask: Tensor, inputs_embeds: Optional[Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = False, ) -> Union[DPRReaderOutput, Tuple[Tensor, ...]]: # notations: N - number of questions in a batch, M - number of passages per questions, L - sequence length n_passages, sequence_length = input_ids.size() if input_ids is not None else inputs_embeds.size()[:2] # feed encoder outputs = self.encoder( input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] # compute logits logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) relevance_logits = self.qa_classifier(sequence_output[:, 0, :]) # resize start_logits = start_logits.view(n_passages, sequence_length) end_logits = end_logits.view(n_passages, sequence_length) relevance_logits = relevance_logits.view(n_passages) if not return_dict: return (start_logits, end_logits, relevance_logits) + outputs[2:] return DPRReaderOutput( start_logits=start_logits, end_logits=end_logits, 
relevance_logits=relevance_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def init_weights(self): self.encoder.init_weights() ################## # PreTrainedModel ################## class DPRPretrainedContextEncoder(PreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DPRConfig load_tf_weights = None base_model_prefix = "ctx_encoder" authorized_missing_keys = [r"position_ids"] def init_weights(self): self.ctx_encoder.init_weights() class DPRPretrainedQuestionEncoder(PreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DPRConfig load_tf_weights = None base_model_prefix = "question_encoder" authorized_missing_keys = [r"position_ids"] def init_weights(self): self.question_encoder.init_weights() class DPRPretrainedReader(PreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DPRConfig load_tf_weights = None base_model_prefix = "span_predictor" authorized_missing_keys = [r"position_ids"] def init_weights(self): self.span_predictor.encoder.init_weights() self.span_predictor.qa_classifier.apply(self.span_predictor.encoder.bert_model._init_weights) self.span_predictor.qa_outputs.apply(self.span_predictor.encoder.bert_model._init_weights) ############### # Actual Models ############### DPR_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.DPRConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ DPR_ENCODERS_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. To match pretraining, DPR input sequence should be formatted with [CLS] and [SEP] tokens as follows: (a) For sequence pairs (for a pair title+text for example): ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` (b) For single sequences (for a question for example): ``tokens: [CLS] the dog is hairy . [SEP]`` ``token_type_ids: 0 0 0 0 0 0 0`` DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`~transformers.DPRTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. 
Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`_ inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ DPR_READER_INPUTS_DOCSTRING = r""" Args: input_ids: (:obj:`Tuple[torch.LongTensor]` of shapes :obj:`(n_passages, sequence_length)`): Indices of input sequence tokens in the vocabulary. It has to be a sequence triplet with 1) the question and 2) the passages titles and 3) the passages texts. To match pretraining, DPR :obj:`input_ids` sequence should be formatted with [CLS] and [SEP] with the format: ``[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>`` DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`~transformers.DPRReaderTokenizer`. See this class documentation for more details. attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(n_passages, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(n_passages, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. 
""" @add_start_docstrings( "The bare DPRContextEncoder transformer outputting pooler outputs as context representations.", DPR_START_DOCSTRING, ) class DPRContextEncoder(DPRPretrainedContextEncoder): def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.ctx_encoder = DPREncoder(config) self.init_weights() @add_start_docstrings_to_callable(DPR_ENCODERS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DPRContextEncoderOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[Tensor] = None, attention_mask: Optional[Tensor] = None, token_type_ids: Optional[Tensor] = None, inputs_embeds: Optional[Tensor] = None, output_attentions=None, output_hidden_states=None, return_dict=None, ) -> Union[DPRContextEncoderOutput, Tuple[Tensor, ...]]: r""" Return: Examples:: >>> from transformers import DPRContextEncoder, DPRContextEncoderTokenizer >>> tokenizer = DPRContextEncoderTokenizer.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base') >>> model = DPRContextEncoder.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base', return_dict=True) >>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors='pt')["input_ids"] >>> embeddings = model(input_ids).pooler_output """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = ( torch.ones(input_shape, device=device) if input_ids is None else (input_ids != self.config.pad_token_id) ) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) outputs = self.ctx_encoder( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return outputs[1:] return DPRContextEncoderOutput( pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) @add_start_docstrings( "The bare DPRQuestionEncoder transformer outputting pooler outputs as question representations.", DPR_START_DOCSTRING, ) class DPRQuestionEncoder(DPRPretrainedQuestionEncoder): def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.question_encoder = DPREncoder(config) self.init_weights() @add_start_docstrings_to_callable(DPR_ENCODERS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DPRQuestionEncoderOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[Tensor] = None, attention_mask: Optional[Tensor] = None, token_type_ids: Optional[Tensor] = None, inputs_embeds: Optional[Tensor] = None, output_attentions=None, output_hidden_states=None, return_dict=None, ) -> Union[DPRQuestionEncoderOutput, Tuple[Tensor, ...]]: r""" Return: Examples:: >>> from transformers import 
DPRQuestionEncoder, DPRQuestionEncoderTokenizer >>> tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base') >>> model = DPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base', return_dict=True) >>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors='pt')["input_ids"] >>> embeddings = model(input_ids).pooler_output """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = ( torch.ones(input_shape, device=device) if input_ids is None else (input_ids != self.config.pad_token_id) ) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) outputs = self.question_encoder( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return outputs[1:] return DPRQuestionEncoderOutput( pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) @add_start_docstrings( "The bare DPRReader transformer outputting span predictions.", DPR_START_DOCSTRING, ) class DPRReader(DPRPretrainedReader): def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.span_predictor = DPRSpanPredictor(config) self.init_weights() @add_start_docstrings_to_callable(DPR_READER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DPRReaderOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[Tensor] = None, attention_mask: Optional[Tensor] = None, inputs_embeds: Optional[Tensor] = None, output_attentions: bool = None, output_hidden_states: bool = None, return_dict=None, ) -> Union[DPRReaderOutput, Tuple[Tensor, ...]]: r""" Return: Examples:: >>> from transformers import DPRReader, DPRReaderTokenizer >>> tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base') >>> model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base', return_dict=True) >>> encoded_inputs = tokenizer( ... questions=["What is love ?"], ... titles=["Haddaway"], ... texts=["'What Is Love' is a song recorded by the artist Haddaway"], ... return_tensors='pt' ... 
) >>> outputs = model(**encoded_inputs) >>> start_logits = outputs.start_logits >>> end_logits = outputs.end_logits >>> relevance_logits = outputs.relevance_logits """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) return self.span_predictor( input_ids, attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )
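# ---------------------------------------------------------------------------
# Illustrative sketch (added, not part of the original module): the output
# docstrings above note that the context and question `pooler_output`
# embeddings are meant to be compared against each other for nearest-neighbor
# retrieval. A minimal way to score a few passages against one question with
# the classes defined here could look like the commented example below. Model
# names come from the archive lists at the top of this file; scoring by dot
# product is an assumption based on how DPR retrieval is usually described.
#
# from transformers import (
#     DPRContextEncoder,
#     DPRContextEncoderTokenizer,
#     DPRQuestionEncoder,
#     DPRQuestionEncoderTokenizer,
# )
# import torch
#
# q_tok = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# q_enc = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base", return_dict=True)
# c_tok = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
# c_enc = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", return_dict=True)
#
# question = "Who wrote Crime and Punishment?"
# passages = [
#     "Crime and Punishment is a novel by Fyodor Dostoevsky.",
#     "'What Is Love' is a song recorded by the artist Haddaway.",
# ]
#
# q_emb = q_enc(**q_tok(question, return_tensors="pt")).pooler_output                # (1, dim)
# c_emb = c_enc(**c_tok(passages, return_tensors="pt", padding=True)).pooler_output  # (2, dim)
# scores = torch.matmul(q_emb, c_emb.T).squeeze(0)  # higher score = more relevant passage
# ---------------------------------------------------------------------------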
28,460
42.65184
168
py
SLT-FAI
SLT-FAI-main/transformers/configuration_dpr.py
# coding=utf-8 # Copyright 2010, DPR authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ DPR model configuration """ from .configuration_utils import PretrainedConfig from .utils import logging logger = logging.get_logger(__name__) DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/dpr-ctx_encoder-single-nq-base": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/dpr-ctx_encoder-single-nq-base/config.json", "facebook/dpr-question_encoder-single-nq-base": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/dpr-question_encoder-single-nq-base/config.json", "facebook/dpr-reader-single-nq-base": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/dpr-reader-single-nq-base/config.json", "facebook/dpr-ctx_encoder-multiset-base": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/dpr-ctx_encoder-multiset-base/config.json", "facebook/dpr-question_encoder-multiset-base": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/dpr-question_encoder-multiset-base/config.json", "facebook/dpr-reader-multiset-base": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/dpr-reader-multiset-base/config.json", } class DPRConfig(PretrainedConfig): r""" :class:`~transformers.DPRConfig` is the configuration class to store the configuration of a `DPRModel`. This is the configuration class to store the configuration of a :class:`~transformers.DPRContextEncoder`, :class:`~transformers.DPRQuestionEncoder`, or a :class:`~transformers.DPRReader`. It is used to instantiate the components of the DPR model. This class is a subclass of :class:`~transformers.BertConfig`. Please check the superclass for the documentation of all kwargs. Args: vocab_size (:obj:`int`, `optional`, defaults to 30522): Vocabulary size of the DPR model. Defines the different tokens that can be represented by the `inputs_ids` passed to the forward method of :class:`~transformers.BertModel`. hidden_size (:obj:`int`, `optional`, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (:obj:`int`, `optional`, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (:obj:`int`, `optional`, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (:obj:`int`, `optional`, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"swish"` and :obj:`"gelu_new"` are supported. hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout ratio for the attention probabilities. 
max_position_embeddings (:obj:`int`, `optional`, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (:obj:`int`, `optional`, defaults to 2): The vocabulary size of the `token_type_ids` passed into :class:`~transformers.BertModel`. initializer_range (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12): The epsilon used by the layer normalization layers. gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`): If True, use gradient checkpointing to save memory at the expense of slower backward pass. projection_dim (:obj:`int`, `optional`, defaults to 0): Dimension of the projection for the context and question encoders. If it is set to zero (default), then no projection is done. """ model_type = "dpr" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, gradient_checkpointing=False, projection_dim: int = 0, **kwargs ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.gradient_checkpointing = gradient_checkpointing self.projection_dim = projection_dim
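# ---------------------------------------------------------------------------
# Illustrative sketch (added, not part of the original module): `projection_dim`
# is the only DPR-specific option here; everything else mirrors BertConfig.
# With the default of 0 the encoders return the raw pooled hidden state, while
# a non-zero value makes DPREncoder (see modeling_dpr.py) append a Linear
# projection of that size. The numbers below are arbitrary examples.
#
# from transformers import DPRConfig, DPRQuestionEncoder
#
# config = DPRConfig(projection_dim=128)   # hidden_size stays at the default 768
# model = DPRQuestionEncoder(config)       # randomly initialised, no pretrained weights
# assert model.question_encoder.embeddings_size == 128
# ---------------------------------------------------------------------------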
6,206
52.508621
163
py
SLT-FAI
SLT-FAI-main/transformers/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py
import argparse import os import torch from transformers.file_utils import WEIGHTS_NAME DIALOGPT_MODELS = ["small", "medium", "large"] OLD_KEY = "lm_head.decoder.weight" NEW_KEY = "lm_head.weight" def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str): d = torch.load(checkpoint_path) d[NEW_KEY] = d.pop(OLD_KEY) os.makedirs(pytorch_dump_folder_path, exist_ok=True) torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dialogpt_path", default=".", type=str) args = parser.parse_args() for MODEL in DIALOGPT_MODELS: checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl") pytorch_dump_folder_path = f"./DialoGPT-{MODEL}" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
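# ---------------------------------------------------------------------------
# Illustrative sketch (added, not part of the original script): the conversion
# above boils down to renaming a single key in the checkpoint's state dict.
# On a toy dict the same operation looks like this:
#
# import torch
# toy = {OLD_KEY: torch.zeros(2, 2), "transformer.wte.weight": torch.zeros(2, 2)}
# toy[NEW_KEY] = toy.pop(OLD_KEY)
# assert set(toy) == {"lm_head.weight", "transformer.wte.weight"}
# ---------------------------------------------------------------------------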
935
27.363636
85
py
SLT-FAI
SLT-FAI-main/transformers/modeling_reformer.py
# coding=utf-8 # Copyright 2020 The Trax Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch REFORMER model. """ import sys from collections import namedtuple from dataclasses import dataclass from functools import reduce from operator import mul from typing import List, Optional, Tuple import numpy as np import torch from torch import nn from torch.autograd.function import Function from torch.nn import CrossEntropyLoss, MSELoss from .activations import ACT2FN from .configuration_reformer import ReformerConfig from .file_utils import ( DUMMY_INPUTS, DUMMY_MASK, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable, ) from .modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput from .modeling_utils import PreTrainedModel, apply_chunking_to_forward from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "ReformerConfig" _TOKENIZER_FOR_DOC = "ReformerTokenizer" REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/reformer-crime-and-punishment", "google/reformer-enwik8", # See all Reformer models at https://huggingface.co/models?filter=reformer ] # Define named tuples for nn.Modules here LSHSelfAttentionOutput = namedtuple("LSHSelfAttentionOutput", ["hidden_states", "attention_probs", "buckets"]) LocalSelfAttentionOutput = namedtuple("LocalSelfAttentionOutput", ["hidden_states", "attention_probs"]) AttentionOutput = namedtuple("AttentionOutput", ["hidden_states", "attention_probs", "buckets"]) ReformerOutput = namedtuple("ReformerOutput", ["hidden_states", "attn_output", "attention_probs", "buckets"]) ReformerBackwardOutput = namedtuple( "ReformerBackwardOutput", ["attn_output", "hidden_states", "grad_attn_output", "grad_hidden_states"] ) ReformerEncoderOutput = namedtuple( "ReformerEncoderOutput", ["hidden_states", "all_hidden_states", "all_attentions", "past_buckets_states"], ) def _stable_argsort(vector, dim): # this function scales the vector so that torch.argsort is stable. # torch.argsort is not stable on its own scale_offset = torch.arange(vector.shape[dim], device=vector.device).view(1, 1, -1) scale_offset = scale_offset.expand(vector.shape) scaled_vector = vector.shape[dim] * vector + (scale_offset % vector.shape[dim]) return torch.argsort(scaled_vector, dim=dim) def _get_least_common_mult_chunk_len(config): attn_types = config.attn_layers attn_types_set = set(attn_types) if len(attn_types_set) == 1 and attn_types[0] == "lsh": return config.lsh_attn_chunk_length elif len(attn_types_set) == 1 and attn_types[0] == "local": return config.local_attn_chunk_length elif len(attn_types_set) == 2 and attn_types_set == set(["lsh", "local"]): return np.lcm(config.lsh_attn_chunk_length, config.local_attn_chunk_length) else: raise NotImplementedError( "Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {}. 
Select attn layer types from ['lsh', 'local'] only.".format( config.attn_layers ) ) def _get_min_chunk_len(config): attn_types = config.attn_layers attn_types_set = set(attn_types) if len(attn_types_set) == 1 and attn_types[0] == "lsh": return config.lsh_attn_chunk_length elif len(attn_types_set) == 1 and attn_types[0] == "local": return config.local_attn_chunk_length elif len(attn_types_set) == 2 and attn_types_set == set(["lsh", "local"]): return min(config.lsh_attn_chunk_length, config.local_attn_chunk_length) else: raise NotImplementedError( "Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {}. Select attn layer types from ['lsh', 'local'] only.".format( config.attn_layers ) ) class AxialPositionEmbeddings(nn.Module): """Constructs axial position embeddings. Useful for very long input sequences to save memory and time. """ def __init__(self, config): super().__init__() self.axial_pos_shape = config.axial_pos_shape self.axial_pos_embds_dim = config.axial_pos_embds_dim self.dropout = config.hidden_dropout_prob self.least_common_mult_chunk_length = _get_least_common_mult_chunk_len(config) self.weights = nn.ParameterList() assert ( sum(self.axial_pos_embds_dim) == config.hidden_size ), "Make sure that config.axial_pos_embds factors: {} sum to config.hidden_size: {}".format( self.axial_pos_embds_dim, config.hidden_size ) # create weights for axis, axial_pos_embd_dim in enumerate(self.axial_pos_embds_dim): # create expanded shapes ax_shape = [1] * len(self.axial_pos_shape) ax_shape[axis] = self.axial_pos_shape[axis] ax_shape = tuple(ax_shape) + (axial_pos_embd_dim,) # create tensor and init self.weights.append(nn.Parameter(torch.ones(ax_shape, dtype=torch.float32))) def forward(self, position_ids): # broadcast weights to correct shape batch_size = position_ids.shape[0] sequence_length = position_ids.shape[1] broadcasted_weights = [ weight.expand((batch_size,) + self.axial_pos_shape + weight.shape[-1:]) for weight in self.weights ] if self.training is True: assert ( reduce(mul, self.axial_pos_shape) == sequence_length ), "If training, make sure that config.axial_pos_shape factors: {} multiply to sequence length. Got prod({}) != sequence_length: {}. 
You might want to consider padding your sequence length to {} or changing config.axial_pos_shape.".format( self.axial_pos_shape, self.axial_pos_shape, sequence_length, reduce(mul, self.axial_pos_shape) ) if self.dropout > 0: weights = torch.cat(broadcasted_weights, dim=-1) # permute weights so that 2D correctly drops dims 1 and 2 transposed_weights = weights.transpose(2, 1) # drop entire matrix of last two dims (prev dims 1 and 2) dropped_transposed_weights = nn.functional.dropout2d( transposed_weights, p=self.dropout, training=self.training ) dropped_weights = dropped_transposed_weights.transpose(2, 1) position_encodings = torch.reshape(dropped_weights, (batch_size, sequence_length, -1)) else: position_encodings = torch.cat( [torch.reshape(weight, (batch_size, sequence_length, -1)) for weight in broadcasted_weights], dim=-1, ) else: assert ( reduce(mul, self.axial_pos_shape) >= sequence_length ), "Make sure that config.axial_pos_shape factors: {} multiply at least to max(sequence_length, least_common_mult_chunk_length): max({}, {})".format( self.axial_pos_shape, sequence_length, self.least_common_mult_chunk_length, ) # compute how many columns are needed max_position_id = position_ids.max().item() required_pos_encodings_columns = -(-(max_position_id + 1) // self.axial_pos_shape[1]) # cut to columns that are needed position_encodings = torch.cat( [weight[:, :required_pos_encodings_columns] for weight in broadcasted_weights], dim=-1 ) position_encodings = torch.reshape(position_encodings, (batch_size, -1, position_encodings.shape[-1])) # select correct position encodings position_encodings = torch.cat( [ torch.index_select(position_encodings[i], 0, position_ids[i]).unsqueeze(0) for i in range(batch_size) ], dim=0, ) return position_encodings class PositionEmbeddings(nn.Module): """Constructs conventional position embeddings of shape `[max_pos_embeddings, hidden_size]`.""" def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob self.embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size) def forward(self, position_ids): position_embeddings = self.embedding(position_ids) position_embeddings = nn.functional.dropout(position_embeddings, p=self.dropout, training=self.training) return position_embeddings class ReformerEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.max_position_embeddings = config.max_position_embeddings self.dropout = config.hidden_dropout_prob self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size) self.position_embeddings = ( AxialPositionEmbeddings(config) if config.axial_pos_embds else PositionEmbeddings(config) ) def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, start_idx_pos_encodings=0): if input_ids is not None: input_shape = input_ids.size() device = input_ids.device else: input_shape = inputs_embeds.size()[:-1] device = inputs_embeds.device seq_length = input_shape[1] if position_ids is None: position_ids = torch.arange( start_idx_pos_encodings, start_idx_pos_encodings + seq_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).expand(input_shape) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) assert ( position_ids.shape[-1] <= self.max_position_embeddings ), "Sequence Length: {} has to be larger equal than config.max_position_embeddings: {}".format( position_ids.shape[-1], self.max_position_embeddings ) # dropout 
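        # (added note) unlike BERT's embedding module there is no LayerNorm here:
        # dropout is applied directly to the word embeddings and the (axial)
        # position encodings are added on top afterwards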
embeddings = nn.functional.dropout(inputs_embeds, p=self.dropout, training=self.training) # add positional embeddings position_embeddings = self.position_embeddings(position_ids) embeddings = embeddings + position_embeddings return embeddings class EfficientAttentionMixin: """ A few utilities for nn.Modules in Reformer, to be used as a mixin. """ def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after): """Used to implement attention between consecutive chunks. Args: vectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...] num_chunks_before: chunks before current chunk to include in attention num_chunks_after: chunks after current chunk to include in attention Returns: tensor of shape [num_chunks, N * chunk_length, ...], where N = (1 + num_chunks_before + num_chunks_after). """ if num_chunks_before == 0 and num_chunks_after == 0: return vectors slices = [] for i in range(-num_chunks_before, num_chunks_after + 1): if i == 0: slices.append(vectors) else: slices.append(torch.cat([vectors[:, :, i:, ...], vectors[:, :, :i, ...]], dim=2)) return torch.cat(slices, dim=3) def _split_hidden_size_dim(self, x, num_attn_heads, attn_head_size): """ splits hidden_size dim into attn_head_size and num_attn_heads """ new_x_shape = x.size()[:-1] + (num_attn_heads, attn_head_size) x = x.view(*new_x_shape) return x.transpose(2, 1) def _merge_hidden_size_dims(self, x, num_attn_heads, attn_head_size): """ merges attn_head_size dim and num_attn_heads dim into hidden_size """ x = x.permute(0, 2, 1, 3) return torch.reshape(x, (x.size()[0], -1, num_attn_heads * attn_head_size)) def _split_seq_length_dim_to(self, vectors, dim_factor_1, dim_factor_2, num_attn_heads, attn_head_size=None): """ splits sequence length dim of vectors into `dim_factor_1` and `dim_factor_2` dims """ batch_size = vectors.shape[0] split_dim_shape = (batch_size, num_attn_heads, dim_factor_1, dim_factor_2) if len(vectors.shape) == 4: return torch.reshape(vectors, split_dim_shape + (attn_head_size,)) elif len(vectors.shape) == 3: return torch.reshape(vectors, split_dim_shape) else: raise ValueError("Input vector rank should be one of [3, 4], but is: {}".format(len(vectors.shape))) class LSHSelfAttention(nn.Module, EfficientAttentionMixin): def __init__(self, config): super().__init__() self.config = config self.chunk_length = config.lsh_attn_chunk_length self.num_hashes = config.num_hashes self.num_buckets = config.num_buckets self.num_chunks_before = config.lsh_num_chunks_before self.num_chunks_after = config.lsh_num_chunks_after self.hash_seed = config.hash_seed self.is_decoder = config.is_decoder self.max_position_embeddings = config.max_position_embeddings self.dropout = config.lsh_attention_probs_dropout_prob self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.hidden_size = config.hidden_size # projection matrices self.query_key = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False) # save mask value here. 
Need fp32 and fp16 mask values self.register_buffer("self_mask_value_float16", torch.tensor(-1e3)) self.register_buffer("self_mask_value_float32", torch.tensor(-1e5)) self.register_buffer("mask_value_float16", torch.tensor(-1e4)) self.register_buffer("mask_value_float32", torch.tensor(-1e9)) def forward( self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, buckets=None, past_buckets_states=None, use_cache=False, output_attentions=False, **kwargs ): sequence_length = hidden_states.shape[1] batch_size = hidden_states.shape[0] # num hashes can optionally be overwritten by user num_hashes = num_hashes if num_hashes is not None else self.num_hashes do_cached_attention = use_cache and past_buckets_states[1] is not None # check if cache shall be used and that hidden states are already cached if do_cached_attention: assert ( sequence_length == 1 ), f"At the moment, auto-regressive language generation is only possible one word at a time. Make sure that input sequence length {sequence_length} equals 1, when `past_buckets_states` is passed." past_buckets = past_buckets_states[0] past_states = past_buckets_states[1] # get query vector query_vectors = self.query_key(hidden_states) query_vectors = self._split_hidden_size_dim( query_vectors, self.num_attention_heads, self.attention_head_size ) if past_buckets is not None: key_value_hidden_states, sorted_bucket_idx, buckets = self._get_relevant_hid_states_and_buckets( query_vectors=query_vectors, attention_mask=attention_mask, num_hashes=num_hashes, hidden_states=hidden_states, past_states=past_states, past_buckets=past_buckets, ) query_key_vectors = self._query_per_attn_head(key_value_hidden_states) value_vectors = self._value_per_attn_head(key_value_hidden_states) # split key & value vectors by num hashes to apply # self attention on each separately query_key_vectors = self._split_seq_length_dim_to( query_key_vectors, num_hashes, -1, self.num_attention_heads, self.attention_head_size, ) value_vectors = self._split_seq_length_dim_to( value_vectors, num_hashes, -1, self.num_attention_heads, self.attention_head_size, ) # repeat query vectors across hash dimension query_vectors = query_vectors.unsqueeze(2).repeat(1, 1, num_hashes, 1, 1) else: key_value_hidden_states = torch.cat([past_states, hidden_states], dim=1) query_key_vectors = self.query_key(key_value_hidden_states) value_vectors = self.value(key_value_hidden_states) else: # project hidden_states to query_key and value query_vectors = None query_key_vectors = self.query_key(hidden_states) value_vectors = self.value(hidden_states) # if query key is not already split if not do_cached_attention or past_buckets is None: query_key_vectors = self._split_hidden_size_dim( query_key_vectors, self.num_attention_heads, self.attention_head_size ) value_vectors = self._split_hidden_size_dim( value_vectors, self.num_attention_heads, self.attention_head_size ) # cache buckets for next incremental decoding if do_cached_attention and past_buckets is None and key_value_hidden_states.shape[1] >= self.chunk_length: buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask) # free memory del hidden_states assert ( query_key_vectors.shape[-1] == self.attention_head_size ), "last dim of query_key_vectors is {} but should be {}.".format( query_key_vectors.shape[-1], self.attention_head_size ) assert ( value_vectors.shape[-1] == self.attention_head_size ), "last dim of value_vectors is {} but should be {}.".format( value_vectors.shape[-1], self.attention_head_size ) 
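        # (added note) LSH bucketing only pays off when the sequence spans several
        # chunks; for inputs that fit into a single chunk, or for cached single-token
        # decoding, the flag below falls back to ordinary full self-attention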
do_standard_self_attention = (sequence_length <= self.chunk_length) or ( use_cache and past_buckets_states[1] is not None ) # LSH attention only makes sense if chunked attention should be performed if not do_standard_self_attention: # set `num_buckets` on the fly, recommended way to do it if self.num_buckets is None: self._set_num_buckets(sequence_length) # use cached buckets for backprop only if buckets is None: # hash query key vectors into buckets buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask) else: # make sure buckets has correct shape for LSH attention buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes * sequence_length) assert ( int(buckets.shape[-1]) == num_hashes * sequence_length ), "last dim of buckets is {}, but should be {}".format(buckets.shape[-1], num_hashes * sequence_length) sorted_bucket_idx, undo_sorted_bucket_idx = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx( sequence_length, buckets, num_hashes ) # make sure bucket idx is not longer then sequence length sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length # cluster query key value vectors according to hashed buckets query_key_vectors = self._gather_by_expansion(query_key_vectors, sorted_bucket_idx_per_hash, num_hashes) value_vectors = self._gather_by_expansion(value_vectors, sorted_bucket_idx_per_hash, num_hashes) query_key_vectors = self._split_seq_length_dim_to( query_key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) value_vectors = self._split_seq_length_dim_to( value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) if self.chunk_length is None: assert ( self.num_chunks_before == 0 and self.num_chunks_after == 0 ), "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0." 
elif do_cached_attention and past_buckets is not None: # use max sequence length sorted_bucket_idx_per_hash = sorted_bucket_idx else: # get sequence length indices sorted_bucket_idx_per_hash = torch.arange(sequence_length, device=query_key_vectors.device).repeat( batch_size, self.num_attention_heads, 1 ) # scale key vectors key_vectors = self._len_and_dim_norm(query_key_vectors) # set query_vectors to query key vectors if LSH self attention query_vectors = query_vectors if query_vectors is not None else query_key_vectors # free memory del query_key_vectors # get attention probs out_vectors, logits, attention_probs = self._attend( query_vectors=query_vectors, key_vectors=key_vectors, value_vectors=value_vectors, sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash, attention_mask=attention_mask, head_mask=head_mask, do_standard_self_attention=do_standard_self_attention, do_cached_attention=do_cached_attention, ) # free memory del key_vectors, value_vectors # re-order out_vectors and logits if not do_standard_self_attention: # sort clusters back to correct ordering out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx) if not do_standard_self_attention or (do_cached_attention and past_buckets is not None): # sum up all hash rounds if num_hashes > 1: out_vectors = self._split_seq_length_dim_to( out_vectors, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size, ) logits = self._split_seq_length_dim_to( logits, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size, ).unsqueeze(-1) probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True)) out_vectors = torch.sum(out_vectors * probs_vectors, dim=2) # free memory del probs_vectors # free memory del logits assert out_vectors.shape == ( batch_size, self.num_attention_heads, sequence_length, self.attention_head_size, ), "out_vectors have be of shape `[batch_size, config.num_attention_heads, sequence_length, config.attention_head_size]`." out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size) if output_attentions is False: attention_probs = () if buckets is not None: buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes, -1) return LSHSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs, buckets=buckets) def _query_per_attn_head(self, hidden_states): per_head_query_key = self.query_key.weight.reshape( self.num_attention_heads, self.attention_head_size, self.hidden_size ).transpose(-2, -1) # only relevant for inference and no bias => we can use einsum here query_key_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_query_key) return query_key_vectors def _value_per_attn_head(self, hidden_states): per_head_value = self.value.weight.reshape( self.num_attention_heads, self.attention_head_size, self.hidden_size ).transpose(-2, -1) # only relevant for inference and no bias => we can use einsum here value_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_value) return value_vectors def _hash_vectors(self, vectors, num_hashes, attention_mask, increase_num_buckets=False): batch_size = vectors.shape[0] # See https://arxiv.org/pdf/1509.02897.pdf # We sample a different random rotation for each round of hashing to # decrease the probability of hash misses. 
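        # (added note) the hashing implemented below is the angular-LSH scheme from the
        # paper cited above: each vector is projected onto a set of random directions
        # (`rotation_size // 2` per hash round), the projections and their negations
        # are concatenated, and the argmax of that vector is the bucket id; when
        # `num_buckets` is factorized into a list, the per-factor bucket ids are
        # combined into a single id via a mixed-radix sum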
if isinstance(self.num_buckets, int): assert ( self.num_buckets % 2 == 0 ), "There should be an even number of bucktes, but `self.num_bucktes`: {}".format(self.num_buckets) rotation_size = self.num_buckets num_buckets = self.num_buckets else: # Factorize the hash if self.num_buckets is a list or tuple rotation_size, num_buckets = 0, 1 for bucket_factor in self.num_buckets: assert bucket_factor % 2 == 0, "The number of buckets should be even, but `num_bucket`: {}".format( bucket_factor ) rotation_size = rotation_size + bucket_factor num_buckets = num_buckets * bucket_factor # remove gradient vectors = vectors.detach() if self.hash_seed is not None: # for determinism torch.manual_seed(self.hash_seed) rotations_shape = (self.num_attention_heads, vectors.shape[-1], num_hashes, rotation_size // 2) # create a random self.attention_head_size x num_hashes x num_buckets/2 random_rotations = torch.randn(rotations_shape, device=vectors.device, dtype=vectors.dtype) # Output dim: Batch_Size x Num_Attn_Heads x Num_Hashes x Seq_Len x Num_Buckets/2 rotated_vectors = torch.einsum("bmtd,mdhr->bmhtr", vectors, random_rotations) if isinstance(self.num_buckets, int) or len(self.num_buckets) == 1: rotated_vectors = torch.cat([rotated_vectors, -rotated_vectors], dim=-1) buckets = torch.argmax(rotated_vectors, dim=-1) else: # Get the buckets for them and combine. buckets, cur_sum, cur_product = None, 0, 1 for bucket_factor in self.num_buckets: rotated_vectors_factor = rotated_vectors[..., cur_sum : cur_sum + (bucket_factor // 2)] cur_sum = cur_sum + bucket_factor // 2 rotated_vectors_factor = torch.cat([rotated_vectors_factor, -rotated_vectors_factor], dim=-1) if buckets is None: buckets = torch.argmax(rotated_vectors_factor, dim=-1) else: buckets = buckets + (cur_product * torch.argmax(rotated_vectors_factor, dim=-1)) cur_product = cur_product * bucket_factor if attention_mask is not None and (attention_mask.sum().item() < batch_size * attention_mask.shape[-1]): # add an extra bucket for padding tokens only num_buckets = num_buckets + 1 # assign padding tokens extra bucket buckets_mask = attention_mask.to(torch.uint8)[:, None, None, :].expand(buckets.shape) buckets = torch.where( buckets_mask, buckets, torch.tensor(num_buckets - 1, dtype=torch.long, device=buckets.device) ) elif increase_num_buckets: num_buckets = num_buckets + 1 # buckets is now (Batch_size x Num_Attn_Heads x Num_Hashes x Seq_Len). # Next we add offsets so that bucket numbers from different hashing rounds don't overlap. 
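        # (added note) e.g. with num_buckets = 4 and num_hashes = 2 (and no extra
        # padding bucket), hash round 0 keeps bucket ids 0..3 while round 1 is shifted
        # to 4..7, so later sorting never mixes buckets from different hash rounds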
offsets = torch.arange(num_hashes, device=vectors.device) offsets = (offsets * num_buckets).view((1, 1, -1, 1)) # expand to batch size and num attention heads offsets = offsets.expand((batch_size, self.num_attention_heads) + offsets.shape[-2:]) offset_buckets = (buckets + offsets).flatten(start_dim=2, end_dim=3) return offset_buckets def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(self, sequence_length, buckets, num_hashes): # no gradients are needed with torch.no_grad(): # hash-based sort sorted_bucket_idx = _stable_argsort(buckets, dim=-1) # create simple indices to scatter to, to have undo sort indices = ( torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device) .view(1, 1, -1) .expand(sorted_bucket_idx.shape) ) # get undo sort undo_sorted_bucket_idx = sorted_bucket_idx.new(*sorted_bucket_idx.size()) undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, indices) return sorted_bucket_idx, undo_sorted_bucket_idx def _set_num_buckets(self, sequence_length): # `num_buckets` should be set to 2 * sequence_length // chunk_length as recommended in paper num_buckets_pow_2 = (2 * (sequence_length // self.chunk_length)).bit_length() - 1 # make sure buckets are power of 2 num_buckets = 2 ** num_buckets_pow_2 # factorize `num_buckets` if `num_buckets` becomes too large num_buckets_limit = 2 * max( int((self.max_position_embeddings // self.chunk_length) ** (0.5)), self.chunk_length, ) if num_buckets > num_buckets_limit: num_buckets = [2 ** (num_buckets_pow_2 // 2), 2 ** (num_buckets_pow_2 - num_buckets_pow_2 // 2)] logger.warning("config.num_buckets is not set. Setting config.num_buckets to {}...".format(num_buckets)) # set num buckets in config to be properly saved self.config.num_buckets = num_buckets self.num_buckets = num_buckets def _attend( self, query_vectors, key_vectors, value_vectors, sorted_bucket_idx_per_hash, attention_mask, head_mask, do_standard_self_attention, do_cached_attention, ): # look at previous and following chunks if chunked attention if not do_standard_self_attention: key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after) value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after) # get logits and dots # (BS, NumAttn, NumHash x NumChunk, Chunk_L x Hidden),(BS, NumAttn, NumHash x NumChunk, Chunk_L * (Num_bef + Num_aft + 1) x Hidden) -> (BS, NumAttn, NumHash x NumChunk, Chunk_L, Chunk_L * (1 + Num_bef + Num_aft)) query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2)) # free memory del query_vectors, key_vectors # if chunked attention split bucket idxs to query and key if not do_standard_self_attention: query_bucket_idx = self._split_seq_length_dim_to( sorted_bucket_idx_per_hash, -1, self.chunk_length, self.num_attention_heads ) key_value_bucket_idx = self._look_adjacent(query_bucket_idx, self.num_chunks_before, self.num_chunks_after) elif do_cached_attention and query_key_dots.ndim > 4: key_value_bucket_idx = sorted_bucket_idx_per_hash query_bucket_idx = ( key_value_bucket_idx.new_ones(key_value_bucket_idx.shape[:-1] + (1,)) * key_value_bucket_idx.max() ) elif do_cached_attention and query_key_dots.ndim <= 4: query_bucket_idx = (query_key_dots.shape[-1] - 1) * torch.ones_like(query_key_dots)[:, :, :, -1] key_value_bucket_idx = torch.arange( query_key_dots.shape[-1], dtype=torch.long, device=query_key_dots.device )[None, None, :].expand(query_bucket_idx.shape[:2] + (-1,)) else: query_bucket_idx = key_value_bucket_idx = sorted_bucket_idx_per_hash # get correct mask 
values depending on precision if query_key_dots.dtype == torch.float16: self_mask_value = self.self_mask_value_float16.half() mask_value = self.mask_value_float16.half() else: self_mask_value = self.self_mask_value_float32 mask_value = self.mask_value_float32 if not do_cached_attention: mask = self._compute_attn_mask( query_bucket_idx, key_value_bucket_idx, attention_mask, query_key_dots.shape, do_standard_self_attention, ) if mask is not None: query_key_dots = torch.where(mask, query_key_dots, mask_value) # free memory del mask # Self mask is ALWAYS applied. # From the reformer paper (https://arxiv.org/pdf/2001.04451.pdf): # " While attention to the future is not allowed, typical implementations of the # Transformer do allow a position to attend to itself. # Such behavior is undesirable in a shared-QK formulation because the dot-product # of a query vector with itself will almost always be greater than the dot product of a # query vector with a vector at another position. We therefore modify the masking # to forbid a token from attending to itself, except in situations # where a token has no other valid attention targets (e.g. the first token in a sequence) " self_mask = torch.ne(query_bucket_idx.unsqueeze(-1), key_value_bucket_idx.unsqueeze(-2)).to( query_bucket_idx.device ) # apply self_mask query_key_dots = torch.where(self_mask, query_key_dots, self_mask_value) # free memory del self_mask logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True) # dots shape is `[batch_size, num_attn_heads, num_hashes * seq_len // chunk_length, chunk_length, chunk_length * (1 + num_chunks_before + num_chunks_after)]` attention_probs = torch.exp(query_key_dots - logits) # free memory del query_key_dots # dropout attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask # attend values out_vectors = torch.matmul(attention_probs, value_vectors) # free memory del value_vectors # merge chunk length if out_vectors.ndim > 4: logits = logits.flatten(start_dim=2, end_dim=3).squeeze(-1) out_vectors = out_vectors.flatten(start_dim=2, end_dim=3) return out_vectors, logits, attention_probs def _compute_attn_mask( self, query_indices, key_indices, attention_mask, query_key_dot_shape, do_standard_self_attention ): # attention mask for LSH if attention_mask is not None: # if chunked attention, the attention mask has to correspond to LSH order attention_mask = attention_mask.to(torch.uint8)[:, None, :] if not do_standard_self_attention: # expand attn_mask to fit with key_value_bucket_idx shape attention_mask = attention_mask[:, None, :] attention_mask = attention_mask.expand(query_indices.shape[:-1] + (-1,)) # extract attention mask from LSH sorted key_indices attention_mask = torch.gather(attention_mask, -1, key_indices) attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dot_shape) # Causal mask if self.is_decoder is True: causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device) # add attention mask if not None if attention_mask is not None: attention_mask = causal_mask * attention_mask else: attention_mask = causal_mask return attention_mask def _get_relevant_hid_states_and_buckets( self, query_vectors, attention_mask, num_hashes, hidden_states, past_states, past_buckets ): # concat hidden states hidden_states = torch.cat([past_states, hidden_states], dim=1) # batch_size hidden batch_size = hidden_states.shape[0] 
sequence_length = hidden_states.shape[1] # check if cached buckets include pad bucket max_bucket = self.num_buckets if isinstance(self.num_buckets, int) else reduce(mul, self.num_buckets) # if pad bucket was cached => need to increase num buckets for caching increase_num_buckets = past_buckets.max() > num_hashes * max_bucket - 1 # retrieve query buckets query_buckets = self._hash_vectors( query_vectors, num_hashes, attention_mask, increase_num_buckets=increase_num_buckets ) # concat buckets concat_buckets = torch.cat([past_buckets, query_buckets.unsqueeze(-1)], dim=-1) # hash-based sort bucket_idx = _stable_argsort(concat_buckets, dim=-1) # bucket_idx has shape: BatchSize x NumAttnHeads x NumHashes x SequenceLength assert bucket_idx.shape == ( batch_size, self.num_attention_heads, num_hashes, sequence_length, ), f"bucket_idx should have shape {(batch_size, self.num_attention_heads, num_hashes, sequence_length)}, but has shape {bucket_idx.shape}." # find indices of new bucket indices relevant_bucket_idx = (bucket_idx == (bucket_idx.shape[-1] - 1)).nonzero() # expand relevant bucket indices to its chunks relevant_bucket_idx_chunk = self._expand_to_indices_in_relevant_chunk(relevant_bucket_idx, sequence_length) relevant_bucket_idx_chunk = bucket_idx[tuple(relevant_bucket_idx_chunk.transpose(0, 1))] # adapt bucket_idx for batch and hidden states for index select bucket_idx_batch_offset = sequence_length * ( batch_size * torch.arange(relevant_bucket_idx_chunk.shape[-1], device=hidden_states.device, dtype=torch.long) // relevant_bucket_idx_chunk.shape[-1] ) # add batch offset relevant_bucket_idx_chunk_all_batch = relevant_bucket_idx_chunk + bucket_idx_batch_offset hidden_states = hidden_states.reshape((-1, self.hidden_size)) # select all relevant hidden states relevant_hidden_states = hidden_states.index_select(0, relevant_bucket_idx_chunk_all_batch) # reshape hidden states and bucket_idx to correct output relevant_hidden_states = relevant_hidden_states.reshape( batch_size, self.num_attention_heads, -1, self.hidden_size ) relevant_bucket_idx_chunk = relevant_bucket_idx_chunk.reshape( batch_size, self.num_attention_heads, num_hashes, -1 ) assert ( relevant_hidden_states.shape[2] == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes ), f"There should be {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes} `hidden_states`, there are {relevant_hidden_states.shape[2]} `hidden_states`." assert ( relevant_bucket_idx_chunk.shape[-1] == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length ), f"There should be {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length} `hidden_states`, there are {relevant_bucket_idx_chunk.shape[-1]} `bucket_idx`." 
return relevant_hidden_states, relevant_bucket_idx_chunk, query_buckets def _expand_to_indices_in_relevant_chunk(self, indices, sequence_length): # get relevant indices of where chunk starts and its size start_indices_chunk = ((indices[:, -1] // self.chunk_length) - self.num_chunks_before) * self.chunk_length total_chunk_size = self.chunk_length * (1 + self.num_chunks_before + self.num_chunks_after) # expand start indices and add correct chunk offset via arange expanded_start_indices = start_indices_chunk.unsqueeze(-1).expand(indices.shape[0], total_chunk_size) chunk_sequence_indices = expanded_start_indices + torch.arange( total_chunk_size, device=indices.device, dtype=torch.long ).unsqueeze(0).expand(indices.shape[0], total_chunk_size) # make sure that circular logic holds via % seq len chunk_sequence_indices = chunk_sequence_indices.flatten() % sequence_length # expand indices and set indices correctly indices = indices.unsqueeze(1).expand((indices.shape[0], total_chunk_size, -1)).flatten(0, 1).clone() indices[:, -1] = chunk_sequence_indices return indices def _len_and_dim_norm(self, vectors): """ length and attention head size dim normalization """ vectors = self._len_norm(vectors) vectors = vectors * torch.rsqrt( torch.tensor(self.attention_head_size, device=vectors.device, dtype=vectors.dtype) ) return vectors def _len_norm(self, x, epsilon=1e-6): """ length normalization """ variance = torch.mean(x ** 2, -1, keepdim=True) norm_x = x * torch.rsqrt(variance + epsilon) return norm_x def _gather_by_expansion(self, vectors, idxs, num_hashes): """ expand dims of idxs and vectors for all hashes and gather """ expanded_idxs = idxs.unsqueeze(-1).expand(-1, -1, -1, self.attention_head_size) vectors = vectors.repeat(1, 1, num_hashes, 1) return torch.gather(vectors, 2, expanded_idxs) class ReverseSort(Function): """ After chunked attention is applied which sorted clusters, original ordering has to be restored. Since customized backward function is used for Reformer, the gradients of the output vectors have to be explicitely sorted here. 
""" @staticmethod def forward(ctx, out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx): # save sorted_bucket_idx for backprop with torch.no_grad(): ctx.sorted_bucket_idx = sorted_bucket_idx # undo sort to have correct order for next layer expanded_undo_sort_indices = undo_sorted_bucket_idx.unsqueeze(-1).expand(out_vectors.shape) out_vectors = torch.gather(out_vectors, 2, expanded_undo_sort_indices) logits = torch.gather(logits, 2, undo_sorted_bucket_idx) return out_vectors, logits @staticmethod def backward(ctx, grad_out_vectors, grad_logits): # get parameters saved in ctx sorted_bucket_idx = ctx.sorted_bucket_idx expanded_sort_indices = sorted_bucket_idx.unsqueeze(-1).expand(grad_out_vectors.shape) # reverse sort of forward grad_out_vectors = torch.gather(grad_out_vectors, 2, expanded_sort_indices) grad_logits = torch.gather(grad_logits, 2, sorted_bucket_idx) # return grad and `None` fillers for last 2 forward args return grad_out_vectors, grad_logits, None, None class LocalSelfAttention(nn.Module, EfficientAttentionMixin): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.chunk_length = config.local_attn_chunk_length self.num_chunks_before = config.local_num_chunks_before self.num_chunks_after = config.local_num_chunks_after self.is_decoder = config.is_decoder self.pad_token_id = config.pad_token_id self.attention_head_size = config.attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.hidden_size = config.hidden_size # projection matrices self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.dropout = config.local_attention_probs_dropout_prob # save mask value here self.register_buffer("mask_value_float16", torch.tensor(-1e4)) self.register_buffer("mask_value_float32", torch.tensor(-1e9)) def forward( self, hidden_states, attention_mask=None, head_mask=None, past_buckets_states=None, use_cache=False, output_attentions=False, **kwargs ): sequence_length = hidden_states.shape[1] batch_size = hidden_states.shape[0] # check if cache shall be used and that hidden states are already cached if use_cache and past_buckets_states[1] is not None: assert ( past_buckets_states[0] is None ), "LocalSelfAttention should not make use of `buckets`. There seems to be an error when caching hidden_states_and_buckets." 
key_value_hidden_states = self._retrieve_relevant_hidden_states( past_buckets_states[1], self.chunk_length, self.num_chunks_before ) key_value_hidden_states = torch.cat([key_value_hidden_states, hidden_states], dim=1) # only query vector for last token query_vectors = self.query(hidden_states) # compute key and value for relevant chunk key_vectors = self.key(key_value_hidden_states) value_vectors = self.value(key_value_hidden_states) # free memory del key_value_hidden_states else: # project hidden_states to query, key and value query_vectors = self.query(hidden_states) key_vectors = self.key(hidden_states) value_vectors = self.value(hidden_states) # split last dim into `config.num_attention_heads` and `config.attention_head_size` query_vectors = self._split_hidden_size_dim(query_vectors, self.num_attention_heads, self.attention_head_size) key_vectors = self._split_hidden_size_dim(key_vectors, self.num_attention_heads, self.attention_head_size) value_vectors = self._split_hidden_size_dim(value_vectors, self.num_attention_heads, self.attention_head_size) assert ( query_vectors.shape[-1] == self.attention_head_size ), "last dim of query_key_vectors is {} but should be {}.".format( query_vectors.shape[-1], self.attention_head_size ) assert ( key_vectors.shape[-1] == self.attention_head_size ), "last dim of query_key_vectors is {} but should be {}.".format( key_vectors.shape[-1], self.attention_head_size ) assert ( value_vectors.shape[-1] == self.attention_head_size ), "last dim of query_key_vectors is {} but should be {}.".format( value_vectors.shape[-1], self.attention_head_size ) if self.chunk_length is None: assert ( self.num_chunks_before == 0 and self.num_chunks_after == 0 ), "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0." 
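        # unlike standard attention, the 1/sqrt(attention_head_size) scaling is folded into
        # the key vectors below instead of being applied to the QK^T scores; the two are
        # mathematically equivalent.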
# normalize key vectors key_vectors = key_vectors / torch.sqrt( torch.tensor(self.attention_head_size, device=key_vectors.device, dtype=key_vectors.dtype) ) # get sequence length indices indices = torch.arange(sequence_length, device=query_vectors.device).repeat( batch_size, self.num_attention_heads, 1 ) # if one should do normal n^2 self-attention do_standard_self_attention = sequence_length <= self.chunk_length # if input should be chunked if not do_standard_self_attention: # chunk vectors # B x Num_Attn_Head x Seq_Len // chunk_len x chunk_len x attn_head_size query_vectors = self._split_seq_length_dim_to( query_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) key_vectors = self._split_seq_length_dim_to( key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) value_vectors = self._split_seq_length_dim_to( value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) # chunk indices query_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads) key_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads) # append chunks before and after key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after) value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after) key_indices = self._look_adjacent(key_indices, self.num_chunks_before, self.num_chunks_after) else: query_indices = key_indices = indices # query-key matmul: QK^T query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2)) # free memory del query_vectors, key_vectors mask = self._compute_attn_mask( query_indices, key_indices, attention_mask, query_key_dots.shape, do_standard_self_attention ) if mask is not None: # get mask tensor depending on half precision or not if query_key_dots.dtype == torch.float16: mask_value = self.mask_value_float16.half() else: mask_value = self.mask_value_float32 query_key_dots = torch.where(mask, query_key_dots, mask_value) # free memory del mask # softmax logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True) attention_probs = torch.exp(query_key_dots - logits) # free memory del logits # dropout attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask # attend values out_vectors = torch.matmul(attention_probs, value_vectors) # free memory del value_vectors # merge chunk length if not do_standard_self_attention: out_vectors = out_vectors.flatten(start_dim=2, end_dim=3) assert out_vectors.shape == ( batch_size, self.num_attention_heads, sequence_length, self.attention_head_size, ) out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size) if output_attentions is False: attention_probs = () return LocalSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs) def _compute_attn_mask( self, query_indices, key_indices, attention_mask, query_key_dots_shape, do_standard_self_attention ): # chunk attention mask and look before and after if attention_mask is not None: attention_mask = attention_mask.to(torch.uint8)[:, None, :] if not do_standard_self_attention: attention_mask = self._split_seq_length_dim_to(attention_mask, -1, self.chunk_length, 1) attention_mask = self._look_adjacent(attention_mask, self.num_chunks_before, 
self.num_chunks_after) # create attn_mask attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dots_shape) # Causal mask if self.is_decoder is True: causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device) # add attention mask if not None if attention_mask is not None: attention_mask = causal_mask * attention_mask else: attention_mask = causal_mask return attention_mask @staticmethod def _retrieve_relevant_hidden_states(previous_hidden_states, chunk_length, num_chunks_before): start_position = ((previous_hidden_states.shape[1] // chunk_length) - num_chunks_before) * chunk_length return previous_hidden_states[:, start_position:] class ReformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() all_head_size = config.num_attention_heads * config.attention_head_size self.dropout = config.hidden_dropout_prob self.dense = nn.Linear(all_head_size, config.hidden_size, bias=False) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return hidden_states class ReformerAttention(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.layer_id = layer_id self.attn_layers = config.attn_layers self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "lsh": self.self_attention = LSHSelfAttention(config) elif len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "local": self.self_attention = LocalSelfAttention(config) elif len(set(self.attn_layers)) == 2 and set(self.attn_layers) == set(["lsh", "local"]): # get correct attn layers if self.attn_layers[self.layer_id] == "lsh": self.self_attention = LSHSelfAttention(config) else: self.self_attention = LocalSelfAttention(config) else: raise NotImplementedError( "Only attn layer types 'lsh' and 'local' exist, but got `config.attn_layers`: {}. 
Select attn layer types from ['lsh', 'local'] only.".format( self.attn_layers ) ) self.output = ReformerSelfOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_attentions=False, buckets=None, ): hidden_states = self.layer_norm(hidden_states) # make sure cached hidden states is set to None for backward pass if past_buckets_states is not None: past_buckets_states_layer = past_buckets_states[self.layer_id] else: past_buckets_states_layer = None # use cached buckets for backprob if buckets not None for LSHSelfAttention self_attention_outputs = self.self_attention( hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states_layer, use_cache=use_cache, output_attentions=output_attentions, buckets=buckets, ) # add buckets if necessary if hasattr(self_attention_outputs, "buckets"): buckets = self_attention_outputs.buckets else: buckets = None # cache hidden states for future use if use_cache: if past_buckets_states[self.layer_id][0] is None: # padded input should not be cached past_buckets = ( buckets[:, :, :, :orig_sequence_length] if (buckets is not None and orig_sequence_length > 1) else buckets ) else: past_buckets = torch.cat([past_buckets_states[self.layer_id][0], buckets], dim=-1) if past_buckets_states[self.layer_id][1] is None: # padded input should not be cached past_states = hidden_states[:, :orig_sequence_length] else: past_states = torch.cat([past_buckets_states[self.layer_id][1], hidden_states], dim=1) past_buckets_states[self.layer_id] = (past_buckets, past_states) # compute attention feed forward output attention_output = self.output(self_attention_outputs.hidden_states) return AttentionOutput( hidden_states=attention_output, attention_probs=self_attention_outputs.attention_probs, buckets=buckets, ) class ReformerFeedForwardDense(nn.Module): def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob if isinstance(config.hidden_act, str): self.act_fn = ACT2FN[config.hidden_act] else: self.act_fn = config.hidden_act self.dense = nn.Linear(config.hidden_size, config.feed_forward_size) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = self.act_fn(hidden_states) return hidden_states class ReformerFeedForwardOutput(nn.Module): def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob self.dense = nn.Linear(config.feed_forward_size, config.hidden_size) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return hidden_states class ChunkReformerFeedForward(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dense = ReformerFeedForwardDense(config) self.output = ReformerFeedForwardOutput(config) def forward(self, attention_output): return apply_chunking_to_forward( self.forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, ) def forward_chunk(self, hidden_states): hidden_states = self.layer_norm(hidden_states) hidden_states = self.dense(hidden_states) return 
self.output(hidden_states) class ReformerLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.attention = ReformerAttention(config, layer_id) # dropout requires to have the same # seed for forward and backward pass self.attention_seed = None self.feed_forward_seed = None self.feed_forward = ChunkReformerFeedForward(config) def _init_attention_seed(self): """ This function sets a new seed for the attention layer to make dropout deterministic for both forward calls: 1 normal forward call and 1 forward call in backward to recalculate activations. """ # randomize seeds # use cuda generator if available if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0: # GPU device_idx = torch.cuda.current_device() self.attention_seed = torch.cuda.default_generators[device_idx].seed() else: # CPU self.attention_seed = int(torch.seed() % sys.maxsize) torch.manual_seed(self.attention_seed) def _init_feed_forward_seed(self): """ This function sets a new seed for the feed forward layer to make dropout deterministic for both forward calls: 1 normal forward call and 1 forward call in backward to recalculate activations. """ # randomize seeds # use cuda generator if available if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0: # GPU device_idx = torch.cuda.current_device() self.feed_forward_seed = torch.cuda.default_generators[device_idx].seed() else: # CPU self.feed_forward_seed = int(torch.seed() % sys.maxsize) torch.manual_seed(self.feed_forward_seed) def forward( self, prev_attn_output, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_attentions=False, ): with torch.no_grad(): # every forward pass we sample a different seed # for dropout and save for forward fn in backward pass # to have correct dropout self._init_attention_seed() attn_outputs = self.attention( hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, orig_sequence_length=orig_sequence_length, output_attentions=output_attentions, ) attn_output = attn_outputs.hidden_states # Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0) # Y_1 = X_1 + f(X_2) attn_output = prev_attn_output + attn_output # free memory del prev_attn_output # every forward pass we sample a different seed # for dropout and save seed for forward fn in backward # to have correct dropout self._init_feed_forward_seed() # Y_2 = X_2 + g(Y_1) hidden_states = hidden_states + self.feed_forward(attn_output) return ReformerOutput( attn_output=attn_output, hidden_states=hidden_states, attention_probs=attn_outputs.attention_probs, buckets=attn_outputs.buckets, ) def backward_pass( self, next_attn_output, hidden_states, grad_attn_output, grad_hidden_states, attention_mask=None, head_mask=None, buckets=None, ): # Implements the backward pass for reversible ResNets. # A good blog post on how this works can be found here: # Implementation of RevNet (see Fig. 
6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0) # This code is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py with torch.enable_grad(): next_attn_output.requires_grad = True # set seed to have correct dropout torch.manual_seed(self.feed_forward_seed) # g(Y_1) res_hidden_states = self.feed_forward(next_attn_output) res_hidden_states.backward(grad_hidden_states, retain_graph=True) with torch.no_grad(): # X_2 = Y_2 - g(Y_1) hidden_states = hidden_states - res_hidden_states del res_hidden_states grad_attn_output = grad_attn_output + next_attn_output.grad next_attn_output.grad = None with torch.enable_grad(): hidden_states.requires_grad = True # set seed to have correct dropout torch.manual_seed(self.attention_seed) # f(X_2) # use cached buckets for backprob if buckets not None for LSHSelfAttention output = self.attention( hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, buckets=buckets, ).hidden_states output.backward(grad_attn_output, retain_graph=True) with torch.no_grad(): # X_1 = Y_1 - f(X_2) attn_output = next_attn_output - output del output, next_attn_output grad_hidden_states = grad_hidden_states + hidden_states.grad hidden_states.grad = None hidden_states = hidden_states.detach() return ReformerBackwardOutput( attn_output=attn_output, hidden_states=hidden_states, grad_attn_output=grad_attn_output, grad_hidden_states=grad_hidden_states, ) class _ReversibleFunction(Function): """ To prevent PyTorch from performing the usual backpropagation, a customized backward function is implemented here. This way it is made sure that no memory expensive activations are saved during the forward pass. This function is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py """ @staticmethod def forward( ctx, hidden_states, layers, attention_mask, head_mask, num_hashes, all_hidden_states, all_attentions, past_buckets_states, use_cache, orig_sequence_length, output_hidden_states, output_attentions, ): all_buckets = () # split duplicated tensor hidden_states, attn_output = torch.chunk(hidden_states, 2, dim=-1) for layer_id, (layer, layer_head_mask) in enumerate(zip(layers, head_mask)): if output_hidden_states is True: all_hidden_states.append(hidden_states) layer_outputs = layer( prev_attn_output=attn_output, hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, orig_sequence_length=orig_sequence_length, output_attentions=output_attentions, ) attn_output = layer_outputs.attn_output hidden_states = layer_outputs.hidden_states all_buckets = all_buckets + (layer_outputs.buckets,) if output_attentions: all_attentions.append(layer_outputs.attention_probs) # Add last layer if output_hidden_states is True: all_hidden_states.append(hidden_states) # attach params to ctx for backward ctx.save_for_backward(attn_output.detach(), hidden_states.detach()) ctx.layers = layers ctx.all_buckets = all_buckets ctx.head_mask = head_mask ctx.attention_mask = attention_mask # Concatenate 2 RevNet outputs return torch.cat([attn_output, hidden_states], dim=-1) @staticmethod def backward(ctx, grad_hidden_states): grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1) # retrieve params from ctx for backward attn_output, hidden_states = ctx.saved_tensors # create tuple output = ReformerBackwardOutput( 
attn_output=attn_output, hidden_states=hidden_states, grad_attn_output=grad_attn_output, grad_hidden_states=grad_hidden_states, ) # free memory del grad_attn_output, grad_hidden_states, attn_output, hidden_states layers = ctx.layers all_buckets = ctx.all_buckets head_mask = ctx.head_mask attention_mask = ctx.attention_mask for idx, layer in enumerate(layers[::-1]): # pop last buckets from stack buckets = all_buckets[-1] all_buckets = all_buckets[:-1] # backprop output = layer.backward_pass( next_attn_output=output.attn_output, hidden_states=output.hidden_states, grad_attn_output=output.grad_attn_output, grad_hidden_states=output.grad_hidden_states, head_mask=head_mask[len(layers) - idx - 1], attention_mask=attention_mask, buckets=buckets, ) assert all_buckets == (), "buckets have to be empty after backpropagation" grad_hidden_states = torch.cat([output.grad_attn_output, output.grad_hidden_states], dim=-1) # num of return vars has to match num of forward() args # return gradient for hidden_states arg and None for other args return grad_hidden_states, None, None, None, None, None, None, None, None, None, None, None class ReformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob self.layers = nn.ModuleList([ReformerLayer(config, i) for i in range(config.num_hidden_layers)]) # Reformer is using Rev Nets, thus last layer outputs are concatenated and # Layer Norm is done over 2 * hidden_size self.layer_norm = nn.LayerNorm(2 * config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_hidden_states=False, output_attentions=False, ): # hidden_states and attention lists to be filled if wished all_hidden_states = [] all_attentions = [] # init cached hidden states if necessary if past_buckets_states is None: past_buckets_states = [((None), (None)) for i in range(len(self.layers))] # concat same tensor for reversible ResNet hidden_states = torch.cat([hidden_states, hidden_states], dim=-1) hidden_states = _ReversibleFunction.apply( hidden_states, self.layers, attention_mask, head_mask, num_hashes, all_hidden_states, all_attentions, past_buckets_states, use_cache, orig_sequence_length, output_hidden_states, output_attentions, ) # Apply layer norm to concatenated hidden states hidden_states = self.layer_norm(hidden_states) # Apply dropout hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return ReformerEncoderOutput( hidden_states=hidden_states, all_hidden_states=all_hidden_states, all_attentions=all_attentions, past_buckets_states=past_buckets_states, ) class ReformerOnlyLMHead(nn.Module): def __init__(self, config): super().__init__() # Reformer is using Rev Nets, thus last layer outputs are concatenated and # Layer Norm is done over 2 * hidden_size self.seq_len_dim = 1 self.chunk_size_lm_head = config.chunk_size_lm_head self.decoder = nn.Linear(2 * config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states) def forward_chunk(self, hidden_states): hidden_states = self.decoder(hidden_states) return hidden_states class 
ReformerPreTrainedModel(PreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ReformerConfig base_model_prefix = "reformer" @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "input_ids": input_ids, "attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, AxialPositionEmbeddings): for weight in module.weights: torch.nn.init.normal_(weight, std=self.config.axial_norm_std) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @dataclass class ReformerModelOutput(ModelOutput): """ Output type of :class:`~transformers.ReformerModel`. Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last layer of the model. ``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then ``num_predict`` corresponds to ``sequence_length``. past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): List of :obj:`Tuple(torch.LongTensor, torch.FloatTensor` of length :obj:`config.n_layers`, with the first element being the previous `buckets` of shape :obj:`(batch_size, num_heads, num_hashes, sequence_length)`) and the second being the previous `hidden_states` of shape :obj:`(batch_size, sequence_length, hidden_size)`). Contains precomputed buckets and hidden-states that can be used (see ``past_buckets_states`` input) to speed up sequential decoding. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor past_buckets_states: Optional[List[Tuple[torch.LongTensor, torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class ReformerModelWithLMHeadOutput(ModelOutput): """ Output type of :class:`~transformers.ReformerModelWithLMHead`. 
Args: loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided) Language modeling loss (for next-token prediction). logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). ``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then ``num_predict`` corresponds to ``sequence_length``. past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): List of :obj:`Tuple(torch.LongTensor, torch.FloatTensor` of length :obj:`config.n_layers`, with the first element being the previous `buckets` of shape :obj:`(batch_size, num_heads, num_hashes, sequence_length)`) and the second being the previous `hidden_states` of shape :obj:`(batch_size, sequence_length, hidden_size)`). Contains precomputed buckets and hidden-states that can be used (see ``past_buckets_states`` input) to speed up sequential decoding. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): TTuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_buckets_states: Optional[List[Tuple[torch.LongTensor, torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None REFORMER_START_DOCSTRING = r""" Reformer was proposed in `Reformer: The Efficient Transformer <https://arxiv.org/abs/2001.0445>`__ by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya. This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.ReformerConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ REFORMER_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be a multiple of the relevant model's chunk lengths (lsh's, local's or both). 
During evaluation, the indices are automatically padded to be a multiple of the chunk length. Indices can be obtained using :class:`~transformers.ReformerTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`__ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. num_hashes (:obj:`int`, `optional`): The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites the default defined in :obj:`config.num_hashes`. For more information, see :obj:`num_hashes` in :class:`~transformers.ReformerConfig`. past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`): List of :obj:`Tuple(torch.LongTensor, torch.FloatTensor` of length :obj:`config.n_layers`, with the first element being the previous `buckets` of shape :obj:`(batch_size, num_heads, num_hashes, sequence_length)`) and the second being the previous `hidden_states` of shape :obj:`(batch_size, sequence_length, hidden_size)`). Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to speed up sequential decoding. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ @add_start_docstrings( "The bare Reformer Model transformer outputting raw hidden-states" "without any specific head on top.", REFORMER_START_DOCSTRING, ) class ReformerModel(ReformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config assert ( self.config.num_hidden_layers > 0 ), "`config.attn_layers` is empty. 
Select at least one attn layer form ['lsh', 'local']" self.embeddings = ReformerEmbeddings(config) self.encoder = ReformerEncoder(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/reformer-crime-and-punishment", output_type=ReformerModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, num_hashes=None, past_buckets_states=None, use_cache=None, output_hidden_states=None, output_attentions=None, return_dict=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() # noqa: F841 device = input_ids.device elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] # noqa: F841 device = inputs_embeds.device else: raise ValueError("You have to specify either input_ids or inputs_embeds") assert ( len(input_shape) == 2 ), "`input_ids` have be of shape `[batch_size, sequence_length]`, but got shape: {}".format(input_shape) if past_buckets_states is not None: assert not self.training, "`past_buckets_states` can only be used for inference, not for training`." # prepare head mask head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers, is_attention_chunked=True) # original sequence length for padding orig_sequence_length = input_shape[-1] # if needs padding least_common_mult_chunk_length = _get_least_common_mult_chunk_len(self.config) min_chunk_length = _get_min_chunk_len(self.config) must_pad_to_match_chunk_length = ( input_shape[-1] % least_common_mult_chunk_length != 0 and input_shape[-1] > min_chunk_length and past_buckets_states is None ) if must_pad_to_match_chunk_length: padding_length = least_common_mult_chunk_length - input_shape[-1] % least_common_mult_chunk_length if self.training is True: raise ValueError( "If training, sequence Length {} has to be a multiple of least common multiple chunk_length {}. 
Please consider padding the input to a length of {}.".format( input_shape[-1], least_common_mult_chunk_length, input_shape[-1] + padding_length ) ) # pad input input_ids, inputs_embeds, attention_mask, position_ids, input_shape = self._pad_to_mult_of_chunk_length( input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, input_shape=input_shape, padding_length=padding_length, padded_seq_length=least_common_mult_chunk_length, device=device, ) # start index for postion encoding depends on incremental decoding if past_buckets_states is not None: start_idx_pos_encodings = past_buckets_states[0][1].shape[1] else: start_idx_pos_encodings = 0 embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, start_idx_pos_encodings=start_idx_pos_encodings, ) encoder_outputs = self.encoder( hidden_states=embedding_output, head_mask=head_mask, attention_mask=attention_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, orig_sequence_length=orig_sequence_length, output_hidden_states=output_hidden_states, output_attentions=output_attentions, ) sequence_output = encoder_outputs.hidden_states # if padding was applied if must_pad_to_match_chunk_length: sequence_output = sequence_output[:, :orig_sequence_length] past_buckets_states = encoder_outputs.past_buckets_states if use_cache else None hidden_states = encoder_outputs.all_hidden_states if output_hidden_states else None attentions = encoder_outputs.all_attentions if output_attentions else None if not return_dict: return tuple(v for v in [sequence_output, past_buckets_states, hidden_states, attentions] if v is not None) return ReformerModelOutput( last_hidden_state=sequence_output, past_buckets_states=past_buckets_states, hidden_states=hidden_states, attentions=attentions, ) def _pad_to_mult_of_chunk_length( self, input_ids, inputs_embeds=None, attention_mask=None, position_ids=None, input_shape=None, padding_length=None, padded_seq_length=None, device=None, ): logger.info( "Input ids are automatically padded from {} to {} to be a multiple of `config.chunk_length`: {}".format( input_shape[-1], input_shape[-1] + padding_length, padded_seq_length ) ) padded_input_ids = torch.full( (input_shape[0], padding_length), self.config.pad_token_id, device=device, dtype=torch.long, ) # Extend `attention_mask` if attention_mask is not None: pad_attention_mask = torch.zeros(input_shape[0], padding_length, device=device, dtype=attention_mask.dtype) attention_mask = torch.cat([attention_mask, pad_attention_mask], dim=-1) else: attention_mask = torch.cat( [ torch.ones(input_shape, device=device, dtype=torch.uint8), torch.zeros((input_shape[0], padding_length), device=device, dtype=torch.uint8), ], dim=-1, ) # Extend `input_ids` with padding to match least common multiple chunk_length if input_ids is not None: input_ids = torch.cat([input_ids, padded_input_ids], dim=-1) input_shape = input_ids.size() # Pad position ids if given if position_ids is not None: padded_position_ids = torch.arange(input_shape[-1], padded_seq_length, dtype=torch.long, device=device) padded_position_ids = position_ids.unsqueeze(0).expand(input_shape[0], padding_length) position_ids = torch.cat([position_ids, padded_position_ids], dim=-1) # Extend `inputs_embeds` with padding to match least common multiple chunk_length if inputs_embeds is not None: padded_inputs_embeds = self.embeddings(padded_input_ids, position_ids) inputs_embeds = torch.cat([inputs_embeds, 
padded_inputs_embeds], dim=-2) input_shape = inputs_embeds.size() return input_ids, inputs_embeds, attention_mask, position_ids, input_shape @add_start_docstrings("""Reformer Model with a `language modeling` head on top. """, REFORMER_START_DOCSTRING) class ReformerModelWithLMHead(ReformerPreTrainedModel): def __init__(self, config): super().__init__(config) assert config.is_decoder, "If you want to use `ReformerModelWithLMHead` make sure that `is_decoder=True`." assert ( "local" not in self.config.attn_layers or config.local_num_chunks_after == 0 ), f"If causal mask is enabled, make sure that `config.local_num_chunks_after` is set to 0 and not {config.local_num_chunks_after}." assert ( "lsh" not in self.config.attn_layers or config.lsh_num_chunks_after == 0 ), f"If causal mask is enabled, make sure that `config.lsh_num_chunks_after` is set to 1 and not {config.lsh_num_chunks_after}." self.reformer = ReformerModel(config) self.lm_head = ReformerOnlyLMHead(config) self.init_weights() def get_output_embeddings(self): return self.lm_head.decoder @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/reformer-crime-and-punishment", output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, position_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, num_hashes=None, past_buckets_states=None, use_cache=None, output_hidden_states=None, output_attentions=None, return_dict=None, labels=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ..., config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]`` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict reformer_outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = reformer_outputs[0] logits = self.lm_head(sequence_output) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1)) if not return_dict: output = (logits,) + reformer_outputs[1:] return ((loss,) + output) if loss is not None else output return ReformerModelWithLMHeadOutput( loss=loss, logits=logits, past_buckets_states=reformer_outputs.past_buckets_states, hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, past, **kwargs): # only last token for inputs_ids if past is defined in kwargs if past is not None: input_ids = input_ids[:, -1:] inputs_dict = { "input_ids": input_ids, "past_buckets_states": past, "use_cache": kwargs["use_cache"], } if "num_hashes" in kwargs: inputs_dict["num_hashes"] = kwargs["num_hashes"] return inputs_dict def _reorder_cache(self, past, beam_idx): reord_past_buckets_states = [] for layer_past in past: # buckets if layer_past[0] is not None: 
reord_buckets = layer_past[0].index_select(0, beam_idx) else: reord_buckets = None # hidden states reord_hidden_states = layer_past[1].index_select(0, beam_idx) reord_past_buckets_states.append((reord_buckets, reord_hidden_states)) return reord_past_buckets_states @add_start_docstrings("""Reformer Model with a `language modeling` head on top. """, REFORMER_START_DOCSTRING) class ReformerForMaskedLM(ReformerPreTrainedModel): def __init__(self, config): super().__init__(config) assert ( not config.is_decoder ), "If you want to use `ReformerForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention." self.reformer = ReformerModel(config) self.lm_head = ReformerOnlyLMHead(config) self.init_weights() def get_output_embeddings(self): return self.lm_head.decoder @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/reformer-crime-and-punishment", output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, position_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, num_hashes=None, labels=None, output_hidden_states=None, output_attentions=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict reformer_outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, use_cache=False, # no causal mask output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = reformer_outputs[0] logits = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + reformer_outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=logits, hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions, ) @add_start_docstrings( """Reformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", REFORMER_START_DOCSTRING, ) class ReformerForSequenceClassification(ReformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.reformer = ReformerModel(config) self.classifier = ReformerClassificationHead(config) if config.is_decoder is True: logger.warning("You might want to disable causal masking for sequence classification") self.init_weights() @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/reformer-crime-and-punishment", output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, position_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, num_hashes=None, labels=None, output_hidden_states=None, output_attentions=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class ReformerClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(2 * config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, hidden_states, **kwargs): hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS]) hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states @add_start_docstrings( """Reformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA ( a linear layer on top of hidden-states output to compute `span start logits` and `span end logits`. 
""", REFORMER_START_DOCSTRING, ) class ReformerForQuestionAnswering(ReformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.reformer = ReformerModel(config) # 2 * config.hidden_size because we use reversible residual layers self.qa_outputs = nn.Linear(2 * config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/reformer-crime-and-punishment", output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, position_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, num_hashes=None, start_positions=None, end_positions=None, output_hidden_states=None, output_attentions=None, return_dict=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict reformer_outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, use_cache=False, # no causal mask output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = reformer_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + reformer_outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions, )
109823
41.468677
251
py
SLT-FAI
SLT-FAI-main/transformers/modeling_tf_xlm.py
# coding=utf-8 # Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 XLM model. """ import itertools import warnings from dataclasses import dataclass from typing import Optional, Tuple import numpy as np import tensorflow as tf from .activations_tf import get_tf_activation from .configuration_xlm import XLMConfig from .file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable, ) from .modeling_tf_outputs import ( TFBaseModelOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from .modeling_tf_utils import ( TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSequenceSummary, TFSharedEmbeddings, TFTokenClassificationLoss, get_initializer, keras_serializable, shape_list, ) from .tokenization_utils import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "XLMConfig" _TOKENIZER_FOR_DOC = "XLMTokenizer" TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST = [ "xlm-mlm-en-2048", "xlm-mlm-ende-1024", "xlm-mlm-enfr-1024", "xlm-mlm-enro-1024", "xlm-mlm-tlm-xnli15-1024", "xlm-mlm-xnli15-1024", "xlm-clm-enfr-1024", "xlm-clm-ende-1024", "xlm-mlm-17-1280", "xlm-mlm-100-1280", # See all XLM models at https://huggingface.co/models?filter=xlm ] def create_sinusoidal_embeddings(n_pos, dim, out): position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) out[:, 0::2] = tf.constant(np.sin(position_enc[:, 0::2])) out[:, 1::2] = tf.constant(np.cos(position_enc[:, 1::2])) def get_masks(slen, lengths, causal, padding_mask=None, dtype=tf.float32): """ Generate hidden states mask, and optionally an attention mask. 
""" bs = shape_list(lengths)[0] if padding_mask is not None: mask = padding_mask else: # assert lengths.max().item() <= slen alen = tf.range(slen) mask = tf.math.less(alen, lengths[:, tf.newaxis]) # attention mask is the same as mask, or triangular inferior attention (causal) if causal: attn_mask = tf.less_equal( tf.tile(alen[tf.newaxis, tf.newaxis, :], (bs, slen, 1)), alen[tf.newaxis, :, tf.newaxis] ) else: attn_mask = mask # sanity check # assert shape_list(mask) == [bs, slen] tf.debugging.assert_equal(shape_list(mask), [bs, slen]) assert causal is False or shape_list(attn_mask) == [bs, slen, slen] mask = tf.cast(mask, dtype=dtype) attn_mask = tf.cast(attn_mask, dtype=dtype) return mask, attn_mask class TFXLMMultiHeadAttention(tf.keras.layers.Layer): NEW_ID = itertools.count() def __init__(self, n_heads, dim, config, **kwargs): super().__init__(**kwargs) self.layer_id = next(TFXLMMultiHeadAttention.NEW_ID) self.dim = dim self.n_heads = n_heads self.output_attentions = config.output_attentions assert self.dim % self.n_heads == 0 self.q_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="q_lin") self.k_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="k_lin") self.v_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="v_lin") self.out_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="out_lin") self.dropout = tf.keras.layers.Dropout(config.attention_dropout) self.pruned_heads = set() def prune_heads(self, heads): raise NotImplementedError def call(self, input, mask, kv, cache, head_mask, output_attentions, training=False): """ Self-attention (if kv is None) or attention over source sentence (provided by kv). 
""" # Input is (bs, qlen, dim) # Mask is (bs, klen) (non-causal) or (bs, klen, klen) bs, qlen, dim = shape_list(input) if kv is None: klen = qlen if cache is None else cache["slen"] + qlen else: klen = shape_list(kv)[1] # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim) dim_per_head = tf.math.divide(self.dim, self.n_heads) dim_per_head = tf.cast(dim_per_head, dtype=tf.int32) mask_reshape = (bs, 1, qlen, klen) if len(shape_list(mask)) == 3 else (bs, 1, 1, klen) def shape(x): """ projection """ return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3)) def unshape(x): """ compute context """ return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head)) q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head) if kv is None: k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head) v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head) elif cache is None or self.layer_id not in cache: k = v = kv k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head) v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head) if cache is not None: if self.layer_id in cache: if kv is None: k_, v_ = cache[self.layer_id] k = tf.concat([k_, k], axis=2) # (bs, n_heads, klen, dim_per_head) v = tf.concat([v_, v], axis=2) # (bs, n_heads, klen, dim_per_head) else: k, v = cache[self.layer_id] cache[self.layer_id] = (k, v) q = tf.cast(q, dtype=tf.float32) q = tf.multiply(q, tf.math.rsqrt(tf.cast(dim_per_head, dtype=tf.float32))) # (bs, n_heads, qlen, dim_per_head) k = tf.cast(k, dtype=q.dtype) scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, qlen, klen) mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen) # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen) mask = tf.cast(mask, dtype=scores.dtype) scores = scores - 1e30 * (1.0 - mask) weights = tf.nn.softmax(scores, axis=-1) # (bs, n_heads, qlen, klen) weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen) # Mask heads if we want to if head_mask is not None: weights = weights * head_mask context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head) context = unshape(context) # (bs, qlen, dim) outputs = (self.out_lin(context),) if output_attentions: outputs = outputs + (weights,) return outputs class TFXLMTransformerFFN(tf.keras.layers.Layer): def __init__(self, in_dim, dim_hidden, out_dim, config, **kwargs): super().__init__(**kwargs) self.lin1 = tf.keras.layers.Dense(dim_hidden, kernel_initializer=get_initializer(config.init_std), name="lin1") self.lin2 = tf.keras.layers.Dense(out_dim, kernel_initializer=get_initializer(config.init_std), name="lin2") self.act = get_tf_activation("gelu") if config.gelu_activation else get_tf_activation("relu") self.dropout = tf.keras.layers.Dropout(config.dropout) def call(self, input, training=False): x = self.lin1(input) x = self.act(x) x = self.lin2(x) x = self.dropout(x, training=training) return x @keras_serializable class TFXLMMainLayer(tf.keras.layers.Layer): config_class = XLMConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.return_dict = config.use_return_dict # encoder / decoder, output layer self.is_encoder = config.is_encoder self.is_decoder = not config.is_encoder if self.is_decoder: raise NotImplementedError("Currently XLM can only be used as an encoder") # self.with_output = 
with_output self.causal = config.causal # dictionary / languages self.n_langs = config.n_langs self.use_lang_emb = config.use_lang_emb self.n_words = config.n_words self.eos_index = config.eos_index self.pad_index = config.pad_index # self.dico = dico # self.id2lang = config.id2lang # self.lang2id = config.lang2id # assert len(self.dico) == self.n_words # assert len(self.id2lang) == len(self.lang2id) == self.n_langs # model parameters self.dim = config.emb_dim # 512 by default self.hidden_dim = self.dim * 4 # 2048 by default self.n_heads = config.n_heads # 8 by default self.n_layers = config.n_layers assert self.dim % self.n_heads == 0, "transformer dim must be a multiple of n_heads" # embeddings self.dropout = tf.keras.layers.Dropout(config.dropout) self.attention_dropout = tf.keras.layers.Dropout(config.attention_dropout) self.position_embeddings = tf.keras.layers.Embedding( config.max_position_embeddings, self.dim, embeddings_initializer=get_initializer(config.embed_init_std), name="position_embeddings", ) if config.sinusoidal_embeddings: raise NotImplementedError # create_sinusoidal_embeddings(config.max_position_embeddings, self.dim, out=self.position_embeddings.weight) if config.n_langs > 1 and config.use_lang_emb: self.lang_embeddings = tf.keras.layers.Embedding( self.n_langs, self.dim, embeddings_initializer=get_initializer(config.embed_init_std), name="lang_embeddings", ) self.embeddings = TFSharedEmbeddings( self.n_words, self.dim, initializer_range=config.embed_init_std, name="embeddings" ) # padding_idx=self.pad_index) self.layer_norm_emb = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm_emb") # transformer layers self.attentions = [] self.layer_norm1 = [] self.ffns = [] self.layer_norm2 = [] # if self.is_decoder: # self.layer_norm15 = [] # self.encoder_attn = [] for i in range(self.n_layers): self.attentions.append( TFXLMMultiHeadAttention(self.n_heads, self.dim, config=config, name="attentions_._{}".format(i)) ) self.layer_norm1.append( tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1_._{}".format(i)) ) # if self.is_decoder: # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout)) self.ffns.append( TFXLMTransformerFFN(self.dim, self.hidden_dim, self.dim, config=config, name="ffns_._{}".format(i)) ) self.layer_norm2.append( tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2_._{}".format(i)) ) if hasattr(config, "pruned_heads"): pruned_heads = config.pruned_heads.copy().items() config.pruned_heads = {} for layer, heads in pruned_heads: if self.attentions[int(layer)].n_heads == config.n_heads: self.prune_heads({int(layer): list(map(int, heads))}) def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = value.shape[0] def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError def _prune_heads(self, heads_to_prune): """Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError def call( self, inputs, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, lengths=None, cache=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): # removed: src_enc=None, src_len=None if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask langs = inputs[2] if len(inputs) > 2 else langs token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids position_ids = inputs[4] if len(inputs) > 4 else position_ids lengths = inputs[5] if len(inputs) > 5 else lengths cache = inputs[6] if len(inputs) > 6 else cache head_mask = inputs[7] if len(inputs) > 7 else head_mask inputs_embeds = inputs[8] if len(inputs) > 8 else inputs_embeds output_attentions = inputs[9] if len(inputs) > 9 else output_attentions output_hidden_states = inputs[10] if len(inputs) > 10 else output_hidden_states return_dict = inputs[11] if len(inputs) > 11 else return_dict assert len(inputs) <= 12, "Too many inputs." elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") attention_mask = inputs.get("attention_mask", attention_mask) langs = inputs.get("langs", langs) token_type_ids = inputs.get("token_type_ids", token_type_ids) position_ids = inputs.get("position_ids", position_ids) lengths = inputs.get("lengths", lengths) cache = inputs.get("cache", cache) head_mask = inputs.get("head_mask", head_mask) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) return_dict = inputs.get("return_dict", return_dict) assert len(inputs) <= 12, "Too many inputs." 
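# `inputs` may be a single tensor, an ordered tuple/list, or a dict/BatchEncoding; any argument not
# present in `inputs` falls back to the matching keyword argument, and the plain-tensor case below
# treats `inputs` itself as `input_ids`.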
else: input_ids = inputs output_attentions = output_attentions if output_attentions is not None else self.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states return_dict = return_dict if return_dict is not None else self.return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: bs, slen = shape_list(input_ids) elif inputs_embeds is not None: bs, slen = shape_list(inputs_embeds)[:2] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if lengths is None: if input_ids is not None: lengths = tf.reduce_sum(tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=tf.int32), axis=1) else: lengths = tf.convert_to_tensor([slen] * bs, tf.int32) # mask = input_ids != self.pad_index # check inputs # assert shape_list(lengths)[0] == bs tf.debugging.assert_equal( shape_list(lengths)[0], bs ), f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched" # assert lengths.max().item() <= slen # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0 # assert (src_enc is None) == (src_len is None) # if src_enc is not None: # assert self.is_decoder # assert src_enc.size(0) == bs # generate masks mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask) # if self.is_decoder and src_enc is not None: # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None] # position_ids if position_ids is None: position_ids = tf.expand_dims(tf.range(slen), axis=0) else: # assert shape_list(position_ids) == [bs, slen] # (slen, bs) tf.debugging.assert_equal( shape_list(position_ids), [bs, slen] ), f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched" # position_ids = position_ids.transpose(0, 1) # langs if langs is not None: # assert shape_list(langs) == [bs, slen] # (slen, bs) tf.debugging.assert_equal( shape_list(langs), [bs, slen] ), f"Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched" # langs = langs.transpose(0, 1) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.n_layers # do not recompute cached elements if cache is not None and input_ids is not None: _slen = slen - cache["slen"] input_ids = input_ids[:, -_slen:] position_ids = position_ids[:, -_slen:] if langs is not None: langs = langs[:, -_slen:] mask = mask[:, -_slen:] attn_mask = attn_mask[:, -_slen:] # embeddings if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids) tensor = inputs_embeds + self.position_embeddings(position_ids) if langs is not None and self.use_lang_emb and self.n_langs > 1: tensor = tensor + self.lang_embeddings(langs) if token_type_ids is not None: tensor = tensor + self.embeddings(token_type_ids) tensor = self.layer_norm_emb(tensor) tensor = self.dropout(tensor, training=training) tensor = tensor * mask[..., tf.newaxis] # transformer layers hidden_states = () if output_hidden_states else None attentions = () if output_attentions else None for i in range(self.n_layers): if output_hidden_states: hidden_states = hidden_states + 
(tensor,) # self attention attn_outputs = self.attentions[i]( tensor, attn_mask, None, cache, head_mask[i], output_attentions, training=training ) attn = attn_outputs[0] if output_attentions: attentions = attentions + (attn_outputs[1],) attn = self.dropout(attn, training=training) tensor = tensor + attn tensor = self.layer_norm1[i](tensor) # encoder attention (for decoder only) # if self.is_decoder and src_enc is not None: # attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache) # attn = F.dropout(attn, p=self.dropout, training=self.training) # tensor = tensor + attn # tensor = self.layer_norm15[i](tensor) # FFN tensor = tensor + self.ffns[i](tensor) tensor = self.layer_norm2[i](tensor) tensor = tensor * mask[..., tf.newaxis] # Add last hidden state if output_hidden_states: hidden_states = hidden_states + (tensor,) # update cache length if cache is not None: cache["slen"] += tensor.size(1) # move back sequence length to dimension 0 # tensor = tensor.transpose(0, 1) if not return_dict: return tuple(v for v in [tensor, hidden_states, attentions] if v is not None) return TFBaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions) class TFXLMPreTrainedModel(TFPreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = XLMConfig base_model_prefix = "transformer" @property def dummy_inputs(self): # Sometimes XLM has language embeddings so don't forget to build them as well if needed inputs_list = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]) attns_list = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]]) if self.config.use_lang_emb and self.config.n_langs > 1: langs_list = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]]) else: langs_list = None return {"input_ids": inputs_list, "attention_mask": attns_list, "langs": langs_list} # Remove when XLMWithLMHead computes loss like other LM models @dataclass class TFXLMWithLMHeadModelOutput(ModelOutput): """ Base class for :class:`~transformers.TFXLMWithLMHeadModel` outputs. Args: logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None XLM_START_DOCSTRING = r""" This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) 
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. note:: TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all the tensors in the first argument of the model call function: :obj:`model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: :obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Parameters: config (:class:`~transformers.XLMConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ XLM_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BertTokenizer`. See :func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ langs (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`({0})`, `optional`): A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the `language name to language id` mapping is in :obj:`model.config.lang2id` (which is a dictionary strring to int) and the `language id to language name` mapping is in :obj:`model.config.id2lang` (dictionary int to string). See usage examples detailed in the :doc:`multilingual documentation <../multilingual>`. ttoken_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`__ position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`__ lengths (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size,)`, `optional`): Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use `attention_mask` for the same result (see above), kept here for compatbility. Indices selected in ``[0, ..., input_ids.size(-1)]``. cache (:obj:`Dict[str, tf.Tensor]`, `optional`): Dictionary string to ``torch.FloatTensor`` that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see :obj:`cache` output below). Can be used to speed up sequential decoding. The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states. head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. training (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare XLM Model transformer outputing raw hidden-states without any specific head on top.", XLM_START_DOCSTRING, ) class TFXLMModel(TFXLMPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLMMainLayer(config, name="transformer") @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlm-mlm-en-2048", output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) return outputs class TFXLMPredLayer(tf.keras.layers.Layer): """ Prediction layer (cross_entropy or adaptive_softmax). 
""" def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.asm = config.asm self.n_words = config.n_words self.pad_index = config.pad_index if config.asm is False: self.input_embeddings = input_embeddings else: raise NotImplementedError # self.proj = nn.AdaptiveLogSoftmaxWithLoss( # in_features=dim, # n_classes=config.n_words, # cutoffs=config.asm_cutoffs, # div_value=config.asm_div_value, # head_bias=True, # default is False # ) def build(self, input_shape): # The output weights are the same as the input embeddings, but there is an output-only bias for each token. self.bias = self.add_weight(shape=(self.n_words,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def call(self, hidden_states): hidden_states = self.input_embeddings(hidden_states, mode="linear") hidden_states = hidden_states + self.bias return hidden_states @add_start_docstrings( """The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, XLM_START_DOCSTRING, ) class TFXLMWithLMHeadModel(TFXLMPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLMMainLayer(config, name="transformer") self.pred_layer = TFXLMPredLayer(config, self.transformer.embeddings, name="pred_layer_._proj") def get_output_embeddings(self): return self.pred_layer.input_embeddings def prepare_inputs_for_generation(self, inputs, **kwargs): mask_token_id = self.config.mask_token_id lang_id = self.config.lang_id effective_batch_size = inputs.shape[0] mask_token = tf.ones((effective_batch_size, 1), dtype=tf.int32) * mask_token_id inputs = tf.concat([inputs, mask_token], axis=1) if lang_id is not None: langs = tf.ones_like(inputs) * lang_id else: langs = None return {"inputs": inputs, "langs": langs} @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlm-mlm-en-2048", output_type=TFXLMWithLMHeadModelOutput, config_class=_CONFIG_FOR_DOC, ) def call(self, inputs, **kwargs): return_dict = kwargs.get("return_dict") return_dict = return_dict if return_dict is not None else self.transformer.return_dict transformer_outputs = self.transformer(inputs, **kwargs) output = transformer_outputs[0] outputs = self.pred_layer(output) if not return_dict: return (outputs,) + transformer_outputs[1:] return TFXLMWithLMHeadModelOutput( logits=outputs, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions ) @add_start_docstrings( """XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", XLM_START_DOCSTRING, ) class TFXLMForSequenceClassification(TFXLMPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFXLMMainLayer(config, name="transformer") self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary") @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlm-mlm-en-2048", output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, lengths=None, cache=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.transformer.return_dict if isinstance(inputs, (tuple, list)): labels = inputs[12] if len(inputs) > 12 else labels if len(inputs) > 12: inputs = inputs[:12] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) transformer_outputs = self.transformer( inputs, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) output = transformer_outputs[0] logits = self.sequence_summary(output) loss = None if labels is None else self.compute_loss(labels, logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """XLM Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, XLM_START_DOCSTRING, ) class TFXLMForMultipleChoice(TFXLMPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLMMainLayer(config, name="transformer") self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary") self.logits_proj = tf.keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj" ) @property def dummy_inputs(self): """Dummy inputs to build the network. 
Returns: tf.Tensor with dummy inputs """ return { "input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS), "langs": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS), } @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlm-mlm-en-2048", output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, lengths=None, cache=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See :obj:`input_ids` above) """ if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask langs = inputs[2] if len(inputs) > 2 else langs token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids position_ids = inputs[4] if len(inputs) > 4 else position_ids lengths = inputs[5] if len(inputs) > 5 else lengths cache = inputs[6] if len(inputs) > 6 else cache head_mask = inputs[7] if len(inputs) > 7 else head_mask inputs_embeds = inputs[8] if len(inputs) > 8 else inputs_embeds output_attentions = inputs[9] if len(inputs) > 9 else output_attentions output_hidden_states = inputs[10] if len(inputs) > 10 else output_hidden_states return_dict = inputs[11] if len(inputs) > 11 else return_dict labels = inputs[12] if len(inputs) > 12 else labels assert len(inputs) <= 13, "Too many inputs." elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") attention_mask = inputs.get("attention_mask", attention_mask) langs = inputs.get("langs", langs) token_type_ids = inputs.get("token_type_ids", token_type_ids) position_ids = inputs.get("position_ids", position_ids) lengths = inputs.get("lengths", lengths) cache = inputs.get("cache", cache) head_mask = inputs.get("head_mask", head_mask) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) return_dict = inputs.get("return_dict", return_dict) labels = inputs.get("labels", labels) assert len(inputs) <= 13, "Too many inputs." 
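# Same flexible input handling as TFXLMMainLayer.call: tuple/list unpacked by position,
# dict/BatchEncoding read by key, or a single tensor treated as `input_ids`; `labels` rides along
# at position 12 / key "labels".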
else: input_ids = inputs return_dict = return_dict if return_dict is not None else self.transformer.return_dict if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None flat_langs = tf.reshape(langs, (-1, seq_length)) if langs is not None else None flat_inputs_embeds = ( tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) if lengths is not None: warnings.warn( "The `lengths` parameter cannot be used with the XLM multiple choice models. Please use the " "attention mask instead.", FutureWarning, ) lengths = None transformer_outputs = self.transformer( flat_input_ids, flat_attention_mask, flat_langs, flat_token_type_ids, flat_position_ids, lengths, cache, head_mask, flat_inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) output = transformer_outputs[0] logits = self.sequence_summary(output) logits = self.logits_proj(logits) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """XLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, XLM_START_DOCSTRING, ) class TFXLMForTokenClassification(TFXLMPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFXLMMainLayer(config, name="transformer") self.dropout = tf.keras.layers.Dropout(config.dropout) self.classifier = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.init_std), name="classifier" ) @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlm-mlm-en-2048", output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, lengths=None, cache=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. 
""" return_dict = return_dict if return_dict is not None else self.transformer.return_dict if isinstance(inputs, (tuple, list)): labels = inputs[12] if len(inputs) > 12 else labels if len(inputs) > 12: inputs = inputs[:12] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) transformer_outputs = self.transformer( inputs, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = transformer_outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.compute_loss(labels, logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, XLM_START_DOCSTRING, ) class TFXLMForQuestionAnsweringSimple(TFXLMPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLMMainLayer(config, name="transformer") self.qa_outputs = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.init_std), name="qa_outputs" ) @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlm-mlm-en-2048", output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, lengths=None, cache=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, start_positions=None, end_positions=None, training=False, ): r""" start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.transformer.return_dict if isinstance(inputs, (tuple, list)): start_positions = inputs[12] if len(inputs) > 12 else start_positions end_positions = inputs[13] if len(inputs) > 13 else end_positions if len(inputs) > 12: inputs = inputs[:12] elif isinstance(inputs, (dict, BatchEncoding)): start_positions = inputs.pop("start_positions", start_positions) end_positions = inputs.pop("end_positions", start_positions) transformer_outputs = self.transformer( inputs, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = transformer_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
51,920
41.523342
160
py
SLT-FAI
SLT-FAI-main/transformers/configuration_blenderbot.py
#!/usr/bin/env python3 # coding=utf-8 # Copyright (c) Facebook, Inc. and Huggingface, 2020 # # This source code is licensed under the MIT license found in the; # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # LICENSE file in the root directory of this source tree. """BlenderbotConfig has the same signature as BartConfig. We only rewrite the signature in order to document blenderbot-90M defaults.""" from .configuration_bart import BartConfig BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/blenderbot-3B": "https://cdn.huggingface.co/facebook/blenderbot-3B/config.json", "facebook/blenderbot-90M": "https://cdn.huggingface.co/facebook/blenderbot-90M/config.json", } class BlenderbotConfig(BartConfig): r""" This is the configuration class to store the configuration of a :class:`~transformers.BlenderbotForConditionalGeneration`. It inherits from :class:`~transformers.BartConfig` and has the same signature with different defaults. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 54944): Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.BlenderbotForConditionalGeneration`. d_model (:obj:`int`, `optional`, defaults to 512): Dimensionality of the layers and the pooler layer. encoder_layers (:obj:`int`, `optional`, defaults to 8): Number of encoder layers, 6 are used for the `blenderbot-90M` model. decoder_layers (:obj:`int`, `optional`, defaults to 8): Number of decoder layers, 6 are used for the `blenderbot-90M` model. encoder_attention_heads (:obj:`int`, `optional`, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (:obj:`int`, `optional`, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (:obj:`int`, `optional`, defaults to 2048): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (:obj:`int`, `optional`, defaults to 2048): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"swish"` and :obj:`"gelu_new"` are supported. dropout (:obj:`float`, `optional`, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout ratio for classifier. 
max_position_embeddings (:obj:`int`, `optional`, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. add_bias_logits (:obj:`bool`, `optional`, defaults to :obj:`False`): This should be completed, specific to marian. normalize_before (:obj:`bool`, `optional`, defaults to :obj:`False`): Call layernorm before attention ops. normalize_embedding (:obj:`bool`, `optional`, defaults to :obj:`True`): Call layernorm after embeddings. static_position_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`): Don't learn positional embeddings, use sinusoidal. add_final_layer_norm (:obj:`bool`, `optional`, defaults to :obj:`False`): Why not add another layernorm? do_blenderbot_90_layernorm (:obj:`bool`, `optional`, defaults to :obj:`True`): Blenderbot-90m checkpoint uses `layernorm_embedding` one line earlier in the decoder. scale_embedding (:obj:`bool`, `optional`, defaults to :obj:`False`): Scale embeddings by diving by sqrt(d_model). eos_token_id (:obj:`int`, `optional`, defaults to 2) End of stream token id. pad_token_id (:obj:`int`, `optional`, defaults to 1) Padding token id. bos_token_id (:obj:`int`, `optional`, defaults to 0) Beginning of stream token id. encoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0): The LayerDrop probability for the encoder. See the `LayerDrop paper <see https://arxiv.org/abs/1909.11556>`__ for more details. decoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0): The LayerDrop probability for the decoder. See the `LayerDrop paper <see https://arxiv.org/abs/1909.11556>`__ for more details. extra_pos_embeddings: (:obj:`int`, `optional`, defaults to 2): How many extra learned positional embeddings to use. Should be set to :obj:`pad_token_id+1`. is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether this is an encoder/decoder model. 
force_bos_token_to_be_generated (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force BOS token to be generated at step 1 (after ``decoder_start_token_id``), """ model_type = "blenderbot" def __init__( self, activation_dropout=0.0, extra_pos_embeddings=0, activation_function="gelu", vocab_size=54944, d_model=512, encoder_ffn_dim=2048, encoder_layers=8, encoder_attention_heads=16, decoder_ffn_dim=2048, decoder_layers=8, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, attention_dropout=0.0, dropout=0.1, max_position_embeddings=512, classifier_dropout=0.0, is_encoder_decoder=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, normalize_before=False, add_final_layer_norm=False, do_blenderbot_90_layernorm=True, scale_embedding=False, normalize_embedding=True, static_position_embeddings=False, add_bias_logits=False, force_bos_token_to_be_generated=False, **common_kwargs ): r""" Examples:: >>> from transformers import BlenderbotConfig >>> config = BlenderbotConfig.from_pretrained('facebook/blenderbot-90M') """ if "hidden_size" in common_kwargs: raise ValueError("hidden size is called d_model") super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, vocab_size=vocab_size, d_model=d_model, encoder_ffn_dim=encoder_ffn_dim, encoder_layers=encoder_layers, encoder_layerdrop=encoder_layerdrop, encoder_attention_heads=encoder_attention_heads, decoder_layerdrop=decoder_layerdrop, decoder_ffn_dim=decoder_ffn_dim, decoder_layers=decoder_layers, normalize_before=normalize_before, normalize_embedding=normalize_embedding, static_position_embeddings=static_position_embeddings, add_bias_logits=add_bias_logits, force_bos_token_to_be_generated=force_bos_token_to_be_generated, do_blenderbot_90_layernorm=do_blenderbot_90_layernorm, add_final_layer_norm=add_final_layer_norm, scale_embedding=scale_embedding, attention_dropout=attention_dropout, dropout=dropout, classifier_dropout=classifier_dropout, activation_dropout=activation_dropout, max_position_embeddings=max_position_embeddings, extra_pos_embeddings=extra_pos_embeddings, activation_function=activation_function, decoder_attention_heads=decoder_attention_heads, **common_kwargs, )
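Because BlenderbotConfig only overrides BartConfig's defaults, constructing it without arguments reproduces the blenderbot-90M-style sizes spelled out in its signature. A quick sketch, assuming the script runs from the repository root so the vendored transformers package (and hence transformers.configuration_blenderbot) is importable:

from transformers.configuration_blenderbot import BlenderbotConfig

config = BlenderbotConfig()                     # no-argument construction keeps the documented defaults
print(config.model_type)                        # blenderbot
print(config.d_model)                           # 512
print(config.encoder_layers)                    # 8
print(config.do_blenderbot_90_layernorm)        # True (BartConfig defaults this to False)

# `hidden_size` is deliberately rejected in favour of `d_model`:
try:
    BlenderbotConfig(hidden_size=1024)
except ValueError as err:
    print(err)                                  # hidden size is called d_model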
9,322
51.083799
136
py
SLT-FAI
SLT-FAI-main/transformers/configuration_bart.py
# coding=utf-8 # Copyright 2020 The Fairseq Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ BART configuration """ from .configuration_utils import PretrainedConfig from .utils import logging logger = logging.get_logger(__name__) BART_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/bart-base": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-base/config.json", "facebook/bart-large": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large/config.json", "facebook/bart-large-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-mnli/config.json", "facebook/bart-large-cnn": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-cnn/config.json", "facebook/bart-large-xsum": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-xsum/config.json", "facebook/mbart-large-en-ro": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/mbart-large-en-ro/config.json", "yjernite/bart_eli5": "https://s3.amazonaws.com/models.huggingface.co/bert/yjernite/bart_eli5/config.json", } class BartConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a :class:`~transformers.BartModel`. It is used to instantiate a BART model according to the specified arguments, defining the model architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 50265): Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.BartModel`. d_model (:obj:`int`, `optional`, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (:obj:`int`, `optional`, defaults to 12): Number of encoder layers, 6 are used for the `bart-base` model. decoder_layers (:obj:`int`, `optional`, defaults to 12): Number of decoder layers, 6 are used for the `bart-base` model. encoder_attention_heads (:obj:`int`, `optional`, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (:obj:`int`, `optional`, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"swish"` and :obj:`"gelu_new"` are supported. 
dropout (:obj:`float`, `optional`, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout ratio for classifier. max_position_embeddings (:obj:`int`, `optional`, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. add_bias_logits (:obj:`bool`, `optional`, defaults to :obj:`False`): This should be completed, specific to marian. normalize_before (:obj:`bool`, `optional`, defaults to :obj:`False`): Call layernorm before attention ops. normalize_embedding (:obj:`bool`, `optional`, defaults to :obj:`True`): Call layernorm after embeddings. static_position_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`): Don't learn positional embeddings, use sinusoidal. add_final_layer_norm (:obj:`bool`, `optional`, defaults to :obj:`False`): Why not add another layernorm? do_blenderbot_90_layernorm (:obj:`bool`, `optional`, defaults to :obj:`False`): Blenderbot-90m checkpoint uses `layernorm_embedding` one line earlier in the decoder. scale_embedding (:obj:`bool`, `optional`, defaults to :obj:`False`): Scale embeddings by diving by sqrt(d_model). eos_token_id (:obj:`int`, `optional`, defaults to 2) End of stream token id. pad_token_id (:obj:`int`, `optional`, defaults to 1) Padding token id. bos_token_id (:obj:`int`, `optional`, defaults to 0) Beginning of stream token id. encoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0): The LayerDrop probability for the encoder. See the `LayerDrop paper <see https://arxiv.org/abs/1909.11556>`__ for more details. decoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0): The LayerDrop probability for the decoder. See the `LayerDrop paper <see https://arxiv.org/abs/1909.11556>`__ for more details. extra_pos_embeddings: (:obj:`int`, `optional`, defaults to 2): How many extra learned positional embeddings to use. Should be set to :obj:`pad_token_id+1`. num_labels: (:obj:`int`, `optional`, defaults to 3): The number of labels to use in :class:`~transformers.BartForSequenceClassification`. is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether this is an encoder/decoder model. force_bos_token_to_be_generated (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force BOS token to be generated at step 1 (after ``decoder_start_token_id``), only :obj:`True` for `bart-large-cnn`. 
""" model_type = "bart" def __init__( self, activation_dropout=0.0, extra_pos_embeddings=2, activation_function="gelu", vocab_size=50265, d_model=1024, encoder_ffn_dim=4096, encoder_layers=12, encoder_attention_heads=16, decoder_ffn_dim=4096, decoder_layers=12, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, attention_dropout=0.0, dropout=0.1, max_position_embeddings=1024, init_std=0.02, classifier_dropout=0.0, num_labels=3, is_encoder_decoder=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, normalize_before=False, add_final_layer_norm=False, do_blenderbot_90_layernorm=False, scale_embedding=False, normalize_embedding=True, static_position_embeddings=False, add_bias_logits=False, force_bos_token_to_be_generated=False, **common_kwargs ): r""" :class:`~transformers.BartConfig` is the configuration class for `BartModel`. Examples:: >>> from transformers import BartConfig, BartModel >>> config = BartConfig.from_pretrained('facebook/bart-large') >>> model = BartModel(config) """ if "hidden_size" in common_kwargs: raise ValueError("hidden size is called d_model") super().__init__( num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **common_kwargs, ) self.vocab_size = vocab_size self.d_model = d_model # encoder_embed_dim and decoder_embed_dim self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = self.num_hidden_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.max_position_embeddings = max_position_embeddings self.init_std = init_std # Normal(0, this parameter) self.activation_function = activation_function # Params introduced for Mbart self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.normalize_embedding = normalize_embedding # True for mbart, False otherwise self.normalize_before = normalize_before # combo of fairseq's encoder_ and decoder_normalize_before self.add_final_layer_norm = add_final_layer_norm # Params introduced for Marian self.add_bias_logits = add_bias_logits self.static_position_embeddings = static_position_embeddings # 3 Types of Dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.dropout = dropout # Classifier stuff self.classifier_dropout = classifier_dropout # pos embedding offset self.extra_pos_embeddings = extra_pos_embeddings # bart has a hack that offsets positional embeddings by 2, other models don't do this self.force_bos_token_to_be_generated = force_bos_token_to_be_generated self.do_blenderbot_90_layernorm = do_blenderbot_90_layernorm @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model def is_valid_mbart(self) -> bool: """Is the configuration aligned with the MBART paper.""" if self.normalize_before and self.add_final_layer_norm and self.scale_embedding: return True if self.normalize_before or self.add_final_layer_norm or self.scale_embedding: logger.info("This configuration is a mixture of MBART and BART settings") return False
11,397
49.433628
127
py
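The configuration record above defines the BART hyperparameters together with a few derived properties (hidden_size, num_attention_heads, is_valid_mbart). A minimal usage sketch, not part of the repository and assuming the vendored transformers package is importable, could look like this:

# Illustrative sketch only: build a tiny BartConfig locally and read the
# derived properties defined in the class. No pretrained weights are fetched.
from transformers.configuration_bart import BartConfig

config = BartConfig(
    vocab_size=1000,
    d_model=64,
    encoder_layers=2,
    decoder_layers=2,
    encoder_attention_heads=4,
    decoder_attention_heads=4,
    encoder_ffn_dim=128,
    decoder_ffn_dim=128,
)
print(config.hidden_size)          # 64, property alias of d_model
print(config.num_attention_heads)  # 4, property alias of encoder_attention_heads
print(config.is_valid_mbart())     # False: plain BART settings, not the mBART combination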
SLT-FAI
SLT-FAI-main/transformers/modeling_bart.py
# coding=utf-8 # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BART model, ported from the fairseq repo.""" import math import random import warnings from typing import Dict, List, Optional, Tuple import numpy as np import torch import torch.nn.functional as F from torch import Tensor, nn from torch.nn import CrossEntropyLoss from .activations import ACT2FN from .configuration_bart import BartConfig from .file_utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings, ) from .modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPast, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, ) from .modeling_utils import PreTrainedModel from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "BartConfig" _TOKENIZER_FOR_DOC = "BartTokenizer" BART_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/bart-base", "facebook/bart-large", "facebook/bart-large-mnli", "facebook/bart-large-cnn", "facebook/bart-large-xsum", "facebook/mbart-large-en-ro", ] # This list is incomplete. See all BART models at https://huggingface.co/models?filter=bart BART_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BartConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BART_GENERATION_EXAMPLE = r""" Summarization example:: >>> from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig >>> # see ``examples/summarization/bart/run_eval.py`` for a longer example >>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn') >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn') >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs." 
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt') >>> # Generate Summary >>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True) >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids]) """ BART_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.BartTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Provide for translation and summarization training. By default, the model will create this tensor by shifting the :obj:`input_ids` to the right, following the paper. decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`): Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read :func:`modeling_bart._prepare_decoder_inputs` and modify to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more information on the default strategy. encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`): Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`: :obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids`` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. 
""" def invert_mask(attention_mask): """Turns 1->0, 0->1, False->True, True-> False""" assert attention_mask.dim() == 2 return attention_mask.eq(0) def _prepare_bart_decoder_inputs( config, input_ids, decoder_input_ids=None, decoder_padding_mask=None, causal_mask_dtype=torch.float32 ): """Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if none are provided. This mimics the default behavior in fairseq. To override it pass in masks. Note: this is not called during generation """ pad_token_id = config.pad_token_id if decoder_input_ids is None: decoder_input_ids = shift_tokens_right(input_ids, pad_token_id) bsz, tgt_len = decoder_input_ids.size() if decoder_padding_mask is None: decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id) else: decoder_padding_mask = invert_mask(decoder_padding_mask) if decoder_padding_mask is not None and decoder_padding_mask.shape[1] > 1: # never mask leading token, even if it is pad decoder_padding_mask[:, 0] = decoder_padding_mask[:, 1] tmp = fill_with_neg_inf(torch.zeros(tgt_len, tgt_len)) mask = torch.arange(tmp.size(-1)) tmp.masked_fill_(mask < (mask + 1).view(tmp.size(-1), 1), 0) causal_mask = tmp.to(dtype=causal_mask_dtype, device=decoder_input_ids.device) return decoder_input_ids, decoder_padding_mask, causal_mask class PretrainedBartModel(PreTrainedModel): config_class = BartConfig base_model_prefix = "model" def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, SinusoidalPositionalEmbedding): pass elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = { "attention_mask": input_ids.ne(pad_token), "input_ids": input_ids, } return dummy_inputs def _make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer # Helper Functions, mostly for making masks def _check_shapes(shape_1, shape2): if shape_1 != shape2: raise AssertionError("shape mismatch: {} != {}".format(shape_1, shape2)) def shift_tokens_right(input_ids, pad_token_id): """Shift input ids one token to the right, and wrap the last non pad token (usually <eos>).""" prev_output_tokens = input_ids.clone() index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1) prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze() prev_output_tokens[:, 1:] = input_ids[:, :-1] return prev_output_tokens def make_padding_mask(input_ids, padding_idx=1): """True for pad tokens""" padding_mask = input_ids.eq(padding_idx) if not padding_mask.any(): padding_mask = None return padding_mask # Helper Modules class EncoderLayer(nn.Module): def __init__(self, config: BartConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = Attention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout) self.normalize_before = config.normalize_before self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 
= nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def forward(self, x, encoder_padding_mask, output_attentions=False): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. for t_tgt, t_src is excluded (or masked out), =0 means it is included in attention Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) x, attn_weights = self.self_attn( query=x, key=x, key_padding_mask=encoder_padding_mask, output_attentions=output_attentions ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x if not self.normalize_before: x = self.self_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x if not self.normalize_before: x = self.final_layer_norm(x) if torch.isinf(x).any() or torch.isnan(x).any(): clamp_value = torch.finfo(x.dtype).max - 1000 x = torch.clamp(x, min=-clamp_value, max=clamp_value) return x, attn_weights class BartEncoder(nn.Module): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a :class:`EncoderLayer`. Args: config: BartConfig """ def __init__(self, config: BartConfig, embed_tokens): super().__init__() self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = embed_tokens.embedding_dim self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.padding_idx = embed_tokens.padding_idx self.max_source_positions = config.max_position_embeddings self.embed_tokens = embed_tokens if config.static_position_embeddings: self.embed_positions = SinusoidalPositionalEmbedding( config.max_position_embeddings, embed_dim, self.padding_idx ) else: self.embed_positions = LearnedPositionalEmbedding( config.max_position_embeddings, embed_dim, self.padding_idx, config.extra_pos_embeddings, ) self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = LayerNorm(embed_dim) if config.normalize_embedding else nn.Identity() # mbart has one extra layer_norm self.layer_norm = LayerNorm(config.d_model) if config.add_final_layer_norm else None def forward( self, input_ids, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=False ): """ Args: input_ids (LongTensor): tokens in the source language of shape `(batch, src_len)` attention_mask (torch.LongTensor): indicating which indices are padding tokens. Returns: BaseModelOutput or Tuple comprised of: - **x** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_states** (tuple(torch.FloatTensor)): all intermediate hidden states of shape `(src_len, batch, embed_dim)`. Only populated if *output_hidden_states:* is True. - **all_attentions** (tuple(torch.FloatTensor)): Attention weights for each layer. During training might not be of length n_layers because of layer dropout. 
""" # check attention mask and invert if attention_mask is not None: attention_mask = invert_mask(attention_mask) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_ids) x = inputs_embeds + embed_pos x = self.layernorm_embedding(x) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) encoder_states = [] if output_hidden_states else None all_attentions = () if output_attentions else None for encoder_layer in self.layers: if output_hidden_states: encoder_states.append(x) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): # skip the layer attn = None else: x, attn = encoder_layer(x, attention_mask, output_attentions=output_attentions) if output_attentions: all_attentions = all_attentions + (attn,) if self.layer_norm: x = self.layer_norm(x) if output_hidden_states: encoder_states.append(x) # T x B x C -> B x T x C encoder_states = tuple(hidden_state.transpose(0, 1) for hidden_state in encoder_states) # T x B x C -> B x T x C x = x.transpose(0, 1) if not return_dict: return tuple(v for v in [x, encoder_states, all_attentions] if v is not None) return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions) class DecoderLayer(nn.Module): def __init__(self, config: BartConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = Attention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.normalize_before = config.normalize_before self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.encoder_attn = Attention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, encoder_decoder_attention=True, ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def forward( self, x, encoder_hidden_states, encoder_attn_mask=None, layer_state=None, causal_mask=None, decoder_padding_mask=None, output_attentions=False, ): residual = x if layer_state is None: layer_state = {} if self.normalize_before: x = self.self_attn_layer_norm(x) # Self Attention x, self_attn_weights = self.self_attn( query=x, key=x, layer_state=layer_state, # adds keys to layer state key_padding_mask=decoder_padding_mask, attn_mask=causal_mask, output_attentions=output_attentions, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x if not self.normalize_before: x = self.self_attn_layer_norm(x) # Cross attention residual = x assert self.encoder_attn.cache_key != self.self_attn.cache_key if self.normalize_before: x = self.encoder_attn_layer_norm(x) x, _ = self.encoder_attn( query=x, key=encoder_hidden_states, key_padding_mask=encoder_attn_mask, layer_state=layer_state, # mutates layer state ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x if not self.normalize_before: x = self.encoder_attn_layer_norm(x) # Fully Connected residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, 
p=self.dropout, training=self.training) x = residual + x if not self.normalize_before: x = self.final_layer_norm(x) return ( x, self_attn_weights, layer_state, ) # just self_attn weights for now, following t5, layer_state = cache for decoding class BartDecoder(nn.Module): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`DecoderLayer`. Args: config: BartConfig embed_tokens (torch.nn.Embedding): output embedding """ def __init__(self, config: BartConfig, embed_tokens: nn.Embedding): super().__init__() self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.do_blenderbot_90_layernorm = config.do_blenderbot_90_layernorm # layernorm variant self.padding_idx = embed_tokens.padding_idx self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = embed_tokens if config.static_position_embeddings: self.embed_positions = SinusoidalPositionalEmbedding( config.max_position_embeddings, config.d_model, config.pad_token_id ) else: self.embed_positions = LearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, self.padding_idx, config.extra_pos_embeddings, ) self.layers = nn.ModuleList( [DecoderLayer(config) for _ in range(config.decoder_layers)] ) # type: List[DecoderLayer] self.layernorm_embedding = LayerNorm(config.d_model) if config.normalize_embedding else nn.Identity() self.layer_norm = LayerNorm(config.d_model) if config.add_final_layer_norm else None def forward( self, input_ids, encoder_hidden_states, encoder_padding_mask, decoder_padding_mask, decoder_causal_mask, past_key_values=None, use_cache=False, output_attentions=False, output_hidden_states=False, return_dict=False, **unused, ): """ Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). 
Args: input_ids (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_hidden_states: output from the encoder, used for encoder-side attention encoder_padding_mask: for ignoring pad tokens past_key_values (dict or None): dictionary used for storing state during generation Returns: BaseModelOutputWithPast or tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - the cache - hidden states - attentions """ if "decoder_cached_states" in unused: warnings.warn( "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) past_key_values = unused.pop("decoder_cached_states") if "decoder_past_key_values" in unused: warnings.warn( "The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) past_key_values = unused.pop("decoder_past_key_values") # check attention mask and invert if encoder_padding_mask is not None: encoder_padding_mask = invert_mask(encoder_padding_mask) # embed positions positions = self.embed_positions(input_ids, use_cache=use_cache) if use_cache: input_ids = input_ids[:, -1:] positions = positions[:, -1:] x = self.embed_tokens(input_ids) * self.embed_scale if self.do_blenderbot_90_layernorm: x = self.layernorm_embedding(x) x += positions else: x += positions x = self.layernorm_embedding(x) x = F.dropout(x, p=self.dropout, training=self.training) # Convert to Bart output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim) x = x.transpose(0, 1) encoder_hidden_states = encoder_hidden_states.transpose(0, 1) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = [] for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (x,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue layer_state = past_key_values[idx] if past_key_values is not None else None x, layer_self_attn, layer_past = decoder_layer( x, encoder_hidden_states, encoder_attn_mask=encoder_padding_mask, decoder_padding_mask=decoder_padding_mask, layer_state=layer_state, causal_mask=decoder_causal_mask, output_attentions=output_attentions, ) if use_cache: next_decoder_cache.append(layer_past.copy()) if output_attentions: all_self_attns += (layer_self_attn,) if self.layer_norm: # if config.add_final_layer_norm (mBART) x = self.layer_norm(x) # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim) if output_hidden_states: all_hidden_states = tuple(hidden_state.transpose(0, 1) for hidden_state in all_hidden_states) x = x.transpose(0, 1) encoder_hidden_states = encoder_hidden_states.transpose(0, 1) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple(v for v in [x, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns ) def _reorder_buffer(attn_cache, new_order): for k, input_buffer_k in attn_cache.items(): if input_buffer_k is not None: attn_cache[k] = input_buffer_k.index_select(0, new_order) return attn_cache class Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, 
embed_dim, num_heads, dropout=0.0, bias=True, encoder_decoder_attention=False, # otherwise self_attention ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.encoder_decoder_attention = encoder_decoder_attention self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.cache_key = "encoder_decoder" if self.encoder_decoder_attention else "self" def _shape(self, tensor, seq_len, bsz): return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1) def forward( self, query, key: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, layer_state: Optional[Dict[str, Optional[Tensor]]] = None, attn_mask: Optional[Tensor] = None, output_attentions=False, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time(SeqLen) x Batch x Channel""" static_kv: bool = self.encoder_decoder_attention tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] # get here for encoder decoder cause of static_kv if layer_state is not None: # reuse k,v and encoder_padding_mask saved_state = layer_state.get(self.cache_key, {}) if "prev_key" in saved_state and static_kv: # previous time steps are cached - no need to recompute key and value if they are static key = None else: saved_state = None layer_state = {} q = self.q_proj(query) * self.scaling if static_kv: if key is None: k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: k = self.k_proj(query) v = self.v_proj(query) q = self._shape(q, tgt_len, bsz) if k is not None: k = self._shape(k, -1, bsz) if v is not None: v = self._shape(v, -1, bsz) if saved_state is not None: k, v, key_padding_mask = self._use_saved_state(k, v, saved_state, key_padding_mask, static_kv, bsz) # Update cache layer_state[self.cache_key] = { "prev_key": k.view(bsz, self.num_heads, -1, self.head_dim), "prev_value": v.view(bsz, self.num_heads, -1, self.head_dim), "prev_key_padding_mask": key_padding_mask if not static_kv else None, } assert k is not None src_len = k.size(1) attn_weights = torch.bmm(q, k.transpose(1, 2)) assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len) if attn_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) # This is part of a workaround to get around fork/join parallelism not supporting Optional types. 
if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None assert key_padding_mask is None or key_padding_mask.size()[:2] == ( bsz, src_len, ) if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2) attn_weights = attn_weights.masked_fill(reshaped, float("-inf")) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = F.softmax(attn_weights, dim=-1) attn_probs = F.dropout( attn_weights, p=self.dropout, training=self.training, ) assert v is not None attn_output = torch.bmm(attn_probs, v) assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn_output = self.out_proj(attn_output) if output_attentions: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) else: attn_weights = None return attn_output, attn_weights def _use_saved_state(self, k, v, saved_state, key_padding_mask, static_kv, bsz): # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) assert k is not None and v is not None prev_key_padding_mask: Optional[Tensor] = saved_state.get("prev_key_padding_mask", None) if prev_key_padding_mask is not None: if static_kv: new_key_padding_mask = prev_key_padding_mask else: new_key_padding_mask = torch.cat([prev_key_padding_mask, key_padding_mask], dim=1) else: new_key_padding_mask = key_padding_mask return k, v, new_key_padding_mask class BartClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" # This can trivially be shared with RobertaClassificationHead def __init__( self, input_dim, inner_dim, num_classes, pooler_dropout, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, x): x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x class LearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to the forward function. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, offset): # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. 
Other models dont have this hack self.offset = offset assert padding_idx is not None num_embeddings += offset super().__init__(num_embeddings, embedding_dim, padding_idx=padding_idx) def forward(self, input_ids, use_cache=False): """Input is expected to be of size [bsz x seqlen].""" bsz, seq_len = input_ids.shape[:2] if use_cache: positions = input_ids.data.new(1, 1).fill_(seq_len - 1) # called before slicing else: # starts at 0, ends at 1-seq_len positions = torch.arange(seq_len, dtype=torch.long, device=self.weight.device) return super().forward(positions + self.offset) def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True): if torch.cuda.is_available(): try: from apex.normalization import FusedLayerNorm return FusedLayerNorm(normalized_shape, eps, elementwise_affine) except ImportError: pass return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine) def fill_with_neg_inf(t): """FP16-compatible function that fills a input_ids with -inf.""" return t.float().fill_(float("-inf")).type_as(t) # Public API def _get_shape(t): return getattr(t, "shape", None) @add_start_docstrings( "The bare BART Model outputting raw hidden-states without any specific head on top.", BART_START_DOCSTRING, ) class BartModel(PretrainedBartModel): def __init__(self, config: BartConfig): super().__init__(config) padding_idx, vocab_size = config.pad_token_id, config.vocab_size self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) self.encoder = BartEncoder(config, self.shared) self.decoder = BartDecoder(config, self.shared) self.init_weights() @add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="facebook/bart-large", output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs: Optional[Tuple] = None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): if "decoder_past_key_values" in kwargs: warnings.warn( "The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) past_key_values = kwargs.pop("decoder_past_key_values") if decoder_input_ids is None: use_cache = False output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # make masks if user doesn't supply if not use_cache: decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_bart_decoder_inputs( self.config, input_ids, decoder_input_ids=decoder_input_ids, decoder_padding_mask=decoder_attention_mask, causal_mask_dtype=self.shared.weight.dtype, ) else: decoder_padding_mask, causal_mask = None, None assert decoder_input_ids is not None if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOuput when return_dict=False elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = 
BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) decoder_outputs = self.decoder( decoder_input_ids, encoder_outputs[0], attention_mask, decoder_padding_mask, decoder_causal_mask=causal_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_output_embeddings(self): return _make_linear_from_emb(self.shared) # make it on the fly @add_start_docstrings( "The BART Model with a language modeling head. Can be used for summarization.", BART_START_DOCSTRING ) class BartForConditionalGeneration(PretrainedBartModel): base_model_prefix = "model" authorized_missing_keys = [r"final_logits_bias", r"encoder\.version", r"decoder\.version"] def __init__(self, config: BartConfig): super().__init__(config) base_model = BartModel(config) self.model = base_model self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: old_num_tokens = self.model.shared.num_embeddings new_embeddings = super().resize_token_embeddings(new_num_tokens) self.model.shared = new_embeddings self._resize_final_logits_bias(new_num_tokens, old_num_tokens) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int, old_num_tokens: int) -> None: if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer("final_logits_bias", new_bias) @add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(BART_GENERATION_EXAMPLE) def forward( self, input_ids, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, past_key_values=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, **unused, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``. 
Returns: Conditional generation example:: >>> # Mask filling only works for bart-large >>> from transformers import BartTokenizer, BartForConditionalGeneration >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') >>> TXT = "My friends are <mask> but they eat too many carbs." >>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large') >>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids'] >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = logits[0, masked_index].softmax(dim=0) >>> values, predictions = probs.topk(5) >>> tokenizer.decode(predictions).split() >>> # ['good', 'great', 'all', 'really', 'very'] """ if "lm_labels" in unused: warnings.warn( "The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.", FutureWarning, ) labels = unused.pop("lm_labels") if "decoder_cached_states" in unused: warnings.warn( "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) past_key_values = unused.pop("decoder_cached_states") if "decoder_past_key_values" in unused: warnings.warn( "The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) past_key_values = unused.pop("decoder_past_key_values") return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if decoder_input_ids is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # TODO(SS): do we need to ignore pad tokens in labels? masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past, attention_mask, use_cache, encoder_outputs, **kwargs ): return { "input_ids": None, # encoder_outputs is defined. 
input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } def adjust_logits_during_generation(self, logits, cur_len, max_length): if cur_len == 1 and self.config.force_bos_token_to_be_generated: self._force_token_ids_generation(logits, self.config.bos_token_id) elif cur_len == max_length - 1 and self.config.eos_token_id is not None: self._force_token_ids_generation(logits, self.config.eos_token_id) return logits def _force_token_ids_generation(self, scores, token_id) -> None: """force one of token_ids to be generated by setting prob of all other tokens to 0 (logprob=-float("inf"))""" scores[:, [x for x in range(self.config.vocab_size) if x != token_id]] = -float("inf") @staticmethod def _reorder_cache(past, beam_idx): reordered_past = [] for layer_past in past: # get the correct batch idx from decoder layer's batch dim for cross and self-attn layer_past_new = { attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items() } reordered_past.append(layer_past_new) return reordered_past def get_encoder(self): return self.model.encoder def get_output_embeddings(self): return _make_linear_from_emb(self.model.shared) # make it on the fly @add_start_docstrings( """Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, BART_START_DOCSTRING, ) class BartForSequenceClassification(PretrainedBartModel): def __init__(self, config: BartConfig, **kwargs): super().__init__(config, **kwargs) self.model = BartModel(config) self.classification_head = BartClassificationHead( config.d_model, config.d_model, config.num_labels, config.classifier_dropout, ) self.model._init_weights(self.classification_head.dense) self.model._init_weights(self.classification_head.out_proj) @add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="facebook/bart-large", output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) x = outputs[0] # last hidden state eos_mask = input_ids.eq(self.config.eos_token_id) if len(torch.unique(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") sentence_representation = x[eos_mask, :].view(x.size(0), -1, x.size(-1))[:, -1, :] logits = self.classification_head(sentence_representation) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, BART_START_DOCSTRING, ) class BartForQuestionAnswering(PretrainedBartModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.model = BartModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.model._init_weights(self.qa_outputs) @add_start_docstrings_to_callable(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="facebook/bart-large", output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, start_positions=None, end_positions=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if start_positions is not None and end_positions is not None: use_cache = False outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = ( start_logits, end_logits, ) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) class SinusoidalPositionalEmbedding(nn.Embedding): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions, embedding_dim, padding_idx=None): super().__init__(num_positions, embedding_dim) if embedding_dim % 2 != 0: raise NotImplementedError(f"odd embedding_dim {embedding_dim} not supported") self.weight = self._init_weight(self.weight) @staticmethod def _init_weight(out: nn.Parameter): """Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in the 2nd half of the vector. [dim // 2:] """ n_pos, dim = out.shape position_enc = np.array( [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)] ) out[:, 0 : dim // 2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) # This line breaks for odd n_pos out[:, dim // 2 :] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() out.requires_grad = False return out @torch.no_grad() def forward(self, input_ids, use_cache=False): """Input is expected to be of size [bsz x seqlen].""" bsz, seq_len = input_ids.shape[:2] if use_cache: positions = input_ids.data.new(1, 1).fill_(seq_len - 1) # called before slicing else: # starts at 0, ends at 1-seq_len positions = torch.arange(seq_len, dtype=torch.long, device=self.weight.device) return super().forward(positions)
58,368
40.662384
213
py
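The modeling record above implements the full BART encoder/decoder stack. As a hedged sanity-check sketch (illustrative, not from the repository; it assumes torch and the vendored transformers package are importable and downloads nothing), a randomly initialised tiny BartModel can be run end to end:

# Illustrative sketch only: forward pass through a tiny, randomly initialised BART.
import torch
from transformers.configuration_bart import BartConfig
from transformers.modeling_bart import BartModel

config = BartConfig(
    vocab_size=100, d_model=32,
    encoder_layers=1, decoder_layers=1,
    encoder_attention_heads=2, decoder_attention_heads=2,
    encoder_ffn_dim=64, decoder_ffn_dim=64,
)
model = BartModel(config).eval()

input_ids = torch.tensor([[0, 7, 42, 2]])  # <s> ... </s>, no padding
with torch.no_grad():
    outputs = model(input_ids, return_dict=True)

# decoder_input_ids default to input_ids shifted right, so both lengths are 4
print(outputs.last_hidden_state.shape)          # torch.Size([1, 4, 32])
print(outputs.encoder_last_hidden_state.shape)  # torch.Size([1, 4, 32])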
SLT-FAI
SLT-FAI-main/transformers/configuration_xlm_prophetnet.py
# coding=utf-8 # Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ XLM-ProphetNet model configuration """ from .configuration_prophetnet import ProphetNetConfig from .utils import logging logger = logging.get_logger(__name__) XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/xprophetnet-large-wiki100-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/microsoft/xprophetnet-large-wiki100-cased/config.json", } class XLMProphetNetConfig(ProphetNetConfig): """ This class overrides :class:`~transformers.ProphetNetConfig`. Please check the superclass for the appropriate documentation alongside usage examples. """ model_type = "xlm-prophetnet"
1,264
34.138889
157
py
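Since XLMProphetNetConfig only overrides the model type of its ProphetNet parent, a short illustrative check (not from the repository, assuming the vendored package is importable) is enough:

# Illustrative sketch only: the XLM variant inherits every field from ProphetNetConfig.
from transformers.configuration_xlm_prophetnet import XLMProphetNetConfig

config = XLMProphetNetConfig()
print(config.model_type)  # "xlm-prophetnet"
print(config.vocab_size)  # inherited ProphetNet default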
SLT-FAI
SLT-FAI-main/transformers/configuration_transfo_xl.py
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Transformer XL configuration """ import warnings from .configuration_utils import PretrainedConfig from .utils import logging logger = logging.get_logger(__name__) TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = { "transfo-xl-wt103": "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-config.json", } class TransfoXLConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a :class:`~transformers.TransfoXLModel` or a :class:`~transformers.TFTransfoXLModel`. It is used to instantiate a Transformer-XL model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `Transformer XL <https://huggingface.co/transfo-xl-wt103>`__ architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 267735): Vocabulary size of the Transformer-XL model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.TransfoXLModel` or :class:`~transformers.TFTransfoXLModel`. cutoffs (:obj:`List[int]`, `optional`, defaults to :obj:`[20000, 40000, 200000]`): Cutoffs for the adaptive softmax. d_model (:obj:`int`, `optional`, defaults to 1024): Dimensionality of the model's hidden states. d_embed (:obj:`int`, `optional`, defaults to 1024): Dimensionality of the embeddings. n_head (:obj:`int`, `optional`, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. d_head (:obj:`int`, `optional`, defaults to 64): Dimensionality of the model's heads. d_inner (:obj:`int`, `optional`, defaults to 4096): Inner dimension of the feed-forward layers. div_val (:obj:`int`, `optional`, defaults to 4): Divisor value for the adaptive input and softmax. pre_lnorm (:obj:`boolean`, `optional`, defaults to :obj:`False`): Whether or not to apply LayerNorm to the input instead of the output in the blocks. n_layer (:obj:`int`, `optional`, defaults to 18): Number of hidden layers in the Transformer encoder. mem_len (:obj:`int`, `optional`, defaults to 1600): Length of the retained previous hidden states (the memory). clamp_len (:obj:`int`, `optional`, defaults to 1000): Use the same positional embeddings after clamp_len. same_length (:obj:`boolean`, `optional`, defaults to :obj:`True`): Whether or not to use the same attention length for all tokens. proj_share_all_but_first (:obj:`boolean`, `optional`, defaults to :obj:`True`): True to share all but the first projections, False not to share. attn_type (:obj:`int`, `optional`, defaults to 0): Attention type. 
0 for Transformer-XL, 1 for Shaw et al., 2 for Vaswani et al., 3 for Al-Rfou et al. sample_softmax (:obj:`int`, `optional`, defaults to -1): Number of samples in the sampled softmax. adaptive (:obj:`boolean`, `optional`, defaults to :obj:`True`): Whether or not to use adaptive softmax. dropout (:obj:`float`, `optional`, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. dropatt (:obj:`float`, `optional`, defaults to 0): The dropout ratio for the attention probabilities. untie_r (:obj:`boolean`, `optional`, defaults to :obj:`True`): Whether or not to untie relative position biases. init (:obj:`str`, `optional`, defaults to :obj:`"normal"`): Parameter initializer to use. init_range (:obj:`float`, `optional`, defaults to 0.01): Parameters initialized by U(-init_range, init_range). proj_init_std (:obj:`float`, `optional`, defaults to 0.01): Parameters initialized by N(0, proj_init_std). init_std (:obj:`float`, `optional`, defaults to 0.02): Parameters initialized by N(0, init_std). layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-5): The epsilon to use in the layer normalization layers. Examples:: >>> from transformers import TransfoXLConfig, TransfoXLModel >>> # Initializing a Transformer XL configuration >>> configuration = TransfoXLConfig() >>> # Initializing a model from the configuration >>> model = TransfoXLModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config """ model_type = "transfo-xl" def __init__( self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs ): if "tie_weight" in kwargs: warnings.warn( "The config parameter `tie_weight` is deprecated. 
Please use `tie_word_embeddings` instead.", FutureWarning, ) kwargs["tie_word_embeddings"] = kwargs["tie_weight"] super().__init__(eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.cutoffs = [] self.cutoffs.extend(cutoffs) if proj_share_all_but_first: self.tie_projs = [False] + [True] * len(self.cutoffs) else: self.tie_projs = [False] + [False] * len(self.cutoffs) self.d_model = d_model self.d_embed = d_embed self.d_head = d_head self.d_inner = d_inner self.div_val = div_val self.pre_lnorm = pre_lnorm self.n_layer = n_layer self.n_head = n_head self.mem_len = mem_len self.same_length = same_length self.attn_type = attn_type self.clamp_len = clamp_len self.sample_softmax = sample_softmax self.adaptive = adaptive self.dropout = dropout self.dropatt = dropatt self.untie_r = untie_r self.init = init self.init_range = init_range self.proj_init_std = proj_init_std self.init_std = init_std self.layer_norm_epsilon = layer_norm_epsilon @property def max_position_embeddings(self): # Message copied from Transformer-XL documentation logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.") return -1 @property def n_token(self): # Backward compatibility return self.vocab_size @n_token.setter def n_token(self, value): # Backward compatibility self.vocab_size = value @property def hidden_size(self): return self.d_model @property def num_attention_heads(self): return self.n_head @property def num_hidden_layers(self): return self.n_layer
8,405
39.805825
120
py
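A short illustrative sketch (not from the repository, assuming the vendored package is importable) of how the derived attributes and backward-compatibility properties defined above behave:

# Illustrative sketch only: tie_projs is derived from the cutoffs, and the
# legacy properties alias the newer attribute names.
from transformers.configuration_transfo_xl import TransfoXLConfig

config = TransfoXLConfig(
    cutoffs=[2000, 4000],
    d_model=128, d_embed=128,
    n_head=4, d_head=32, d_inner=256,
    n_layer=2, mem_len=64,
)
print(config.tie_projs)                # [False, True, True] since proj_share_all_but_first=True
print(config.n_token)                  # backward-compatible alias of vocab_size
print(config.hidden_size)              # 128, alias of d_model
print(config.max_position_embeddings)  # -1: Transformer-XL has no fixed sequence length limit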
SLT-FAI
SLT-FAI-main/transformers/tokenization_longformer_fast.py
# coding=utf-8 # Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .tokenization_longformer import LongformerTokenizer from .tokenization_roberta_fast import RobertaTokenizerFast from .utils import logging logger = logging.get_logger(__name__) # vocab and merges same as roberta vocab_url = "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json" merges_url = "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt" tokenizer_url = "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-tokenizer.json" _all_longformer_models = [ "allenai/longformer-base-4096", "allenai/longformer-large-4096", "allenai/longformer-large-4096-finetuned-triviaqa", "allenai/longformer-base-4096-extra.pos.embd.only", "allenai/longformer-large-4096-extra.pos.embd.only", ] PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "allenai/longformer-base-4096": 4096, "allenai/longformer-large-4096": 4096, "allenai/longformer-large-4096-finetuned-triviaqa": 4096, "allenai/longformer-base-4096-extra.pos.embd.only": 4096, "allenai/longformer-large-4096-extra.pos.embd.only": 4096, } class LongformerTokenizerFast(RobertaTokenizerFast): r""" Construct a "fast" Longformer tokenizer (backed by HuggingFace's `tokenizers` library). :class:`~transformers.LongformerTokenizerFast` is identical to :class:`~transformers.RobertaTokenizerFast`. Refer to the superclass for usage examples and documentation concerning parameters. """ # merges and vocab same as Roberta max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_vocab_files_map = { "vocab_file": {m: vocab_url for m in _all_longformer_models}, "merges_file": {m: merges_url for m in _all_longformer_models}, "tokenizer_file": {m: tokenizer_url for m in _all_longformer_models}, } slow_tokenizer_class = LongformerTokenizer
2,514
40.229508
117
py
SLT-FAI
SLT-FAI-main/transformers/file_utils.py
""" Utilities for working with the local dataset cache. This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp Copyright by the AllenNLP authors. """ import fnmatch import json import os import re import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from dataclasses import fields from functools import partial, wraps from hashlib import sha256 from pathlib import Path from typing import Any, Dict, Optional, Tuple, Union from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import numpy as np from tqdm.auto import tqdm import requests from filelock import FileLock from . import __version__ from .utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name ENV_VARS_TRUE_VALUES = {"1", "ON", "YES"} ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) try: USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: import torch _torch_available = True # pylint: disable=invalid-name logger.info("PyTorch version {} available.".format(torch.__version__)) else: logger.info("Disabling PyTorch because USE_TF is set") _torch_available = False except ImportError: _torch_available = False # pylint: disable=invalid-name try: USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: import tensorflow as tf assert hasattr(tf, "__version__") and int(tf.__version__[0]) >= 2 _tf_available = True # pylint: disable=invalid-name logger.info("TensorFlow version {} available.".format(tf.__version__)) else: logger.info("Disabling Tensorflow because USE_TORCH is set") _tf_available = False except (ImportError, AssertionError): _tf_available = False # pylint: disable=invalid-name try: USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper() if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: import flax import jax logger.info("JAX version {}, Flax: available".format(jax.__version__)) logger.info("Flax available: {}".format(flax)) _flax_available = True else: _flax_available = False except ImportError: _flax_available = False # pylint: disable=invalid-name try: import datasets # noqa: F401 # Check we're not importing a "datasets" directory somewhere _datasets_available = hasattr(datasets, "__version__") and hasattr(datasets, "load_dataset") if _datasets_available: logger.debug(f"Succesfully imported datasets version {datasets.__version__}") else: logger.debug("Imported a datasets object but this doesn't seem to be the 🤗 datasets library.") except ImportError: _datasets_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) try: import torch_xla.core.xla_model as xm # noqa: F401 if _torch_available: _torch_tpu_available = True # pylint: disable= else: _torch_tpu_available = False except ImportError: _torch_tpu_available = False try: import psutil # noqa: F401 _psutil_available = True except ImportError: _psutil_available = False try: import py3nvml # noqa: F401 _py3nvml_available = True except ImportError: _py3nvml_available = False try: from apex import amp # noqa: F401 _has_apex = True except ImportError: 
_has_apex = False try: import faiss # noqa: F401 _faiss_available = True logger.debug(f"Succesfully imported faiss version {faiss.__version__}") except ImportError: _faiss_available = False try: import sklearn.metrics # noqa: F401 import scipy.stats # noqa: F401 _has_sklearn = True except (AttributeError, ImportError): _has_sklearn = False try: # Test copied from tqdm.autonotebook: https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py get_ipython = sys.modules["IPython"].get_ipython if "IPKernelApp" not in get_ipython().config: raise ImportError("console") if "VSCODE_PID" in os.environ: raise ImportError("vscode") import IPython # noqa: F401 _in_notebook = True except (AttributeError, ImportError, KeyError): _in_notebook = False try: import sentencepiece # noqa: F401 _sentencepiece_available = True except ImportError: _sentencepiece_available = False try: import tokenizers # noqa: F401 _tokenizers_available = True except ImportError: _tokenizers_available = False default_cache_path = os.path.join(torch_cache_home, "transformers") PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) WEIGHTS_NAME = "pytorch_model.bin" TF2_WEIGHTS_NAME = "tf_model.h5" TF_WEIGHTS_NAME = "model.ckpt" CONFIG_NAME = "config.json" MODEL_CARD_NAME = "modelcard.json" SENTENCEPIECE_UNDERLINE = "▁" SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility MULTIPLE_CHOICE_DUMMY_INPUTS = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert" CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co" PRESET_MIRROR_DICT = { "tuna": "https://mirrors.tuna.tsinghua.edu.cn/hugging-face-models", "bfsu": "https://mirrors.bfsu.edu.cn/hugging-face-models", } def is_torch_available(): return _torch_available def is_tf_available(): return _tf_available def is_flax_available(): return _flax_available def is_torch_tpu_available(): return _torch_tpu_available def is_datasets_available(): return _datasets_available def is_psutil_available(): return _psutil_available def is_py3nvml_available(): return _py3nvml_available def is_apex_available(): return _has_apex def is_faiss_available(): return _faiss_available def is_sklearn_available(): return _has_sklearn def is_sentencepiece_available(): return _sentencepiece_available def is_tokenizers_available(): return _tokenizers_available def is_in_notebook(): return _in_notebook def torch_only_method(fn): def wrapper(*args, **kwargs): if not _torch_available: raise ImportError( "You need to install pytorch to use this method or class, " "or activate it with environment variables USE_TORCH=1 and USE_TF=0." ) else: return fn(*args, **kwargs) return wrapper DATASETS_IMPORT_ERROR = """ {0} requires the 🤗 Datasets library but it was not found in your enviromnent. You can install it with: ``` pip install datasets ``` In a notebook or a colab, you can install it by executing a cell with ``` !pip install datasets ``` then restarting your kernel. 
Note that if you have a local folder named `datasets` or a local python file named `datasets.py` in your current
working directory, python may try to import this instead of the 🤗 Datasets library. You should rename this folder or
that python file if that's the case.
"""


TOKENIZERS_IMPORT_ERROR = """
{0} requires the 🤗 Tokenizers library but it was not found in your environment. You can install it with:
```
pip install tokenizers
```
In a notebook or a colab, you can install it by executing a cell with
```
!pip install tokenizers
```
"""


SENTENCEPIECE_IMPORT_ERROR = """
{0} requires the SentencePiece library but it was not found in your environment. Check out the instructions on the
installation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones
that match your environment.
"""


FAISS_IMPORT_ERROR = """
{0} requires the faiss library but it was not found in your environment. Check out the instructions on the
installation page of its repo: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md and follow the ones
that match your environment.
"""


PYTORCH_IMPORT_ERROR = """
{0} requires the PyTorch library but it was not found in your environment. Check out the instructions on the
installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
"""


SKLEARN_IMPORT_ERROR = """
{0} requires the scikit-learn library but it was not found in your environment. You can install it with:
```
pip install -U scikit-learn
```
In a notebook or a colab, you can install it by executing a cell with
```
!pip install -U scikit-learn
```
"""


TENSORFLOW_IMPORT_ERROR = """
{0} requires the TensorFlow library but it was not found in your environment. Check out the instructions on the
installation page: https://www.tensorflow.org/install and follow the ones that match your environment.
"""


FLAX_IMPORT_ERROR = """
{0} requires the FLAX library but it was not found in your environment. Check out the instructions on the
installation page: https://github.com/google/flax and follow the ones that match your environment.
""" def requires_datasets(obj): name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ if not is_datasets_available(): raise ImportError(DATASETS_IMPORT_ERROR.format(name)) def requires_faiss(obj): name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ if not is_faiss_available(): raise ImportError(FAISS_IMPORT_ERROR.format(name)) def requires_pytorch(obj): name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ if not is_torch_available(): raise ImportError(PYTORCH_IMPORT_ERROR.format(name)) def requires_sklearn(obj): name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ if not is_sklearn_available(): raise ImportError(SKLEARN_IMPORT_ERROR.format(name)) def requires_tf(obj): name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ if not is_tf_available(): raise ImportError(TENSORFLOW_IMPORT_ERROR.format(name)) def requires_flax(obj): name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ if not is_flax_available(): raise ImportError(FLAX_IMPORT_ERROR.format(name)) def requires_tokenizers(obj): name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ if not is_tokenizers_available(): raise ImportError(TOKENIZERS_IMPORT_ERROR.format(name)) def requires_sentencepiece(obj): name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ if not is_sentencepiece_available(): raise ImportError(SENTENCEPIECE_IMPORT_ERROR.format(name)) def add_start_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") return fn return docstring_decorator def add_start_docstrings_to_callable(*docstr): def docstring_decorator(fn): class_name = ":class:`~transformers.{}`".format(fn.__qualname__.split(".")[0]) intro = " The {} forward method, overrides the :func:`__call__` special method.".format(class_name) note = r""" .. note:: Although the recipe for forward pass needs to be defined within this function, one should call the :class:`Module` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them. """ fn.__doc__ = intro + note + "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") return fn return docstring_decorator def add_end_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = fn.__doc__ + "".join(docstr) return fn return docstring_decorator PT_RETURN_INTRODUCTION = r""" Returns: :class:`~{full_output_type}` or :obj:`tuple(torch.FloatTensor)`: A :class:`~{full_output_type}` (if ``return_dict=True`` is passed or when ``config.return_dict=True``) or a tuple of :obj:`torch.FloatTensor` comprising various elements depending on the configuration (:class:`~transformers.{config_class}`) and inputs. """ TF_RETURN_INTRODUCTION = r""" Returns: :class:`~{full_output_type}` or :obj:`tuple(tf.Tensor)`: A :class:`~{full_output_type}` (if ``return_dict=True`` is passed or when ``config.return_dict=True``) or a tuple of :obj:`tf.Tensor` comprising various elements depending on the configuration (:class:`~transformers.{config_class}`) and inputs. 
""" def _get_indent(t): """Returns the indentation in the first line of t""" search = re.search(r"^(\s*)\S", t) return "" if search is None else search.groups()[0] def _convert_output_args_doc(output_args_doc): """Convert output_args_doc to display properly.""" # Split output_arg_doc in blocks argument/description indent = _get_indent(output_args_doc) blocks = [] current_block = "" for line in output_args_doc.split("\n"): # If the indent is the same as the beginning, the line is the name of new arg. if _get_indent(line) == indent: if len(current_block) > 0: blocks.append(current_block[:-1]) current_block = f"{line}\n" else: # Otherwise it's part of the description of the current arg. # We need to remove 2 spaces to the indentation. current_block += f"{line[2:]}\n" blocks.append(current_block[:-1]) # Format each block for proper rendering for i in range(len(blocks)): blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i]) blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i]) return "\n".join(blocks) def _prepare_output_docstrings(output_type, config_class): """ Prepares the return part of the docstring using `output_type`. """ docstrings = output_type.__doc__ # Remove the head of the docstring to keep the list of args only lines = docstrings.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None: i += 1 if i < len(lines): docstrings = "\n".join(lines[(i + 1) :]) docstrings = _convert_output_args_doc(docstrings) # Add the return introduction full_output_type = f"{output_type.__module__}.{output_type.__name__}" intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION intro = intro.format(full_output_type=full_output_type, config_class=config_class) return intro + docstrings PT_TOKEN_CLASSIFICATION_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import torch >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits """ PT_QUESTION_ANSWERING_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import torch >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True) >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors='pt') >>> start_positions = torch.tensor([1]) >>> end_positions = torch.tensor([3]) >>> outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) >>> loss = outputs.loss >>> start_scores = outputs.start_logits >>> end_scores = outputs.end_logits """ PT_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import torch >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits """ PT_MASKED_LM_SAMPLE = r""" Example:: >>> 
from transformers import {tokenizer_class}, {model_class} >>> import torch >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True) >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt") >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits """ PT_BASE_MODEL_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import torch >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state """ PT_MULTIPLE_CHOICE_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import torch >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True) >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 >>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='pt', padding=True) >>> outputs = model(**{{k: v.unsqueeze(0) for k,v in encoding.items()}}, labels=labels) # batch size is 1 >>> # the linear classifier still needs to be trained >>> loss = outputs.loss >>> logits = outputs.logits """ PT_CAUSAL_LM_SAMPLE = r""" Example:: >>> import torch >>> from transformers import {tokenizer_class}, {model_class} >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs, labels=inputs["input_ids"]) >>> loss = outputs.loss >>> logits = outputs.logits """ TF_TOKEN_CLASSIFICATION_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> input_ids = inputs["input_ids"] >>> inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 >>> outputs = model(inputs) >>> loss = outputs.loss >>> logits = outputs.logits """ TF_QUESTION_ANSWERING_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)) >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> input_dict = tokenizer(question, text, return_tensors='tf') >>> outputs = model(input_dict) >>> start_logits = outputs.start_logits >>> end_logits = outputs.end_logits >>> all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) >>> answer = ' '.join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0]+1]) """ 
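

# --- Editorial sketch (not part of the original module) ------------------------
# The PT_*/TF_* constants in this section are plain `str.format` templates;
# `add_code_sample_docstrings` (defined further below) fills the
# {tokenizer_class}, {model_class} and {checkpoint} placeholders. The concrete
# values used here are illustrative only:
if __name__ == "__main__":
    _filled = PT_QUESTION_ANSWERING_SAMPLE.format(
        tokenizer_class="BertTokenizer",
        model_class="BertForQuestionAnswering",
        checkpoint="bert-base-uncased",
    )
    assert "BertForQuestionAnswering.from_pretrained('bert-base-uncased'" in _filled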
TF_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 >>> outputs = model(inputs) >>> loss = outputs.loss >>> logits = outputs.logits """ TF_MASKED_LM_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)) >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf") >>> inputs["labels"] = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] >>> outputs = model(inputs) >>> loss = outputs.loss >>> logits = outputs.logits """ TF_BASE_MODEL_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> last_hidden_states = outputs.last_hidden_states """ TF_MULTIPLE_CHOICE_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)) >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." 
>>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='tf', padding=True) >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}} >>> outputs = model(inputs) # batch size is 1 >>> # the linear classifier still needs to be trained >>> logits = outputs.logits """ TF_CAUSAL_LM_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> logits = outputs.logits """ def add_code_sample_docstrings( *docstr, tokenizer_class=None, checkpoint=None, output_type=None, config_class=None, mask=None ): def docstring_decorator(fn): model_class = fn.__qualname__.split(".")[0] is_tf_class = model_class[:2] == "TF" doc_kwargs = dict(model_class=model_class, tokenizer_class=tokenizer_class, checkpoint=checkpoint) if "SequenceClassification" in model_class: code_sample = TF_SEQUENCE_CLASSIFICATION_SAMPLE if is_tf_class else PT_SEQUENCE_CLASSIFICATION_SAMPLE elif "QuestionAnswering" in model_class: code_sample = TF_QUESTION_ANSWERING_SAMPLE if is_tf_class else PT_QUESTION_ANSWERING_SAMPLE elif "TokenClassification" in model_class: code_sample = TF_TOKEN_CLASSIFICATION_SAMPLE if is_tf_class else PT_TOKEN_CLASSIFICATION_SAMPLE elif "MultipleChoice" in model_class: code_sample = TF_MULTIPLE_CHOICE_SAMPLE if is_tf_class else PT_MULTIPLE_CHOICE_SAMPLE elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]: doc_kwargs["mask"] = "[MASK]" if mask is None else mask code_sample = TF_MASKED_LM_SAMPLE if is_tf_class else PT_MASKED_LM_SAMPLE elif "LMHead" in model_class: code_sample = TF_CAUSAL_LM_SAMPLE if is_tf_class else PT_CAUSAL_LM_SAMPLE elif "Model" in model_class or "Encoder" in model_class: code_sample = TF_BASE_MODEL_SAMPLE if is_tf_class else PT_BASE_MODEL_SAMPLE else: raise ValueError(f"Docstring can't be built for model {model_class}") output_doc = _prepare_output_docstrings(output_type, config_class) if output_type is not None else "" built_doc = code_sample.format(**doc_kwargs) fn.__doc__ = (fn.__doc__ or "") + "".join(docstr) + output_doc + built_doc return fn return docstring_decorator def replace_return_docstrings(output_type=None, config_class=None): def docstring_decorator(fn): docstrings = fn.__doc__ lines = docstrings.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None: i += 1 if i < len(lines): lines[i] = _prepare_output_docstrings(output_type, config_class) docstrings = "\n".join(lines) else: raise ValueError( f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, current docstring is:\n{docstrings}" ) fn.__doc__ = docstrings return fn return docstring_decorator def is_remote_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https") def hf_bucket_url(model_id: str, filename: str, use_cdn=True, mirror=None) -> str: """ Resolve a model identifier, and a file name, to a HF-hosted url on either S3 or Cloudfront (a Content Delivery Network, or CDN). Cloudfront is replicated over the globe so downloads are way faster for the end user (and it also lowers our bandwidth costs). 
However, it is more aggressively cached by default, so may not always reflect the latest changes to the underlying file (default TTL is 24 hours). In terms of client-side caching from this library, even though Cloudfront relays the ETags from S3, using one or the other (or switching from one to the other) will affect caching: cached files are not shared between the two because the cached file's name contains a hash of the url. """ endpoint = ( PRESET_MIRROR_DICT.get(mirror, mirror) if mirror else CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX ) legacy_format = "/" not in model_id if legacy_format: return f"{endpoint}/{model_id}-{filename}" else: return f"{endpoint}/{model_id}/{filename}" def url_to_filename(url, etag=None): """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, delimited by a period. If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name so that TF 2.0 can identify it as a HDF5 file (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380) """ url_bytes = url.encode("utf-8") url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") etag_hash = sha256(etag_bytes) filename += "." + etag_hash.hexdigest() if url.endswith(".h5"): filename += ".h5" return filename def filename_to_url(filename, cache_dir=None): """ Return the url and etag (which may be ``None``) stored for `filename`. Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. """ if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise EnvironmentError("file {} not found".format(cache_path)) meta_path = cache_path + ".json" if not os.path.exists(meta_path): raise EnvironmentError("file {} not found".format(meta_path)) with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"] return url, etag def cached_path( url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent: Union[Dict, str, None] = None, extract_compressed_file=False, force_extract=False, local_files_only=False, ) -> Optional[str]: """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. Args: cache_dir: specify a cache directory to save the file to (overwrite the default cache dir). force_download: if True, re-dowload the file even if it's already cached in the cache dir. resume_download: if True, resume the download if incompletly recieved file is found. user_agent: Optional string or dict that will be appended to the user-agent on remote requests. extract_compressed_file: if True and the path point to a zip or tar file, extract the compressed file in a folder along the archive. force_extract: if True when extract_compressed_file is True and the archive was already extracted, re-extract the archive and overide the folder where it was extracted. Return: None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk). 
Local path (string) otherwise """ if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if is_remote_url(url_or_filename): # URL, so get it from the cache (downloading if necessary) output_path = get_from_cache( url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only, ) elif os.path.exists(url_or_filename): # File, and it exists. output_path = url_or_filename elif urlparse(url_or_filename).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(url_or_filename)) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) if extract_compressed_file: if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" output_dir, output_file = os.path.split(output_path) output_extract_dir_name = output_file.replace(".", "-") + "-extracted" output_path_extracted = os.path.join(output_dir, output_extract_dir_name) if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract: return output_path_extracted # Prevent parallel extractions lock_path = output_path + ".lock" with FileLock(lock_path): shutil.rmtree(output_path_extracted, ignore_errors=True) os.makedirs(output_path_extracted) if is_zipfile(output_path): with ZipFile(output_path, "r") as zip_file: zip_file.extractall(output_path_extracted) zip_file.close() elif tarfile.is_tarfile(output_path): tar_file = tarfile.open(output_path) tar_file.extractall(output_path_extracted) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(output_path)) return output_path_extracted return output_path def http_get(url, temp_file, proxies=None, resume_size=0, user_agent: Union[Dict, str, None] = None): ua = "transformers/{}; python/{}".format(__version__, sys.version.split()[0]) if is_torch_available(): ua += "; torch/{}".format(torch.__version__) if is_tf_available(): ua += "; tensorflow/{}".format(tf.__version__) if isinstance(user_agent, dict): ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items()) elif isinstance(user_agent, str): ua += "; " + user_agent headers = {"user-agent": ua} if resume_size > 0: headers["Range"] = "bytes=%d-" % (resume_size,) response = requests.get(url, stream=True, proxies=proxies, headers=headers) if response.status_code == 416: # Range not satisfiable return content_length = response.headers.get("Content-Length") total = resume_size + int(content_length) if content_length is not None else None progress = tqdm( unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading", disable=bool(logging.get_verbosity() == logging.NOTSET), ) for chunk in response.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache( url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent: Union[Dict, str, None] = None, local_files_only=False, ) -> Optional[str]: """ Given a URL, look for the corresponding file in the local cache. If it's not there, download it. 
Then return the path to the cached file. Return: None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk). Local path (string) otherwise """ if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) os.makedirs(cache_dir, exist_ok=True) etag = None if not local_files_only: try: response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout) if response.status_code == 200: etag = response.headers.get("ETag") except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(cache_path): return cache_path else: matching_files = [ file for file in fnmatch.filter(os.listdir(cache_dir), filename.split(".")[0] + ".*") if not file.endswith(".json") and not file.endswith(".lock") ] if len(matching_files) > 0: return os.path.join(cache_dir, matching_files[-1]) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. if os.path.exists(cache_path) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. lock_path = cache_path + ".lock" with FileLock(lock_path): # If the download just completed while the lock was activated. if os.path.exists(cache_path) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: incomplete_path = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(incomplete_path, "a+b") as f: yield f temp_file_manager = _resumable_file_manager if os.path.exists(incomplete_path): resume_size = os.stat(incomplete_path).st_size else: resume_size = 0 else: temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False) resume_size = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name) http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent) logger.info("storing %s in cache at %s", url, cache_path) os.replace(temp_file.name, cache_path) logger.info("creating metadata file for %s", cache_path) meta = {"url": url, "etag": etag} meta_path = cache_path + ".json" with open(meta_path, "w") as meta_file: json.dump(meta, meta_file) return cache_path class cached_property(property): """ Descriptor that mimics @property but caches output in member variable. From tensorflow_datasets Built-in in functools from Python 3.8. 
""" def __get__(self, obj, objtype=None): # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute") attr = "__cached_" + self.fget.__name__ cached = getattr(obj, attr, None) if cached is None: cached = self.fget(obj) setattr(obj, attr, cached) return cached def torch_required(func): # Chose a different decorator name than in tests so it's clear they are not the same. @wraps(func) def wrapper(*args, **kwargs): if is_torch_available(): return func(*args, **kwargs) else: raise ImportError(f"Method `{func.__name__}` requires PyTorch.") return wrapper def tf_required(func): # Chose a different decorator name than in tests so it's clear they are not the same. @wraps(func) def wrapper(*args, **kwargs): if is_tf_available(): return func(*args, **kwargs) else: raise ImportError(f"Method `{func.__name__}` requires TF.") return wrapper def is_tensor(x): """ Tests if ``x`` is a :obj:`torch.Tensor`, :obj:`tf.Tensor` or :obj:`np.ndarray`. """ if is_torch_available(): import torch if isinstance(x, torch.Tensor): return True if is_tf_available(): import tensorflow as tf if isinstance(x, tf.Tensor): return True return isinstance(x, np.ndarray) class ModelOutput(OrderedDict): """ Base class for all model outputs as dataclass. Has a ``__getitem__`` that allows indexing by integer or slice (like a tuple) or strings (like a dictionary) that will ignore the ``None`` attributes. Otherwise behaves like a regular python dictionary. .. warning:: You can't unpack a :obj:`ModelOutput` directly. Use the :meth:`~transformers.file_utils.ModelOutput.to_tuple` method to convert it to a tuple before. """ def __post_init__(self): class_fields = fields(self) # Safety and consistency checks assert len(class_fields), f"{self.__class__.__name__} has no fields." assert all( field.default is None for field in class_fields[1:] ), f"{self.__class__.__name__} should not have more than one required field." 
first_field = getattr(self, class_fields[0].name) other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:]) if other_fields_are_none and not is_tensor(first_field): try: iterator = iter(first_field) first_field_iterator = True except TypeError: first_field_iterator = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for element in iterator: if ( not isinstance(element, (list, tuple)) or not len(element) == 2 or not isinstance(element[0], str) ): break setattr(self, element[0], element[1]) if element[1] is not None: self[element[0]] = element[1] elif first_field is not None: self[class_fields[0].name] = first_field else: for field in class_fields: v = getattr(self, field.name) if v is not None: self[field.name] = v def __delitem__(self, *args, **kwargs): raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") def setdefault(self, *args, **kwargs): raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") def pop(self, *args, **kwargs): raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") def update(self, *args, **kwargs): raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") def __getitem__(self, k): if isinstance(k, str): inner_dict = {k: v for (k, v) in self.items()} return inner_dict[k] else: return self.to_tuple()[k] def __setattr__(self, name, value): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(name, value) super().__setattr__(name, value) def __setitem__(self, key, value): # Will raise a KeyException if needed super().__setitem__(key, value) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(key, value) def to_tuple(self) -> Tuple[Any]: """ Convert self to a tuple containing all the attributes/keys that are not ``None``. """ return tuple(self[k] for k in self.keys())
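

# --- Editorial usage sketch (not part of the original module) ------------------
# Model-specific outputs subclass ModelOutput as dataclasses whose fields all
# default to None. The class and field names below are made up for illustration.
if __name__ == "__main__":
    from dataclasses import dataclass

    @dataclass
    class _ToyOutput(ModelOutput):
        hidden: Optional[np.ndarray] = None
        score: Optional[np.ndarray] = None

    _out = _ToyOutput(hidden=np.zeros(3))
    assert _out["hidden"] is _out.hidden  # dict-style and attribute access agree
    assert len(_out.to_tuple()) == 1      # None fields are dropped from the tuple
    assert "score" not in _out.keys()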
45,174
33.96517
150
py
SLT-FAI
SLT-FAI-main/transformers/configuration_lxmert.py
# coding=utf-8 # Copyright 2018, Hao Tan, Mohit Bansal # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ LXMERT model configuration """ from .configuration_utils import PretrainedConfig from .utils import logging logger = logging.get_logger(__name__) LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "unc-nlp/lxmert-base-uncased": "", } class LxmertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a :class:`~transformers.LxmertModel` or a :class:`~transformers.TFLxmertModel`. It is used to instantiate a LXMERT model according to the specified arguments, defining the model architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 30522): Vocabulary size of the LXMERT model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.LxmertModel` or :class:`~transformers.TFLxmertModel`. hidden_size (:obj:`int`, `optional`, defaults to 768): Dimensionality of the encoder layers and the pooler layer. r_layers (:obj:`int`, `optional`, defaults to 5): Number of hidden layers in the Transformer visual encoder. l_layers (:obj:`int`, `optional`, defaults to 9): Number of hidden layers in the Transformer language encoder. x_layers (:obj:`int`, `optional`, defaults to 5): Number of hidden layers in the Transformer cross modality encoder. num_attention_heads (:obj:`int`, `optional`, defaults to 5): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (:obj:`int`, `optional`, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"swish"` and :obj:`"gelu_new"` are supported. hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (:obj:`int`, `optional`, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (:obj:`int`, `optional`, defaults to 2): The vocabulary size of the `token_type_ids` passed into :class:`~transformers.BertModel`. initializer_range (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12): The epsilon used by the layer normalization layers. 
        visual_feat_dim (:obj:`int`, `optional`, defaults to 2048):
            This represents the last dimension of the pooled-object features used as input for the model,
            representing the size of each object feature itself.
        visual_pos_dim (:obj:`int`, `optional`, defaults to 4):
            This represents the number of spatial features that are mixed into the visual features. The default is
            set to 4 because most commonly this will represent the location of a bounding box, i.e., (x, y, width, height).
        visual_loss_normalizer (:obj:`float`, `optional`, defaults to 1/15):
            This represents the scaling factor by which each visual loss is multiplied if, during pretraining, one
            decided to train with multiple vision-based loss objectives.
        num_qa_labels (:obj:`int`, `optional`, defaults to 9500):
            This represents the total number of different question answering (QA) labels there are. If using more
            than one dataset with QA, the user will need to account for the total number of labels that all of the
            datasets have in total.
        num_object_labels (:obj:`int`, `optional`, defaults to 1600):
            This represents the total number of semantically unique objects that lxmert will be able to classify a
            pooled-object feature as belonging to.
        num_attr_labels (:obj:`int`, `optional`, defaults to 400):
            This represents the total number of semantically unique attributes that lxmert will be able to classify a
            pooled-object feature as possessing.
        task_matched (:obj:`bool`, `optional`, defaults to :obj:`True`):
            This task is used for sentence-image matching. If the sentence correctly describes the image the label
            will be 1. If the sentence does not correctly describe the image, the label will be 0.
        task_mask_lm (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss
            objective.
        task_obj_predict (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to add object prediction, attribute prediction and feature regression to the loss
            objective.
        task_qa (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to add the question-answering loss to the objective.
        visual_obj_loss (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to calculate the object-prediction loss objective.
        visual_attr_loss (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to calculate the attribute-prediction loss objective.
        visual_feat_loss (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to calculate the feature-regression loss objective.
        output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not the model should return the attentions from the vision, language, and cross-modality
            layers.
        output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not the model should return the hidden states from the vision, language, and cross-modality
            layers.
""" model_type = "lxmert" def __init__( self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_labels=2, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, output_attentions=False, output_hidden_states=False, **kwargs, ): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_labels = num_labels self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.num_qa_labels = num_qa_labels self.num_object_labels = num_object_labels self.num_attr_labels = num_attr_labels self.l_layers = l_layers self.x_layers = x_layers self.r_layers = r_layers self.visual_feat_dim = visual_feat_dim self.visual_pos_dim = visual_pos_dim self.visual_loss_normalizer = visual_loss_normalizer self.task_matched = task_matched self.task_mask_lm = task_mask_lm self.task_obj_predict = task_obj_predict self.task_qa = task_qa self.visual_obj_loss = visual_obj_loss self.visual_attr_loss = visual_attr_loss self.visual_feat_loss = visual_feat_loss self.output_hidden_states = output_hidden_states self.output_attentions = self.output_attentions self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
9,873
51.802139
118
py
SLT-FAI
SLT-FAI-main/transformers/tokenization_dpr_fast.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for DPR.""" import collections from typing import List, Optional, Union from .file_utils import add_end_docstrings, add_start_docstrings from .tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer from .tokenization_utils_base import BatchEncoding, TensorType from .utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", "facebook/dpr-ctx_encoder-multiset-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-tokenizer.json", "facebook/dpr-ctx_encoder-multiset-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-tokenizer.json", }, } QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", "facebook/dpr-question_encoder-multiset-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-tokenizer.json", "facebook/dpr-question_encoder-multiset-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-tokenizer.json", }, } READER_PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/dpr-reader-single-nq-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", "facebook/dpr-reader-multiset-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-tokenizer.json", "facebook/dpr-reader-multiset-base": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-tokenizer.json", }, } CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/dpr-ctx_encoder-single-nq-base": 512, "facebook/dpr-ctx_encoder-multiset-base": 512, } QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/dpr-question_encoder-single-nq-base": 512, "facebook/dpr-question_encoder-multiset-base": 512, } READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/dpr-reader-single-nq-base": 512, "facebook/dpr-reader-multiset-base": 512, } CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": 
{"do_lower_case": True}, } QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } READER_PRETRAINED_INIT_CONFIGURATION = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class DPRContextEncoderTokenizerFast(BertTokenizerFast): r""" Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's `tokenizers` library). :class:`~transformers.DPRContextEncoderTokenizerFast` is identical to :class:`~transformers.BertTokenizerFast` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION slow_tokenizer_class = DPRContextEncoderTokenizer class DPRQuestionEncoderTokenizerFast(BertTokenizerFast): r""" Constructs a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's `tokenizers` library). :class:`~transformers.DPRQuestionEncoderTokenizerFast` is identical to :class:`~transformers.BertTokenizerFast` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION slow_tokenizer_class = DPRQuestionEncoderTokenizer DPRSpanPrediction = collections.namedtuple( "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"] ) DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"]) CUSTOM_DPR_READER_DOCSTRING = r""" Return a dictionary with the token ids of the input strings and other information to give to :obj:`.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting :obj:`input_ids` is a matrix of size :obj:`(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (:obj:`str` or :obj:`List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like :obj:`[questions] * n_passages`. Otherwise you have to specify as many questions as in :obj:`titles` or :obj:`texts`. titles (:obj:`str` or :obj:`List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (:obj:`str` or :obj:`List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. 
Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`False`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (:obj:`int`, `optional`): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. return_attention_mask (:obj:`bool`, `optional`): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute. `What are attention masks? <../glossary.html#attention-mask>`__ Return: :obj:`Dict[str, List[List[int]]]`: A dictionary with the following keys: - ``input_ids``: List of token ids to be fed to a model. - ``attention_mask``: List of indices specifying which tokens should be attended to by the model. 
""" @add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING) class CustomDPRReaderTokenizerMixin: def __call__( self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs ) -> BatchEncoding: if titles is None and texts is None: return super().__call__( questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, ) elif titles is None or texts is None: text_pair = titles if texts is None else texts return super().__call__( questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, ) titles = titles if not isinstance(titles, str) else [titles] texts = texts if not isinstance(texts, str) else [texts] n_passages = len(titles) questions = questions if not isinstance(questions, str) else [questions] * n_passages assert len(titles) == len( texts ), "There should be as many titles than texts but got {} titles and {} texts.".format(len(titles), len(texts)) encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"] encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"] encoded_inputs = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts) ] } if return_attention_mask is not False: attention_mask = [input_ids != self.pad_token_id for input_ids in encoded_inputs["input_ids"]] encoded_inputs["attention_mask"] = attention_mask return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors) def decode_best_spans( self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4, ) -> List[DPRSpanPrediction]: """ Get the span predictions for the extractive Q&A model. Outputs: `List` of `DPRReaderOutput` sorted by descending `(relevance_score, span_score)`. Each `DPRReaderOutput` is a `Tuple` with: **span_score**: ``float`` that corresponds to the score given by the reader for this span compared to other spans in the same passage. It corresponds to the sum of the start and end logits of the span. **relevance_score**: ``float`` that corresponds to the score of the each passage to answer the question, compared to all the other passages. It corresponds to the output of the QA classifier of the DPRReader. **doc_id**: ``int``` the id of the passage. **start_index**: ``int`` the start index of the span (inclusive). **end_index**: ``int`` the end index of the span (inclusive). Examples:: >>> from transformers import DPRReader, DPRReaderTokenizer >>> tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base') >>> model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base') >>> encoded_inputs = tokenizer( ... questions=["What is love ?"], ... titles=["Haddaway"], ... texts=["'What Is Love' is a song recorded by the artist Haddaway"], ... return_tensors='pt' ... 
) >>> outputs = model(**encoded_inputs) >>> predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs) >>> print(predicted_spans[0].text) # best span """ input_ids = reader_input["input_ids"] start_logits, end_logits, relevance_logits = reader_output[:3] n_passages = len(relevance_logits) sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__) nbest_spans_predictions: List[DPRReaderOutput] = [] for doc_id in sorted_docs: sequence_ids = list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: sequence_len = sequence_ids.index(self.pad_token_id) else: sequence_len = len(sequence_ids) best_spans = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) ) if len(nbest_spans_predictions) >= num_spans: break return nbest_spans_predictions[:num_spans] def _get_best_spans( self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int, ) -> List[DPRSpanPrediction]: """ Finds the best answer span for the extractive Q&A model for one passage. It returns the best span by descending `span_score` order and keeping max `top_spans` spans. Spans longer that `max_answer_length` are ignored. """ scores = [] for (start_index, start_score) in enumerate(start_logits): for (answer_length, end_score) in enumerate(end_logits[start_index : start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) scores = sorted(scores, key=lambda x: x[1], reverse=True) chosen_span_intervals = [] for (start_index, end_index), score in scores: assert start_index <= end_index, "Wrong span indices: [{}:{}]".format(start_index, end_index) length = end_index - start_index + 1 assert length <= max_answer_length, "Span is too long: {} > {}".format(length, max_answer_length) if any( [ start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ] ): continue chosen_span_intervals.append((start_index, end_index)) if len(chosen_span_intervals) == top_spans: break return chosen_span_intervals @add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING) class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast): r""" Constructs a "fast" DPRReader tokenizer (backed by HuggingFace's `tokenizers` library). :class:`~transformers.DPRReaderTokenizerFast` is almost identical to :class:`~transformers.BertTokenizerFast` and runs end-to-end tokenization: punctuation splitting and wordpiece. The difference is that is has three inputs strings: question, titles and texts that are combined to be fed to the :class:`~transformers.DPRReader` model. Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning parameters. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION model_input_names = ["attention_mask"] slow_tokenizer_class = DPRReaderTokenizer
20,403
51.184143
152
py
SLT-FAI
SLT-FAI-main/transformers/modeling_tf_ctrl.py
# coding=utf-8 # Copyright 2018 Salesforce and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 CTRL model.""" import numpy as np import tensorflow as tf from .configuration_ctrl import CTRLConfig from .file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable from .modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast from .modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFPreTrainedModel, TFSharedEmbeddings, keras_serializable, shape_list, ) from .tokenization_utils import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "CTRLConfig" _TOKENIZER_FOR_DOC = "CTRLTokenizer" TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = [ "ctrl" # See all CTRL models at https://huggingface.co/models?filter=ctrl ] def angle_defn(pos, i, d_model_size): angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model_size)) return pos * angle_rates def positional_encoding(position, d_model_size): # create the sinusoidal pattern for the positional encoding angle_rads = angle_defn(np.arange(position)[:, np.newaxis], np.arange(d_model_size)[np.newaxis, :], d_model_size) sines = np.sin(angle_rads[:, 0::2]) cosines = np.cos(angle_rads[:, 1::2]) # pos_encoding = tf.cast(np.concatenate([sines, cosines], axis=-1)[np.newaxis, ...], dtype=tf.float32) pos_encoding = tf.cast(np.concatenate([sines, cosines], axis=-1), dtype=tf.float32) return pos_encoding def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None): # calculate attention matmul_qk = tf.matmul(q, k, transpose_b=True) dk = tf.cast(shape_list(k)[-1], tf.float32) scaled_attention_logits = matmul_qk / tf.math.sqrt(dk) if mask is not None: scaled_attention_logits += mask * -1e4 if attention_mask is not None: # Apply the attention mask scaled_attention_logits = scaled_attention_logits + attention_mask attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # Mask heads if we want to if head_mask is not None: attention_weights = attention_weights * head_mask output = tf.matmul(attention_weights, v) return output, attention_weights class TFMultiHeadAttention(tf.keras.layers.Layer): def __init__(self, d_model_size, num_heads, output_attentions=False, **kwargs): super().__init__(**kwargs) self.num_heads = num_heads self.d_model_size = d_model_size self.output_attentions = output_attentions self.depth = int(d_model_size / self.num_heads) self.Wq = tf.keras.layers.Dense(d_model_size, name="Wq") self.Wk = tf.keras.layers.Dense(d_model_size, name="Wk") self.Wv = tf.keras.layers.Dense(d_model_size, name="Wv") self.dense = tf.keras.layers.Dense(d_model_size, name="dense") def split_into_heads(self, x, batch_size): x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, v, k, q, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False): 
batch_size = shape_list(q)[0] q = self.Wq(q) k = self.Wk(k) v = self.Wv(v) q = self.split_into_heads(q, batch_size) k = self.split_into_heads(k, batch_size) v = self.split_into_heads(v, batch_size) if layer_past is not None: past_key, past_value = tf.unstack(layer_past, axis=0) k = tf.concat((past_key, k), axis=-2) v = tf.concat((past_value, v), axis=-2) if use_cache: present = tf.stack((k, v), axis=0) else: present = (None,) output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask) scaled_attention = tf.transpose(output[0], perm=[0, 2, 1, 3]) attn = output[1] original_size_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model_size)) output = self.dense(original_size_attention) outputs = (output, present) if output_attentions: outputs = outputs + (attn,) return outputs class TFPointWiseFeedForwardLayer(tf.keras.layers.Layer): def __init__(self, d_model_size, dff, **kwargs): super().__init__(**kwargs) self.dense_0 = tf.keras.layers.Dense(dff, activation="relu", name="0") self.dense_2 = tf.keras.layers.Dense(d_model_size, name="2") def call(self, inputs, trainable=False): dense_0_output = self.dense_0(inputs) dense_2_output = self.dense_2(dense_0_output) return dense_2_output class TFEncoderLayer(tf.keras.layers.Layer): def __init__( self, d_model_size, num_heads, dff, rate=0.1, layer_norm_epsilon=1e-6, output_attentions=False, **kwargs ): super().__init__(**kwargs) self.output_attentions = output_attentions self.multi_head_attention = TFMultiHeadAttention( d_model_size, num_heads, output_attentions=self.output_attentions, name="multi_head_attention" ) self.ffn = TFPointWiseFeedForwardLayer(d_model_size, dff, name="ffn") self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm1") self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm2") self.dropout1 = tf.keras.layers.Dropout(rate) self.dropout2 = tf.keras.layers.Dropout(rate) def call(self, x, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False): normed = self.layernorm1(x) attn_outputs = self.multi_head_attention( normed, normed, normed, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=training, ) attn_output = attn_outputs[0] attn_output = self.dropout1(attn_output, training=training) out1 = x + attn_output out2 = self.layernorm2(out1) ffn_output = self.ffn(out2) ffn_output = self.dropout2(ffn_output, training=training) out2 = out1 + ffn_output outputs = (out2,) + attn_outputs[1:] return outputs @keras_serializable class TFCTRLMainLayer(tf.keras.layers.Layer): config_class = CTRLConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.use_cache = config.use_cache self.return_dict = config.use_return_dict self.d_model_size = config.n_embd self.num_layers = config.n_layer self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size) self.w = TFSharedEmbeddings( config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name="w" ) self.dropout = tf.keras.layers.Dropout(config.embd_pdrop) self.h = [ TFEncoderLayer( config.n_embd, config.n_head, config.dff, config.resid_pdrop, config.layer_norm_epsilon, self.output_attentions, name="h_._{}".format(i), ) for i in range(config.n_layer) ] self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="layernorm") def 
get_input_embeddings(self): return self.w def set_input_embeddings(self, value): self.w.weight = value self.w.vocab_size = value.shape[0] def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError def _prune_heads(self, heads_to_prune): """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ raise NotImplementedError def call( self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] past = inputs[1] if len(inputs) > 1 else past attention_mask = inputs[2] if len(inputs) > 2 else attention_mask token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids position_ids = inputs[4] if len(inputs) > 4 else position_ids head_mask = inputs[5] if len(inputs) > 5 else head_mask inputs_embeds = inputs[6] if len(inputs) > 6 else inputs_embeds use_cache = inputs[7] if len(inputs) > 7 else use_cache output_attentions = inputs[8] if len(inputs) > 8 else output_attentions output_hidden_states = inputs[9] if len(inputs) > 9 else output_hidden_states return_dict = inputs[10] if len(inputs) > 10 else return_dict assert len(inputs) <= 11, "Too many inputs." elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") past = inputs.get("past", past) attention_mask = inputs.get("attention_mask", attention_mask) token_type_ids = inputs.get("token_type_ids", token_type_ids) position_ids = inputs.get("position_ids", position_ids) head_mask = inputs.get("head_mask", head_mask) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) use_cache = inputs.get("use_cache", use_cache) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) return_dict = inputs.get("return_dict", return_dict) assert len(inputs) <= 11, "Too many inputs." else: input_ids = inputs output_attentions = output_attentions if output_attentions is not None else self.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states use_cache = use_cache if use_cache is not None else self.use_cache return_dict = return_dict if return_dict is not None else self.return_dict # If using past key value states, only the last tokens # should be given as an input if past is not None: if input_ids is not None: input_ids = input_ids[:, -1:] if inputs_embeds is not None: inputs_embeds = inputs_embeds[:, -1:] if token_type_ids is not None: token_type_ids = token_type_ids[:, -1:] if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if past is None: past_length = 0 past = [None] * len(self.h) else: past_length = shape_list(past[0][0])[-2] if position_ids is None: position_ids = tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32)[tf.newaxis, :] position_ids = tf.tile(position_ids, [input_shape[0], 1]) # Attention mask. if attention_mask is not None: # We create a 3D attention mask from a 2D tensor mask. 
# Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_mask = tf.cast(attention_mask, tf.float32) attention_mask = (1.0 - attention_mask) * -10000.0 else: attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # head_mask has shape n_layer x batch x n_heads x N x N if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_layers if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) token_type_embeds = self.w(token_type_ids, mode="embedding") token_type_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, tf.float32)) else: token_type_embeds = 0 position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: inputs_embeds = self.w(input_ids, mode="embedding") seq_len = input_shape[-1] mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0) inputs_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, tf.float32)) pos_embeds = tf.gather(self.pos_encoding, position_ids) hidden_states = inputs_embeds + pos_embeds + token_type_embeds hidden_states = self.dropout(hidden_states, training=training) output_shape = input_shape + [shape_list(hidden_states)[-1]] presents = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, (h, layer_past) in enumerate(zip(self.h, past)): if output_hidden_states: all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) outputs = h( hidden_states, mask, layer_past, attention_mask, head_mask[i], use_cache, output_attentions, training=training, ) hidden_states, present = outputs[:2] if use_cache: presents = presents + (present,) if output_attentions: all_attentions = all_attentions + (outputs[2],) hidden_states = self.layernorm(hidden_states) hidden_states = tf.reshape(hidden_states, output_shape) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions) if not return_dict: return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_attentions, ) class TFCTRLPreTrainedModel(TFPreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = CTRLConfig base_model_prefix = "transformer" CTRL_START_DOCSTRING = r""" This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. note:: TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all the tensors in the first argument of the model call function: :obj:`model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: :obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Parameters: config (:class:`~transformers.CTRLConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ CTRL_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, input_ids_length)`): :obj:`input_ids_length` = ``sequence_length`` if ``past`` is ``None`` else ``past[0].shape[-2]`` (``sequence_length`` of input past key value states). Indices of input sequence tokens in the vocabulary. If :obj:`past` is used, only input IDs that do not have their past calculated should be passed as ``input_ids``. Indices can be obtained using :class:`~transformers.CTRLTokenizer`. See :meth:`transformers.PreTrainedTokenizer.__call__` and :meth:`transformers.PreTrainedTokenizer.encode` for details. `What are input IDs? <../glossary.html#input-ids>`__ past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see :obj:`past` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed. attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`): Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`__ position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`__ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, ``past`` key value states are returned and can be used to speed up decoding (see ``past``). output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. training (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.", CTRL_START_DOCSTRING, ) class TFCTRLModel(TFCTRLPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFCTRLMainLayer(config, name="transformer") @add_start_docstrings_to_callable(CTRL_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="ctrl", output_type=TFBaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def call(self, inputs, **kwargs): outputs = self.transformer(inputs, **kwargs) return outputs class TFCTRLLMHead(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.input_embeddings = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def call(self, hidden_states): hidden_states = self.input_embeddings(hidden_states, mode="linear") hidden_states = hidden_states + self.bias return hidden_states @add_start_docstrings( """The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). 
""", CTRL_START_DOCSTRING, ) class TFCTRLLMHeadModel(TFCTRLPreTrainedModel, TFCausalLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFCTRLMainLayer(config, name="transformer") self.lm_head = TFCTRLLMHead(config, self.transformer.w, name="lm_head") def get_output_embeddings(self): return self.lm_head.input_embeddings def prepare_inputs_for_generation(self, inputs, past, **kwargs): # only last token for inputs_ids if past is defined in kwargs if past: inputs = tf.expand_dims(inputs[:, -1], -1) return {"inputs": inputs, "past": past, "use_cache": kwargs["use_cache"]} @add_start_docstrings_to_callable(CTRL_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="ctrl", output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the cross entropy classification loss. Indices should be in ``[0, ..., config.vocab_size - 1]``. """ return_dict = return_dict if return_dict is not None else self.transformer.return_dict if isinstance(inputs, (tuple, list)): labels = inputs[11] if len(inputs) > 11 else labels if len(inputs) > 11: inputs = inputs[:11] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) transformer_outputs = self.transformer( inputs, past=past, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = transformer_outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # shift labels to the left and cut last logit token logits = logits[:, :-1] labels = labels[:, 1:] loss = self.compute_loss(labels, logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
28,263
40.748892
134
py
SLT-FAI
SLT-FAI-main/transformers/modeling_tf_funnel.py
# coding=utf-8 # Copyright 2020-present Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 Funnel model. """ import warnings from dataclasses import dataclass from typing import Optional, Tuple import tensorflow as tf from .activations_tf import get_tf_activation from .configuration_funnel import FunnelConfig from .file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings, ) from .modeling_tf_outputs import ( TFBaseModelOutput, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from .modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras_serializable, shape_list, ) from .tokenization_utils import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "FunnelConfig" _TOKENIZER_FOR_DOC = "FunnelTokenizer" TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = [ "funnel-transformer/small", # B4-4-4H768 "funnel-transformer/small-base", # B4-4-4H768, no decoder "funnel-transformer/medium", # B6-3x2-3x2H768 "funnel-transformer/medium-base", # B6-3x2-3x2H768, no decoder "funnel-transformer/intermediate", # B6-6-6H768 "funnel-transformer/intermediate-base", # B6-6-6H768, no decoder "funnel-transformer/large", # B8-8-8H1024 "funnel-transformer/large-base", # B8-8-8H1024, no decoder "funnel-transformer/xlarge-base", # B10-10-10H1024 "funnel-transformer/xlarge", # B10-10-10H1024, no decoder ] INF = 1e6 class TFFunnelEmbeddings(tf.keras.layers.Layer): """Construct the embeddings from word embeddings.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.hidden_size = config.hidden_size self.initializer_range = config.initializer_range self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout) def build(self, input_shape): """Build shared word embedding layer """ with tf.name_scope("word_embeddings"): # Create and initialize weights. The random normal initializer was chosen # arbitrarily, and works well. self.word_embeddings = self.add_weight( "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) super().build(input_shape) def call( self, input_ids=None, inputs_embeds=None, mode="embedding", training=False, ): """Get token embeddings of inputs. Args: inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids) mode: string, a valid value is one of "embedding" and "linear". 
        Returns:
            outputs: (1) If mode == "embedding", output embedding tensor, float32 with shape [batch_size, length,
                embedding_size]; (2) mode == "linear", output linear tensor, float32 with shape [batch_size, length,
                vocab_size].
        Raises:
            ValueError: if mode is not valid.

        Shared weights logic adapted from
        https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
        """
        if mode == "embedding":
            return self._embedding(input_ids, inputs_embeds, training=training)
        elif mode == "linear":
            return self._linear(input_ids)
        else:
            raise ValueError("mode {} is not valid.".format(mode))

    def _embedding(self, input_ids, inputs_embeds, training=False):
        """Applies embedding based on inputs tensor."""
        assert not (input_ids is None and inputs_embeds is None)
        if inputs_embeds is None:
            inputs_embeds = tf.gather(self.word_embeddings, input_ids)

        embeddings = self.layer_norm(inputs_embeds)
        embeddings = self.dropout(embeddings, training=training)
        return embeddings

    def _linear(self, inputs):
        """Computes logits by running inputs through a linear layer.

        Args:
            inputs: A float32 tensor with shape [batch_size, length, hidden_size]
        Returns:
            float32 tensor with shape [batch_size, length, vocab_size].
        """
        batch_size = shape_list(inputs)[0]
        length = shape_list(inputs)[1]
        x = tf.reshape(inputs, [-1, self.hidden_size])
        logits = tf.matmul(x, self.word_embeddings, transpose_b=True)

        return tf.reshape(logits, [batch_size, length, self.vocab_size])


class TFFunnelAttentionStructure:
    """
    Contains helpers for `TFFunnelRelMultiheadAttention`.
    """

    cls_token_type_id: int = 2

    def __init__(self, config):
        self.d_model = config.d_model
        self.attention_type = config.attention_type
        self.num_blocks = config.num_blocks
        self.separate_cls = config.separate_cls
        self.truncate_seq = config.truncate_seq
        self.pool_q_only = config.pool_q_only
        self.pooling_type = config.pooling_type

        self.sin_dropout = tf.keras.layers.Dropout(config.hidden_dropout)
        self.cos_dropout = tf.keras.layers.Dropout(config.hidden_dropout)
        # Track where we are at in terms of pooling from the original input, e.g., by how much the sequence length was
        # divided.
        self.pooling_mult = None

    def init_attention_inputs(self, inputs_embeds, attention_mask=None, token_type_ids=None, training=False):
        """ Returns the attention inputs associated with the inputs of the model.
""" # inputs_embeds has shape batch_size x seq_len x d_model # attention_mask and token_type_ids have shape batch_size x seq_len self.pooling_mult = 1 self.seq_len = seq_len = inputs_embeds.shape[1] position_embeds = self.get_position_embeds(seq_len, dtype=inputs_embeds.dtype, training=training) token_type_mat = self.token_type_ids_to_mat(token_type_ids) if token_type_ids is not None else None cls_mask = ( tf.pad(tf.ones([seq_len - 1, seq_len - 1], dtype=inputs_embeds.dtype), [[1, 0], [1, 0]]) if self.separate_cls else None ) return (position_embeds, token_type_mat, attention_mask, cls_mask) def token_type_ids_to_mat(self, token_type_ids): """Convert `token_type_ids` to `token_type_mat`.""" token_type_mat = tf.equal(tf.expand_dims(token_type_ids, -1), tf.expand_dims(token_type_ids, -2)) # Treat <cls> as in the same segment as both A & B cls_ids = tf.equal(token_type_ids, tf.constant([self.cls_token_type_id], dtype=token_type_ids.dtype)) cls_mat = tf.logical_or(tf.expand_dims(cls_ids, -1), tf.expand_dims(cls_ids, -2)) return tf.logical_or(cls_mat, token_type_mat) def get_position_embeds(self, seq_len, dtype=tf.float32, training=False): """ Create and cache inputs related to relative position encoding. Those are very different depending on whether we are using the factorized or the relative shift attention: For the factorized attention, it returns the matrices (phi, pi, psi, omega) used in the paper, appendix A.2.2, final formula. For the relative shif attention, it returns all possible vectors R used in the paper, appendix A.2.1, final formula. Paper link: https://arxiv.org/abs/2006.03236 """ if self.attention_type == "factorized": # Notations from the paper, appending A.2.2, final formula. # We need to create and return the matrics phi, psi, pi and omega. pos_seq = tf.range(0, seq_len, 1.0, dtype=dtype) freq_seq = tf.range(0, self.d_model // 2, 1.0, dtype=dtype) inv_freq = 1 / (10000 ** (freq_seq / (self.d_model // 2))) sinusoid = tf.einsum("i,d->id", pos_seq, inv_freq) sin_embed = tf.sin(sinusoid) sin_embed_d = self.sin_dropout(sin_embed, training=training) cos_embed = tf.cos(sinusoid) cos_embed_d = self.cos_dropout(cos_embed, training=training) # This is different from the formula on the paper... phi = tf.concat([sin_embed_d, sin_embed_d], axis=-1) psi = tf.concat([cos_embed, sin_embed], axis=-1) pi = tf.concat([cos_embed_d, cos_embed_d], axis=-1) omega = tf.concat([-sin_embed, cos_embed], axis=-1) return (phi, pi, psi, omega) else: # Notations from the paper, appending A.2.1, final formula. # We need to create and return all the possible vectors R for all blocks and shifts. freq_seq = tf.range(0, self.d_model // 2, 1.0, dtype=dtype) inv_freq = 1 / (10000 ** (freq_seq / (self.d_model // 2))) # Maximum relative positions for the first input rel_pos_id = tf.range(-seq_len * 2, seq_len * 2, 1.0, dtype=dtype) zero_offset = seq_len * 2 sinusoid = tf.einsum("i,d->id", rel_pos_id, inv_freq) sin_embed = self.sin_dropout(tf.sin(sinusoid), training=training) cos_embed = self.cos_dropout(tf.cos(sinusoid), training=training) pos_embed = tf.concat([sin_embed, cos_embed], axis=-1) pos = tf.range(0, seq_len, dtype=dtype) pooled_pos = pos position_embeds_list = [] for block_index in range(0, self.num_blocks): # For each block with block_index > 0, we need two types position embeddings: # - Attention(pooled-q, unpooled-kv) # - Attention(pooled-q, pooled-kv) # For block_index = 0 we only need the second one and leave the first one as None. 
# First type if block_index == 0: position_embeds_pooling = None else: pooled_pos = self.stride_pool_pos(pos, block_index) # construct rel_pos_id stride = 2 ** (block_index - 1) rel_pos = self.relative_pos(pos, stride, pooled_pos, shift=2) # rel_pos = tf.expand_dims(rel_pos,1) + zero_offset # rel_pos = tf.broadcast_to(rel_pos, (rel_pos.shape[0], self.d_model)) rel_pos = rel_pos + zero_offset position_embeds_pooling = tf.gather(pos_embed, rel_pos, axis=0) # Second type pos = pooled_pos stride = 2 ** block_index rel_pos = self.relative_pos(pos, stride) # rel_pos = tf.expand_dims(rel_pos,1) + zero_offset # rel_pos = tf.broadcast_to(rel_pos, (rel_pos.shape[0], self.d_model)) rel_pos = rel_pos + zero_offset position_embeds_no_pooling = tf.gather(pos_embed, rel_pos, axis=0) position_embeds_list.append([position_embeds_no_pooling, position_embeds_pooling]) return position_embeds_list def stride_pool_pos(self, pos_id, block_index): """ Pool `pos_id` while keeping the cls token separate (if `self.separate_cls=True`). """ if self.separate_cls: # Under separate <cls>, we treat the <cls> as the first token in # the previous block of the 1st real block. Since the 1st real # block always has position 1, the position of the previous block # will be at `1 - 2 ** block_index`. cls_pos = tf.constant([-(2 ** block_index) + 1], dtype=pos_id.dtype) pooled_pos_id = pos_id[1:-1] if self.truncate_seq else pos_id[1:] return tf.concat([cls_pos, pooled_pos_id[::2]], 0) else: return pos_id[::2] def relative_pos(self, pos, stride, pooled_pos=None, shift=1): """ Build the relative positional vector between `pos` and `pooled_pos`. """ if pooled_pos is None: pooled_pos = pos ref_point = pooled_pos[0] - pos[0] num_remove = shift * len(pooled_pos) max_dist = ref_point + num_remove * stride min_dist = pooled_pos[0] - pos[-1] return tf.range(max_dist, min_dist - 1, -stride, dtype=tf.int64) def stride_pool(self, tensor, axis): """ Perform pooling by stride slicing the tensor along the given axis. """ if tensor is None: return None # Do the stride pool recursively if axis is a list or a tuple of ints. if isinstance(axis, (list, tuple)): for ax in axis: tensor = self.stride_pool(tensor, ax) return tensor # Do the stride pool recursively if tensor is a list or tuple of tensors. if isinstance(tensor, (tuple, list)): return type(tensor)(self.stride_pool(x, axis) for x in tensor) # Deal with negative axis axis %= tensor.shape.ndims axis_slice = slice(None, -1, 2) if self.separate_cls and self.truncate_seq else slice(None, None, 2) enc_slice = [slice(None)] * axis + [axis_slice] if self.separate_cls: cls_slice = [slice(None)] * axis + [slice(None, 1)] tensor = tf.concat([tensor[cls_slice], tensor], axis) return tensor[enc_slice] def pool_tensor(self, tensor, mode="mean", stride=2): """Apply 1D pooling to a tensor of size [B x T (x H)].""" if tensor is None: return None # Do the pool recursively if tensor is a list or tuple of tensors. 
        if isinstance(tensor, (tuple, list)):
            return type(tensor)(self.pool_tensor(x, mode=mode, stride=stride) for x in tensor)

        if self.separate_cls:
            suffix = tensor[:, :-1] if self.truncate_seq else tensor
            tensor = tf.concat([tensor[:, :1], suffix], axis=1)

        ndim = tensor.shape.ndims
        if ndim == 2:
            tensor = tensor[:, :, None]

        if mode == "mean":
            tensor = tf.nn.avg_pool1d(tensor, stride, strides=stride, data_format="NWC", padding="SAME")
        elif mode == "max":
            tensor = tf.nn.max_pool1d(tensor, stride, strides=stride, data_format="NWC", padding="SAME")
        elif mode == "min":
            tensor = -tf.nn.max_pool1d(-tensor, stride, strides=stride, data_format="NWC", padding="SAME")
        else:
            raise NotImplementedError("The supported modes are 'mean', 'max' and 'min'.")

        return tf.squeeze(tensor, 2) if ndim == 2 else tensor

    def pre_attention_pooling(self, output, attention_inputs):
        """ Pool `output` and the proper parts of `attention_inputs` before the attention layer. """
        position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
        if self.pool_q_only:
            if self.attention_type == "factorized":
                position_embeds = self.stride_pool(position_embeds[:2], 0) + position_embeds[2:]
            token_type_mat = self.stride_pool(token_type_mat, 1)
            cls_mask = self.stride_pool(cls_mask, 0)
            output = self.pool_tensor(output, mode=self.pooling_type)
        else:
            self.pooling_mult *= 2
            if self.attention_type == "factorized":
                position_embeds = self.stride_pool(position_embeds, 0)
            token_type_mat = self.stride_pool(token_type_mat, [1, 2])
            cls_mask = self.stride_pool(cls_mask, [1, 2])
            attention_mask = self.pool_tensor(attention_mask, mode="min")
            output = self.pool_tensor(output, mode=self.pooling_type)
        attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
        return output, attention_inputs

    def post_attention_pooling(self, attention_inputs):
        """ Pool the proper parts of `attention_inputs` after the attention layer. """
        position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
        if self.pool_q_only:
            self.pooling_mult *= 2
            if self.attention_type == "factorized":
                position_embeds = position_embeds[:2] + self.stride_pool(position_embeds[2:], 0)
            token_type_mat = self.stride_pool(token_type_mat, 2)
            cls_mask = self.stride_pool(cls_mask, 1)
            attention_mask = self.pool_tensor(attention_mask, mode="min")
        attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
        return attention_inputs


def _relative_shift_gather(positional_attn, context_len, shift):
    batch_size, n_head, seq_len, max_rel_len = shape_list(positional_attn)
    # max_rel_len = 2 * context_len + shift - 1 is the number of possible relative positions i-j

    # What's next is the same as doing the following gather in PyTorch, which might be clearer code but less efficient.
# idxs = context_len + torch.arange(0, context_len).unsqueeze(0) - torch.arange(0, seq_len).unsqueeze(1) # # matrix of context_len + i-j # return positional_attn.gather(3, idxs.expand([batch_size, n_head, context_len, context_len])) positional_attn = tf.reshape(positional_attn, [batch_size, n_head, max_rel_len, seq_len]) positional_attn = positional_attn[:, :, shift:, :] positional_attn = tf.reshape(positional_attn, [batch_size, n_head, seq_len, max_rel_len - shift]) positional_attn = positional_attn[..., :context_len] return positional_attn class TFFunnelRelMultiheadAttention(tf.keras.layers.Layer): def __init__(self, config, block_index, **kwargs): super().__init__(**kwargs) self.attention_type = config.attention_type self.n_head = n_head = config.n_head self.d_head = d_head = config.d_head self.d_model = d_model = config.d_model self.initializer_range = config.initializer_range self.block_index = block_index self.hidden_dropout = tf.keras.layers.Dropout(config.hidden_dropout) self.attention_dropout = tf.keras.layers.Dropout(config.attention_dropout) initializer = get_initializer(config.initializer_range) self.q_head = tf.keras.layers.Dense( n_head * d_head, use_bias=False, kernel_initializer=initializer, name="q_head" ) self.k_head = tf.keras.layers.Dense(n_head * d_head, kernel_initializer=initializer, name="k_head") self.v_head = tf.keras.layers.Dense(n_head * d_head, kernel_initializer=initializer, name="v_head") self.post_proj = tf.keras.layers.Dense(d_model, kernel_initializer=initializer, name="post_proj") self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.scale = 1.0 / (d_head ** 0.5) def build(self, input_shape): n_head, d_head, d_model = self.n_head, self.d_head, self.d_model initializer = get_initializer(self.initializer_range) self.r_w_bias = self.add_weight( shape=(n_head, d_head), initializer=initializer, trainable=True, name="r_w_bias" ) self.r_r_bias = self.add_weight( shape=(n_head, d_head), initializer=initializer, trainable=True, name="r_r_bias" ) self.r_kernel = self.add_weight( shape=(d_model, n_head, d_head), initializer=initializer, trainable=True, name="r_kernel" ) self.r_s_bias = self.add_weight( shape=(n_head, d_head), initializer=initializer, trainable=True, name="r_s_bias" ) self.seg_embed = self.add_weight( shape=(2, n_head, d_head), initializer=initializer, trainable=True, name="seg_embed" ) super().build(input_shape) def relative_positional_attention(self, position_embeds, q_head, context_len, cls_mask=None): """ Relative attention score for the positional encodings """ # q_head has shape batch_size x sea_len x n_head x d_head if self.attention_type == "factorized": # Notations from the paper, appending A.2.2, final formula (https://arxiv.org/abs/2006.03236) # phi and pi have shape seq_len x d_model, psi and omega have shape context_len x d_model phi, pi, psi, omega = position_embeds # Shape n_head x d_head u = self.r_r_bias * self.scale # Shape d_model x n_head x d_head w_r = self.r_kernel # Shape batch_size x sea_len x n_head x d_model q_r_attention = tf.einsum("binh,dnh->bind", q_head + u, w_r) q_r_attention_1 = q_r_attention * phi[:, None] q_r_attention_2 = q_r_attention * pi[:, None] # Shape batch_size x n_head x seq_len x context_len positional_attn = tf.einsum("bind,jd->bnij", q_r_attention_1, psi) + tf.einsum( "bind,jd->bnij", q_r_attention_2, omega ) else: shift = 2 if q_head.shape[1] != context_len else 1 # Notations from the paper, appending A.2.1, final formula 
(https://arxiv.org/abs/2006.03236) # Grab the proper positional encoding, shape max_rel_len x d_model r = position_embeds[self.block_index][shift - 1] # Shape n_head x d_head v = self.r_r_bias * self.scale # Shape d_model x n_head x d_head w_r = self.r_kernel # Shape max_rel_len x n_head x d_model r_head = tf.einsum("td,dnh->tnh", r, w_r) # Shape batch_size x n_head x seq_len x max_rel_len positional_attn = tf.einsum("binh,tnh->bnit", q_head + v, r_head) # Shape batch_size x n_head x seq_len x context_len positional_attn = _relative_shift_gather(positional_attn, context_len, shift) if cls_mask is not None: positional_attn *= cls_mask return positional_attn def relative_token_type_attention(self, token_type_mat, q_head, cls_mask=None): """ Relative attention score for the token_type_ids """ if token_type_mat is None: return 0 batch_size, seq_len, context_len = shape_list(token_type_mat) # q_head has shape batch_size x seq_len x n_head x d_head # Shape n_head x d_head r_s_bias = self.r_s_bias * self.scale # Shape batch_size x n_head x seq_len x 2 token_type_bias = tf.einsum("bind,snd->bnis", q_head + r_s_bias, self.seg_embed) # Shape batch_size x n_head x seq_len x context_len new_shape = [batch_size, q_head.shape[2], seq_len, context_len] token_type_mat = tf.broadcast_to(token_type_mat[:, None], new_shape) # Shapes batch_size x n_head x seq_len diff_token_type, same_token_type = tf.split(token_type_bias, 2, axis=-1) # Shape batch_size x n_head x seq_len x context_len token_type_attn = tf.where( token_type_mat, tf.broadcast_to(same_token_type, new_shape), tf.broadcast_to(diff_token_type, new_shape) ) if cls_mask is not None: token_type_attn *= cls_mask return token_type_attn def call(self, query, key, value, attention_inputs, output_attentions=False, training=False): # query has shape batch_size x seq_len x d_model # key and value have shapes batch_size x context_len x d_model position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs batch_size, seq_len, _ = shape_list(query) context_len = key.shape[1] n_head, d_head = self.n_head, self.d_head # Shape batch_size x seq_len x n_head x d_head q_head = tf.reshape(self.q_head(query), [batch_size, seq_len, n_head, d_head]) # Shapes batch_size x context_len x n_head x d_head k_head = tf.reshape(self.k_head(key), [batch_size, context_len, n_head, d_head]) v_head = tf.reshape(self.v_head(value), [batch_size, context_len, n_head, d_head]) q_head = q_head * self.scale # Shape n_head x d_head r_w_bias = self.r_w_bias * self.scale # Shapes batch_size x n_head x seq_len x context_len content_score = tf.einsum("bind,bjnd->bnij", q_head + r_w_bias, k_head) positional_attn = self.relative_positional_attention(position_embeds, q_head, context_len, cls_mask) token_type_attn = self.relative_token_type_attention(token_type_mat, q_head, cls_mask) # merge attention scores attn_score = content_score + positional_attn + token_type_attn # precision safe in case of mixed precision training dtype = attn_score.dtype if dtype != tf.float32: attn_score = tf.cast(attn_score, tf.float32) # perform masking if attention_mask is not None: attn_score = attn_score - INF * (1 - tf.cast(attention_mask[:, None, None], tf.float32)) # attention probability attn_prob = tf.nn.softmax(attn_score, axis=-1) if dtype != tf.float32: attn_prob = tf.cast(attn_prob, dtype) attn_prob = self.attention_dropout(attn_prob, training=training) # attention output, shape batch_size x seq_len x n_head x d_head attn_vec = tf.einsum("bnij,bjnd->bind", attn_prob, v_head) # Shape shape 
batch_size x seq_len x d_model attn_out = self.post_proj(tf.reshape(attn_vec, [batch_size, seq_len, n_head * d_head])) attn_out = self.hidden_dropout(attn_out, training=training) output = self.layer_norm(query + attn_out) return (output, attn_prob) if output_attentions else (output,) class TFFunnelPositionwiseFFN(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) initializer = get_initializer(config.initializer_range) self.linear_1 = tf.keras.layers.Dense(config.d_inner, kernel_initializer=initializer, name="linear_1") self.activation_function = get_tf_activation(config.hidden_act) self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) self.linear_2 = tf.keras.layers.Dense(config.d_model, kernel_initializer=initializer, name="linear_2") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout) self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") def call(self, hidden, training=False): h = self.linear_1(hidden) h = self.activation_function(h) h = self.activation_dropout(h, training=training) h = self.linear_2(h) h = self.dropout(h, training=training) return self.layer_norm(hidden + h) class TFFunnelLayer(tf.keras.layers.Layer): def __init__(self, config, block_index, **kwargs): super().__init__(**kwargs) self.attention = TFFunnelRelMultiheadAttention(config, block_index, name="attention") self.ffn = TFFunnelPositionwiseFFN(config, name="ffn") def call(self, query, key, value, attention_inputs, output_attentions=False, training=False): attn = self.attention( query, key, value, attention_inputs, output_attentions=output_attentions, training=training ) output = self.ffn(attn[0], training=training) return (output, attn[1]) if output_attentions else (output,) class TFFunnelEncoder(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.separate_cls = config.separate_cls self.pool_q_only = config.pool_q_only self.block_repeats = config.block_repeats self.attention_structure = TFFunnelAttentionStructure(config) self.blocks = [ [TFFunnelLayer(config, block_index, name=f"blocks_._{block_index}_._{i}") for i in range(block_size)] for block_index, block_size in enumerate(config.block_sizes) ] def call( self, inputs_embeds, attention_mask=None, token_type_ids=None, output_attentions=False, output_hidden_states=False, return_dict=False, training=False, ): # The pooling is not implemented on long tensors, so we convert this mask. 
# attention_mask = tf.cast(attention_mask, inputs_embeds.dtype) attention_inputs = self.attention_structure.init_attention_inputs( inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids, training=training, ) hidden = inputs_embeds all_hidden_states = (inputs_embeds,) if output_hidden_states else None all_attentions = () if output_attentions else None for block_index, block in enumerate(self.blocks): pooling_flag = shape_list(hidden)[1] > (2 if self.separate_cls else 1) pooling_flag = pooling_flag and block_index > 0 if pooling_flag: pooled_hidden, attention_inputs = self.attention_structure.pre_attention_pooling( hidden, attention_inputs ) for (layer_index, layer) in enumerate(block): for repeat_index in range(self.block_repeats[block_index]): do_pooling = (repeat_index == 0) and (layer_index == 0) and pooling_flag if do_pooling: query = pooled_hidden key = value = hidden if self.pool_q_only else pooled_hidden else: query = key = value = hidden layer_output = layer( query, key, value, attention_inputs, output_attentions=output_attentions, training=training ) hidden = layer_output[0] if do_pooling: attention_inputs = self.attention_structure.post_attention_pooling(attention_inputs) if output_attentions: all_attentions = all_attentions + layer_output[1:] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden,) if not return_dict: return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions) def upsample(x, stride, target_len, separate_cls=True, truncate_seq=False): """Upsample tensor `x` to match `target_len` by repeating the tokens `stride` time on the sequence length dimension.""" if stride == 1: return x if separate_cls: cls = x[:, :1] x = x[:, 1:] output = tf.repeat(x, repeats=stride, axis=1) if separate_cls: if truncate_seq: output = tf.pad(output, [[0, 0], [0, stride - 1], [0, 0]]) output = output[:, : target_len - 1] output = tf.concat([cls, output], axis=1) else: output = output[:, :target_len] return output class TFFunnelDecoder(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.separate_cls = config.separate_cls self.truncate_seq = config.truncate_seq self.stride = 2 ** (len(config.block_sizes) - 1) self.attention_structure = TFFunnelAttentionStructure(config) self.layers = [TFFunnelLayer(config, 0, name=f"layers_._{i}") for i in range(config.num_decoder_layers)] def call( self, final_hidden, first_block_hidden, attention_mask=None, token_type_ids=None, output_attentions=False, output_hidden_states=False, return_dict=False, training=False, ): upsampled_hidden = upsample( final_hidden, stride=self.stride, target_len=first_block_hidden.shape[1], separate_cls=self.separate_cls, truncate_seq=self.truncate_seq, ) hidden = upsampled_hidden + first_block_hidden all_hidden_states = (hidden,) if output_hidden_states else None all_attentions = () if output_attentions else None attention_inputs = self.attention_structure.init_attention_inputs( hidden, attention_mask=attention_mask, token_type_ids=token_type_ids, training=training, ) for layer in self.layers: layer_output = layer( hidden, hidden, hidden, attention_inputs, output_attentions=output_attentions, training=training ) hidden = layer_output[0] if output_attentions: all_attentions = all_attentions + layer_output[1:] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden,) if not return_dict: return 
tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions) @keras_serializable class TFFunnelBaseLayer(tf.keras.layers.Layer): """ Base model without decoder """ config_class = FunnelConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFFunnelEmbeddings(config, name="embeddings") self.encoder = TFFunnelEncoder(config, name="encoder") def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value self.embeddings.vocab_size = value.shape[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError # Not implemented yet in the library fr TF 2.0 models def call( self, inputs, attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids inputs_embeds = inputs[3] if len(inputs) > 3 else inputs_embeds output_attentions = inputs[4] if len(inputs) > 4 else output_attentions output_hidden_states = inputs[5] if len(inputs) > 5 else output_hidden_states return_dict = inputs[6] if len(inputs) > 6 else return_dict assert len(inputs) <= 7, "Too many inputs." elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") attention_mask = inputs.get("attention_mask", attention_mask) token_type_ids = inputs.get("token_type_ids", token_type_ids) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) return_dict = inputs.get("return_dict", return_dict) assert len(inputs) <= 7, "Too many inputs." 
else: input_ids = inputs output_attentions = output_attentions if output_attentions is not None else self.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states return_dict = return_dict if return_dict is not None else self.return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids, training=training) encoder_outputs = self.encoder( inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return encoder_outputs @keras_serializable class TFFunnelMainLayer(tf.keras.layers.Layer): """ Base model with decoder """ config_class = FunnelConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.block_sizes = config.block_sizes self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFFunnelEmbeddings(config, name="embeddings") self.encoder = TFFunnelEncoder(config, name="encoder") self.decoder = TFFunnelDecoder(config, name="decoder") def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value self.embeddings.vocab_size = value.shape[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError # Not implemented yet in the library fr TF 2.0 models def call( self, inputs, attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids inputs_embeds = inputs[3] if len(inputs) > 3 else inputs_embeds output_attentions = inputs[4] if len(inputs) > 4 else output_attentions output_hidden_states = inputs[5] if len(inputs) > 5 else output_hidden_states return_dict = inputs[6] if len(inputs) > 6 else return_dict assert len(inputs) <= 7, "Too many inputs." elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") attention_mask = inputs.get("attention_mask", attention_mask) token_type_ids = inputs.get("token_type_ids", token_type_ids) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) return_dict = inputs.get("return_dict", return_dict) assert len(inputs) <= 7, "Too many inputs." 
else: input_ids = inputs output_attentions = output_attentions if output_attentions is not None else self.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states return_dict = return_dict if return_dict is not None else self.return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids, training=training) encoder_outputs = self.encoder( inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict, training=training, ) decoder_outputs = self.decoder( final_hidden=encoder_outputs[0], first_block_hidden=encoder_outputs[1][self.block_sizes[0]], attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: idx = 0 outputs = (decoder_outputs[0],) if output_hidden_states: idx += 1 outputs = outputs + (encoder_outputs[1] + decoder_outputs[idx],) if output_attentions: idx += 1 outputs = outputs + (encoder_outputs[2] + decoder_outputs[idx],) return outputs return TFBaseModelOutput( last_hidden_state=decoder_outputs[0], hidden_states=(encoder_outputs.hidden_states + decoder_outputs.hidden_states) if output_hidden_states else None, attentions=(encoder_outputs.attentions + decoder_outputs.attentions) if output_attentions else None, ) class TFFunnelDiscriminatorPredictions(tf.keras.layers.Layer): """Prediction module for the discriminator, made up of two dense layers.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) initializer = get_initializer(config.initializer_range) self.dense = tf.keras.layers.Dense(config.d_model, kernel_initializer=initializer, name="dense") self.activation_function = get_tf_activation(config.hidden_act) self.dense_prediction = tf.keras.layers.Dense(1, kernel_initializer=initializer, name="dense_prediction") def call(self, discriminator_hidden_states): hidden_states = self.dense(discriminator_hidden_states) hidden_states = self.activation_function(hidden_states) logits = tf.squeeze(self.dense_prediction(hidden_states)) return logits class TFFunnelMaskedLMHead(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.input_embeddings = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def call(self, hidden_states, training=False): hidden_states = self.input_embeddings(hidden_states, mode="linear") hidden_states = hidden_states + self.bias return hidden_states class TFFunnelClassificationHead(tf.keras.layers.Layer): def __init__(self, config, n_labels, **kwargs): super().__init__(**kwargs) initializer = get_initializer(config.initializer_range) self.linear_hidden = tf.keras.layers.Dense( config.d_model, kernel_initializer=initializer, 
name="linear_hidden" ) self.dropout = tf.keras.layers.Dropout(config.hidden_dropout) self.linear_out = tf.keras.layers.Dense(n_labels, kernel_initializer=initializer, name="linear_out") def call(self, hidden, training=False): hidden = self.linear_hidden(hidden) hidden = tf.keras.activations.tanh(hidden) hidden = self.dropout(hidden, training=training) return self.linear_out(hidden) class TFFunnelPreTrainedModel(TFPreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = FunnelConfig base_model_prefix = "funnel" @dataclass class TFFunnelForPreTrainingOutput(ModelOutput): """ Output type of :class:`~transformers.FunnelForPreTrainingModel`. Args: logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`): Prediction scores of the head (scores for each token before SoftMax). hidden_states (:obj:`tuple(tf.ensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None FUNNEL_START_DOCSTRING = r""" The Funnel Transformer model was proposed in `Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing <https://arxiv.org/abs/2006.03236>`__ by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le. This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. note:: TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all the tensors in the first argument of the model call function: :obj:`model(inputs)`. 
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: :obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Parameters: config (:class:`~transformers.XxxConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ FUNNEL_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.FunnelTokenizer`. See :func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`__ inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. training (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" @add_start_docstrings( """ The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called decoder) or any task-specific head on top.""", FUNNEL_START_DOCSTRING, ) class TFFunnelBaseModel(TFFunnelPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.funnel = TFFunnelBaseLayer(config, name="funnel") @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small-base", output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def call(self, inputs, **kwargs): return self.funnel(inputs, **kwargs) @add_start_docstrings( "The bare Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top.", FUNNEL_START_DOCSTRING, ) class TFFunnelModel(TFFunnelPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.funnel = TFFunnelMainLayer(config, name="funnel") @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small", output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def call(self, inputs, **kwargs): return self.funnel(inputs, **kwargs) @add_start_docstrings( """Funnel model with a binary classification head on top as used during pre-training for identifying generated tokens.""", FUNNEL_START_DOCSTRING, ) class TFFunnelForPreTraining(TFFunnelPreTrainedModel): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.funnel = TFFunnelMainLayer(config, name="funnel") self.discriminator_predictions = TFFunnelDiscriminatorPredictions(config, name="discriminator_predictions") @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFFunnelForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call( self, inputs, attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs ): r""" Returns: Examples:: >>> from transformers import FunnelTokenizer, TFFunnelForPreTraining >>> import torch >>> tokenizer = TFFunnelTokenizer.from_pretrained('funnel-transformer/small') >>> model = TFFunnelForPreTraining.from_pretrained('funnel-transformer/small') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors= "tf") >>> logits = model(inputs).logits """ return_dict = return_dict if return_dict is not None else self.funnel.return_dict if inputs is None and "input_ids" in kwargs and isinstance(kwargs["input_ids"], (dict, BatchEncoding)): warnings.warn( "Using `input_ids` as a dictionary keyword argument is deprecated. Please use `inputs` instead." 
) inputs = kwargs["input_ids"] discriminator_hidden_states = self.funnel( inputs, attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) discriminator_sequence_output = discriminator_hidden_states[0] logits = self.discriminator_predictions(discriminator_sequence_output) if not return_dict: return (logits,) + discriminator_hidden_states[1:] return TFFunnelForPreTrainingOutput( logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions, ) @add_start_docstrings("""Funnel Model with a `language modeling` head on top. """, FUNNEL_START_DOCSTRING) class TFFunnelForMaskedLM(TFFunnelPreTrainedModel, TFMaskedLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.funnel = TFFunnelMainLayer(config, name="funnel") self.lm_head = TFFunnelMaskedLMHead(config, self.funnel.embeddings, name="lm_head") @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small", output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` """ return_dict = return_dict if return_dict is not None else self.funnel.return_dict if isinstance(inputs, (tuple, list)): labels = inputs[7] if len(inputs) > 7 else labels if len(inputs) > 7: inputs = inputs[:7] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) outputs = self.funnel( inputs, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output, training=training) loss = None if labels is None else self.compute_loss(labels, prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """Funnel Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", FUNNEL_START_DOCSTRING, ) class TFFunnelForSequenceClassification(TFFunnelPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.funnel = TFFunnelBaseLayer(config, name="funnel") self.classifier = TFFunnelClassificationHead(config, config.num_labels, name="classifier") @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small-base", output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.funnel.return_dict if isinstance(inputs, (tuple, list)): labels = inputs[7] if len(inputs) > 7 else labels if len(inputs) > 7: inputs = inputs[:7] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) outputs = self.funnel( inputs, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) last_hidden_state = outputs[0] pooled_output = last_hidden_state[:, 0] logits = self.classifier(pooled_output, training=training) loss = None if labels is None else self.compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """Funnel Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, FUNNEL_START_DOCSTRING, ) class TFFunnelForMultipleChoice(TFFunnelPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.funnel = TFFunnelBaseLayer(config, name="funnel") self.classifier = TFFunnelClassificationHead(config, 1, name="classifier") @property def dummy_inputs(self): """Dummy inputs to build the network. Returns: tf.Tensor with dummy inputs """ return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)} @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small-base", output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs, attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. 
Indices should be in ``[0, ..., num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See :obj:`input_ids` above) """ if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids inputs_embeds = inputs[3] if len(inputs) > 3 else inputs_embeds output_attentions = inputs[4] if len(inputs) > 4 else output_attentions output_hidden_states = inputs[5] if len(inputs) > 5 else output_hidden_states return_dict = inputs[6] if len(inputs) > 6 else return_dict labels = inputs[7] if len(inputs) > 7 else labels assert len(inputs) <= 8, "Too many inputs." elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") attention_mask = inputs.get("attention_mask", attention_mask) token_type_ids = inputs.get("token_type_ids", token_type_ids) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) return_dict = inputs.get("return_dict", return_dict) labels = inputs.get("labels", labels) assert len(inputs) <= 8, "Too many inputs." else: input_ids = inputs return_dict = return_dict if return_dict is not None else self.funnel.return_dict if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_inputs_embeds = ( tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) outputs = self.funnel( flat_input_ids, attention_mask=flat_attention_mask, token_type_ids=flat_token_type_ids, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) last_hidden_state = outputs[0] pooled_output = last_hidden_state[:, 0] logits = self.classifier(pooled_output, training=training) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """Funnel Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", FUNNEL_START_DOCSTRING, ) class TFFunnelForTokenClassification(TFFunnelPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.funnel = TFFunnelMainLayer(config, name="funnel") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout) self.classifier = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small", output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. """ return_dict = return_dict if return_dict is not None else self.funnel.return_dict if isinstance(inputs, (tuple, list)): labels = inputs[7] if len(inputs) > 7 else labels if len(inputs) > 7: inputs = inputs[:7] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) outputs = self.funnel( inputs, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """Funnel Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, FUNNEL_START_DOCSTRING, ) class TFFunnelForQuestionAnswering(TFFunnelPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.funnel = TFFunnelMainLayer(config, name="funnel") self.qa_outputs = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) @add_start_docstrings_to_callable(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small", output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, start_positions=None, end_positions=None, training=False, ): r""" start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. 
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.funnel.return_dict if isinstance(inputs, (tuple, list)): start_positions = inputs[7] if len(inputs) > 7 else start_positions end_positions = inputs[8] if len(inputs) > 8 else end_positions if len(inputs) > 7: inputs = inputs[:7] elif isinstance(inputs, (dict, BatchEncoding)): start_positions = inputs.pop("start_positions", start_positions) end_positions = inputs.pop("end_positions", start_positions) outputs = self.funnel( inputs, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions, "end_position": end_positions} loss = self.compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
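# Illustrative sketch (not part of the original file above), assuming TensorFlow 2.x is
# installed and the `upsample` function defined above is in scope. It shows how the decoder
# restores the pre-pooling sequence length by repeating each non-[CLS] token `stride` times;
# the tensor shapes here are made up for the demonstration.
import tensorflow as tf

x = tf.reshape(tf.range(2 * 4 * 3, dtype=tf.float32), (2, 4, 3))  # batch=2, seq_len=4, d_model=3
up = upsample(x, stride=2, target_len=7, separate_cls=True)
print(up.shape)  # (2, 7, 3): the [CLS] token is kept once, the remaining 3 tokens are each repeated twice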
73,584
43.010167
159
py
SLT-FAI
SLT-FAI-main/transformers/activations.py
import math

import torch
import torch.nn.functional as F

from .utils import logging


logger = logging.get_logger(__name__)


def swish(x):
    return x * torch.sigmoid(x)


def _gelu_python(x):
    """Original Implementation of the gelu activation function in Google Bert repo when initially created.
    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    This is now written in C in torch.nn.functional
    Also see https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def gelu_new(x):
    """Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).
    Also see https://arxiv.org/abs/1606.08415
    """
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))


if torch.__version__ < "1.4.0":
    gelu = _gelu_python
else:
    gelu = F.gelu


def gelu_fast(x):
    return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))


def mish(x):
    return x * torch.tanh(torch.nn.functional.softplus(x))


def linear_act(x):
    return x


ACT2FN = {
    "relu": F.relu,
    "swish": swish,
    "gelu": gelu,
    "tanh": torch.tanh,
    "gelu_new": gelu_new,
    "gelu_fast": gelu_fast,
    "mish": mish,
    "linear": linear_act,
    "sigmoid": torch.sigmoid,
}


def get_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError("function {} not found in ACT2FN mapping {}".format(activation_string, list(ACT2FN.keys())))
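# Illustrative usage sketch (not part of the original file above), assuming torch is installed
# and the definitions above are in scope: resolve an activation by its string name and apply it
# element-wise, as a model config's `hidden_act` field would.
import torch

act = get_activation("gelu_new")            # tanh-approximation GELU defined above
x = torch.linspace(-2.0, 2.0, steps=5)
print(act(x))                               # element-wise GELU of x
print(get_activation("swish")(x))           # x * sigmoid(x)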
1,711
23.811594
115
py
SLT-FAI
SLT-FAI-main/transformers/tokenization_bert_fast.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Tokenization classes for Bert.""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from .tokenization_bert import BertTokenizer from .tokenization_utils_fast import PreTrainedTokenizerFast from .utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", "bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", "bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", "bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", "bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", "bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", "bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", "bert-base-german-cased": "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt", "bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt", "bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt", "bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt", "bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt", "bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt", "bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-vocab.txt", "bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-vocab.txt", "TurkuNLP/bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/vocab.txt", "TurkuNLP/bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/vocab.txt", "wietsedv/bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/vocab.txt", }, "tokenizer_file": { "bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-tokenizer.json", 
"bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-tokenizer.json", "bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-tokenizer.json", "bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-tokenizer.json", "bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-tokenizer.json", "bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-tokenizer.json", "bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-tokenizer.json", "bert-base-german-cased": "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-tokenizer.json", "bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-tokenizer.json", "bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-tokenizer.json", "bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-tokenizer.json", "bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-tokenizer.json", "bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-tokenizer.json", "bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-tokenizer.json", "bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-tokenizer.json", "TurkuNLP/bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/tokenizer.json", "TurkuNLP/bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/tokenizer.json", "wietsedv/bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "bert-base-uncased": 512, "bert-large-uncased": 512, "bert-base-cased": 512, "bert-large-cased": 512, "bert-base-multilingual-uncased": 512, "bert-base-multilingual-cased": 512, "bert-base-chinese": 512, "bert-base-german-cased": 512, "bert-large-uncased-whole-word-masking": 512, "bert-large-cased-whole-word-masking": 512, "bert-large-uncased-whole-word-masking-finetuned-squad": 512, "bert-large-cased-whole-word-masking-finetuned-squad": 512, "bert-base-cased-finetuned-mrpc": 512, "bert-base-german-dbmdz-cased": 512, "bert-base-german-dbmdz-uncased": 512, "TurkuNLP/bert-base-finnish-cased-v1": 512, "TurkuNLP/bert-base-finnish-uncased-v1": 512, "wietsedv/bert-base-dutch-cased": 512, } PRETRAINED_INIT_CONFIGURATION = { "bert-base-uncased": {"do_lower_case": True}, "bert-large-uncased": {"do_lower_case": True}, "bert-base-cased": {"do_lower_case": False}, "bert-large-cased": {"do_lower_case": False}, "bert-base-multilingual-uncased": {"do_lower_case": True}, "bert-base-multilingual-cased": {"do_lower_case": False}, "bert-base-chinese": {"do_lower_case": False}, "bert-base-german-cased": {"do_lower_case": False}, "bert-large-uncased-whole-word-masking": {"do_lower_case": 
True}, "bert-large-cased-whole-word-masking": {"do_lower_case": False}, "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True}, "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False}, "bert-base-cased-finetuned-mrpc": {"do_lower_case": False}, "bert-base-german-dbmdz-cased": {"do_lower_case": False}, "bert-base-german-dbmdz-uncased": {"do_lower_case": True}, "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False}, "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True}, "wietsedv/bert-base-dutch-cased": {"do_lower_case": False}, } class BertTokenizerFast(PreTrainedTokenizerFast): r""" Construct a "fast" BERT tokenizer (backed by HuggingFace's `tokenizers` library). Based on WordPiece. This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): File containing the vocabulary. do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to lowercase the input when tokenizing. unk_token (:obj:`str`, `optional`, defaults to :obj:`"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (:obj:`str`, `optional`, defaults to :obj:`"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (:obj:`str`, `optional`, defaults to :obj:`"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one. tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see `this issue <https://github.com/huggingface/transformers/issues/328>`__). strip_accents: (:obj:`bool`, `optional`): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for :obj:`lowercase` (as in the original BERT). wordpieces_prefix: (:obj:`str`, `optional`, defaults to :obj:`"##"`): The prefix for subwords. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = BertTokenizer def __init__( self, vocab_file, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs ): super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( pre_tok_state.get("do_lower_case", do_lower_case) != do_lower_case or pre_tok_state.get("strip_accents", strip_accents) != strip_accents ): pre_tok_class = getattr(normalizers, pre_tok_state.pop("type")) pre_tok_state["do_lower_case"] = do_lower_case pre_tok_state["strip_accents"] = strip_accents self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state) self.do_lower_case = do_lower_case def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: ``[CLS] X [SEP]`` - pair of sequences: ``[CLS] A [SEP] B [SEP]`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. """ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1: output += token_ids_1 + [self.sep_token_id] return output def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
14,966
55.908745
188
py
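# Illustrative sketch (not part of the original files above), assuming the transformers package
# is installed and `from_pretrained` can download the `bert-base-uncased` files. It shows the
# [CLS]/[SEP] layout and token type ids documented in the BertTokenizerFast methods above.
from transformers import BertTokenizerFast

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("how are you"))
print(tokenizer.build_inputs_with_special_tokens(ids_a, ids_b))      # [CLS] A [SEP] B [SEP]
print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))  # 0s for sequence A, 1s for sequence B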
SLT-FAI
SLT-FAI-main/transformers/__init__.py
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. __version__ = "3.4.0" # Work around to update TensorFlow's absl.logging threshold which alters the # default Python logging output behavior when present. # see: https://github.com/abseil/abseil-py/issues/99 # and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493 try: import absl.logging except ImportError: pass else: absl.logging.set_verbosity("info") absl.logging.set_stderrthreshold("info") absl.logging._warn_preinit_stderr = False # Integrations: this needs to come before other ml imports # in order to allow any 3rd-party code to initialize properly from .integrations import ( # isort:skip is_comet_available, is_optuna_available, is_ray_available, is_tensorboard_available, is_wandb_available, ) # Configurations from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, AutoConfig from .configuration_bart import BartConfig from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig from .configuration_bert_generation import BertGenerationConfig from .configuration_blenderbot import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig from .configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig from .configuration_distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig from .configuration_dpr import DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig from .configuration_encoder_decoder import EncoderDecoderConfig from .configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig from .configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config from .configuration_layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig from .configuration_longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .configuration_marian import MarianConfig from .configuration_mbart import MBartConfig from .configuration_mmbt import MMBTConfig from .configuration_mobilebert import MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig from .configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig from .configuration_pegasus import PegasusConfig from .configuration_prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig from .configuration_rag import RagConfig from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig from .configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig from .configuration_squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config from 
.configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .configuration_utils import PretrainedConfig from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig from .configuration_xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig from .configuration_xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig from .data import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, glue_compute_metrics, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_compute_metrics, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, ) # Files and general utilities from .file_utils import ( CONFIG_NAME, MODEL_CARD_NAME, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, add_end_docstrings, add_start_docstrings, cached_path, is_apex_available, is_datasets_available, is_faiss_available, is_flax_available, is_psutil_available, is_py3nvml_available, is_sentencepiece_available, is_sklearn_available, is_tf_available, is_tokenizers_available, is_torch_available, is_torch_tpu_available, ) from .hf_argparser import HfArgumentParser # Model Cards from .modelcard import ModelCard # TF 2.0 <=> PyTorch conversion utilities from .modeling_tf_pytorch_utils import ( convert_tf_weight_name_to_pt_weight_name, load_pytorch_checkpoint_in_tf2_model, load_pytorch_model_in_tf2_model, load_pytorch_weights_in_tf2_model, load_tf2_checkpoint_in_pytorch_model, load_tf2_model_in_pytorch_model, load_tf2_weights_in_pytorch_model, ) # Pipelines from .pipelines import ( Conversation, ConversationalPipeline, CsvPipelineDataFormat, FeatureExtractionPipeline, FillMaskPipeline, JsonPipelineDataFormat, NerPipeline, PipedPipelineDataFormat, Pipeline, PipelineDataFormat, QuestionAnsweringPipeline, SummarizationPipeline, Text2TextGenerationPipeline, TextClassificationPipeline, TextGenerationPipeline, TokenClassificationPipeline, TranslationPipeline, ZeroShotClassificationPipeline, pipeline, ) # Retriever from .retrieval_rag import RagRetriever # Tokenizers from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer from .tokenization_bart import BartTokenizer from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer from .tokenization_bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer from .tokenization_bertweet import BertweetTokenizer from .tokenization_blenderbot import BlenderbotSmallTokenizer, BlenderbotTokenizer from .tokenization_ctrl import CTRLTokenizer from .tokenization_deberta import DebertaTokenizer from .tokenization_distilbert import DistilBertTokenizer from .tokenization_dpr import ( DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderOutput, DPRReaderTokenizer, ) from .tokenization_electra import ElectraTokenizer from .tokenization_flaubert import FlaubertTokenizer from .tokenization_fsmt import FSMTTokenizer from .tokenization_funnel import FunnelTokenizer from .tokenization_gpt2 import GPT2Tokenizer from .tokenization_herbert import HerbertTokenizer from .tokenization_layoutlm import LayoutLMTokenizer from .tokenization_longformer import LongformerTokenizer from .tokenization_lxmert import 
LxmertTokenizer from .tokenization_mobilebert import MobileBertTokenizer from .tokenization_openai import OpenAIGPTTokenizer from .tokenization_phobert import PhobertTokenizer from .tokenization_prophetnet import ProphetNetTokenizer from .tokenization_rag import RagTokenizer from .tokenization_retribert import RetriBertTokenizer from .tokenization_roberta import RobertaTokenizer from .tokenization_squeezebert import SqueezeBertTokenizer from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer from .tokenization_utils import PreTrainedTokenizer from .tokenization_utils_base import ( AddedToken, BatchEncoding, CharSpan, PreTrainedTokenizerBase, SpecialTokensMixin, TensorType, TokenSpan, ) from .tokenization_xlm import XLMTokenizer if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer from .tokenization_bert_generation import BertGenerationTokenizer from .tokenization_camembert import CamembertTokenizer from .tokenization_marian import MarianTokenizer from .tokenization_mbart import MBartTokenizer from .tokenization_pegasus import PegasusTokenizer from .tokenization_reformer import ReformerTokenizer from .tokenization_t5 import T5Tokenizer from .tokenization_xlm_prophetnet import XLMProphetNetTokenizer from .tokenization_xlm_roberta import XLMRobertaTokenizer from .tokenization_xlnet import XLNetTokenizer else: from .utils.dummy_sentencepiece_objects import * if is_tokenizers_available(): from .tokenization_albert_fast import AlbertTokenizerFast from .tokenization_bart_fast import BartTokenizerFast from .tokenization_bert_fast import BertTokenizerFast from .tokenization_camembert_fast import CamembertTokenizerFast from .tokenization_distilbert_fast import DistilBertTokenizerFast from .tokenization_dpr_fast import ( DPRContextEncoderTokenizerFast, DPRQuestionEncoderTokenizerFast, DPRReaderTokenizerFast, ) from .tokenization_electra_fast import ElectraTokenizerFast from .tokenization_funnel_fast import FunnelTokenizerFast from .tokenization_gpt2_fast import GPT2TokenizerFast from .tokenization_herbert_fast import HerbertTokenizerFast from .tokenization_layoutlm_fast import LayoutLMTokenizerFast from .tokenization_longformer_fast import LongformerTokenizerFast from .tokenization_lxmert_fast import LxmertTokenizerFast from .tokenization_mbart_fast import MBartTokenizerFast from .tokenization_mobilebert_fast import MobileBertTokenizerFast from .tokenization_openai_fast import OpenAIGPTTokenizerFast from .tokenization_pegasus_fast import PegasusTokenizerFast from .tokenization_reformer_fast import ReformerTokenizerFast from .tokenization_retribert_fast import RetriBertTokenizerFast from .tokenization_roberta_fast import RobertaTokenizerFast from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast from .tokenization_t5_fast import T5TokenizerFast from .tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast from .tokenization_xlnet_fast import XLNetTokenizerFast if is_sentencepiece_available(): from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, convert_slow_tokenizer else: from .utils.dummy_tokenizers_objects import * # Trainer from .trainer_callback import ( DefaultFlowCallback, PrinterCallback, ProgressCallback, TrainerCallback, TrainerControl, TrainerState, ) from .trainer_utils import EvalPrediction, EvaluationStrategy, set_seed from .training_args import TrainingArguments from .training_args_tf import TFTrainingArguments from .utils import logging logger = 
logging.get_logger(__name__) # pylint: disable=invalid-name # Modeling if is_torch_available(): # Benchmarks from .benchmark.benchmark import PyTorchBenchmark from .benchmark.benchmark_args import PyTorchBenchmarkArguments from .data.data_collator import ( DataCollator, DataCollatorForLanguageModeling, DataCollatorForNextSentencePrediction, DataCollatorForPermutationLanguageModeling, DataCollatorForSOP, DataCollatorWithPadding, default_data_collator, ) from .data.datasets import ( GlueDataset, GlueDataTrainingArguments, LineByLineTextDataset, LineByLineWithSOPTextDataset, SquadDataset, SquadDataTrainingArguments, TextDataset, TextDatasetForNextSentencePrediction, ) from .generation_utils import top_k_top_p_filtering from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) from .modeling_auto import ( MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_PRETRAINING_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForMultipleChoice, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, ) from .modeling_bart import ( BART_PRETRAINED_MODEL_ARCHIVE_LIST, BartForConditionalGeneration, BartForQuestionAnswering, BartForSequenceClassification, BartModel, PretrainedBartModel, ) from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) from .modeling_bert_generation import ( BertGenerationDecoder, BertGenerationEncoder, load_tf_weights_in_bert_generation, ) from .modeling_blenderbot import BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForConditionalGeneration from .modeling_camembert import ( CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, CamembertForCausalLM, CamembertForMaskedLM, CamembertForMultipleChoice, CamembertForQuestionAnswering, CamembertForSequenceClassification, CamembertForTokenClassification, CamembertModel, ) from .modeling_ctrl import CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForSequenceClassification, DebertaModel, DebertaPreTrainedModel, ) from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) from .modeling_dpr import ( DPRContextEncoder, DPRPretrainedContextEncoder, DPRPretrainedQuestionEncoder, DPRPretrainedReader, DPRQuestionEncoder, DPRReader, ) from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, 
ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) from .modeling_encoder_decoder import EncoderDecoderModel from .modeling_flaubert import ( FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, load_tf_weights_in_funnel, ) from .modeling_gpt2 import ( GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, GPT2DoubleHeadsModel, GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model, GPT2PreTrainedModel, load_tf_weights_in_gpt2, ) from .modeling_layoutlm import ( LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMForMaskedLM, LayoutLMForTokenClassification, LayoutLMModel, ) from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerSelfAttention, ) from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) from .modeling_marian import MarianMTModel from .modeling_mbart import MBartForConditionalGeneration from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) from .modeling_openai import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, OpenAIGPTPreTrainedModel, load_tf_weights_in_openai_gpt, ) from .modeling_pegasus import PegasusForConditionalGeneration from .modeling_prophetnet import ( PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, ProphetNetDecoder, ProphetNetEncoder, ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel, ProphetNetPreTrainedModel, ) from .modeling_rag import RagModel, RagSequenceForGeneration, RagTokenForGeneration from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ) from .modeling_retribert import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RetriBertModel, RetriBertPreTrainedModel from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, ) from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, 
SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) from .modeling_t5 import ( T5_PRETRAINED_MODEL_ARCHIVE_LIST, T5ForConditionalGeneration, T5Model, T5PreTrainedModel, load_tf_weights_in_t5, ) from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) from .modeling_utils import Conv1D, PreTrainedModel, apply_chunking_to_forward, prune_layer from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) from .modeling_xlm_prophetnet import ( XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLMProphetNetDecoder, XLMProphetNetEncoder, XLMProphetNetForCausalLM, XLMProphetNetForConditionalGeneration, XLMProphetNetModel, ) from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, ) from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) # Optimization from .optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) # Trainer from .trainer import Trainer from .trainer_pt_utils import torch_distributed_zero_first else: from .utils.dummy_pt_objects import * # TensorFlow if is_tf_available(): from .benchmark.benchmark_args_tf import TensorFlowBenchmarkArguments # Benchmarks from .benchmark.benchmark_tf import TensorFlowBenchmark from .generation_tf_utils import tf_top_k_top_p_filtering from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) from .modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForMultipleChoice, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, ) from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, 
TFBertPreTrainedModel, ) from .modeling_tf_camembert import ( TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFCamembertForMaskedLM, TFCamembertForMultipleChoice, TFCamembertForQuestionAnswering, TFCamembertForSequenceClassification, TFCamembertForTokenClassification, TFCamembertModel, ) from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) from .modeling_tf_flaubert import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) from .modeling_tf_gpt2 import ( TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, TFGPT2DoubleHeadsModel, TFGPT2LMHeadModel, TFGPT2MainLayer, TFGPT2Model, TFGPT2PreTrainedModel, ) from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForQuestionAnswering, TFLongformerModel, TFLongformerSelfAttention, ) from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) from .modeling_tf_openai import ( TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTLMHeadModel, TFOpenAIGPTMainLayer, TFOpenAIGPTModel, TFOpenAIGPTPreTrainedModel, ) from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) from .modeling_tf_t5 import ( TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST, TFT5ForConditionalGeneration, TFT5Model, TFT5PreTrainedModel, ) from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, shape_list from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, 
TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, ) from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) # Optimization from .optimization_tf import AdamWeightDecay, GradientAccumulator, WarmUp, create_optimizer # Trainer from .trainer_tf import TFTrainer else: # Import the same objects as dummies to get them in the namespace. # They will raise an import error if the user tries to instantiate / use them. from .utils.dummy_tf_objects import * if is_flax_available(): from .modeling_flax_bert import FlaxBertModel from .modeling_flax_roberta import FlaxRobertaModel else: # Import the same objects as dummies to get them in the namespace. # They will raise an import error if the user tries to instantiate / use them. from .utils.dummy_flax_objects import * if not is_tf_available() and not is_torch_available(): logger.warning( "Neither PyTorch nor TensorFlow >= 2.0 has been found. " "Models won't be available and only tokenizers, configuration " "and file/data utilities can be used." )
30,900
35.099299
117
py
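The __init__ module in the record above gates every framework-specific import behind is_torch_available() / is_tf_available() / is_flax_available() and falls back to dummy objects that raise only when used. A minimal sketch of how downstream code can query those same helpers; the SeqClsModel alias is illustrative and not part of the library:

from transformers import is_tf_available, is_torch_available

# Pick a backend explicitly. When a backend is missing, the corresponding classes are
# replaced by dummy objects that raise at instantiation time, not at import time.
if is_torch_available():
    from transformers import AutoModelForSequenceClassification as SeqClsModel
elif is_tf_available():
    from transformers import TFAutoModelForSequenceClassification as SeqClsModel
else:
    raise ImportError("Neither PyTorch nor TensorFlow >= 2.0 is installed; only tokenizers, configurations and file/data utilities are usable.")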
SLT-FAI
SLT-FAI-main/transformers/tokenization_albert.py
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization classes for ALBERT model.""" import os import unicodedata from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from .tokenization_utils import PreTrainedTokenizer from .utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "albert-base-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-spiece.model", "albert-large-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v1-spiece.model", "albert-xlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v1-spiece.model", "albert-xxlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v1-spiece.model", "albert-base-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-spiece.model", "albert-large-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-spiece.model", "albert-xlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-spiece.model", "albert-xxlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-spiece.model", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "albert-base-v1": 512, "albert-large-v1": 512, "albert-xlarge-v1": 512, "albert-xxlarge-v1": 512, "albert-base-v2": 512, "albert-large-v2": 512, "albert-xlarge-v2": 512, "albert-xxlarge-v2": 512, } SPIECE_UNDERLINE = "▁" class AlbertTokenizer(PreTrainedTokenizer): """ Construct an ALBERT tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__. This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that contains the vocabulary necessary to instantiate a tokenizer. do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to lowercase the input when tokenizing. remove_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to strip the text when tokenizing (removing excess spaces before and after the string). keep_accents (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to keep accents when tokenizing. bos_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. .. note:: When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the :obj:`cls_token`. eos_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`): The end of sequence token. .. 
note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the :obj:`sep_token`. unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`): The token used for padding, for example when batching sequences of different lengths. cls_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (:obj:`str`, `optional`, defaults to :obj:`"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. Attributes: sp_model (:obj:`SentencePieceProcessor`): The `SentencePiece` processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs ): super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, ) self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(vocab_file) @property def vocab_size(self): return len(self.sp_model) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file) def preprocess_text(self, inputs): if self.remove_space: outputs = " ".join(inputs.strip().split()) else: outputs = inputs outputs = outputs.replace("``", '"').replace("''", '"') if not self.keep_accents: outputs = unicodedata.normalize("NFKD", outputs) outputs = "".join([c for c in outputs if not unicodedata.combining(c)]) if self.do_lower_case: outputs = outputs.lower() return outputs def _tokenize(self, text, sample=False): """ Tokenize a string. 
""" text = self.preprocess_text(text) if not sample: pieces = self.sp_model.EncodeAsPieces(text) else: pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1) new_pieces = [] for piece in pieces: if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit(): cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, "")) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: cur_pieces = cur_pieces[1:] else: cur_pieces[0] = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(cur_pieces) else: new_pieces.append(piece) return new_pieces def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ return self.sp_model.PieceToId(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.sp_model.IdToPiece(index) def convert_tokens_to_string(self, tokens): out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An ALBERT sequence has the following format: - single sequence: ``[CLS] X [SEP]`` - pair of sequences: ``[CLS] A [SEP] B [SEP]`` Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` method. Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the token list is already formatted with special tokens for the model. Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
An ALBERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
13,411
40.9125
119
py
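The AlbertTokenizer in the record above documents a [CLS] A [SEP] B [SEP] layout for sequence pairs and a 0/1 segment mask. A short usage sketch, not part of the original file, assuming sentencepiece is installed and the albert-base-v2 checkpoint can be downloaded:

from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
encoded = tokenizer("Hello world", "How are you?")

# Special tokens follow the documented pair layout: [CLS] A [SEP] B [SEP]
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
# Segment ids: 0 for the first sequence (including [CLS] and its [SEP]), 1 for the second
print(encoded["token_type_ids"])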
SLT-FAI
SLT-FAI-main/transformers/configuration_electra.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ELECTRA model configuration """ from .configuration_utils import PretrainedConfig from .utils import logging logger = logging.get_logger(__name__) ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/electra-small-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-small-generator/config.json", "google/electra-base-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-base-generator/config.json", "google/electra-large-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-large-generator/config.json", "google/electra-small-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-small-discriminator/config.json", "google/electra-base-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-base-discriminator/config.json", "google/electra-large-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-large-discriminator/config.json", } class ElectraConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a :class:`~transformers.ElectraModel` or a :class:`~transformers.TFElectraModel`. It is used to instantiate a ELECTRA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ELECTRA `google/electra-small-discriminator <https://huggingface.co/google/electra-small-discriminator>`__ architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 30522): Vocabulary size of the ELECTRA model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.ElectraModel` or :class:`~transformers.TFElectraModel`. embedding_size (:obj:`int`, `optional`, defaults to 128): Dimensionality of the encoder layers and the pooler layer. hidden_size (:obj:`int`, `optional`, defaults to 256): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (:obj:`int`, `optional`, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (:obj:`int`, `optional`, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (:obj:`int`, `optional`, defaults to 1024): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. 
If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"swish"` and :obj:`"gelu_new"` are supported. hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (:obj:`int`, `optional`, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (:obj:`int`, `optional`, defaults to 2): The vocabulary size of the :obj:`token_type_ids` passed when calling :class:`~transformers.ElectraModel` or :class:`~transformers.TFElectraModel`. initializer_range (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12): The epsilon used by the layer normalization layers. summary_type (:obj:`str`, `optional`, defaults to :obj:`"first"`): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Has to be one of the following options: - :obj:`"last"`: Take the last token hidden state (like XLNet). - :obj:`"first"`: Take the first token hidden state (like BERT). - :obj:`"mean"`: Take the mean of all tokens hidden states. - :obj:`"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2). - :obj:`"attn"`: Not implemented now, use multi-head attention. summary_use_proj (:obj:`bool`, `optional`, defaults to :obj:`True`): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Whether or not to add a projection after the vector extraction. summary_activation (:obj:`str`, `optional`): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Pass :obj:`"gelu"` for a gelu activation to the output, any other value will result in no activation. summary_last_dropout (:obj:`float`, `optional`, defaults to 0.0): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. The dropout ratio to be used after the projection and activation. 
Examples:: >>> from transformers import ElectraModel, ElectraConfig >>> # Initializing a ELECTRA electra-base-uncased style configuration >>> configuration = ElectraConfig() >>> # Initializing a model from the electra-base-uncased style configuration >>> model = ElectraModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config """ model_type = "electra" def __init__( self, vocab_size=30522, embedding_size=128, hidden_size=256, num_hidden_layers=12, num_attention_heads=4, intermediate_size=1024, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, summary_type="first", summary_use_proj=True, summary_activation="gelu", summary_last_dropout=0.1, pad_token_id=0, **kwargs ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_last_dropout = summary_last_dropout
8,495
52.433962
143
py
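Beyond the Examples block embedded in the docstring above, the configuration can be built with custom sizes and round-tripped through save_pretrained / from_pretrained without any deep-learning backend installed. The hyperparameter values in this sketch are arbitrary and only illustrate the constructor arguments:

from transformers import ElectraConfig

config = ElectraConfig(
    embedding_size=64,
    hidden_size=128,
    num_hidden_layers=4,
    num_attention_heads=2,
    intermediate_size=512,
)
config.save_pretrained("./electra-tiny")        # writes ./electra-tiny/config.json
reloaded = ElectraConfig.from_pretrained("./electra-tiny")
assert reloaded.hidden_size == 128 and reloaded.model_type == "electra"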
SLT-FAI
SLT-FAI-main/transformers/configuration_fsmt.py
# coding=utf-8 # Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ FSMT configuration """ import copy from .configuration_utils import PretrainedConfig from .utils import logging logger = logging.get_logger(__name__) FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP = {} class DecoderConfig(PretrainedConfig): r""" Configuration class for FSMT's decoder specific things. note: this is a private helper class """ model_type = "fsmt_decoder" def __init__(self, vocab_size=0, bos_token_id=0): super().__init__() self.vocab_size = vocab_size self.bos_token_id = bos_token_id class FSMTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a :class:`~transformers.FSMTModel`. It is used to instantiate a FSMT model according to the specified arguments, defining the model architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: langs (:obj:`List[str]`): A list with source language and target_language (e.g., ['en', 'ru']). src_vocab_size (:obj:`int`): Vocabulary size of the encoder. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed to the forward method in the encoder. tgt_vocab_size (:obj:`int`): Vocabulary size of the decoder. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed to the forward method in the decoder. d_model (:obj:`int`, `optional`, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (:obj:`int`, `optional`, defaults to 12): Number of encoder layers. decoder_layers (:obj:`int`, `optional`, defaults to 12): Number of decoder layers. encoder_attention_heads (:obj:`int`, `optional`, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (:obj:`int`, `optional`, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"swish"` and :obj:`"gelu_new"` are supported. dropout (:obj:`float`, `optional`, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout ratio for the attention probabilities. 
activation_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. max_position_embeddings (:obj:`int`, `optional`, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. scale_embedding (:obj:`bool`, `optional`, defaults to :obj:`True`): Scale embeddings by diving by sqrt(d_model). bos_token_id (:obj:`int`, `optional`, defaults to 0) Beginning of stream token id. pad_token_id (:obj:`int`, `optional`, defaults to 1) Padding token id. eos_token_id (:obj:`int`, `optional`, defaults to 2) End of stream token id. decoder_start_token_id (:obj:`int`, `optional`): This model starts decoding with :obj:`eos_token_id` encoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0): Google "layerdrop arxiv", as its not explainable in one line. decoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0): Google "layerdrop arxiv", as its not explainable in one line. is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether this is an encoder/decoder model. tie_word_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to tie input and output embeddings. num_beams (:obj:`int`, `optional`, defaults to 5) Number of beams for beam search that will be used by default in the :obj:`generate` method of the model. 1 means no beam search. length_penalty (:obj:`float`, `optional`, defaults to 1) Exponential penalty to the length that will be used by default in the :obj:`generate` method of the model. early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`) Flag that will be used by default in the :obj:`generate` method of the model. Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not. 
Examples:: >>> from transformers import FSMTConfig, FSMTModel >>> config = FSMTConfig.from_pretrained('facebook/wmt19-en-ru') >>> model = FSMTModel(config) """ model_type = "fsmt" # update the defaults from config file def __init__( self, langs, src_vocab_size, tgt_vocab_size, activation_function="relu", d_model=1024, max_length=200, max_position_embeddings=1024, encoder_ffn_dim=4096, encoder_layers=12, encoder_attention_heads=16, encoder_layerdrop=0.0, decoder_ffn_dim=4096, decoder_layers=12, decoder_attention_heads=16, decoder_layerdrop=0.0, attention_dropout=0.0, dropout=0.1, activation_dropout=0.0, init_std=0.02, pad_token_id=1, bos_token_id=0, eos_token_id=2, decoder_start_token_id=2, is_encoder_decoder=True, scale_embedding=True, tie_word_embeddings=False, num_beams=5, length_penalty=1.0, early_stopping=False, **common_kwargs ): if "hidden_size" in common_kwargs: raise ValueError("hidden size is called d_model") super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, is_encoder_decoder=is_encoder_decoder, tie_word_embeddings=tie_word_embeddings, **common_kwargs, ) self.langs = langs self.src_vocab_size = src_vocab_size self.tgt_vocab_size = tgt_vocab_size self.d_model = d_model # encoder_embed_dim and decoder_embed_dim self.max_length = max_length self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = self.num_hidden_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.max_position_embeddings = max_position_embeddings self.init_std = init_std # Normal(0, this parameter) self.activation_function = activation_function self.num_beams = num_beams self.length_penalty = length_penalty self.early_stopping = early_stopping self.decoder = DecoderConfig(vocab_size=tgt_vocab_size, bos_token_id=eos_token_id) self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True # 3 Types of Dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.dropout = dropout @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model def to_dict(self): """ Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig`. Returns: :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, """ output = copy.deepcopy(self.__dict__) output["decoder"] = self.decoder.to_dict() output["model_type"] = self.__class__.model_type return output
9,784
43.276018
115
py
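FSMTConfig requires langs plus the two vocabulary sizes and stores a nested DecoderConfig, which its to_dict() override serialises explicitly. A short sketch, not part of the original file, with illustrative values:

from transformers import FSMTConfig

config = FSMTConfig(langs=["en", "ru"], src_vocab_size=32000, tgt_vocab_size=32000)
d = config.to_dict()

print(d["model_type"])             # "fsmt", injected by the to_dict() override
print(d["decoder"]["vocab_size"])  # 32000, copied from tgt_vocab_size into DecoderConfig
print(config.hidden_size)          # 1024, the d_model value exposed via the hidden_size property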
SLT-FAI
SLT-FAI-main/transformers/configuration_gpt2.py
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OpenAI GPT-2 configuration """ from .configuration_utils import PretrainedConfig from .utils import logging logger = logging.get_logger(__name__) GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = { "gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json", "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-config.json", "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-config.json", "gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-config.json", "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-config.json", } class GPT2Config(PretrainedConfig): """ This is the configuration class to store the configuration of a :class:`~transformers.GPT2Model` or a :class:`~transformers.TFGPT2Model`. It is used to instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPT-2 `small <https://huggingface.co/gpt2>`__ architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 50257): Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.GPT2Model` or :class:`~transformers.TFGPT2Model`. n_positions (:obj:`int`, `optional`, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). n_ctx (:obj:`int`, `optional`, defaults to 1024): Dimensionality of the causal mask (usually same as n_positions). n_embd (:obj:`int`, `optional`, defaults to 768): Dimensionality of the embeddings and hidden states. n_layer (:obj:`int`, `optional`, defaults to 12): Number of hidden layers in the Transformer encoder. n_head (:obj:`int`, `optional`, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. n_inner (:obj:`int`, `optional`, defaults to None): Dimensionality of the inner feed-forward layers. :obj:`None` will set it to 4 times n_embd activation_function (:obj:`str`, `optional`, defaults to :obj:`"gelu"`): Activation function, to be selected in the list :obj:`["relu", "swish", "gelu", "tanh", "gelu_new"]`. resid_pdrop (:obj:`float`, `optional`, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (:obj:`int`, `optional`, defaults to 0.1): The dropout ratio for the embeddings. 
attn_pdrop (:obj:`float`, `optional`, defaults to 0.1): The dropout ratio for the attention. layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-5): The epsilon to use in the layer normalization layers initializer_range (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. summary_type (:obj:`string`, `optional`, defaults to :obj:`"cls_index"`): Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel` and :class:`~transformers.TFGPT2DoubleHeadsModel`. Has to be one of the following options: - :obj:`"last"`: Take the last token hidden state (like XLNet). - :obj:`"first"`: Take the first token hidden state (like BERT). - :obj:`"mean"`: Take the mean of all tokens hidden states. - :obj:`"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2). - :obj:`"attn"`: Not implemented now, use multi-head attention. summary_use_proj (:obj:`bool`, `optional`, defaults to :obj:`True`): Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel` and :class:`~transformers.TFGPT2DoubleHeadsModel`. Whether or not to add a projection after the vector extraction. summary_activation (:obj:`str`, `optional`): Argument used when doing sequence summary. Used in for the multiple choice head in :class:`~transformers.GPT2DoubleHeadsModel`. Pass :obj:`"tanh"` for a tanh activation to the output, any other value will result in no activation. summary_proj_to_labels (:obj:`bool`, `optional`, defaults to :obj:`True`): Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel` and :class:`~transformers.TFGPT2DoubleHeadsModel`. Whether the projection outputs should have :obj:`config.num_labels` or :obj:`config.hidden_size` classes. summary_first_dropout (:obj:`float`, `optional`, defaults to 0.1): Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel` and :class:`~transformers.TFGPT2DoubleHeadsModel`. The dropout ratio to be used after the projection and activation. gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass. 
Example:: >>> from transformers import GPT2Model, GPT2Config >>> # Initializing a GPT2 configuration >>> configuration = GPT2Config() >>> # Initializing a model from the configuration >>> model = GPT2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config """ model_type = "gpt2" def __init__( self, vocab_size=50257, n_positions=1024, n_ctx=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_new", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, bos_token_id=50256, eos_token_id=50256, gradient_checkpointing=False, **kwargs ): super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.n_ctx = n_ctx self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.n_inner = n_inner self.activation_function = activation_function self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_first_dropout = summary_first_dropout self.summary_proj_to_labels = summary_proj_to_labels self.gradient_checkpointing = gradient_checkpointing self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id @property def max_position_embeddings(self): return self.n_positions @property def hidden_size(self): return self.n_embd @property def num_attention_heads(self): return self.n_head @property def num_hidden_layers(self): return self.n_layer
8,865
45.663158
117
py
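The properties at the end of GPT2Config alias the GPT-2 style attribute names (n_positions, n_embd, n_layer, n_head) to the generic names used elsewhere in the library; the sketch below, using the default values, makes those aliases explicit:

from transformers import GPT2Config

config = GPT2Config()
assert config.hidden_size == config.n_embd == 768
assert config.num_hidden_layers == config.n_layer == 12
assert config.num_attention_heads == config.n_head == 12
assert config.max_position_embeddings == config.n_positions == 1024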
SLT-FAI
SLT-FAI-main/transformers/tokenization_bart.py
# coding=utf-8 # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional from .tokenization_roberta import RobertaTokenizer from .tokenization_utils_base import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) # vocab and merges same as roberta vocab_url = "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json" merges_url = "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt" _all_bart_models = [ "facebook/bart-base", "facebook/bart-large", "facebook/bart-large-mnli", "facebook/bart-large-cnn", "facebook/bart-large-xsum", "yjernite/bart_eli5", # This is not exhaustive: see https://huggingface.co/models?filter=bart ] class BartTokenizer(RobertaTokenizer): r""" Construct a BART tokenizer. :class:`~transformers.BartTokenizer` is identical to :class:`~transformers.RobertaTokenizer` and adds a new :meth:`~transformers.BartTokenizer.prepare_seq2seq_batch` Refer to superclass :class:`~transformers.RobertaTokenizer` for usage examples and documentation concerning the initialization parameters and other methods. """ # merges and vocab same as Roberta max_model_input_sizes = {m: 1024 for m in _all_bart_models} pretrained_vocab_files_map = { "vocab_file": {m: vocab_url for m in _all_bart_models}, "merges_file": {m: merges_url for m in _all_bart_models}, } def prepare_seq2seq_batch( self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = "None", truncation=True, **kwargs, ) -> BatchEncoding: r""" Prepare a batch that can be passed directly to an instance of :class:`~transformers.BartModel`. Args: src_texts: (:obj:`List[str]`): List of documents to summarize or source language texts. tgt_texts: (:obj:`List[str]`, `optional`): List of summaries or target language texts. max_length (:obj:`int`, `optional`): Controls the maximum length for encoder inputs (documents to summarize or source language texts). If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. max_target_length (:obj:`int`, `optional`): Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or set to :obj:`None`, this will use the max_length value. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). 
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`, defaults to "pt"): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`True`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). **kwargs: Additional keyword arguments passed along to :obj:`self.__call__`. Returns: :class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields: - **input_ids** -- List of token ids to be fed to the encoder. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model. - **labels** -- List of token ids for tgt_texts The full set of keys ``[input_ids, attention_mask, decoder_input_ids, decoder_attention_mask]``, will only be returned if tgt_texts is passed. Otherwise, input_ids, attention_mask will be the only keys. """ kwargs.pop("src_lang", None) kwargs.pop("tgt_lang", None) if max_length is None: max_length = self.model_max_length model_inputs: BatchEncoding = self( src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: max_target_length = max_length labels = self( tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, )["input_ids"] model_inputs["labels"] = labels return model_inputs
7,982
49.847134
155
py
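The ``prepare_seq2seq_batch`` method of the BART tokenizer above returns a single ``BatchEncoding`` that holds the encoder inputs and, when ``tgt_texts`` is given, the tokenized targets under ``labels``. A minimal usage sketch follows; the checkpoint name and the example texts are illustrative assumptions rather than part of that module:

from transformers import BartTokenizer

# Any checkpoint from _all_bart_models above would work; this choice is an assumption.
tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")

batch = tokenizer.prepare_seq2seq_batch(
    src_texts=["A long news article that should be summarized."],
    tgt_texts=["A short summary."],
    max_length=1024,        # encoder-side padding/truncation limit
    max_target_length=128,  # decoder-side (labels) limit
    return_tensors="pt",
)

# Encoder inputs plus the tokenized targets under "labels".
print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'labels']
print(batch["input_ids"].shape, batch["labels"].shape)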
SLT-FAI
SLT-FAI-main/transformers/tokenization_gpt2.py
# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""


import json
import os
import warnings
from functools import lru_cache
from typing import Optional, Tuple

import regex as re

from .tokenization_utils import AddedToken, PreTrainedTokenizer
from .utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json",
        "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-vocab.json",
        "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-vocab.json",
        "gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-vocab.json",
        "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-vocab.json",
    },
    "merges_file": {
        "gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt",
        "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-merges.txt",
        "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-merges.txt",
        "gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-merges.txt",
        "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}


@lru_cache()
def bytes_to_unicode():
    """
    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to
    whitespace/control characters the bpe code barfs on.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your
    vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K
    for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want
    lookup tables between utf-8 bytes and unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2 ** 8):
        if b not in bs:
            bs.append(b)
            cs.append(2 ** 8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class GPT2Tokenizer(PreTrainedTokenizer):
    """
    Construct a GPT-2 tokenizer. Based on byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: :: >>> from transformers import GPT2Tokenizer >>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2") >>> tokenizer("Hello world")['input_ids'] [15496, 995] >>> tokenizer(" Hello world")['input_ids'] [18435, 995] You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. .. note:: When used with ``is_split_into_words=True``, this tokenizer will add a space before each word (even the first one). This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): Path to the vocabulary file. merges_file (:obj:`str`): Path to the merges file. errors (:obj:`str`, `optional`, defaults to :obj:`"replace"`): Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode <https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information. unk_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`): The beginning of sequence token. eos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`): The end of sequence token. add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (GPT2 tokenizer detect beginning of words by the preceding space). 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["attention_mask"] def __init__( self, vocab_file, merges_file, errors="replace", unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs) with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: bpe_merges = merges_handle.read().split("\n")[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space # Should haved added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word def _tokenize(self, text): """ Tokenize a string. """ bpe_tokens = [] for token in re.findall(self.pat, text): token = "".join( self.byte_encoder[b] for b in token.encode("utf-8") ) # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) return bpe_tokens def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. 
""" text = "".join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, ensure_ascii=False)) index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( "Saving vocabulary to {}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!".format(merge_file) ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): if "is_pretokenized" in kwargs: warnings.warn( "`is_pretokenized` is deprecated and will be removed in a future version, use `is_split_into_words` instead.", FutureWarning, ) is_split_into_words = kwargs.pop("is_pretokenized") add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) if is_split_into_words or add_prefix_space: text = " " + text return (text, kwargs)
12,197
39.795987
126
py
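Because the byte-level BPE in the GPT-2 tokenizer above maps every UTF-8 byte to a printable unicode symbol, encoding is lossless, and ``save_vocabulary`` only needs the two files named in ``VOCAB_FILES_NAMES``. A small round-trip sketch, assuming the public ``gpt2`` checkpoint listed in ``PRETRAINED_VOCAB_FILES_MAP`` can be downloaded:

import tempfile

from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

# Byte-level BPE is reversible: decoding reproduces the input exactly, including
# the leading space that marks a word boundary.
ids = tokenizer(" Hello world")["input_ids"]
assert tokenizer.decode(ids) == " Hello world"

# save_vocabulary() writes vocab.json and merges.txt, which is enough to rebuild
# an equivalent tokenizer from local files.
with tempfile.TemporaryDirectory() as tmp_dir:
    vocab_file, merges_file = tokenizer.save_vocabulary(tmp_dir)
    reloaded = GPT2Tokenizer(vocab_file, merges_file)
    assert reloaded(" Hello world")["input_ids"] == ids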
SLT-FAI
SLT-FAI-main/transformers/modeling_tf_bert.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 BERT model. """ from dataclasses import dataclass from typing import Optional, Tuple import tensorflow as tf from .activations_tf import get_tf_activation from .configuration_bert import BertConfig from .file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings, ) from .modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPooling, TFCausalLMOutput, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFNextSentencePredictorOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from .modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFMaskedLanguageModelingLoss, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras_serializable, shape_list, ) from .tokenization_utils import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "BertConfig" _TOKENIZER_FOR_DOC = "BertTokenizer" TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "bert-base-uncased", "bert-large-uncased", "bert-base-cased", "bert-large-cased", "bert-base-multilingual-uncased", "bert-base-multilingual-cased", "bert-base-chinese", "bert-base-german-cased", "bert-large-uncased-whole-word-masking", "bert-large-cased-whole-word-masking", "bert-large-uncased-whole-word-masking-finetuned-squad", "bert-large-cased-whole-word-masking-finetuned-squad", "bert-base-cased-finetuned-mrpc", "cl-tohoku/bert-base-japanese", "cl-tohoku/bert-base-japanese-whole-word-masking", "cl-tohoku/bert-base-japanese-char", "cl-tohoku/bert-base-japanese-char-whole-word-masking", "TurkuNLP/bert-base-finnish-cased-v1", "TurkuNLP/bert-base-finnish-uncased-v1", "wietsedv/bert-base-dutch-cased", # See all BERT models at https://huggingface.co/models?filter=bert ] class TFBertEmbeddings(tf.keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.hidden_size = config.hidden_size self.initializer_range = config.initializer_range self.position_embeddings = tf.keras.layers.Embedding( config.max_position_embeddings, config.hidden_size, embeddings_initializer=get_initializer(self.initializer_range), name="position_embeddings", ) self.token_type_embeddings = tf.keras.layers.Embedding( config.type_vocab_size, config.hidden_size, embeddings_initializer=get_initializer(self.initializer_range), name="token_type_embeddings", ) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = 
tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def build(self, input_shape): """Build shared word embedding layer """ with tf.name_scope("word_embeddings"): # Create and initialize weights. The random normal initializer was chosen # arbitrarily, and works well. self.word_embeddings = self.add_weight( "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) super().build(input_shape) def call( self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, mode="embedding", training=False, ): """Get token embeddings of inputs. Args: inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids) mode: string, a valid value is one of "embedding" and "linear". Returns: outputs: (1) If mode == "embedding", output embedding tensor, float32 with shape [batch_size, length, embedding_size]; (2) mode == "linear", output linear tensor, float32 with shape [batch_size, length, vocab_size]. Raises: ValueError: if mode is not valid. Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ if mode == "embedding": return self._embedding(input_ids, position_ids, token_type_ids, inputs_embeds, training=training) elif mode == "linear": return self._linear(input_ids) else: raise ValueError("mode {} is not valid.".format(mode)) def _embedding(self, input_ids, position_ids, token_type_ids, inputs_embeds, training=False): """Applies embedding based on inputs tensor.""" assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: input_shape = shape_list(input_ids) else: input_shape = shape_list(inputs_embeds)[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :] if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) if inputs_embeds is None: inputs_embeds = tf.gather(self.word_embeddings, input_ids) position_embeddings = tf.cast(self.position_embeddings(position_ids), inputs_embeds.dtype) token_type_embeddings = tf.cast(self.token_type_embeddings(token_type_ids), inputs_embeds.dtype) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings, training=training) return embeddings def _linear(self, inputs): """Computes logits by running inputs through a linear layer. Args: inputs: A float32 tensor with shape [batch_size, length, hidden_size] Returns: float32 tensor with shape [batch_size, length, vocab_size]. 
""" batch_size = shape_list(inputs)[0] length = shape_list(inputs)[1] x = tf.reshape(inputs, [-1, self.hidden_size]) logits = tf.matmul(x, self.word_embeddings, transpose_b=True) return tf.reshape(logits, [batch_size, length, self.vocab_size]) class TFBertSelfAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_attention_heads = config.num_attention_heads assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x, batch_size): x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = tf.matmul( query_layer, key_layer, transpose_b=True ) # (batch size, num_heads, seq_len_q, seq_len_k) dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFBertModel call() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = tf.nn.softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape( context_layer, (batch_size, -1, self.all_head_size) ) # (batch_size, seq_len_q, all_head_size) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class TFBertSelfOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TFBertAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.self_attention = TFBertSelfAttention(config, name="self") self.dense_output = TFBertSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False): self_outputs = self.self_attention( input_tensor, attention_mask, head_mask, output_attentions, training=training ) attention_output = self.dense_output(self_outputs[0], input_tensor, training=training) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class TFBertIntermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class TFBertOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TFBertLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.attention = TFBertAttention(config, name="attention") self.intermediate = TFBertIntermediate(config, name="intermediate") self.bert_output = TFBertOutput(config, name="output") def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions, 
training=training ) attention_output = attention_outputs[0] intermediate_output = self.intermediate(attention_output) layer_output = self.bert_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs class TFBertEncoder(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.layer = [TFBertLayer(config, name="layer_._{}".format(i)) for i in range(config.num_hidden_layers)] def call( self, hidden_states, attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], output_attentions, training=training ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class TFBertPooler(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) def call(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) return pooled_output class TFBertPredictionHeadTransform(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class TFBertLMPredictionHead(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size self.transform = TFBertPredictionHeadTransform(config, name="transform") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.input_embeddings = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def call(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.input_embeddings(hidden_states, mode="linear") hidden_states = hidden_states + self.bias return hidden_states class TFBertMLMHead(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.predictions = TFBertLMPredictionHead(config, input_embeddings, name="predictions") def call(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class TFBertNSPHead(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.seq_relationship = tf.keras.layers.Dense( 2, kernel_initializer=get_initializer(config.initializer_range), name="seq_relationship" ) def call(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score @keras_serializable class TFBertMainLayer(tf.keras.layers.Layer): config_class = BertConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.num_hidden_layers = config.num_hidden_layers self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFBertEmbeddings(config, name="embeddings") self.encoder = TFBertEncoder(config, name="encoder") self.pooler = TFBertPooler(config, name="pooler") def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value self.embeddings.vocab_size = value.shape[0] def _prune_heads(self, heads_to_prune): """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError def call( self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds output_attentions = inputs[6] if len(inputs) > 6 else output_attentions output_hidden_states = inputs[7] if len(inputs) > 7 else output_hidden_states return_dict = inputs[8] if len(inputs) > 8 else return_dict assert len(inputs) <= 9, "Too many inputs." elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") attention_mask = inputs.get("attention_mask", attention_mask) token_type_ids = inputs.get("token_type_ids", token_type_ids) position_ids = inputs.get("position_ids", position_ids) head_mask = inputs.get("head_mask", head_mask) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) return_dict = inputs.get("return_dict", return_dict) assert len(inputs) <= 9, "Too many inputs." 
else: input_ids = inputs output_attentions = output_attentions if output_attentions is not None else self.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states return_dict = return_dict if return_dict is not None else self.return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) embedding_output = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, embedding_output.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) encoder_outputs = self.encoder( embedding_output, extended_attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class TFBertPreTrainedModel(TFPreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertConfig base_model_prefix = "bert" @dataclass class TFBertForPreTrainingOutput(ModelOutput): """ Output type of :class:`~transformers.TFBertForPreTrainingModel`. Args: prediction_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). 
seq_relationship_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ prediction_logits: tf.Tensor = None seq_relationship_logits: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None BERT_START_DOCSTRING = r""" This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. note:: TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all the tensors in the first argument of the model call function: :obj:`model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: :obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Args: config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BertTokenizer`. See :func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for details. `What are input IDs? 
<../glossary.html#input-ids>`__ attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`__ position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`__ head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. training (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare Bert Model transformer outputing raw hidden-states without any specific head on top.", BERT_START_DOCSTRING, ) class TFBertModel(TFBertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.bert = TFBertMainLayer(config, name="bert") @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="bert-base-cased", output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def call(self, inputs, **kwargs): outputs = self.bert(inputs, **kwargs) return outputs @add_start_docstrings( """Bert Model with two heads on top as done during the pre-training: a `masked language modeling` head and a `next sentence prediction (classification)` head. 
""", BERT_START_DOCSTRING, ) class TFBertForPreTraining(TFBertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.bert = TFBertMainLayer(config, name="bert") self.nsp = TFBertNSPHead(config, name="nsp___cls") self.mlm = TFBertMLMHead(config, self.bert.embeddings, name="mlm___cls") def get_output_embeddings(self): return self.bert.embeddings @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call(self, inputs, **kwargs): r""" Return: Examples:: >>> import tensorflow as tf >>> from transformers import BertTokenizer, TFBertForPreTraining >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') >>> model = TFBertForPreTraining.from_pretrained('bert-base-uncased') >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1 >>> outputs = model(input_ids) >>> prediction_scores, seq_relationship_scores = outputs[:2] """ return_dict = kwargs.get("return_dict") return_dict = return_dict if return_dict is not None else self.bert.return_dict outputs = self.bert(inputs, **kwargs) sequence_output, pooled_output = outputs[:2] prediction_scores = self.mlm(sequence_output, training=kwargs.get("training", False)) seq_relationship_score = self.nsp(pooled_output) if not return_dict: return (prediction_scores, seq_relationship_score) + outputs[2:] return TFBertForPreTrainingOutput( prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING) class TFBertForMaskedLM(TFBertPreTrainedModel, TFMaskedLanguageModelingLoss): authorized_unexpected_keys = [r"pooler"] authorized_missing_keys = [r"pooler"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if config.is_decoder: logger.warning( "If you want to use `TFBertForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.bert = TFBertMainLayer(config, name="bert") self.mlm = TFBertMLMHead(config, self.bert.embeddings, name="mlm___cls") def get_output_embeddings(self): return self.bert.embeddings @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="bert-base-cased", output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` """ return_dict = return_dict if return_dict is not None else self.bert.return_dict if isinstance(inputs, (tuple, list)): labels = inputs[9] if len(inputs) > 9 else labels if len(inputs) > 9: inputs = inputs[:9] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) outputs = self.bert( inputs, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.mlm(sequence_output, training=training) loss = None if labels is None else self.compute_loss(labels, prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class TFBertLMHeadModel(TFBertPreTrainedModel, TFCausalLanguageModelingLoss): authorized_unexpected_keys = [r"pooler"] authorized_missing_keys = [r"pooler"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if not config.is_decoder: logger.warning("If you want to use `TFBertLMHeadModel` as a standalone, add `is_decoder=True.`") self.bert = TFBertMainLayer(config, name="bert") self.mlm = TFBertMLMHead(config, self.bert.embeddings, name="mlm___cls") def get_output_embeddings(self): return self.bert.embeddings @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="bert-base-cased", output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the cross entropy classification loss. Indices should be in ``[0, ..., config.vocab_size - 1]``. 
""" return_dict = return_dict if return_dict is not None else self.bert.return_dict if isinstance(inputs, (tuple, list)): labels = inputs[9] if len(inputs) > 9 else labels if len(inputs) > 9: inputs = inputs[:9] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) outputs = self.bert( inputs, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.mlm(sequence_output, training=training) loss = None if labels is not None: # shift labels to the left and cut last logit token logits = logits[:, :-1] labels = labels[:, 1:] loss = self.compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """Bert Model with a `next sentence prediction (classification)` head on top. """, BERT_START_DOCSTRING, ) class TFBertForNextSentencePrediction(TFBertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.bert = TFBertMainLayer(config, name="bert") self.nsp = TFBertNSPHead(config, name="nsp___cls") @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def call(self, inputs, **kwargs): r""" Return: Examples:: >>> import tensorflow as tf >>> from transformers import BertTokenizer, TFBertForNextSentencePrediction >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') >>> model = TFBertForNextSentencePrediction.from_pretrained('bert-base-uncased') >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." >>> encoding = tokenizer(prompt, next_sentence, return_tensors='tf') >>> logits = model(encoding['input_ids'], token_type_ids=encoding['token_type_ids'])[0] >>> assert logits[0][0] < logits[0][1] # the next sentence was random """ return_dict = kwargs.get("return_dict") return_dict = return_dict if return_dict is not None else self.bert.return_dict outputs = self.bert(inputs, **kwargs) pooled_output = outputs[1] seq_relationship_score = self.nsp(pooled_output) if not return_dict: return (seq_relationship_score,) + outputs[2:] return TFNextSentencePredictorOutput( logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", BERT_START_DOCSTRING, ) class TFBertForSequenceClassification(TFBertPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.bert = TFBertMainLayer(config, name="bert") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="bert-base-cased", output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.bert.return_dict if isinstance(inputs, (tuple, list)): labels = inputs[9] if len(inputs) > 9 else labels if len(inputs) > 9: inputs = inputs[:9] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) outputs = self.bert( inputs, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) loss = None if labels is None else self.compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, BERT_START_DOCSTRING, ) class TFBertForMultipleChoice(TFBertPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.bert = TFBertMainLayer(config, name="bert") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = tf.keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @property def dummy_inputs(self): """Dummy inputs to build the network. 
Returns: tf.Tensor with dummy inputs """ return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)} @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="bert-base-cased", output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See :obj:`input_ids` above) """ if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds output_attentions = inputs[6] if len(inputs) > 6 else output_attentions output_hidden_states = inputs[7] if len(inputs) > 7 else output_hidden_states return_dict = inputs[8] if len(inputs) > 8 else return_dict labels = inputs[9] if len(inputs) > 9 else labels assert len(inputs) <= 10, "Too many inputs." elif isinstance(inputs, (dict, BatchEncoding)): input_ids = inputs.get("input_ids") attention_mask = inputs.get("attention_mask", attention_mask) token_type_ids = inputs.get("token_type_ids", token_type_ids) position_ids = inputs.get("position_ids", position_ids) head_mask = inputs.get("head_mask", head_mask) inputs_embeds = inputs.get("inputs_embeds", inputs_embeds) output_attentions = inputs.get("output_attentions", output_attentions) output_hidden_states = inputs.get("output_hidden_states", output_hidden_states) return_dict = inputs.get("return_dict", return_dict) labels = inputs.get("labels", labels) assert len(inputs) <= 10, "Too many inputs." 
else: input_ids = inputs return_dict = return_dict if return_dict is not None else self.bert.return_dict if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None flat_inputs_embeds = ( tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) outputs = self.bert( flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, flat_inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, BERT_START_DOCSTRING, ) class TFBertForTokenClassification(TFBertPreTrainedModel, TFTokenClassificationLoss): authorized_unexpected_keys = [r"pooler"] authorized_missing_keys = [r"pooler"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.bert = TFBertMainLayer(config, name="bert") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="bert-base-cased", output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. 
""" return_dict = return_dict if return_dict is not None else self.bert.return_dict if isinstance(inputs, (tuple, list)): labels = inputs[9] if len(inputs) > 9 else labels if len(inputs) > 9: inputs = inputs[:9] elif isinstance(inputs, (dict, BatchEncoding)): labels = inputs.pop("labels", labels) outputs = self.bert( inputs, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, BERT_START_DOCSTRING, ) class TFBertForQuestionAnswering(TFBertPreTrainedModel, TFQuestionAnsweringLoss): authorized_unexpected_keys = [r"pooler"] authorized_missing_keys = [r"pooler"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.bert = TFBertMainLayer(config, name="bert") self.qa_outputs = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="bert-base-cased", output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, inputs=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, start_positions=None, end_positions=None, training=False, ): r""" start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.bert.return_dict if isinstance(inputs, (tuple, list)): start_positions = inputs[9] if len(inputs) > 9 else start_positions end_positions = inputs[10] if len(inputs) > 10 else end_positions if len(inputs) > 9: inputs = inputs[:9] elif isinstance(inputs, (dict, BatchEncoding)): start_positions = inputs.pop("start_positions", start_positions) end_positions = inputs.pop("end_positions", start_positions) outputs = self.bert( inputs, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
60,374
40.211604
160
py
SLT-FAI
SLT-FAI-main/transformers/tokenization_bert_japanese.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes.""" import collections import copy import os import unicodedata from typing import Optional from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer, load_vocab from .utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "cl-tohoku/bert-base-japanese": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese/vocab.txt", "cl-tohoku/bert-base-japanese-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking/vocab.txt", "cl-tohoku/bert-base-japanese-char": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char/vocab.txt", "cl-tohoku/bert-base-japanese-char-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking/vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "cl-tohoku/bert-base-japanese": 512, "cl-tohoku/bert-base-japanese-whole-word-masking": 512, "cl-tohoku/bert-base-japanese-char": 512, "cl-tohoku/bert-base-japanese-char-whole-word-masking": 512, } PRETRAINED_INIT_CONFIGURATION = { "cl-tohoku/bert-base-japanese": { "do_lower_case": False, "word_tokenizer_type": "mecab", "subword_tokenizer_type": "wordpiece", }, "cl-tohoku/bert-base-japanese-whole-word-masking": { "do_lower_case": False, "word_tokenizer_type": "mecab", "subword_tokenizer_type": "wordpiece", }, "cl-tohoku/bert-base-japanese-char": { "do_lower_case": False, "word_tokenizer_type": "mecab", "subword_tokenizer_type": "character", }, "cl-tohoku/bert-base-japanese-char-whole-word-masking": { "do_lower_case": False, "word_tokenizer_type": "mecab", "subword_tokenizer_type": "character", }, } class BertJapaneseTokenizer(BertTokenizer): """BERT tokenizer for Japanese text""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, vocab_file, do_lower_case=False, do_word_tokenize=True, do_subword_tokenize=True, word_tokenizer_type="basic", subword_tokenizer_type="wordpiece", never_split=None, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", mecab_kwargs=None, **kwargs ): """Constructs a MecabBertTokenizer. Args: **vocab_file**: Path to a one-wordpiece-per-line vocabulary file. **do_lower_case**: (`optional`) boolean (default True) Whether to lower case the input. Only has an effect when do_basic_tokenize=True. **do_word_tokenize**: (`optional`) boolean (default True) Whether to do word tokenization. **do_subword_tokenize**: (`optional`) boolean (default True) Whether to do subword tokenization. 
**word_tokenizer_type**: (`optional`) string (default "basic") Type of word tokenizer. **subword_tokenizer_type**: (`optional`) string (default "wordpiece") Type of subword tokenizer. **mecab_kwargs**: (`optional`) dict passed to `MecabTokenizer` constructor (default None) """ super(BertTokenizer, self).__init__( unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, do_lower_case=do_lower_case, do_word_tokenize=do_word_tokenize, do_subword_tokenize=do_subword_tokenize, word_tokenizer_type=word_tokenizer_type, subword_tokenizer_type=subword_tokenizer_type, never_split=never_split, mecab_kwargs=mecab_kwargs, **kwargs, ) # ^^ We call the grandparent's init, not the parent's. if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file) ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_word_tokenize = do_word_tokenize self.word_tokenizer_type = word_tokenizer_type self.lower_case = do_lower_case self.never_split = never_split self.mecab_kwargs = copy.deepcopy(mecab_kwargs) if do_word_tokenize: if word_tokenizer_type == "basic": self.word_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=False ) elif word_tokenizer_type == "mecab": self.word_tokenizer = MecabTokenizer( do_lower_case=do_lower_case, never_split=never_split, **(mecab_kwargs or {}) ) else: raise ValueError("Invalid word_tokenizer_type '{}' is specified.".format(word_tokenizer_type)) self.do_subword_tokenize = do_subword_tokenize self.subword_tokenizer_type = subword_tokenizer_type if do_subword_tokenize: if subword_tokenizer_type == "wordpiece": self.subword_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) elif subword_tokenizer_type == "character": self.subword_tokenizer = CharacterTokenizer(vocab=self.vocab, unk_token=self.unk_token) else: raise ValueError("Invalid subword_tokenizer_type '{}' is specified.".format(subword_tokenizer_type)) @property def do_lower_case(self): return self.lower_case def __getstate__(self): state = dict(self.__dict__) if self.word_tokenizer_type == "mecab": del state["word_tokenizer"] return state def __setstate__(self, state): self.__dict__ = state if self.word_tokenizer_type == "mecab": self.word_tokenizer = MecabTokenizer( do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.mecab_kwargs or {}) ) def _tokenize(self, text): if self.do_word_tokenize: tokens = self.word_tokenizer.tokenize(text, never_split=self.all_special_tokens) else: tokens = [text] if self.do_subword_tokenize: split_tokens = [sub_token for token in tokens for sub_token in self.subword_tokenizer.tokenize(token)] else: split_tokens = tokens return split_tokens class MecabTokenizer: """Runs basic tokenization with MeCab morphological parser.""" def __init__( self, do_lower_case=False, never_split=None, normalize_text=True, mecab_dic: Optional[str] = "ipadic", mecab_option: Optional[str] = None, ): """Constructs a MecabTokenizer. Args: **do_lower_case**: (`optional`) boolean (default True) Whether to lowercase the input. **never_split**: (`optional`) list of str Kept for backward compatibility purposes. 
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) List of tokens not to split. **normalize_text**: (`optional`) boolean (default True) Whether to apply unicode normalization to text before tokenization. **mecab_dic**: (`optional`) string (default "ipadic") Name of dictionary to be used for MeCab initialization. If you are using a system-installed dictionary, set this option to `None` and modify `mecab_option`. **mecab_option**: (`optional`) string String passed to MeCab constructor. """ self.do_lower_case = do_lower_case self.never_split = never_split if never_split is not None else [] self.normalize_text = normalize_text try: import fugashi except ModuleNotFoundError as error: raise error.__class__( "You need to install fugashi to use MecabTokenizer." "See https://pypi.org/project/fugashi/ for installation." ) mecab_option = mecab_option or "" if mecab_dic is not None: if mecab_dic == "ipadic": try: import ipadic except ModuleNotFoundError as error: raise error.__class__( "The ipadic dictionary is not installed. " "See https://github.com/polm/ipadic-py for installation." ) dic_dir = ipadic.DICDIR elif mecab_dic == "unidic_lite": try: import unidic_lite except ModuleNotFoundError as error: raise error.__class__( "The unidic_lite dictionary is not installed. " "See https://github.com/polm/unidic-lite for installation." ) dic_dir = unidic_lite.DICDIR elif mecab_dic == "unidic": try: import unidic except ModuleNotFoundError as error: raise error.__class__( "The unidic dictionary is not installed. " "See https://github.com/polm/unidic-py for installation." ) dic_dir = unidic.DICDIR if not os.path.isdir(dic_dir): raise RuntimeError( "The unidic dictionary itself is not found. " "See https://github.com/polm/unidic-py for installation." ) else: raise ValueError("Invalid mecab_dic is specified.") mecabrc = os.path.join(dic_dir, "mecabrc") mecab_option = '-d "{}" -r "{}" '.format(dic_dir, mecabrc) + mecab_option self.mecab = fugashi.GenericTagger(mecab_option) def tokenize(self, text, never_split=None, **kwargs): """Tokenizes a piece of text.""" if self.normalize_text: text = unicodedata.normalize("NFKC", text) never_split = self.never_split + (never_split if never_split is not None else []) tokens = [] for word in self.mecab(text): token = word.surface if self.do_lower_case and token not in never_split: token = token.lower() tokens.append(token) return tokens class CharacterTokenizer: """Runs Character tokenization.""" def __init__(self, vocab, unk_token, normalize_text=True): """Constructs a CharacterTokenizer. Args: **vocab**: Vocabulary object. **unk_token**: str A special symbol for out-of-vocabulary token. **normalize_text**: (`optional`) boolean (default True) Whether to apply unicode normalization to text before tokenization. """ self.vocab = vocab self.unk_token = unk_token self.normalize_text = normalize_text def tokenize(self, text): """Tokenizes a piece of text into characters. For example: input = "apple" output = ["a", "p", "p", "l", "e"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`. Returns: A list of characters. """ if self.normalize_text: text = unicodedata.normalize("NFKC", text) output_tokens = [] for char in text: if char not in self.vocab: output_tokens.append(self.unk_token) continue output_tokens.append(char) return output_tokens
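# --- Editor's addition: illustrative usage sketch, not part of the original file. ---
# A minimal example of the tokenizer classes above. It assumes the "cl-tohoku/bert-base-japanese"
# checkpoint is reachable; per PRETRAINED_INIT_CONFIGURATION that checkpoint uses
# word_tokenizer_type="mecab", so `fugashi` and `ipadic` must be installed for MecabTokenizer.
from transformers import BertJapaneseTokenizer

jp_tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")

text = "吾輩は猫である。"
print(jp_tokenizer.tokenize(text))  # MeCab word split followed by WordPiece sub-word split
print(jp_tokenizer.encode(text))    # token ids wrapped with [CLS] ... [SEP]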
13,274
37.256484
181
py
SLT-FAI
SLT-FAI-main/transformers/tokenization_distilbert_fast.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for DistilBERT.""" from .tokenization_bert_fast import BertTokenizerFast from .tokenization_distilbert import DistilBertTokenizer from .utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "distilbert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", "distilbert-base-uncased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", "distilbert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", "distilbert-base-cased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", "distilbert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-vocab.txt", "distilbert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", }, "tokenizer_file": { "distilbert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-tokenizer.json", "distilbert-base-uncased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-tokenizer.json", "distilbert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-tokenizer.json", "distilbert-base-cased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-tokenizer.json", "distilbert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-tokenizer.json", "distilbert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "distilbert-base-uncased": 512, "distilbert-base-uncased-distilled-squad": 512, "distilbert-base-cased": 512, "distilbert-base-cased-distilled-squad": 512, "distilbert-base-german-cased": 512, "distilbert-base-multilingual-cased": 512, } PRETRAINED_INIT_CONFIGURATION = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class DistilBertTokenizerFast(BertTokenizerFast): r""" Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's `tokenizers` library). :class:`~transformers.DistilBertTokenizerFast` is identical to :class:`~transformers.BertTokenizerFast` and runs end-to-end tokenization: punctuation splitting and wordpiece. 
Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION model_input_names = ["attention_mask"] slow_tokenizer_class = DistilBertTokenizer
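# --- Editor's addition: illustrative usage sketch, not part of the original file. ---
# A minimal example of DistilBertTokenizerFast, assuming the "distilbert-base-uncased" checkpoint
# is reachable. Because `model_input_names` is ["attention_mask"], the encoding carries
# `input_ids` and `attention_mask` but no `token_type_ids` (DistilBERT has no token type embeddings).
from transformers import DistilBertTokenizerFast

fast_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
encoding = fast_tokenizer("Hello world!")
print(list(encoding.keys()))  # expected: ['input_ids', 'attention_mask']
print(fast_tokenizer.convert_ids_to_tokens(encoding["input_ids"]))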
4,107
49.097561
144
py
SLT-FAI
SLT-FAI-main/transformers/modeling_prophetnet.py
# coding=utf-8 # Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch ProphetNet model, ported from ProphetNet repo(fairsequery_states version). """ import copy import math from dataclasses import dataclass from typing import Dict, Optional, Tuple import torch import torch.nn.functional as F from torch import Tensor, nn from .activations import ACT2FN from .configuration_prophetnet import ProphetNetConfig from .file_utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings from .modeling_outputs import BaseModelOutput from .modeling_utils import PreTrainedModel from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "ProphenetConfig" _TOKENIZER_FOR_DOC = "ProphetNetTokenizer" PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/prophetnet-large-uncased", # See all ProphetNet models at https://huggingface.co/models?filter=prophetnet ] PROPHETNET_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) Original ProphetNet code can be found at <https://github.com/microsoft/ProphetNet> . Checkpoints were converted from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the file ``convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py``. This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior. Parameters: config (:class:`~transformers.ProphetNetConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ PROPHETNET_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.ProphetNetTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? 
<../glossary.html#attention-mask>`__ decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Provide for translation and summarization training. By default, the model will create this tensor by shifting the :obj:`input_ids` to the right, following the paper. decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`): Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read :func:`modeling_bart._prepare_decoder_inputs` and modify to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more information on the default strategy. encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`): Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`: :obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids`` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ PROPHETNET_STANDALONE_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.ProphetNetTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. 
output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ def softmax(hidden_state, dim, onnx_trace=False): if onnx_trace: return F.softmax(hidden_state.float(), dim=dim) else: return F.softmax(hidden_state, dim=dim, dtype=torch.float32) def ngram_attention_bias(sequence_length, ngram, device, dtype): """ This function computes the bias for the predict stream """ bias = torch.ones((ngram, sequence_length, 2 * sequence_length), device=device, dtype=dtype) * float("-inf") # create bias for stream_idx in range(ngram): for i in range(sequence_length): bias[stream_idx, i, sequence_length + i] = 0 bias[stream_idx, i, : max(i - stream_idx, 0) + 1] = 0 return bias def compute_relative_buckets(num_buckets, max_distance, relative_positions, is_bidirectional=False): """ This function computes individual parts of the relative position buckets. For more detail, see paper. """ inv_relative_positions = -relative_positions rel_positions_bucket = 0 if is_bidirectional: num_buckets = num_buckets // 2 rel_positions_bucket = ( rel_positions_bucket + torch.lt(inv_relative_positions, torch.zeros_like(inv_relative_positions)).int() * num_buckets ) inv_relative_positions = torch.abs(inv_relative_positions) else: inv_relative_positions = torch.max(inv_relative_positions, torch.zeros_like(inv_relative_positions)) max_exact = num_buckets // 2 is_small = torch.lt(inv_relative_positions, max_exact) val_if_large = max_exact + torch.log(inv_relative_positions.float() / max_exact) / math.log( max_distance / max_exact ) * (num_buckets - max_exact) val_if_large = torch.min(val_if_large, torch.ones_like(val_if_large) * (num_buckets - 1)).int() rel_positions_bucket = rel_positions_bucket + torch.where(is_small, inv_relative_positions.int(), val_if_large) return rel_positions_bucket def compute_all_stream_relative_buckets(num_buckets, max_distance, position_ids): """ This function computes both main and predict relative position buckets. For more detail, see paper. """ # main stream main_stream_relative_positions = position_ids.unsqueeze(1).repeat(1, position_ids.size(-1), 1) main_stream_relative_positions = main_stream_relative_positions - position_ids.unsqueeze(-1) # predicting stream predicting_stream_relative_positions = torch.cat((position_ids - 1, position_ids), dim=-1).unsqueeze(1) predicting_stream_relative_positions = predicting_stream_relative_positions.repeat(1, position_ids.size(-1), 1) predicting_stream_relative_positions = predicting_stream_relative_positions - position_ids.unsqueeze(-1) # get both position buckets main_relative_position_buckets = compute_relative_buckets( num_buckets, max_distance, main_stream_relative_positions, is_bidirectional=False ) predict_relative_position_buckets = compute_relative_buckets( num_buckets, max_distance, predicting_stream_relative_positions, is_bidirectional=False ) return main_relative_position_buckets, predict_relative_position_buckets @dataclass class ProphetNetSeq2SeqLMOutput(ModelOutput): """ Base class for sequence-to-sequence language models outputs. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Languaged modeling loss. 
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, decoder_sequence_length, config.vocab_size)`): Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax). logits_ngram (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax). past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`. Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs. decoder_ngram_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. decoder_ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. decoder_cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)`. Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder of the model. 
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads, encoder_sequence_length, encoder_sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None logits_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Tuple[torch.FloatTensor]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None decoder_cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class ProphetNetSeq2SeqModelOutput(ModelOutput): """ Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential decoding. Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`): Sequence of main stream hidden-states at the output of the last layer of the decoder of the model. If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size, 1, hidden_size)` is output. last_hidden_state_ngram (:obj:`torch.FloatTensor` of shape :obj:`(batch_size,ngram * decoder_sequence_length, config.vocab_size)`): Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model. past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`. Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs. 
decoder_ngram_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. decoder_ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the decoder_cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)`. Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads, encoder_sequence_length, encoder_sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" last_hidden_state: torch.FloatTensor last_hidden_state_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Tuple[torch.FloatTensor]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None decoder_cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class ProphetNetDecoderModelOutput(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`): Sequence of main stream hidden-states at the output of the last layer of the decoder of the model. If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size, 1, hidden_size)` is output. last_hidden_state_ngram (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model. past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`. Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs. ngram_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)`. Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the """ last_hidden_state: torch.FloatTensor last_hidden_state_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Tuple[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class ProphetNetDecoderLMOutput(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Languaged modeling loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, decoder_sequence_length, config.vocab_size)`): Prediction scores of the main stream language modeling head (scores for each vocabulary token before SoftMax). logits_ngram (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): Prediction scores of the predict stream language modeling head (scores for each vocabulary token before SoftMax). past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``): List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2, batch_size, num_attn_heads, decoder_sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be used (see :obj:`past_key_values` input) to speed up sequential decoding. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, decoder_sequence_length, hidden_size)`. Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs. ngram_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, ngram * decoder_sequence_length, hidden_size)`. Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. ngram_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads, decoder_sequence_length, decoder_sequence_length)`. Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the weighted average in the cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_attn_heads, encoder_sequence_length, decoder_sequence_length)`. Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to compute the weighted average in the """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None logits_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Tuple[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None def ProphetNetLayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True): if torch.cuda.is_available(): try: from apex.normalization import FusedProphetNetLayerNorm return FusedProphetNetLayerNorm(normalized_shape, eps, elementwise_affine) except ImportError: pass return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine) class ProphetNetPreTrainedModel(PreTrainedModel): config_class = ProphetNetConfig base_model_prefix = "prophetnet" def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert ( decoder_start_token_id is not None ), "self.model.config.decoder_start_token_id has to be defined. In ProphetNet it is usually set to the pad_token_id. See ProphetNet docs for more information" # shift inputs to the right shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." 
# replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values" return shifted_input_ids class ProhpetNetPositionalEmbeddings(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to the forward function. """ def __init__(self, config: ProphetNetConfig): super().__init__(config.max_position_embeddings, config.hidden_size, config.pad_token_id) def forward(self, inputs_shape, device, attention_mask=None, past_key_values=None, position_ids=None): assert (position_ids is None) or ( self.padding_idx is None ), "If position_ids is pre-computed then padding_idx should not be set." if position_ids is None: if past_key_values is not None: # position_ids is the same for every token when decoding a single step # Without the int() cast, it doesn't work in some cases when exporting to ONNX prev_num_input_ids = past_key_values[0]["self"]["prev_key_states"].shape[2] num_input_ids = inputs_shape[1] + prev_num_input_ids position_ids = torch.ones((1, 1), dtype=torch.long, device=device) * ( int(self.padding_idx + num_input_ids) ) else: if attention_mask is None: attention_mask = torch.ones(inputs_shape, dtype=torch.long, device=device) # retrieve position_ids from input_ids / attention_mask position_ids = ( torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask ).long() + self.padding_idx return super().forward(position_ids), position_ids def _forward(self, position_ids): return super().forward(position_ids) class ProphetNetSelfAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, config: ProphetNetConfig, num_attn_heads: int, ): super().__init__() hidden_size = config.hidden_size self.attention_dropout = config.attention_dropout self.dropout = config.dropout self.num_attn_heads = num_attn_heads self.head_dim = hidden_size // num_attn_heads assert ( self.head_dim * num_attn_heads == hidden_size ), "`config.hidden_size` must be divisible by `config.num_encoder_attention_heads` and `config.num_decoder_attention_heads`" self.key_proj = nn.Linear(hidden_size, hidden_size) self.value_proj = nn.Linear(hidden_size, hidden_size) self.query_proj = nn.Linear(hidden_size, hidden_size) self.out_proj = nn.Linear(hidden_size, hidden_size) def _reshape(self, tensor, first_dim, batch_size): return tensor.reshape(first_dim, batch_size * self.num_attn_heads, self.head_dim).transpose(0, 1) def forward( self, hidden_states, key_value_states: Optional[Tensor] = None, attention_mask: Optional[Tensor] = None, layer_state: Optional[Dict[str, Optional[Tensor]]] = None, ) -> Tuple[Tensor, Optional[Tensor]]: sequence_length, batch_size, hidden_size = hidden_states.size() # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None cache_key = "cross_attention" if is_cross_attention else "self" assert list(hidden_states.size()) == [ sequence_length, batch_size, hidden_size, ], f"Size of hidden states should be {sequence_length, batch_size, hidden_size}, but is {hidden_states.size()}" # previous time steps are cached - no need to recompute key and value if they are static if layer_state is not None: saved_state = 
layer_state.get(cache_key, None) query_states = self.query_proj(hidden_states) / (self.head_dim ** 0.5) query_states = self._reshape(query_states, sequence_length, batch_size) if not is_cross_attention: # self-attention key_states = self.key_proj(hidden_states) key_states = self._reshape(key_states, -1, batch_size) value_states = self.value_proj(hidden_states) value_states = self._reshape(value_states, -1, batch_size) elif saved_state is None: # cross-attention without layer state key_states = self.key_proj(key_value_states) key_states = self._reshape(key_states, -1, batch_size) value_states = self.value_proj(key_value_states) value_states = self._reshape(value_states, -1, batch_size) else: key_states = saved_state["prev_key_states"].view(batch_size * self.num_attn_heads, -1, self.head_dim) value_states = saved_state["prev_value_states"].view(batch_size * self.num_attn_heads, -1, self.head_dim) # Update cache if is_cross_attention: layer_state[cache_key] = { "prev_key_states": key_states.view(batch_size, self.num_attn_heads, -1, self.head_dim), "prev_value_states": value_states.view(batch_size, self.num_attn_heads, -1, self.head_dim), } key_sequence_length = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) assert attn_weights.size() == ( batch_size * self.num_attn_heads, sequence_length, key_sequence_length, ), f"`attn_weights` should be of size {batch_size * self.num_attn_heads, sequence_length, key_sequence_length}, but is of size {attn_weights.shape}" # This is part of a workaround to get around fork/join parallelism not supporting Optional types. if attention_mask is not None and attention_mask.dim() == 0: attention_mask = None assert attention_mask is None or attention_mask.size() == ( self.num_attn_heads * batch_size, 1, key_sequence_length, ), f"`attention_mask` should be `None` or of shape attention_mask.size() == {batch_size * self.num_attn_heads, 1, key_sequence_length}, but is {attention_mask.shape}" if attention_mask is not None: # don't attend to padding symbols attn_weights = attn_weights + attention_mask attn_weights = F.softmax(attn_weights, dim=-1) attn_probs = F.dropout( attn_weights, p=self.attention_dropout, training=self.training, ) attn_output = torch.bmm(attn_probs, value_states) assert attn_output.size() == ( batch_size * self.num_attn_heads, sequence_length, self.head_dim, ), "`attn_output` should be of shape {batch_size * self.num_attn_heads, sequence_length, self.head_dim}, but is of shape {attn_output.size()}" attn_output = attn_output.transpose(0, 1).contiguous().view(sequence_length, batch_size, hidden_size) attn_output = self.out_proj(attn_output) attn_weights = attn_weights.view(batch_size, self.num_attn_heads, sequence_length, key_sequence_length) attn_output = F.dropout(attn_output, p=self.dropout, training=self.training) return attn_output, attn_weights class ProhpetNetFeedForward(nn.Module): """ This is the residual two feed-forward layer block based on the original Transformer implementation. 
""" def __init__(self, config: ProphetNetConfig, ffn_dim: int): super().__init__() self.activation_fn = ACT2FN[config.activation_function] self.intermediate = nn.Linear(config.hidden_size, ffn_dim) self.output = nn.Linear(ffn_dim, config.hidden_size) self.activation_dropout = config.activation_dropout self.dropout = config.dropout def forward(self, hidden_states): hidden_states = self.intermediate(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.output(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) return hidden_states class ProphetNetNgramProphetNetSelfAttention(nn.Module): def __init__(self, config: ProphetNetConfig): super().__init__() self.hidden_size = config.hidden_size self.num_buckets = config.num_buckets self.relative_max_distance = config.relative_max_distance self.num_attn_heads = config.num_attention_heads self.dropout = config.dropout self.attention_dropout = config.attention_dropout self.head_dim = config.hidden_size // self.num_attn_heads self.ngram = config.ngram assert ( self.head_dim * self.num_attn_heads == config.hidden_size ), "config.hidden_size must be divisible by num_attn_heads" # key, value, query projection self.key_proj = nn.Linear(config.hidden_size, config.hidden_size) self.value_proj = nn.Linear(config.hidden_size, config.hidden_size) self.query_proj = nn.Linear(config.hidden_size, config.hidden_size) # out projection self.out_proj = nn.Linear(config.hidden_size, config.hidden_size) # rel position embeddings self.relative_pos_embeddings = nn.Linear(config.hidden_size, self.num_buckets * self.num_attn_heads) # for onnx runtime self.onnx_trace = False def _reshape(self, tensor, first_dim, batch_size): return tensor.reshape(first_dim, batch_size * self.num_attn_heads, self.head_dim).transpose(0, 1) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, hidden_states, layer_state=None, attention_mask=None, extended_predict_attention_mask=None, main_relative_position_buckets=None, predict_relative_position_buckets=None, position_ids=None, ): sequence_length, batch_size, hidden_size = hidden_states.size() assert list(hidden_states.size()) == [ sequence_length, batch_size, hidden_size, ], f"`hidden_states` should be of shape {sequence_length, batch_size, hidden_size}, but is of shape {hidden_states.shape}" # key and value of previous time steps are cached saved_state = layer_state.get("self", None) # project query_states = self.query_proj(hidden_states) key_states = self.key_proj(hidden_states) value_states = self.value_proj(hidden_states) # normalize query_states = query_states / (self.head_dim ** 0.5) # reshape query_states = self._reshape(query_states, sequence_length, batch_size) key_states = self._reshape(key_states, -1, batch_size) value_states = self._reshape(value_states, -1, batch_size) # chunk into main stream and predict stream hidden_states_list = hidden_states.chunk(1 + self.ngram, dim=0) query_states_list = query_states.chunk(1 + self.ngram, dim=1) key_states_list = key_states.chunk(1 + self.ngram, dim=1) value_states_list = value_states.chunk(1 + self.ngram, dim=1) main_hidden_states, hidden_states_predict_list = hidden_states_list[0], hidden_states_list[1:] main_query_states, predict_query_states_list = query_states_list[0], query_states_list[1:] main_key_states, predict_key_states_list = key_states_list[0], key_states_list[1:] main_value_states, 
predict_value_states_list = value_states_list[0], value_states_list[1:] # saved states are stored with shape (batch_size, num_attn_heads, seq_len, head_dim) if saved_state is not None: prev_main_key_states = saved_state["prev_key_states"].view( batch_size * self.num_attn_heads, -1, self.head_dim ) main_key_states = torch.cat((prev_main_key_states, main_key_states), dim=1) prev_main_value_states = saved_state["prev_value_states"].view( batch_size * self.num_attn_heads, -1, self.head_dim ) main_value_states = torch.cat((prev_main_value_states, main_value_states), dim=1) # Update cache layer_state["self"] = { "prev_key_states": main_key_states.view(batch_size, self.num_attn_heads, -1, self.head_dim), "prev_value_states": main_value_states.view(batch_size, self.num_attn_heads, -1, self.head_dim), } # get seq_length of main stream only main_sequence_length = sequence_length // (1 + self.ngram) # MAIN-STREAM # main attn weights main_attn_weights = torch.bmm(main_query_states, main_key_states.transpose(1, 2)) # retrieve relative position embeddings for each layer -> see paper for more details main_relative_pos_embeddings = self.get_main_relative_pos_embeddings( main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets ) main_attn_weights = main_attn_weights + main_relative_pos_embeddings if attention_mask is not None: main_attn_weights = main_attn_weights + attention_mask main_attn_probs = softmax( main_attn_weights, dim=-1, onnx_trace=self.onnx_trace, ).type_as(main_attn_weights) main_attn_probs = F.dropout(main_attn_probs, p=self.attention_dropout, training=self.training) # project to attn_output main_attn_output = torch.bmm(main_attn_probs, main_value_states) main_attn_output = ( main_attn_output.transpose(0, 1).contiguous().view(1, main_sequence_length, batch_size, hidden_size) ) main_attn_output = self.out_proj(main_attn_output) # PREDICT-STREAM # [ngram, B*head, T, c] predict_query_states = torch.cat(predict_query_states_list, 0).view( self.ngram, -1, main_sequence_length, self.head_dim ) # [ngram, B*head, 2*T, c] predict_key_states = torch.cat( [torch.cat([main_key_states, key], 1).unsqueeze(0) for key in predict_key_states_list], 0 ) # [ngram, T, B, C] predict_hidden_states = torch.cat(hidden_states_predict_list, 0).view( self.ngram, main_sequence_length, batch_size, hidden_size ) # [ngram, B*head, 2*T, c] predict_value_states = torch.cat( [torch.cat([main_value_states, v_p], 1).unsqueeze(0) for v_p in predict_value_states_list], 0 ) # [ngram, B*head, T, 2*T] predict_attn_weights = torch.einsum("nbtc,nbsc->nbts", (predict_query_states, predict_key_states)) # [ngram, B*head, T, S] # retrieve relative position embeddings for each layer -> see paper for more details predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings( predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets ) # [ngram, B*head, T, 2*T] predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings if extended_predict_attention_mask is not None: predict_attn_weights = predict_attn_weights + extended_predict_attention_mask predict_attn_probs = softmax( predict_attn_weights, dim=-1, onnx_trace=self.onnx_trace, ).type_as(predict_attn_weights) predict_attn_probs = F.dropout(predict_attn_probs, p=self.attention_dropout, training=self.training) # project to attention output # [ngram, B*head, T, c] predict_attn_output = torch.einsum("nbts,nbsc->nbtc", (predict_attn_probs, predict_value_states)) # [ngram, T, B, C] predict_attn_output = 
( predict_attn_output.transpose(1, 2) .contiguous() .view(self.ngram, main_sequence_length, batch_size, hidden_size) ) predict_attn_output = self.out_proj(predict_attn_output) # concat to single attn output # [1+ngram*T, B, C] attn_output = torch.cat([main_attn_output, predict_attn_output], 0).view(-1, batch_size, hidden_size) # reshape into better form for `config.output_attentions` main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, main_sequence_length, -1) predict_attn_probs = predict_attn_probs.view( self.ngram, batch_size, self.num_attn_heads, main_sequence_length, -1 ).transpose(0, 1) attn_output = F.dropout(attn_output, p=self.dropout, training=self.training) return attn_output, main_attn_probs, predict_attn_probs def get_main_relative_pos_embeddings( self, hidden_states, attn_weights, position_ids, main_relative_position_buckets ): # input hidden_states [T,B,C], input attn_weights [T*head,T,S], input position_ids [B,T] or [1,1] if main_relative_position_buckets is None: batch_size, sequence_length = hidden_states.shape[:2] relative_positions = ( torch.arange(1, attn_weights.shape[-1] + 1) .unsqueeze(0) .unsqueeze(0) .repeat(batch_size, sequence_length, 1) .to(position_ids.device) ) relative_positions = relative_positions - position_ids.unsqueeze(0).repeat( batch_size, sequence_length, 1 ) # [B, T, s] main_relative_position_buckets = compute_relative_buckets( self.num_buckets, self.relative_max_distance, relative_positions, False ) hidden_states = hidden_states.transpose(0, 1) # [B,T,C] rel_pos_embeddings = self.relative_pos_embeddings(hidden_states) # [B,T,Buckets*head] rel_pos_embeddings = rel_pos_embeddings.view( rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads) ).permute( 0, 3, 1, 2 ) # [B,T,Buckets,head] rel_pos_embeddings = rel_pos_embeddings.reshape(attn_weights.shape[:2] + (-1,)) # [B*head,T,Buckets] main_relative_position_buckets = ( main_relative_position_buckets.repeat(1, self.num_attn_heads, 1) .view(-1, main_relative_position_buckets.shape[-1]) .long() ) # [B*head*T, T] rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1)) # [B*head*T,Buckets] main_relative_pos_embeddings = torch.gather( rel_pos_embeddings, dim=1, index=main_relative_position_buckets ).view(attn_weights.shape[:2] + (-1,)) return main_relative_pos_embeddings def get_predict_relative_pos_embeddings( self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets ): # input hidden_states [ngram, T,B,C], input attn_weights [ngram, B*head,T,S], input position_ids [B,T] or [1,1], input predict_relative_position_buckets [B,T, 2*T] or None sequence_length, batch_size = hidden_states.shape[1:3] if predict_relative_position_buckets is None: key_sequence_length = attn_weights.shape[-1] assert ( position_ids[0][0] == key_sequence_length - 1 ), "`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... 
(key_sequence_length - 1)" relative_positions = ( torch.arange(0, key_sequence_length) .unsqueeze(0) .unsqueeze(0) .repeat(batch_size, sequence_length, 1) .to(position_ids.device) ) relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1) predict_relative_position_buckets = compute_relative_buckets( self.num_buckets, self.relative_max_distance, relative_positions, False ) hidden_states = hidden_states.transpose(1, 2) # [ngram, B, T, C] rel_pos_embeddings = self.relative_pos_embeddings(hidden_states).view( hidden_states.shape[:-1] + (self.num_buckets, self.num_attn_heads) ) # [ngram, B, T, bucket, head] rel_pos_embeddings = rel_pos_embeddings.permute(0, 1, 4, 2, 3).reshape( self.ngram * batch_size * self.num_attn_heads, sequence_length, -1 ) # [ngram*B*head, T, bucket] predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0).repeat( self.ngram, 1, self.num_attn_heads, 1 ) # [ngram, B, head*T, S] rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1)) predict_relative_position_buckets = predict_relative_position_buckets.view( -1, predict_relative_position_buckets.size(-1) ).long() # [ngram*B*head*T, S] predict_relative_pos_embeddings = torch.gather( rel_pos_embeddings, dim=1, index=predict_relative_position_buckets ).view( self.ngram, batch_size * self.num_attn_heads, sequence_length, -1 ) # [ngram, B*head, T, S] return predict_relative_pos_embeddings class ProphetNetEncoderLayer(nn.Module): """ Encoder block for Prophetnet """ def __init__(self, config: ProphetNetConfig): super().__init__() # 1st residual block self.self_attn = ProphetNetSelfAttention(config, config.num_encoder_attention_heads) self.self_attn_layer_norm = ProphetNetLayerNorm(config.hidden_size) # 2nd residual block self.feed_forward = ProhpetNetFeedForward(config, config.encoder_ffn_dim) self.feed_forward_layer_norm = ProphetNetLayerNorm(config.hidden_size) def forward(self, hidden_states, attention_mask): # 1st residual block attention_output, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, ) hidden_states = self.self_attn_layer_norm(attention_output + hidden_states) # 2nd residual block feed_forward_output = self.feed_forward(hidden_states) hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states) return hidden_states, attn_weights class ProphetNetDecoderLayer(nn.Module): """ Decoder block for Prophetnet """ def __init__(self, config: ProphetNetConfig): super().__init__() # 1st residual block self.self_attn = ProphetNetNgramProphetNetSelfAttention(config) self.self_attn_layer_norm = ProphetNetLayerNorm(config.hidden_size) # 2nd residual block if config.add_cross_attention: self.cross_attn = ProphetNetSelfAttention(config, config.num_decoder_attention_heads) self.cross_attn_layer_norm = ProphetNetLayerNorm(config.hidden_size) # 3rd residual block self.feed_forward = ProhpetNetFeedForward(config, config.decoder_ffn_dim) self.feed_forward_layer_norm = ProphetNetLayerNorm(config.hidden_size) def forward( self, hidden_states, encoder_hidden_states=None, encoder_attn_mask=None, layer_state=None, attention_mask=None, extended_predict_attention_mask=None, main_relative_position_buckets=None, predict_relative_position_buckets=None, position_ids=None, ): layer_state = layer_state if layer_state is not None else {} # 1st residual block ngram_attention_output, self_attn_weights, self_attn_weights_ngram = self.self_attn( hidden_states=hidden_states, 
layer_state=layer_state, attention_mask=attention_mask, extended_predict_attention_mask=extended_predict_attention_mask, main_relative_position_buckets=main_relative_position_buckets, predict_relative_position_buckets=predict_relative_position_buckets, position_ids=position_ids, ) hidden_states = self.self_attn_layer_norm(hidden_states + ngram_attention_output) cross_attn_weights = None if encoder_hidden_states is not None: # 2nd residual block attention_output, cross_attn_weights = self.cross_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attn_mask, layer_state=layer_state, # mutates layer state ) hidden_states = self.cross_attn_layer_norm(attention_output + hidden_states) # 3rd residual block feed_forward_output = self.feed_forward(hidden_states) hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states) return ( hidden_states, self_attn_weights, self_attn_weights_ngram, cross_attn_weights, layer_state, ) # just self_attn weights for now, following t5, layer_state = cache for decoding @add_start_docstrings( "The standalone encoder part of the ProphetNetModel.", PROPHETNET_START_DOCSTRING, ) class ProphetNetEncoder(ProphetNetPreTrainedModel): r""" word_embeddings (:obj:`torch.nn.Embeddings` of shape :obj:`(config.vocab_size, config.hidden_size)`, `optional`): The word embedding parameters. This can be used to initialize :class:`~transformers.ProphetNetEncoder` with pre-defined word embeddings instead of randomly initialized word embeddings. """ def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = None): super().__init__(config) self.word_embeddings = ( word_embeddings if word_embeddings is not None else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) ) self.position_embeddings = ProhpetNetPositionalEmbeddings(config) self.embeddings_layer_norm = ProphetNetLayerNorm(config.hidden_size) self.layers = nn.ModuleList([ProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)]) self.init_weights() def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, value): self.word_embeddings = value @add_start_docstrings_to_callable(PROPHETNET_STANDALONE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Example:: >>> from transformers import ProphetNetTokenizer, ProphetNetEncoder >>> import torch >>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased') >>> model = ProphetNetEncoder.from_pretrained('patrickvonplaten/prophetnet-large-uncased-standalone', return_dict=True) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and inputs_embeds is None: raise ValueError("Either input_ids or inputs_embeds has to be passed.") elif input_ids is not None and inputs_embeds is not None: raise ValueError("Make sure to only pass input_ids or inputs_embeds.") elif 
input_ids is not None and inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) # prepare attention mask if attention_mask is not None: extended_attention_mask = ( 1.0 - attention_mask[:, None, :].repeat(self.config.num_attention_heads, 1, 1) ) * -10000.0 extended_attention_mask = extended_attention_mask.to(inputs_embeds.dtype) else: extended_attention_mask = None position_embeddings, position_ids = self.position_embeddings(inputs_embeds.shape[:2], inputs_embeds.device) hidden_states = inputs_embeds + position_embeddings hidden_states = self.embeddings_layer_norm(hidden_states) hidden_states = F.dropout(hidden_states, p=self.config.dropout, training=self.training) hidden_states = hidden_states.transpose(0, 1) # B x T x C -> T x B x C encoder_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for encoder_layer in self.layers: if output_hidden_states: encoder_hidden_states = encoder_hidden_states + (hidden_states.transpose(0, 1),) hidden_states, attn_probs = encoder_layer(hidden_states, attention_mask=extended_attention_mask) if output_attentions: all_attentions = all_attentions + (attn_probs,) hidden_states = hidden_states.transpose(0, 1) if output_hidden_states: encoder_hidden_states = encoder_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_hidden_states, attentions=all_attentions ) @add_start_docstrings( "The standalone decoder part of the ProphetNetModel.", PROPHETNET_START_DOCSTRING, ) class ProphetNetDecoder(ProphetNetPreTrainedModel): r""" word_embeddings (:obj:`torch.nn.Embeddings` of shape :obj:`(config.vocab_size, config.hidden_size)`, `optional`): The word embedding parameters. This can be used to initialize :class:`~transformers.ProphetNetDecoder` with pre-defined word embeddings instead of randomly initialized word embeddings. 
""" def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = None): super().__init__(config) self.ngram = config.ngram self.num_buckets = config.num_buckets self.relative_max_distance = config.relative_max_distance self.dropout = config.dropout self.max_target_positions = config.max_position_embeddings self.word_embeddings = ( word_embeddings if word_embeddings is not None else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) ) self.position_embeddings = ProhpetNetPositionalEmbeddings(config) self.ngram_embeddings = nn.Embedding(self.ngram, config.hidden_size, None) self.layers = nn.ModuleList([ProphetNetDecoderLayer(config) for _ in range(config.num_decoder_layers)]) self.embeddings_layer_norm = ProphetNetLayerNorm(config.hidden_size) self.init_weights() def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, value): self.word_embeddings = value @add_start_docstrings_to_callable(PROPHETNET_STANDALONE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ProphetNetDecoderModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids`` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. Returns: Example:: >>> from transformers import ProphetNetTokenizer, ProphetNetDecoder >>> import torch >>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased') >>> model = ProphetNetDecoder.from_pretrained('patrickvonplaten/prophetnet-large-uncased-standalone', add_cross_attention=False, return_dict=True) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state """ use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and inputs_embeds is None: raise ValueError("Either `decoder_input_ids` or `decoder_inputs_embeds` has to be passed.") elif input_ids is not None and inputs_embeds is not None: raise ValueError("Make sure to only pass `decoder_input_ids` or `decoder_inputs_embeds`.") elif input_ids is not None and inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) batch_size, sequence_length = inputs_embeds.shape[:2] main_stream_pos_embed, position_ids = self.position_embeddings( (batch_size, sequence_length), device=inputs_embeds.device, past_key_values=past_key_values, ) if past_key_values is not None: main_relative_position_buckets, predict_relative_position_buckets = None, None else: ( main_relative_position_buckets, predict_relative_position_buckets, ) = self.compute_buffered_relative_buckets(position_ids) predicting_stream_pos_embed = self.position_embeddings._forward(position_ids + 1) # add position embeddings hidden_states = inputs_embeds + main_stream_pos_embed hidden_states = hidden_states.transpose(0, 1) ngram_embeddings = self.ngram_embeddings.weight # prepare attention mask if past_key_values is not None: assert ( hidden_states.size(0) == 1 ), "At the moment `use_cache` is only supported for `decoder_input_ids` of length 1" ngram_hidden_states = [ (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).transpose(0, 1).repeat(1, batch_size, 1) for ngram in range(self.ngram) ] extended_attention_mask = None extended_predict_attention_mask = None else: ngram_hidden_states = [ (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).transpose(0, 1) for ngram in range(self.ngram) ] extended_attention_mask = self.prepare_attention_mask(hidden_states, attention_mask) extended_predict_attention_mask = self.prepare_predict_attention_mask(hidden_states, attention_mask) # prepare encoder attention mask if encoder_attention_mask is not None: extended_encoder_attention_mask = ( 1.0 - encoder_attention_mask[:, None, :].repeat(self.config.num_attention_heads, 1, 1) ) * -10000.0 extended_encoder_attention_mask = extended_encoder_attention_mask.to(inputs_embeds.dtype) else: extended_encoder_attention_mask = None hidden_states = torch.cat([hidden_states] + ngram_hidden_states, 0) if self.embeddings_layer_norm: hidden_states = self.embeddings_layer_norm(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) if encoder_hidden_states is not None: encoder_hidden_states = encoder_hidden_states.transpose(0, 1) # init attentions, hidden_states and cache with empty tuples all_main_stream_hidden_states = () if output_hidden_states else None all_ngram_stream_hidden_states = () if output_hidden_states and self.config.ngram > 0 else None all_main_stream_attns = () if output_attentions else None all_ngram_stream_attns = () if output_attentions else None all_cross_attns = () if output_attentions and self.config.add_cross_attention else None present_key_values = () if use_cache else None for idx, 
decoder_layer in enumerate(self.layers): if output_hidden_states: all_main_stream_hidden_states += (hidden_states[:sequence_length].transpose(0, 1),) if self.config.ngram > 0: all_ngram_stream_hidden_states += (hidden_states[sequence_length:].transpose(0, 1),) layer_state = past_key_values[idx] if past_key_values is not None else None ( hidden_states, layer_self_attn, layer_self_predict_attn_output, layer_cross_attn, layer_past, ) = decoder_layer( hidden_states, encoder_hidden_states=encoder_hidden_states, encoder_attn_mask=extended_encoder_attention_mask, layer_state=layer_state, attention_mask=extended_attention_mask, extended_predict_attention_mask=extended_predict_attention_mask, main_relative_position_buckets=main_relative_position_buckets, predict_relative_position_buckets=predict_relative_position_buckets, position_ids=position_ids, ) if use_cache: present_key_values += (layer_past,) if output_attentions: all_main_stream_attns += (layer_self_attn,) all_ngram_stream_attns += (layer_self_predict_attn_output,) if self.config.add_cross_attention: all_cross_attns += (layer_cross_attn,) if output_hidden_states: all_main_stream_hidden_states += (hidden_states[:sequence_length].transpose(0, 1),) if self.config.ngram > 0: all_ngram_stream_hidden_states += (hidden_states[sequence_length:].transpose(0, 1),) # split last_hidden_state for return last_hidden_state = hidden_states[:sequence_length].transpose(0, 1) last_hidden_state_ngram = hidden_states[sequence_length:].transpose(0, 1) if self.config.ngram > 0 else None encoder_hidden_states = encoder_hidden_states.transpose(0, 1) if encoder_hidden_states is not None else None if not return_dict: return tuple( v for v in [ last_hidden_state, last_hidden_state_ngram, present_key_values, all_main_stream_hidden_states, all_ngram_stream_hidden_states, all_main_stream_attns, all_ngram_stream_attns, all_cross_attns, ] if v is not None ) return ProphetNetDecoderModelOutput( last_hidden_state=last_hidden_state, last_hidden_state_ngram=last_hidden_state_ngram, past_key_values=present_key_values, hidden_states=all_main_stream_hidden_states, hidden_states_ngram=all_ngram_stream_hidden_states, attentions=all_main_stream_attns, ngram_attentions=all_ngram_stream_attns, cross_attentions=all_cross_attns, ) def compute_buffered_relative_buckets(self, position_ids): batch_size, sequence_length = position_ids.shape position_ids = torch.arange(1, self.max_target_positions).to(position_ids.device).repeat(1, 1) main_relative_buckets, predict_relative_buckets = compute_all_stream_relative_buckets( self.num_buckets, self.relative_max_distance, position_ids ) # buffer relative buckets main_relative_buckets = main_relative_buckets[:, :sequence_length, :sequence_length].repeat(batch_size, 1, 1) predict_relative_buckets = torch.cat( [ predict_relative_buckets[:, :sequence_length, :sequence_length], predict_relative_buckets[ :, :sequence_length, self.max_target_positions : self.max_target_positions + sequence_length ], ], 2, ).repeat(batch_size, 1, 1) return main_relative_buckets, predict_relative_buckets def prepare_attention_mask(self, hidden_states, attention_mask): seq_length, batch_size = hidden_states.shape[:2] # get causal mask causal_mask = hidden_states.new(seq_length, seq_length).float().fill_(-float("inf")) causal_mask = torch.triu(causal_mask, 1) extended_causal_mask = causal_mask[:seq_length, :seq_length][None, :, :].expand( (batch_size,) + causal_mask.shape ) # add usual attention mask if attention_mask is not None: extended_attention_mask = (1.0 - 
attention_mask[:, None, :]) * -10000.0 extended_attention_mask = extended_causal_mask + extended_attention_mask else: extended_attention_mask = extended_causal_mask return extended_attention_mask.repeat(self.config.num_decoder_attention_heads, 1, 1).to(hidden_states.dtype) def prepare_predict_attention_mask(self, hidden_states, attention_mask): seq_length, batch_size = hidden_states.shape[:2] # get causal mask predict_causal_mask = ngram_attention_bias( self.max_target_positions, self.ngram, hidden_states.device, hidden_states.dtype ) predict_causal_mask = torch.cat( [ predict_causal_mask[:, :seq_length, :seq_length], predict_causal_mask[ :, :seq_length, self.max_target_positions : self.max_target_positions + seq_length ], ], dim=-1, ) extended_predict_causal_mask = predict_causal_mask[:, None, :, :].expand( predict_causal_mask.shape[:1] + (batch_size,) + predict_causal_mask.shape[1:] ) # add usual attention mask if attention_mask is not None: extended_attention_mask = (1.0 - attention_mask[None, :, None, :]) * -10000.0 extended_attention_mask = extended_attention_mask.expand((self.ngram, batch_size, seq_length, seq_length)) # predicted stream attention_mask should always be 0 extended_attention_mask = torch.cat( [extended_attention_mask, torch.zeros_like(extended_attention_mask)], dim=-1 ) extended_predict_attention_mask = extended_predict_causal_mask + extended_attention_mask else: extended_predict_attention_mask = extended_predict_causal_mask return extended_predict_attention_mask.repeat(1, self.config.num_decoder_attention_heads, 1, 1).to( hidden_states.dtype ) @add_start_docstrings( "The bare ProphetNet Model outputting raw hidden-states without any specific head on top.", PROPHETNET_START_DOCSTRING, ) class ProphetNetModel(ProphetNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) encoder_config = copy.deepcopy(config) encoder_config.is_encoder_decoder = False encoder_config.use_cache = False self.encoder = ProphetNetEncoder(encoder_config, self.word_embeddings) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False self.decoder = ProphetNetDecoder(decoder_config, self.word_embeddings) self.init_weights() def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, value): self.word_embeddings = value self.encoder.word_embeddings = self.word_embeddings self.decoder.word_embeddings = self.word_embeddings def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_callable(PROPHETNET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ProphetNetSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs: Optional[Tuple] = None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Example:: >>> from transformers import ProphetNetTokenizer, ProphetNetModel >>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased') >>> model = ProphetNetModel.from_pretrained('microsoft/prophetnet-large-uncased') >>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 >>> decoder_input_ids = 
tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids, return_dict=True) >>> last_hidden_states = outputs.last_hidden_state # main stream hidden states >>> last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states """ use_cache == use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return ProphetNetSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram, decoder_attentions=decoder_outputs.attentions, decoder_ngram_attentions=decoder_outputs.ngram_attentions, decoder_cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The ProphetNet Model with a language modeling head. Can be used for sequence generation tasks.", PROPHETNET_START_DOCSTRING, ) class ProphetNetForConditionalGeneration(ProphetNetPreTrainedModel): def __init__(self, config: ProphetNetConfig): super().__init__(config) self.prophetnet = ProphetNetModel(config) self.padding_idx = config.pad_token_id self.disable_ngram_loss = config.disable_ngram_loss self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.init_weights() def get_output_embeddings(self): return self.lm_head def get_input_embeddings(self): return self.prophetnet.word_embeddings @add_start_docstrings_to_callable(PROPHETNET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ProphetNetSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ..., config.vocab_size - 1]`. 
All labels set to ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]`` Returns: Example:: >>> from transformers import ProphetNetTokenizer, ProphetNetForConditionalGeneration >>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased') >>> model = ProphetNetForConditionalGeneration.from_pretrained('microsoft/prophetnet-large-uncased') >>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids, return_dict=True) >>> logits_next_token = outputs.logits # logits to predict next token as usual >>> logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... next tokens """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) outputs = self.prophetnet( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) batch_size, sequence_length = ( decoder_input_ids.shape if decoder_input_ids is not None else decoder_inputs_embeds.shape[:2] ) predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1) predict_logits = self.lm_head(predicting_streams) logits = predict_logits[:, 0] logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None loss = None if labels is not None: loss = self._compute_loss(predict_logits, labels) if not return_dict: all_logits = tuple(v for v in [logits, logits_ngram] if v is not None) return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:] else: return ProphetNetSeq2SeqLMOutput( loss=loss, logits=logits, logits_ngram=logits_ngram, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_ngram_hidden_states=outputs.decoder_ngram_hidden_states, decoder_attentions=outputs.decoder_attentions, decoder_ngram_attentions=outputs.decoder_ngram_attentions, decoder_cross_attentions=outputs.decoder_cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def _compute_loss(self, logits, labels): expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(self.padding_idx) for i in range(self.config.ngram): if i > 0 and self.disable_ngram_loss: break expend_targets[i, :, :] = labels lprobs = F.log_softmax( logits.view(-1, logits.size(-1)), dim=-1, dtype=torch.float32, ) loss = F.nll_loss(lprobs, expend_targets.view(-1), reduction="sum") if self.config.eps > 0.0: smooth_loss = -lprobs.sum(dim=-1, keepdim=True) non_pad_mask = expend_targets.ne(self.padding_idx).view(-1) smooth_loss = smooth_loss[non_pad_mask] smooth_loss = smooth_loss.sum() eps_i = self.config.eps / lprobs.size(-1) loss = (1.0 - self.config.eps) * 
loss + eps_i * smooth_loss return loss def prepare_inputs_for_generation( self, decoder_input_ids, past, attention_mask, use_cache, encoder_outputs, **kwargs ): assert encoder_outputs is not None, "`encoder_outputs` have to be passed for generation." if past: decoder_input_ids = decoder_input_ids[:, -1:] # first step, decoder_cached_states are empty return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "use_cache": use_cache, } @staticmethod def _reorder_cache(past, beam_idx): # this function reorders the cache for beam search def _reorder_cache(cache_dict, beam_idx): for k, key_value_states in cache_dict.items(): if key_value_states is not None: cache_dict[k] = key_value_states.index_select(0, beam_idx) return cache_dict reordered_past = [] for layer_past in past: # get the correct batch idx from decoder layer's batch dim for cross and self-attn layer_past_new = { attn_key: _reorder_cache(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items() } reordered_past.append(layer_past_new) return reordered_past def get_encoder(self): return self.prophetnet.encoder def get_decoder(self): return self.prophetnet.decoder @add_start_docstrings( "The standalone decoder part of the ProphetNetModel with a lm head on top. The model can be used for causal language modeling.", PROPHETNET_START_DOCSTRING, ) class ProphetNetForCausalLM(ProphetNetPreTrainedModel): def __init__(self, config): super().__init__(config) # set config for CLM config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False self.decoder = ProphetNetDecoder(config) self.padding_idx = config.pad_token_id self.disable_ngram_loss = config.disable_ngram_loss self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.init_weights() def get_input_embeddings(self): return self.decoder.word_embeddings def set_input_embeddings(self, value): self.decoder.word_embeddings = value def get_output_embeddings(self): return self.lm_head @add_start_docstrings_to_callable(PROPHETNET_STANDALONE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ProphetNetDecoderLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. 
If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids`` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` Returns: Example:: >>> from transformers import ProphetNetTokenizer, ProphetNetForCausalLM >>> import torch >>> tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased') >>> model = ProphetNetForCausalLM.from_pretrained('patrickvonplaten/prophetnet-decoder-clm-large-uncased', return_dict=True) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs, return_dict=True) >>> logits = outputs.logits >>> # Model can also be used with EncoderDecoder framework >>> from transformers import BertTokenizer, EncoderDecoderModel >>> import torch >>> tokenizer = BertTokenizer.from_pretrained('bert-large-uncased') >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-large-uncased", "patrickvonplaten/prophetnet-decoder-clm-large-uncased") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(input_ids=inputs["input_ids"], labels=inputs["input_ids"], return_dict=True) >>> loss = outputs.loss """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) batch_size, sequence_length = input_ids.shape if input_ids is not None else inputs_embeds.shape[:2] predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1) predict_logits = self.lm_head(predicting_streams) logits = predict_logits[:, 0] logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None loss = None if labels is not None: loss = self._compute_loss(predict_logits, labels) if not return_dict: all_logits = tuple(v for v in [logits, logits_ngram] if v is not None) return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:] else: return ProphetNetDecoderLMOutput( loss=loss, logits=logits, logits_ngram=logits_ngram, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, hidden_states_ngram=outputs.hidden_states_ngram, attentions=outputs.attentions, ngram_attentions=outputs.ngram_attentions, ) def _compute_loss(self, logits, labels): expend_targets = 
labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(self.padding_idx) for i in range(self.config.ngram): if i > 0 and self.disable_ngram_loss: break expend_targets[i, :, :] = labels lprobs = F.log_softmax( logits.view(-1, logits.size(-1)), dim=-1, dtype=torch.float32, ) loss = F.nll_loss(lprobs, expend_targets.view(-1), reduction="sum") if self.config.eps > 0.0: smooth_loss = -lprobs.sum(dim=-1, keepdim=True) non_pad_mask = expend_targets.ne(self.padding_idx).view(-1) smooth_loss = smooth_loss[non_pad_mask] smooth_loss = smooth_loss.sum() eps_i = self.config.eps / lprobs.size(-1) loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss return loss def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs): # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past: input_ids = input_ids[:, -1:] # first step, decoder_cached_states are empty return { "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed "attention_mask": attention_mask, "past_key_values": past, "use_cache": use_cache, } @staticmethod def _reorder_cache(past, beam_idx): # this function reorders the cache for beam search def _reorder_cache(cache_dict, beam_idx): for k, key_value_states in cache_dict.items(): if key_value_states is not None: cache_dict[k] = key_value_states.index_select(0, beam_idx) return cache_dict reordered_past = [] for layer_past in past: # get the correct batch idx from decoder layer's batch dim for cross and self-attn layer_past_new = { attn_key: _reorder_cache(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items() } reordered_past.append(layer_past_new) return reordered_past def set_decoder(self, decoder): self.decoder = decoder def get_decoder(self): return self.decoder
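# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original ProphetNet sources): a
# standalone, simplified version of the n-gram label-smoothed loss that
# `_compute_loss` above implements. The function name, the toy shapes and the
# `eps`/`pad_id` values below are assumptions chosen only for this example;
# it also differs in minor details (padded targets are skipped explicitly via
# `ignore_index`). The model code above remains the reference implementation.
import torch
import torch.nn.functional as F


def ngram_label_smoothed_loss(predict_logits, labels, eps=0.1, pad_id=0):
    # predict_logits: (batch, ngram, seq_len, vocab) -- one stream per future token
    # labels: (batch, seq_len) gold token ids, `pad_id` marks padding positions
    batch_size, ngram, seq_len, vocab_size = predict_logits.shape
    # every predicting stream is trained against the same gold sequence
    targets = labels.unsqueeze(1).expand(batch_size, ngram, seq_len).reshape(-1)
    lprobs = F.log_softmax(predict_logits.reshape(-1, vocab_size), dim=-1)
    nll = F.nll_loss(lprobs, targets, reduction="sum", ignore_index=pad_id)
    # uniform label smoothing over the vocabulary, skipping padded targets
    smooth = -lprobs.sum(dim=-1)[targets.ne(pad_id)].sum()
    return (1.0 - eps) * nll + (eps / vocab_size) * smooth


# toy usage: batch=1, ngram=2 predicting streams, seq_len=5, vocab=11
toy_logits = torch.randn(1, 2, 5, 11)
toy_labels = torch.randint(1, 11, (1, 5))
print(ngram_label_smoothed_loss(toy_logits, toy_labels))
# ---------------------------------------------------------------------------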
101,399
48.391135
213
py
SLT-FAI
SLT-FAI-main/transformers/modeling_mobilebert.py
# MIT License # # Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import math import os import warnings from dataclasses import dataclass from typing import Optional, Tuple import torch import torch.nn.functional as F from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from .activations import ACT2FN from .configuration_mobilebert import MobileBertConfig from .file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable, replace_return_docstrings, ) from .modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from .utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "MobileBertConfig" _TOKENIZER_FOR_DOC = "MobileBertTokenizer" MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = ["google/mobilebert-uncased"] def load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.replace("ffn_layer", "ffn") name = name.replace("FakeLayerNorm", "LayerNorm") name = name.replace("extra_output_weights", "dense/kernel") name = name.replace("bert", "mobilebert") name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info("Skipping {}".format("/".join(name))) continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model def mish(x): return x * torch.tanh(nn.functional.softplus(x)) class NoNorm(nn.Module): def __init__(self, feat_size, eps=None): super().__init__() self.bias = nn.Parameter(torch.zeros(feat_size)) self.weight = nn.Parameter(torch.ones(feat_size)) def forward(self, input_tensor): return input_tensor * self.weight + self.bias NORM2FN = {"layer_norm": torch.nn.LayerNorm, "no_norm": NoNorm} class MobileBertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.trigram_input = config.trigram_input self.embedding_size = config.embedding_size self.hidden_size = config.hidden_size self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) embed_dim_multiplier = 3 if self.trigram_input else 1 embedded_input_size = self.embedding_size * embed_dim_multiplier self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", 
torch.arange(config.max_position_embeddings).expand((1, -1))) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if self.trigram_input: # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited # Devices (https://arxiv.org/abs/2004.02984) # # The embedding table in BERT models accounts for a substantial proportion of model size. To compress # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT. # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512 # dimensional output. inputs_embeds = torch.cat( [ F.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0), inputs_embeds, F.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0), ], dim=2, ) if self.trigram_input or self.embedding_size != self.hidden_size: inputs_embeds = self.embedding_transformation(inputs_embeds) # Add positional embeddings and token type embeddings, then layer # normalize and perform dropout. position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class MobileBertSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.true_hidden_size, self.all_head_size) self.key = nn.Linear(config.true_hidden_size, self.all_head_size) self.value = nn.Linear( config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size ) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, query_tensor, key_tensor, value_tensor, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, ): mixed_query_layer = self.query(query_tensor) mixed_key_layer = self.key(key_tensor) mixed_value_layer = self.value(value_tensor) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class MobileBertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.use_bottleneck = config.use_bottleneck self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps) if not self.use_bottleneck: self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, residual_tensor): layer_outputs = self.dense(hidden_states) if not self.use_bottleneck: layer_outputs = self.dropout(layer_outputs) layer_outputs = self.LayerNorm(layer_outputs + residual_tensor) return layer_outputs class MobileBertAttention(nn.Module): def __init__(self, config): super().__init__() self.self = MobileBertSelfAttention(config) self.output = MobileBertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, query_tensor, key_tensor, value_tensor, layer_input, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, ): self_outputs = self.self( query_tensor, key_tensor, value_tensor, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions, ) # Run a linear projection of `hidden_size` then add a residual # with `layer_input`. 
attention_output = self.output(self_outputs[0], layer_input) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class MobileBertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class OutputBottleneck(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.true_hidden_size, config.hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, residual_tensor): layer_outputs = self.dense(hidden_states) layer_outputs = self.dropout(layer_outputs) layer_outputs = self.LayerNorm(layer_outputs + residual_tensor) return layer_outputs class MobileBertOutput(nn.Module): def __init__(self, config): super().__init__() self.use_bottleneck = config.use_bottleneck self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size) if not self.use_bottleneck: self.dropout = nn.Dropout(config.hidden_dropout_prob) else: self.bottleneck = OutputBottleneck(config) def forward(self, intermediate_states, residual_tensor_1, residual_tensor_2): layer_output = self.dense(intermediate_states) if not self.use_bottleneck: layer_output = self.dropout(layer_output) layer_output = self.LayerNorm(layer_output + residual_tensor_1) else: layer_output = self.LayerNorm(layer_output + residual_tensor_1) layer_output = self.bottleneck(layer_output, residual_tensor_2) return layer_output class BottleneckLayer(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size) self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps) def forward(self, hidden_states): layer_input = self.dense(hidden_states) layer_input = self.LayerNorm(layer_input) return layer_input class Bottleneck(nn.Module): def __init__(self, config): super().__init__() self.key_query_shared_bottleneck = config.key_query_shared_bottleneck self.use_bottleneck_attention = config.use_bottleneck_attention self.input = BottleneckLayer(config) if self.key_query_shared_bottleneck: self.attention = BottleneckLayer(config) def forward(self, hidden_states): # This method can return three different tuples of values. These different values make use of bottlenecks, # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory # usage. These linear layer have weights that are learned during training. # # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the # key, query, value, and "layer input" to be used by the attention layer. # This bottleneck is used to project the hidden. This last layer input will be used as a residual tensor # in the attention self output, after the attention scores have been computed. 
# # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return # four values, three of which have been passed through a bottleneck: the query and key, passed through the same # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck. # # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck, # and the residual layer will be this value passed through a bottleneck. bottlenecked_hidden_states = self.input(hidden_states) if self.use_bottleneck_attention: return (bottlenecked_hidden_states,) * 4 elif self.key_query_shared_bottleneck: shared_attention_input = self.attention(hidden_states) return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states) else: return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states) class FFNOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, residual_tensor): layer_outputs = self.dense(hidden_states) layer_outputs = self.LayerNorm(layer_outputs + residual_tensor) return layer_outputs class FFNLayer(nn.Module): def __init__(self, config): super().__init__() self.intermediate = MobileBertIntermediate(config) self.output = FFNOutput(config) def forward(self, hidden_states): intermediate_output = self.intermediate(hidden_states) layer_outputs = self.output(intermediate_output, hidden_states) return layer_outputs class MobileBertLayer(nn.Module): def __init__(self, config): super().__init__() self.use_bottleneck = config.use_bottleneck self.num_feedforward_networks = config.num_feedforward_networks self.attention = MobileBertAttention(config) self.intermediate = MobileBertIntermediate(config) self.output = MobileBertOutput(config) if self.use_bottleneck: self.bottleneck = Bottleneck(config) if config.num_feedforward_networks > 1: self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)]) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, ): if self.use_bottleneck: query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states) else: query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4 self_attention_outputs = self.attention( query_tensor, key_tensor, value_tensor, layer_input, attention_mask, head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] s = (attention_output,) outputs = self_attention_outputs[1:] # add self attentions if we output attention weights if self.num_feedforward_networks != 1: for i, ffn_module in enumerate(self.ffn): attention_output = ffn_module(attention_output) s += (attention_output,) intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output, hidden_states) outputs = ( (layer_output,) + outputs + ( torch.tensor(1000), query_tensor, key_tensor, value_tensor, layer_input, attention_output, intermediate_output, ) + s ) return outputs class MobileBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, 
attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=False, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class MobileBertPooler(nn.Module): def __init__(self, config): super().__init__() self.do_activate = config.classifier_activation if self.do_activate: self.dense = nn.Linear(config.hidden_size, config.hidden_size) def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] if not self.do_activate: return first_token_tensor else: pooled_output = self.dense(first_token_tensor) pooled_output = torch.tanh(pooled_output) return pooled_output class MobileBertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class MobileBertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = MobileBertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False) self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0)) hidden_states += self.bias return hidden_states class MobileBertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = MobileBertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class MobileBertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = MobileBertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class MobileBertPreTrainedModel(PreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MobileBertConfig pretrained_model_archive_map = MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST load_tf_weights = load_tf_weights_in_mobilebert base_model_prefix = "mobilebert" authorized_missing_keys = [r"position_ids"] def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, (nn.LayerNorm, NoNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @dataclass class MobileBertForPreTrainingOutput(ModelOutput): """ Output type of :class:`~transformers.MobileBertForPreTrainingModel`. Args: loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None seq_relationship_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None MOBILEBERT_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.MobileBertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ MOBILEBERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BertTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ @add_start_docstrings( "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.", MOBILEBERT_START_DOCSTRING, ) class MobileBertModel(MobileBertPreTrainedModel): """ https://arxiv.org/pdf/2004.02984.pdf """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = MobileBertEmbeddings(config) self.encoder = MobileBertEncoder(config) self.pooler = MobileBertPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/mobilebert-uncased", output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_hidden_states=None, output_attentions=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( attention_mask, input_shape, self.device ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastabe to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """MobileBert Model with two heads on top as done during the pre-training: a `masked language modeling` head and a `next sentence prediction (classification)` head. """, MOBILEBERT_START_DOCSTRING, ) class MobileBertForPreTraining(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.mobilebert = MobileBertModel(config) self.cls = MobileBertPreTrainingHeads(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def tie_weights(self): """ Tie the weights between the input embeddings and the output embeddings. If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead. 
""" output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() resized_dense = nn.Linear( input_embeddings.num_embeddings, self.config.hidden_size - self.config.embedding_size, bias=False ) kept_data = self.cls.predictions.dense.weight.data[ ..., : min(self.cls.predictions.dense.weight.data.shape[1], resized_dense.weight.data.shape[1]) ] resized_dense.weight.data[..., : self.cls.predictions.dense.weight.data.shape[1]] = kept_data self.cls.predictions.dense = resized_dense self.cls.predictions.dense.to(self.device) if output_embeddings is not None and self.config.tie_word_embeddings: self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=MobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, next_sentence_label=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. 
Returns: Examples:: >>> from transformers import MobileBertTokenizer, MobileBertForPreTraining >>> import torch >>> tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased") >>> model = MobileBertForPreTraining.from_pretrained("google/mobilebert-uncased", return_dict=True) >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 >>> outputs = model(input_ids) >>> prediction_logits = outputs.prediction_logits >>> seq_relationship_logits = outputs.seq_relationship_logits """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return MobileBertForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings("""MobileBert Model with a `language modeling` head on top. """, MOBILEBERT_START_DOCSTRING) class MobileBertForMaskedLM(MobileBertPreTrainedModel): authorized_unexpected_keys = [r"pooler"] def __init__(self, config): super().__init__(config) self.mobilebert = MobileBertModel(config, add_pooling_layer=False) self.cls = MobileBertOnlyMLMHead(config) self.config = config self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def tie_weights(self): """ Tie the weights between the input embeddings and the output embeddings. If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead.
""" output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() resized_dense = nn.Linear( input_embeddings.num_embeddings, self.config.hidden_size - self.config.embedding_size, bias=False ) kept_data = self.cls.predictions.dense.weight.data[ ..., : min(self.cls.predictions.dense.weight.data.shape[1], resized_dense.weight.data.shape[1]) ] resized_dense.weight.data[..., : self.cls.predictions.dense.weight.data.shape[1]] = kept_data self.cls.predictions.dense = resized_dense self.cls.predictions.dense.to(self.device) if output_embeddings is not None and self.config.tie_word_embeddings: self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/mobilebert-uncased", output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`): Used to hide legacy arguments that have been deprecated. """ if "masked_lm_labels" in kwargs: warnings.warn( "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.", FutureWarning, ) labels = kwargs.pop("masked_lm_labels") return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class MobileBertOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score @add_start_docstrings( """MobileBert Model with a `next sentence prediction (classification)` head on top. 
""", MOBILEBERT_START_DOCSTRING, ) class MobileBertForNextSentencePrediction(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.mobilebert = MobileBertModel(config) self.cls = MobileBertOnlyNSPHead(config) self.init_weights() @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, next_sentence_label=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" next_sentence_label (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring) Indices should be in ``[0, 1]``. - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. Returns: Examples:: >>> from transformers import MobileBertTokenizer, MobileBertForNextSentencePrediction >>> import torch >>> tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') >>> model = MobileBertForNextSentencePrediction.from_pretrained('google/mobilebert-uncased', return_dict=True) >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." >>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt') >>> outputs = model(**encoding, next_sentence_label=torch.LongTensor([1])) >>> loss = outputs.loss >>> logits = outputs.logits """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] seq_relationship_score = self.cls(pooled_output) next_sentence_loss = None if next_sentence_label is not None: loss_fct = CrossEntropyLoss() next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) if not return_dict: output = (seq_relationship_score,) + outputs[2:] return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output return NextSentencePredictorOutput( loss=next_sentence_loss, logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", MOBILEBERT_START_DOCSTRING, ) class MobileBertForSequenceClassification(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mobilebert = MobileBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.num_labels) self.init_weights() @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/mobilebert-uncased", output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", MOBILEBERT_START_DOCSTRING, ) class MobileBertForQuestionAnswering(MobileBertPreTrainedModel): authorized_unexpected_keys = [r"pooler"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mobilebert = MobileBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/mobilebert-uncased", output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", MOBILEBERT_START_DOCSTRING, ) class MobileBertForMultipleChoice(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.mobilebert = MobileBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.init_weights() @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/mobilebert-uncased", output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See :obj:`input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """MoibleBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", MOBILEBERT_START_DOCSTRING, ) class MobileBertForTokenClassification(MobileBertPreTrainedModel): authorized_unexpected_keys = [r"pooler"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mobilebert = MobileBertModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/mobilebert-uncased", output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
68,232
41.067201
168
py
SLT-FAI
SLT-FAI-main/transformers/trainer_utils.py
# coding=utf-8 # Copyright 2020-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities for the Trainer and TFTrainer class. Should be independent from PyTorch and TensorFlow. """ import random from typing import Any, Dict, NamedTuple, Optional, Tuple, Union import numpy as np from .file_utils import is_tf_available, is_torch_available from .tokenization_utils_base import ExplicitEnum def set_seed(seed: int): """ Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if installed). Args: seed (:obj:`int`): The seed to set. """ random.seed(seed) np.random.seed(seed) if is_torch_available(): import torch torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) # ^^ safe to call this function even if cuda is not available if is_tf_available(): import tensorflow as tf tf.random.set_seed(seed) class EvalPrediction(NamedTuple): """ Evaluation output (always contains labels), to be used to compute metrics. Parameters: predictions (:obj:`np.ndarray`): Predictions of the model. label_ids (:obj:`np.ndarray`): Targets to be matched. """ predictions: Union[np.ndarray, Tuple[np.ndarray]] label_ids: np.ndarray class PredictionOutput(NamedTuple): predictions: Union[np.ndarray, Tuple[np.ndarray]] label_ids: Optional[np.ndarray] metrics: Optional[Dict[str, float]] class TrainOutput(NamedTuple): global_step: int training_loss: float PREFIX_CHECKPOINT_DIR = "checkpoint" class EvaluationStrategy(ExplicitEnum): NO = "no" STEPS = "steps" EPOCH = "epoch" class BestRun(NamedTuple): """ The best run found by an hyperparameter search (see :class:`~transformers.Trainer.hyperparameter_search`). Parameters: run_id (:obj:`str`): The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending with run-{run_id}). objective (:obj:`float`): The objective that was obtained for this run. hyperparameters (:obj:`Dict[str, Any]`): The hyperparameters picked to get this run. """ run_id: str objective: float hyperparameters: Dict[str, Any] def default_compute_objective(metrics: Dict[str, float]) -> float: """ The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no metrics are provided to the :class:`~transformers.Trainer`, the sum of all metrics otherwise. Args: metrics (:obj:`Dict[str, float]`): The metrics returned by the evaluate method. 
Return: :obj:`float`: The objective to minimize or maximize """ loss = metrics.pop("eval_loss", None) _ = metrics.pop("epoch", None) _ = metrics.pop("total_flos", None) return loss if len(metrics) == 0 else sum(metrics.values()) def default_hp_space_optuna(trial) -> Dict[str, float]: from .integrations import is_optuna_available assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`" return { "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True), "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5), "seed": trial.suggest_int("seed", 1, 40), "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]), } def default_hp_space_ray(trial) -> Dict[str, float]: from .integrations import is_ray_available assert is_ray_available(), "This function needs ray installed: `pip install ray[tune]`" from ray import tune return { "learning_rate": tune.loguniform(1e-6, 1e-4), "num_train_epochs": tune.choice(list(range(1, 6))), "seed": tune.uniform(1, 40), "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]), } class HPSearchBackend(ExplicitEnum): OPTUNA = "optuna" RAY = "ray" default_hp_space = { HPSearchBackend.OPTUNA: default_hp_space_optuna, HPSearchBackend.RAY: default_hp_space_ray, }
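# ---------------------------------------------------------------------------
# Editor's illustration (not part of the upstream module): a small, hedged
# example of `default_compute_objective`. It falls back to the evaluation loss
# when only `eval_loss`/`epoch`/`total_flos` are present and otherwise sums the
# remaining metric values. Note the function pops keys from the dict it is
# given, hence the defensive copies below.
if __name__ == "__main__":
    loss_only = {"eval_loss": 0.35, "epoch": 2.0}
    print(default_compute_objective(dict(loss_only)))  # -> 0.35 (the loss itself)

    with_metrics = {"eval_loss": 0.35, "eval_accuracy": 0.91, "eval_f1": 0.88}
    print(default_compute_objective(dict(with_metrics)))  # -> 1.79 (0.91 + 0.88)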
4,719
29.649351
116
py
SLT-FAI
SLT-FAI-main/transformers/tokenization_bart_fast.py
# coding=utf-8 # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional from .tokenization_bart import BartTokenizer from .tokenization_roberta_fast import RobertaTokenizerFast from .tokenization_utils_base import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) # vocab and merges same as roberta vocab_url = "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json" merges_url = "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt" tokenizer_url = "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-tokenizer.json" _all_bart_models = [ "facebook/bart-base", "facebook/bart-large", "facebook/bart-large-mnli", "facebook/bart-large-cnn", "facebook/bart-large-xsum", "yjernite/bart_eli5", # This is not exhaustive: see https://huggingface.co/models?filter=bart ] class BartTokenizerFast(RobertaTokenizerFast): # merges and vocab same as Roberta max_model_input_sizes = {m: 1024 for m in _all_bart_models} pretrained_vocab_files_map = { "vocab_file": {m: vocab_url for m in _all_bart_models}, "merges_file": {m: merges_url for m in _all_bart_models}, "tokenizer_file": {m: tokenizer_url for m in _all_bart_models}, } slow_tokenizer_class = BartTokenizer def prepare_seq2seq_batch( self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = "None", truncation=True, **kwargs, ) -> BatchEncoding: r""" Prepare a batch that can be passed directly to an instance of :class:`~transformers.BartModel`. Args: src_texts: (:obj:`List[str]`): List of documents to summarize or source language texts. tgt_texts: (:obj:`List[str]`, `optional`): List of summaries or target language texts. max_length (:obj:`int`, `optional`): Controls the maximum length for encoder inputs (documents to summarize or source language texts). If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. max_target_length (:obj:`int`, `optional`): Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or set to :obj:`None`, this will use the max_length value. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`, defaults to "pt"): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`True`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). **kwargs: Additional keyword arguments passed along to :obj:`self.__call__`. Returns: :class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields: - **input_ids** -- List of token ids to be fed to the encoder. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model. - **decoder_input_ids** -- List of token ids to be fed to the decoder. - **decoder_attention_mask** -- List of indices specifying which tokens should be attended to by the decoder. This does not include causal mask, which is built by the model. The full set of keys ``[input_ids, attention_mask, decoder_input_ids, decoder_attention_mask]``, will only be returned if tgt_texts is passed. Otherwise, input_ids, attention_mask will be the only keys. """ if max_length is None: max_length = self.model_max_length model_inputs: BatchEncoding = self( src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: max_target_length = max_length labels = self( tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, )["input_ids"] model_inputs["labels"] = labels return model_inputs
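# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the upstream file): a hedged example of
# `prepare_seq2seq_batch` for summarisation, assuming the `facebook/bart-base`
# checkpoint can be downloaded. Note that in this version the target ids are
# returned under the "labels" key (see the assignment above), even though the
# docstring still mentions `decoder_input_ids`/`decoder_attention_mask`.
if __name__ == "__main__":
    tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
    batch = tokenizer.prepare_seq2seq_batch(
        src_texts=["The quick brown fox jumps over the lazy dog."],
        tgt_texts=["A fox jumps over a dog."],
        max_length=32,
        max_target_length=16,
        return_tensors="pt",
    )
    print(sorted(batch.keys()))      # ['attention_mask', 'input_ids', 'labels']
    print(batch["input_ids"].shape)  # (1, source_length) after truncation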
8,017
51.75
155
py
SLT-FAI
SLT-FAI-main/transformers/tokenization_bert_generation.py
# coding=utf-8 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for model BertGeneration.""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from .tokenization_utils import PreTrainedTokenizer from .utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} tokenizer_url = ( "https://s3.amazonaws.com/models.huggingface.co/bert/google/bert_for_seq_generation_L-24_bbc_encoder/spiece.model" ) class BertGenerationTokenizer(PreTrainedTokenizer): """ Construct a BertGeneration tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__. This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that contains the vocabulary necessary to instantiate a tokenizer. eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`): The end of sequence token. bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`): The begin of sequence token. unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`): The token used for padding, for example when batching sequences of different lengths. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = {"vocab_file": {"bert_for_seq_generation": tokenizer_url}} max_model_input_sizes = {"bert_for_seq_generation": 512} prefix_tokens: List[int] = [] def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sep_token="<::::>", **kwargs ): # Add extra_ids to the special token list super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, **kwargs, ) self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(vocab_file) @property def vocab_size(self): return self.sp_model.get_piece_size() def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file) def _tokenize(self, text, sample=False): """Take as input a string and return a list of strings (tokens) for words/sub-words""" if not sample: pieces = self.sp_model.EncodeAsPieces(text) else: pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1) return pieces def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ out_string = self.sp_model.decode_pieces(tokens) return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
5,191
35.56338
119
py
SLT-FAI
SLT-FAI-main/transformers/configuration_roberta.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ RoBERTa configuration """ from .configuration_bert import BertConfig from .utils import logging logger = logging.get_logger(__name__) ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = { "roberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-config.json", "roberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-config.json", "roberta-large-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-config.json", "distilroberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-config.json", "roberta-base-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-openai-detector-config.json", "roberta-large-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-openai-detector-config.json", } class RobertaConfig(BertConfig): r""" This is the configuration class to store the configuration of a :class:`~transformers.RobertaModel` or a :class:`~transformers.TFRobertaModel`. It is used to instantiate a RoBERTa model according to the specified arguments, defining the model architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. The :class:`~transformers.RobertaConfig` class directly inherits :class:`~transformers.BertConfig`. It reuses the same defaults. Please check the parent class for more information. Examples:: >>> from transformers import RobertaConfig, RobertaModel >>> # Initializing a RoBERTa configuration >>> configuration = RobertaConfig() >>> # Initializing a model from the configuration >>> model = RobertaModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config """ model_type = "roberta" def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): """Constructs RobertaConfig.""" super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
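# ---------------------------------------------------------------------------
# Editor's illustration (not part of the upstream file): RobertaConfig keeps
# every BertConfig default and only overrides the special-token ids, as the
# class docstring states. The expected default values are shown in comments.
if __name__ == "__main__":
    config = RobertaConfig()
    print(config.model_type)                                              # roberta
    print(config.pad_token_id, config.bos_token_id, config.eos_token_id)  # 1 0 2
    print(config.hidden_size, config.num_hidden_layers)                   # 768 12 (inherited from BertConfig)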
2,966
43.954545
133
py
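Since RobertaConfig only overrides the special-token ids on top of BertConfig, every other field falls back to the BERT defaults. A short sketch to illustrate, assuming this repo's transformers package is importable:

from transformers import RobertaConfig

config = RobertaConfig()
assert config.model_type == "roberta"
# The only RoBERTa-specific defaults are the special-token ids.
assert (config.pad_token_id, config.bos_token_id, config.eos_token_id) == (1, 0, 2)
# Everything else is inherited from BertConfig (BERT-base defaults shown here).
print(config.hidden_size, config.num_hidden_layers)  # 768 12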
SLT-FAI
SLT-FAI-main/transformers/modeling_retribert.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ RetriBERT model """ import math import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from .configuration_retribert import RetriBertConfig from .file_utils import add_start_docstrings from .modeling_bert import BertModel from .modeling_utils import PreTrainedModel from .utils import logging logger = logging.get_logger(__name__) RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "yjernite/retribert-base-uncased", # See all RetriBert models at https://huggingface.co/models?filter=retribert ] # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL # class RetriBertPreTrainedModel(PreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = RetriBertConfig load_tf_weights = None base_model_prefix = "retribert" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() RETRIBERT_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior. Parameters: config (:class:`~transformers.RetriBertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ @add_start_docstrings( """Bert Based model to embed queries or documents for document retrieval. 
""", RETRIBERT_START_DOCSTRING, ) class RetriBertModel(RetriBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.projection_dim = config.projection_dim self.bert_query = BertModel(config) self.bert_doc = None if config.share_encoders else BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False) self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False) self.ce_loss = nn.CrossEntropyLoss(reduction="mean") self.init_weights() def embed_sentences_checkpointed( self, input_ids, attention_mask, sent_encoder, checkpoint_batch_size=-1, ): # reproduces BERT forward pass with checkpointing if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size: return sent_encoder(input_ids, attention_mask=attention_mask)[1] else: # prepare implicit variables device = input_ids.device input_shape = input_ids.size() token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) head_mask = [None] * sent_encoder.config.num_hidden_layers extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask( attention_mask, input_shape, device ) # define function for cehckpointing def partial_encode(*inputs): encoder_outputs = sent_encoder.encoder( inputs[0], attention_mask=inputs[1], head_mask=head_mask, ) sequence_output = encoder_outputs[0] pooled_output = sent_encoder.pooler(sequence_output) return pooled_output # run embedding layer on everything at once embedding_output = sent_encoder.embeddings( input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None ) # run encoding and pooling on one mini-batch at a time pooled_output_list = [] for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)): b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size] b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size] pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask) pooled_output_list.append(pooled_output) return torch.cat(pooled_output_list, dim=0) def embed_questions( self, input_ids, attention_mask=None, checkpoint_batch_size=-1, ): q_reps = self.embed_sentences_checkpointed( input_ids, attention_mask, self.bert_query, checkpoint_batch_size, ) return self.project_query(q_reps) def embed_answers( self, input_ids, attention_mask=None, checkpoint_batch_size=-1, ): a_reps = self.embed_sentences_checkpointed( input_ids, attention_mask, self.bert_query if self.bert_doc is None else self.bert_doc, checkpoint_batch_size, ) return self.project_doc(a_reps) def forward( self, input_ids_query, attention_mask_query, input_ids_doc, attention_mask_doc, checkpoint_batch_size=-1 ): r""" Args: input_ids_query (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary for the queries in a batch. Indices can be obtained using :class:`~transformers.RetriBertTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask_query (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
`What are attention masks? <../glossary.html#attention-mask>`__ input_ids_doc (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary for the documents in a batch. attention_mask_doc (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on documents padding token indices. checkpoint_batch_size (:obj:`int`, `optional`, defaults to :obj:`-1`): If greater than 0, uses gradient checkpointing to only compute sequence representation on :obj:`checkpoint_batch_size` examples at a time on the GPU. All query representations are still compared to all document representations in the batch. Return: :obj:`torch.FloatTensor`: The bidirectional cross-entropy loss obtained while trying to match each query to its corresponding document and each document to its corresponding query in the batch """ device = input_ids_query.device q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size) a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size) compare_scores = torch.mm(q_reps, a_reps.t()) loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device)) loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device)) loss = (loss_qa + loss_aq) / 2 return loss
9,240
42.589623
120
py
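A minimal sketch of the bidirectional retrieval loss computed by RetriBertModel.forward, using the RetriBertConfig defined later in this dump and random token ids in place of a real tokenizer (the batch size and sequence length below are arbitrary):

import torch
from transformers import RetriBertConfig, RetriBertModel

config = RetriBertConfig(share_encoders=True)
model = RetriBertModel(config)

batch, seq_len = 4, 16
queries = torch.randint(0, config.vocab_size, (batch, seq_len))
docs = torch.randint(0, config.vocab_size, (batch, seq_len))
mask = torch.ones(batch, seq_len, dtype=torch.long)

loss = model(queries, mask, docs, mask)        # mean of the query->doc and doc->query CE losses
q_reps = model.embed_questions(queries, mask)  # (batch, projection_dim) projected query embeddings
print(loss.item(), q_reps.shape)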
SLT-FAI
SLT-FAI-main/transformers/modeling_pegasus.py
# coding=utf-8 # Copyright 2020 Google and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Pegasus model, ported from https://github.com/google-research/pegasus""" from .configuration_pegasus import PegasusConfig from .file_utils import add_start_docstrings from .modeling_bart import BART_START_DOCSTRING, BartForConditionalGeneration @add_start_docstrings("The Pegasus Model for summarization ", BART_START_DOCSTRING) class PegasusForConditionalGeneration(BartForConditionalGeneration): r""" Pytorch version of google's pegasus model for summarization. Available models are listed `here <https://huggingface.co/models?search=pegasus>`__. This class overrides :class:`~transformers.BartForConditionalGeneration`. Please check the superclass for the appropriate documentation alongside usage examples. Examples:: >>> from transformers import PegasusTokenizer, PegasusForConditionalGeneration >>> from typing import List >>> PGE_ARTICLE = "PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow." >>> mname = "google/pegasus-xsum" >>> model = PegasusForConditionalGeneration.from_pretrained(mname) >>> tok = PegasusTokenizer.from_pretrained(mname) >>> batch = tok.prepare_seq2seq_batch(src_texts=[PGE_ARTICLE]) # don't need tgt_text for inference >>> gen = model.generate(**batch) # for forward pass: model(**batch) >>> summary: List[str] = tok.batch_decode(gen, skip_special_tokens=True) >>> assert summary == "California's largest electricity provider has turned off power to tens of thousands of customers." """ # All the code is in src/transformers/modeling_bart.py config_class = PegasusConfig authorized_missing_keys = [ r"final_logits_bias", r"encoder\.version", r"decoder\.version", r"model.encoder.embed_positions", "model.decoder.embed_positions", ]
2,696
47.160714
309
py
SLT-FAI
SLT-FAI-main/transformers/configuration_openai.py
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OpenAI GPT configuration """ from .configuration_utils import PretrainedConfig from .utils import logging logger = logging.get_logger(__name__) OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json" } class OpenAIGPTConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a :class:`~transformers.OpenAIGPTModel` or a :class:`~transformers.TFOpenAIGPTModel`. It is used to instantiate a GPT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the `GPT <https://huggingface.co/openai-gpt>`__ architecture from OpenAI. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 40478): Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.OpenAIGPTModel` or :class:`~transformers.TFOpenAIGPTModel`. n_positions (:obj:`int`, `optional`, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). n_ctx (:obj:`int`, `optional`, defaults to 512): Dimensionality of the causal mask (usually same as n_positions). n_embd (:obj:`int`, `optional`, defaults to 768): Dimensionality of the embeddings and hidden states. n_layer (:obj:`int`, `optional`, defaults to 12): Number of hidden layers in the Transformer encoder. n_head (:obj:`int`, `optional`, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. afn (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"swish"` and :obj:`"gelu_new"` are supported. resid_pdrop (:obj:`float`, `optional`, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (:obj:`int`, `optional`, defaults to 0.1): The dropout ratio for the embeddings. attn_pdrop (:obj:`float`, `optional`, defaults to 0.1): The dropout ratio for the attention. layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-5): The epsilon to use in the layer normalization layers initializer_range (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
predict_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not special tokens should be predicted when the model has a language modeling head. summary_type (:obj:`str`, `optional`, defaults to :obj:`"cls_index"`): Argument used when doing sequence summary, used in the models :class:`~transformers.OpenAIGPTDoubleHeadsModel` and :class:`~transformers.OpenAIGPTDoubleHeadsModel`. Has to be one of the following options: - :obj:`"last"`: Take the last token hidden state (like XLNet). - :obj:`"first"`: Take the first token hidden state (like BERT). - :obj:`"mean"`: Take the mean of all tokens hidden states. - :obj:`"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2). - :obj:`"attn"`: Not implemented now, use multi-head attention. summary_use_proj (:obj:`bool`, `optional`, defaults to :obj:`True`): Argument used when doing sequence summary, used in the models :class:`~transformers.OpenAIGPTDoubleHeadsModel` and :class:`~transformers.OpenAIGPTDoubleHeadsModel`. Whether or not to add a projection after the vector extraction. summary_activation (:obj:`str`, `optional`): Argument used when doing sequence summary, used in the models :class:`~transformers.OpenAIGPTDoubleHeadsModel` and :class:`~transformers.OpenAIGPTDoubleHeadsModel`. Pass :obj:`"tanh"` for a tanh activation to the output, any other value will result in no activation. summary_proj_to_labels (:obj:`bool`, `optional`, defaults to :obj:`True`): Argument used when doing sequence summary, used in the models :class:`~transformers.OpenAIGPTDoubleHeadsModel` and :class:`~transformers.OpenAIGPTDoubleHeadsModel`. Whether the projection outputs should have :obj:`config.num_labels` or :obj:`config.hidden_size` classes. summary_first_dropout (:obj:`float`, `optional`, defaults to 0.1): Argument used when doing sequence summary, used in the models :class:`~transformers.OpenAIGPTDoubleHeadsModel` and :class:`~transformers.OpenAIGPTDoubleHeadsModel`. The dropout ratio to be used after the projection and activation. 
Examples:: >>> from transformers import OpenAIGPTConfig, OpenAIGPTModel >>> # Initializing a GPT configuration >>> configuration = OpenAIGPTConfig() >>> # Initializing a model from the configuration >>> model = OpenAIGPTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config """ model_type = "openai-gpt" def __init__( self, vocab_size=40478, n_positions=512, n_ctx=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, predict_special_tokens=True, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs ): super().__init__(**kwargs) self.vocab_size = vocab_size self.n_ctx = n_ctx self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.afn = afn self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.predict_special_tokens = predict_special_tokens self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_first_dropout = summary_first_dropout self.summary_proj_to_labels = summary_proj_to_labels @property def max_position_embeddings(self): return self.n_positions @property def hidden_size(self): return self.n_embd @property def num_attention_heads(self): return self.n_head @property def num_hidden_layers(self): return self.n_layer
8,241
45.564972
117
py
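The read-only properties at the end of OpenAIGPTConfig expose the GPT-style attribute names (n_embd, n_layer, ...) under the generic names used elsewhere in the library. A quick sketch, assuming the class is importable as in the docstring example:

from transformers import OpenAIGPTConfig

config = OpenAIGPTConfig(n_embd=512, n_layer=6, n_head=8)
assert config.hidden_size == config.n_embd == 512
assert config.num_hidden_layers == config.n_layer == 6
assert config.num_attention_heads == config.n_head == 8
assert config.max_position_embeddings == config.n_positions == 512  # n_positions keeps its default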
SLT-FAI
SLT-FAI-main/transformers/configuration_marian.py
# coding=utf-8 # Copyright 2020 The OPUS-NMT Team, Marian team, and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Marian model configuration """ from .configuration_bart import BartConfig PRETRAINED_CONFIG_ARCHIVE_MAP = { "Helsinki-NLP/opus-mt-en-de": "https://s3.amazonaws.com/models.huggingface.co/bert/Helsinki-NLP/opus-mt-en-de/config.json", } class MarianConfig(BartConfig): """ This is the configuration class to store the configuration of a :class:`~transformers.MarianMTModel`. It is used to instantiate a Marian model according to the specified arguments, defining the model architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 58101): Vocabulary size of the Marian model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.MarianMTModel`. d_model (:obj:`int`, `optional`, defaults to 512): Dimensionality of the layers and the pooler layer. encoder_layers (:obj:`int`, `optional`, defaults to 6): Number of encoder layers. decoder_layers (:obj:`int`, `optional`, defaults to 6): Number of decoder layers. encoder_attention_heads (:obj:`int`, `optional`, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (:obj:`int`, `optional`, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (:obj:`int`, `optional`, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in decoder. encoder_ffn_dim (:obj:`int`, `optional`, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in encoder. activation_function (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"swish"` and :obj:`"gelu_new"` are supported. dropout (:obj:`float`, `optional`, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (:obj:`float`, `optional`, defaults to 0.0): The dropout ratio for classifier. max_position_embeddings (:obj:`int`, `optional`, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
add_bias_logits (:obj:`bool`, `optional`, defaults to :obj:`False`): This should be completed, specific to marian. normalize_before (:obj:`bool`, `optional`, defaults to :obj:`False`): Call layernorm before attention ops. normalize_embedding (:obj:`bool`, `optional`, defaults to :obj:`False`): Call layernorm after embeddings. static_position_embeddings (:obj:`bool`, `optional`, defaults to :obj:`True`): Don't learn positional embeddings, use sinusoidal. add_final_layer_norm (:obj:`bool`, `optional`, defaults to :obj:`False`): Why not add another layernorm? scale_embedding (:obj:`bool`, `optional`, defaults to :obj:`False`): Scale embeddings by dividing by sqrt(d_model). eos_token_id (:obj:`int`, `optional`, defaults to 2): End of stream token id. pad_token_id (:obj:`int`, `optional`, defaults to 1): Padding token id. bos_token_id (:obj:`int`, `optional`, defaults to 0): Beginning of stream token id. encoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0): The LayerDrop probability for the encoder. See the `LayerDrop paper <see https://arxiv.org/abs/1909.11556>`__ for more details. decoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0): The LayerDrop probability for the decoder. See the `LayerDrop paper <see https://arxiv.org/abs/1909.11556>`__ for more details. extra_pos_embeddings: (:obj:`int`, `optional`, defaults to 2): How many extra learned positional embeddings to use. is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether this is an encoder/decoder model. force_bos_token_to_be_generated (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force BOS token to be generated at step 1 (after ``decoder_start_token_id``). """ model_type = "marian"
5,957
57.990099
127
py
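MarianConfig adds no new fields of its own, so instantiation is just a matter of overriding the BART fields documented above. A small sketch with a handful of those arguments (the values are illustrative, not the settings of any released opus-mt checkpoint):

from transformers import MarianConfig

config = MarianConfig(
    vocab_size=58101,
    d_model=512,
    encoder_layers=6,
    decoder_layers=6,
    static_position_embeddings=True,  # sinusoidal rather than learned, as documented above
)
assert config.model_type == "marian"
assert config.is_encoder_decoder  # defaults to True, as documented above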
SLT-FAI
SLT-FAI-main/transformers/convert_bert_original_tf_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert BERT checkpoint.""" import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path): # Initialise PyTorch model config = BertConfig.from_json_file(bert_config_file) print("Building PyTorch model from configuration: {}".format(str(config))) model = BertForPreTraining(config) # Load weights from tf checkpoint load_tf_weights_in_bert(model, config, tf_checkpoint_path) # Save pytorch-model print("Save PyTorch model to {}".format(pytorch_dump_path)) torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--bert_config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
2,152
33.725806
117
py
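The conversion entry point can also be called directly from Python rather than through argparse. A hedged sketch; all three paths below are placeholders for a real TF 1.x BERT checkpoint prefix, its bert_config.json, and the desired output file:

from transformers.convert_bert_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/bert_model.ckpt",   # placeholder checkpoint prefix
    bert_config_file="/path/to/bert_config.json",    # placeholder config file
    pytorch_dump_path="/path/to/pytorch_model.bin",  # placeholder output path
)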
SLT-FAI
SLT-FAI-main/transformers/convert_lxmert_original_tf_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert LXMERT checkpoint.""" import argparse import logging import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert logging.basicConfig(level=logging.INFO) def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path): # Initialise PyTorch model config = LxmertConfig.from_json_file(config_file) print("Building PyTorch model from configuration: {}".format(str(config))) model = LxmertForPreTraining(config) # Load weights from tf checkpoint load_tf_weights_in_lxmert(model, config, tf_checkpoint_path) # Save pytorch-model print("Save PyTorch model to {}".format(pytorch_dump_path)) torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained model. \n" "This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
2,128
33.33871
117
py
SLT-FAI
SLT-FAI-main/transformers/configuration_retribert.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ RetriBERT model configuration """ from .configuration_utils import PretrainedConfig from .utils import logging logger = logging.get_logger(__name__) # TODO: upload to AWS RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "retribert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-config.json", } class RetriBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a :class:`~transformers.RetriBertModel`. It is used to instantiate a RetriBertModel model according to the specified arguments, defining the model architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 30522): Vocabulary size of the RetriBERT model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.RetriBertModel` hidden_size (:obj:`int`, `optional`, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (:obj:`int`, `optional`, defaults to 8): Number of hidden layers in the Transformer encoder. num_attention_heads (:obj:`int`, `optional`, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (:obj:`int`, `optional`, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"swish"` and :obj:`"gelu_new"` are supported. hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (:obj:`int`, `optional`, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (:obj:`int`, `optional`, defaults to 2): The vocabulary size of the `token_type_ids` passed into :class:`~transformers.BertModel`. initializer_range (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12): The epsilon used by the layer normalization layers. 
share_encoders (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to use the same Bert-type encoder for the queries and document projection_dim (:obj:`int`, `optional`, defaults to 128): Final dimension of the query and document representation after projection """ model_type = "retribert" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.share_encoders = share_encoders self.projection_dim = projection_dim
5,380
47.918182
120
py
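The share_encoders flag documented above is what RetriBertModel (earlier in this dump) checks when deciding whether to build a second document encoder. A short sketch, assuming both classes are importable from this repo's transformers package:

from transformers import RetriBertConfig, RetriBertModel

shared = RetriBertModel(RetriBertConfig(share_encoders=True))
split = RetriBertModel(RetriBertConfig(share_encoders=False))

assert shared.bert_doc is None                 # queries and documents reuse bert_query
assert split.bert_doc is not None              # a separate BertModel is built for documents
assert shared.project_doc.out_features == 128  # projection_dim default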