repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/tagging_models/losses/focal_loss.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
'''Multi-class Focal loss implementation'''
    def __init__(self, gamma=2, weight=None, ignore_index=-100):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.weight = weight
        self.ignore_index = ignore_index
def forward(self, input, target):
"""
input: [N, C]
target: [N, ]
"""
logpt = F.log_softmax(input, dim=1)
        # pt is the probability before the log, so recover it with exp
        pt = torch.exp(logpt)
        logpt = (1 - pt) ** self.gamma * logpt
        loss = F.nll_loss(logpt, target, self.weight, ignore_index=self.ignore_index)
return loss
| 707 | 28.5 | 84 |
py
|
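The focal loss above rescales the per-example log-likelihood by the modulating factor `(1 - pt) ** gamma` before passing it to `F.nll_loss`, so well-classified examples contribute less to the gradient. A minimal usage sketch (the import path simply mirrors the file path shown above; with `gamma=0` the loss should reduce to ordinary cross entropy):

```python
import torch
import torch.nn.functional as F

# Import path mirrors the file path listed above.
from fengshen.models.tagging_models.losses.focal_loss import FocalLoss

logits = torch.randn(4, 3)            # N=4 examples, C=3 classes
targets = torch.tensor([0, 2, 1, 2])  # one class index per example

loss = FocalLoss(gamma=2)(logits, targets)
print(loss)

# With gamma=0 the modulating factor (1 - pt) ** gamma equals 1, so the result
# should match plain cross entropy up to floating-point error.
assert torch.allclose(FocalLoss(gamma=0)(logits, targets),
                      F.cross_entropy(logits, targets), atol=1e-6)
```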
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/tagging_models/losses/__init__.py
| 2 | 0 | 0 |
py
|
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/transfo_xl_denoise/tokenization_transfo_xl_denoise.py
|
# coding=utf-8
# Copyright 2022 IDEA-CCNL and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for TransfoXLDenoise."""
import sentencepiece as spm
from transformers.tokenization_utils import PreTrainedTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"transformer-xl-1b-base":
"https://huggingface.co/IDEA-CCNL/Bigan-Transformer-XL-denoise-1.1B/resolve/main/spiece.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"transformer-xl-1b-base": 512,
}
class TransfoXLDenoiseTokenizer(PreTrainedTokenizer):
"""
    Construct a TransfoXLDenoise tokenizer, based on a pretrained SentencePiece model.
Args:
vocab_file (`str`):
Path to the vocabulary file.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
SPIECE_UNDERLINE = "▁"
def __init__(
self,
vocab_file,
unk_token="<|endoftext|>",
bos_token="<|endoftext|>",
eos_token="<|endoftext|>",
**kwargs
):
super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs)
"Initialisation"
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(vocab_file)
@property
def vocab_size(self):
"Returns vocab size"
return len(self.sp_model)
def _tokenize(self, text):
""" Returns a tokenized string. """
return self.sp_model.EncodeAsPieces(text)
def _convert_token_to_id(self, token):
""" Converts a token (str) in an id using the vocab. """
return self.sp_model.PieceToId(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.sp_model.IdToPiece(index)
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string. """
out_string = "".join(tokens).replace(self.SPIECE_UNDERLINE, " ").strip()
return out_string
| 2,805 | 32.807229 | 107 |
py
|
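Since the class subclasses `PreTrainedTokenizer`, the usual `from_pretrained` / `encode` / `decode` API applies on top of the SentencePiece model it wraps. A small sketch, reusing the checkpoint name that appears in `generate.py` below (downloading `spiece.model` requires network access):

```python
from fengshen.models.transfo_xl_denoise.tokenization_transfo_xl_denoise import TransfoXLDenoiseTokenizer

tokenizer = TransfoXLDenoiseTokenizer.from_pretrained("IDEA-CCNL/Bigan-Transformer-XL-denoise-1.1B")

ids = tokenizer.encode("今天天气不错")  # text -> SentencePiece pieces -> ids
text = tokenizer.decode(ids)            # ids -> pieces -> string (SPIECE_UNDERLINE restored to spaces)
print(ids)
print(text)
```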
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/transfo_xl_denoise/generate.py
|
import torch
import torch.nn.functional as F
from fengshen.models.transfo_xl_denoise.tokenization_transfo_xl_denoise import TransfoXLDenoiseTokenizer
from fengshen.models.transfo_xl_denoise.modeling_transfo_xl_denoise import TransfoXLDenoiseModel
from fengshen.utils import top_k_logits, get_masks_and_position_ids
def get_batch(context_tokens, mem_length, batch_size=1):
tokens = context_tokens
tokens = tokens.view(batch_size, -1).contiguous()
    # Get the masks and position ids.
attention_mask, position_ids = get_masks_and_position_ids(tokens, mem_length=mem_length)
return tokens, attention_mask, position_ids
def denoise_generate(model,
tokenizer,
input_text,
device=0,
mem_length=512,
temperature=1.,
top_p=0.9,
eod_token=50000):
    '''Generate using the fixed rewrite prompt the model was pretrained with.'''
prompt = f"“{input_text}”改写后是“"
res = []
counter = 0
tokens, attention_mask, position_ids = get_batch(
torch.LongTensor(tokenizer.encode(prompt)), mem_length, batch_size=1)
tokens, attention_mask, position_ids = tokens.cuda(
device), attention_mask.cuda(device), position_ids.cuda(device)
org_context_length = tokens.shape[-1]
model = model.cuda(device)
while counter < 100:
if counter == 0:
            mems = []  # empty at the beginning
output = model(input_ids=tokens, attention_mask=attention_mask,
position_ids=position_ids, hidden_states=mems)
logits, mems = output.logits, output.hidden_states
else:
index = org_context_length + counter
output = model(input_ids=tokens[:, index - 1: index], position_ids=tokens.new_ones((1, 1)) * (index - 1),
attention_mask=tokens.new_ones(1, 1, 1, mem_length + 1, device=device,
dtype=torch.float), hidden_states=mems)
logits, mems = output.logits, output.hidden_states
logits = logits[:, -1]
logits /= temperature
logits = top_k_logits(logits, top_k=0, top_p=top_p)
        probs = F.softmax(logits, dim=-1)
        prev = torch.multinomial(probs, num_samples=1)[0]
is_end = prev == eod_token
if is_end:
break
tokens = torch.cat((tokens, prev.view(1, 1)), dim=1)
counter += 1
res.append(tokenizer.decode(tokens.view(-1).contiguous().tolist()))
return res
if __name__ == "__main__":
device = 1
tokenizer = TransfoXLDenoiseTokenizer.from_pretrained('IDEA-CCNL/Bigan-Transformer-XL-denoise-1.1B')
model = TransfoXLDenoiseModel.from_pretrained('IDEA-CCNL/Bigan-Transformer-XL-denoise-1.1B')
input_text = "凡是有成就的人, 都很严肃地对待生命自己的"
res = denoise_generate(model, tokenizer, input_text)
print(res)
| 2,934 | 42.80597 | 117 |
py
|
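`denoise_generate` filters each step's logits with `top_k_logits(logits, top_k=0, top_p=top_p)` from `fengshen.utils` and then samples with `torch.multinomial`, i.e. nucleus (top-p) sampling. That helper is not included in this dump, so the sketch below is a generic top-p filter written only to illustrate what the sampling step is doing; it is not the fengshen implementation:

```python
import torch
import torch.nn.functional as F


def top_p_filter(logits: torch.Tensor, top_p: float = 0.9) -> torch.Tensor:
    """Keep the smallest set of tokens whose cumulative probability exceeds top_p;
    mask everything else to -inf so multinomial sampling never picks it."""
    sorted_logits, sorted_idx = torch.sort(logits, descending=True, dim=-1)
    cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
    remove = cum_probs > top_p
    remove[..., 1:] = remove[..., :-1].clone()  # shift right: keep the first token over the threshold
    remove[..., 0] = False                      # always keep the single most likely token
    mask = remove.scatter(-1, sorted_idx, remove)
    return logits.masked_fill(mask, float("-inf"))


# One sampling step analogous to the loop body in denoise_generate:
temperature, top_p = 1.0, 0.9
logits = torch.randn(1, 50048)                      # [batch, vocab_size]
probs = F.softmax(top_p_filter(logits / temperature, top_p), dim=-1)
next_token = torch.multinomial(probs, num_samples=1)
```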
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/transfo_xl_denoise/configuration_transfo_xl_denoise.py
|
# coding=utf-8
# Copyright 2022 IDEA-CCNL and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TransfoXLDenoise model configuration """
from transformers.configuration_utils import PretrainedConfig
Transfo_XL_Denoise_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"transformer-xl-1b-base": "https://huggingface.co/transformer-xl-1b-base/resolve/main/config.json",
# See all TransfoXLDenoise models at https://huggingface.co/models?filter=transfo_xl_denoise
}
class TransfoXLDenoiseConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`~TransfoXLDenoiseModel`].
    It is used to instantiate a TransfoXLDenoise model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the TransfoXLDenoise [transformer-xl-1b-base](https://huggingface.co/transformer-xl-1b-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used
to control the model outputs. Read the documentation from [`PretrainedConfig`]
for more information.
    Args:
        num_layers (`int`, *optional*, defaults to 32):
            Number of transformer layers.
        vocab_size (`int`, *optional*, defaults to 50048):
            Vocabulary size of the TransfoXLDenoise model. Defines the number of different
            tokens that can be represented by the `input_ids` passed when calling
            [`~TransfoXLDenoiseModel`].
        hidden_size (`int`, *optional*, defaults to 1600):
            Dimension of the hidden representations.
        num_attention_heads (`int`, *optional*, defaults to 25):
            Number of attention heads for each attention layer.
        embedding_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability applied to the word and position embeddings.
        attention_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        output_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for the outputs of the attention and MLP blocks.
        max_sequence_length (`int`, *optional*, defaults to 512):
            The maximum sequence length used for the absolute position embeddings when
            `relative_encoding=False`.
        max_memory_length (`int`, *optional*, defaults to 512):
            The maximum number of cached hidden states (Transformer-XL memory) carried over
            between forward passes.
        checkpoint_activations (`bool`, *optional*, defaults to `False`):
            Whether to use activation checkpointing to trade compute for memory.
        checkpoint_num_layers (`int`, *optional*, defaults to 1):
            Number of layers per activation-checkpointing chunk.
        parallel_output (`bool`, *optional*, defaults to `True`):
            Whether the output logits are kept in their (model-)parallel layout instead of
            being gathered.
        relative_encoding (`bool`, *optional*, defaults to `True`):
            Whether to use Transformer-XL style relative position encoding instead of learned
            absolute position embeddings.
Example:
```python
>>> from transformers import TransfoXLDenoiseModel, TransfoXLDenoiseConfig
>>> # Initializing a TransfoXLDenoise transformer-xl-1b-base style configuration
>>> configuration = TransfoXLDenoiseConfig()
>>> # Initializing a model from the transformer-xl-1b-base style configuration
>>> model = TransfoXLDenoiseModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "transfo_xl_denoise"
def __init__(
self,
num_layers=32,
vocab_size=50048,
hidden_size=1600,
num_attention_heads=25,
embedding_dropout_prob=0.1,
attention_dropout_prob=0.1,
output_dropout_prob=0.1,
max_sequence_length=512,
max_memory_length=512,
checkpoint_activations=False,
checkpoint_num_layers=1,
parallel_output=True,
relative_encoding=True,
**kwargs
):
self.num_layers = num_layers
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.embedding_dropout_prob = embedding_dropout_prob
self.attention_dropout_prob = attention_dropout_prob
self.output_dropout_prob = output_dropout_prob
self.max_sequence_length = max_sequence_length
self.max_memory_length = max_memory_length
self.checkpoint_activations = checkpoint_activations
self.checkpoint_num_layers = checkpoint_num_layers
self.parallel_output = parallel_output
self.relative_encoding = relative_encoding
super().__init__(**kwargs)
| 5,820 | 47.508333 | 112 |
py
|
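Beyond the docstring example, the class inherits the standard `PretrainedConfig` serialization machinery, which is how the defaults above (32 layers, hidden size 1600, 25 heads) end up in a checkpoint's `config.json`. A small sketch with a deliberately tiny, made-up configuration:

```python
from fengshen.models.transfo_xl_denoise.configuration_transfo_xl_denoise import TransfoXLDenoiseConfig

# Hypothetical tiny config for experimentation; the class defaults are much larger.
config = TransfoXLDenoiseConfig(num_layers=2, hidden_size=128, num_attention_heads=4)
print(config.model_type)        # "transfo_xl_denoise"
print(config.to_json_string())  # serializable like any PretrainedConfig

config.save_pretrained("./tiny-transfo-xl-denoise")
reloaded = TransfoXLDenoiseConfig.from_pretrained("./tiny-transfo-xl-denoise")
assert reloaded.hidden_size == 128
```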
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/transfo_xl_denoise/__init__.py
| 0 | 0 | 0 |
py
|
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/transfo_xl_denoise/modeling_transfo_xl_denoise.py
|
# coding=utf-8
# Copyright 2022 IDEA-CCNL The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch TransfoXLDenoise model. """
import math
import torch
from torch.utils.checkpoint import checkpoint  # imported as a function so the call in GPT2Transformer.forward works
import torch.nn.functional as F
from dataclasses import dataclass
from typing import Optional, Tuple
from transformers.modeling_utils import (
PreTrainedModel
)
from transformers.modeling_outputs import ModelOutput
from .configuration_transfo_xl_denoise import TransfoXLDenoiseConfig
_CHECKPOINT_FOR_DOC = "transformer-xl-1b-base"
_CONFIG_FOR_DOC = "TransfoXLDenoiseConfig"
_TOKENIZER_FOR_DOC = "TransfoXLDenoiseTokenizer"
Transfo_XL_Denoise_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general
usage and behavior.
Parameters:
config ([`~TransfoXLDenoiseConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
Transfo_XL_Denoise_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`TransfoXLDenoiseTokenizer`].
See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert *input_ids* indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
Transfo_XL_Denoise_PRETRAINED_MODEL_ARCHIVE_LIST = [
"transformer-xl-1b-base",
]
@dataclass
class TransfoXLDenoiseModelOutput(ModelOutput):
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
class PositionalEmbedding(torch.nn.Module):
def __init__(self, hidden_size):
super(PositionalEmbedding, self).__init__()
self.hidden_size = hidden_size
inv_freq = 1 / (10000 ** (torch.arange(0.0, hidden_size, 2.0) / hidden_size))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[None, :, :].expand(bsz, -1, -1)
else:
return pos_emb[None, :, :]
def ensure_divisibility(numerator, denominator):
"""Ensure that numerator is divisible by the denominator."""
assert numerator % denominator == 0, '{} is not divisible by {}'.format(
numerator, denominator)
def divide(numerator, denominator):
"""Ensure that numerator is divisible by the denominator and return
the division value."""
ensure_divisibility(numerator, denominator)
return numerator // denominator
def scaled_init_method(sigma, num_layers):
"""Init method based on N(0, sigma/sqrt(2*num_layers)."""
std = sigma / math.sqrt(2.0 * num_layers)
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
def unscaled_init_method(sigma):
"""Init method based on N(0, sigma)."""
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
return init_
@torch.jit.script
def gelu_impl(x):
"""OpenAI's gelu implementation."""
return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x
* (1.0 + 0.044715 * x * x)))
def gelu(x):
return gelu_impl(x)
class GPT2SelfAttention(torch.nn.Module):
"""Parallel self-attention layer for GPT2.
Self-attention layer takes input with size [b, s, h] where b is
    the batch size, s is the sequence length, and h is the hidden size
and creates output of the same size.
Arguments:
hidden_size: total hidden size of the layer (h).
num_attention_heads: number of attention heads (n). Note that we
require n to be divisible by number of GPUs
used to parallelize the model. Also, we
require hidden size to be divisible by n.
dropout_prob: dropout probability for the attention scores.
init_method: weight initialization.
output_layer_init_method: output layer initialization. If None, use
`init_method`.
We use the following notation:
h: hidden_size
n: num_attention_heads
p: number of partitions
np: n/p
hp: h/p
hn: h/n
b: batch size
s: sequence length
"""
def __init__(self, hidden_size, num_attention_heads,
attention_dropout_prob, output_dropout_prob,
init_method, output_layer_init_method=None, relative_encoding=False):
super(GPT2SelfAttention, self).__init__()
# Set output layer initialization if not provided.
if output_layer_init_method is None:
output_layer_init_method = init_method
# Per attention head and per partition values.
self.hidden_size_per_partition = hidden_size
self.hidden_size_per_attention_head = divide(hidden_size,
num_attention_heads)
self.num_attention_heads_per_partition = num_attention_heads
self.relative_encoding = relative_encoding
# Strided linear layer.
self.query_key_value = torch.nn.Linear(hidden_size,
3 * hidden_size, bias=True)
if relative_encoding:
self.relative = torch.nn.Linear(hidden_size, hidden_size, bias=True)
# Dropout. Note that for a single iteration, this layer will generate
# different outputs on different number of parallel partitions but
# on average it should not be partition dependent.
self.attention_dropout = torch.nn.Dropout(attention_dropout_prob)
# Output.
self.dense = torch.nn.Linear(hidden_size, hidden_size, bias=True)
self.output_dropout = torch.nn.Dropout(output_dropout_prob)
def _transpose_for_scores(self, tensor):
"""Transpose a 3D tensor [b, s, np*hn] into a 4D tensor with
size [b, np, s, hn].
"""
new_tensor_shape = tensor.size()[:-1] + \
(self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head)
tensor = tensor.view(*new_tensor_shape)
return tensor.permute(0, 2, 1, 3)
@staticmethod
def _rel_shift(x, zero_triu=False):
# ql x kl x bsz x h
# bsz x h x ql x kl
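        # Transformer-XL style relative shift: prepend a zero column along the key
        # dimension, view the tensor with its last two dims swapped so every row
        # slides over by one position, drop the first row, and view back. This
        # realigns scores computed against relative-position embeddings so that
        # each query's scores line up with absolute key positions.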
zero_pad = torch.zeros((*x.size()[:-2], x.size(-2), 1),
device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
x_padded = x_padded.view(*x.size()[:-2], x.size(-1) + 1, x.size(-2))
x = x_padded[:, :, 1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(0), x.size(1)))
x = x * torch.tril(ones, x.size(1) - x.size(0))[:, :, None, None]
return x
@staticmethod
def _rel_shift_latest(x: torch.Tensor):
ndims = x.dim()
x_shape = x.size()
row_dim = 2
col_dim = row_dim + 1
assert col_dim < ndims
tgt_shape_1, tgt_shape_2 = [], []
for i in range(ndims):
if i == row_dim:
tgt_shape_1.append(x_shape[col_dim])
tgt_shape_2.append(x_shape[row_dim])
elif i == col_dim:
tgt_shape_1.append(x_shape[row_dim])
tgt_shape_2.append(x_shape[col_dim] - 1)
else:
tgt_shape_1.append(x_shape[i])
tgt_shape_2.append(x_shape[i])
x = x.view(*tgt_shape_1)
x = x[:, :, 1:, :]
x = x.view(*tgt_shape_2)
return x
def forward(self, hidden_states, ltor_mask, position_embeddings=None, r_w_bias=None, r_r_bias=None, mem=None):
# hidden_states: [b, s, h]
# ltor_mask: [1, 1, s, s]
# Attention heads. [b, s, hp]
query_length = hidden_states.size(1)
if mem is None:
mixed_x_layer = self.query_key_value(hidden_states)
(mixed_query_layer,
mixed_key_layer,
mixed_value_layer) = torch.chunk(mixed_x_layer, 3, dim=-1)
else:
cat = torch.cat((mem, hidden_states), 1)
mixed_x_layer = self.query_key_value(cat)
(mixed_query_layer,
mixed_key_layer,
mixed_value_layer) = torch.chunk(mixed_x_layer, 3, dim=-1)
mixed_query_layer = mixed_query_layer[:, -query_length:]
# Reshape and transpose [b, np, s, hn]
query_layer = self._transpose_for_scores(mixed_query_layer)
key_layer = self._transpose_for_scores(mixed_key_layer)
value_layer = self._transpose_for_scores(mixed_value_layer)
if self.relative_encoding:
relative_layer = self.relative(position_embeddings)
relative_layer = self._transpose_for_scores(
relative_layer) # 1 (bsz) x n_head x klen x d_head
# Raw attention scores. [b, np, qs, ks]
rw_head_q = query_layer + r_w_bias.unsqueeze(1)
ac_score = torch.matmul(rw_head_q, key_layer.transpose(-1, -2))
rr_head_q = query_layer + r_r_bias.unsqueeze(1)
bd_score = torch.matmul(rr_head_q, relative_layer.transpose(-1, -2))
bd_score = self._rel_shift(bd_score) # qlen x klen x bsz x n_head
# bd_score = bd_score.permute(2, 3, 0, 1) # bsz n_head qlen klen
attention_scores = ac_score + bd_score
else:
# Raw attention scores. [b, np, s, s]
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(
self.hidden_size_per_attention_head)
# Apply the left to right attention mask.
attention_scores = torch.mul(attention_scores, ltor_mask) - \
10000.0 * (1.0 - ltor_mask)
# Attention probabilities. [b, np, s, s]
attention_probs = torch.nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
# with get_cuda_rng_tracker().fork():
# attention_probs = self.attention_dropout(attention_probs)
# Context layer.
# [b, np, s, hn]
context_layer = torch.matmul(attention_probs, value_layer)
# [b, s, np, hn]
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + \
(self.hidden_size_per_partition,)
# [b, s, hp]
context_layer = context_layer.view(*new_context_layer_shape)
# Output. [b, s, h]
output = self.dense(context_layer)
output = self.output_dropout(output)
return output
class GPT2MLP(torch.nn.Module):
"""MLP for GPT2.
MLP will take the input with h hidden state, project it to 4*h
hidden dimension, perform gelu transformation, and project the
state back into h hidden dimension. At the end, dropout is also
applied.
Arguments:
hidden_size: The hidden size of the self attention.
output_dropout_prob: dropout probability for the outputs
after self attention and final output.
init_method: initialization method used for the weights. Note
that all biases are initialized to zero and
layernorm weight are initialized to one.
output_layer_init_method: output layer initialization. If None,
use `init_method`.
"""
def __init__(self, hidden_size, output_dropout_prob, init_method,
output_layer_init_method=None):
super(GPT2MLP, self).__init__()
# Set output layer initialization if not provided.
if output_layer_init_method is None:
output_layer_init_method = init_method
# Project to 4h.
self.dense_h_to_4h = torch.nn.Linear(hidden_size, 4 * hidden_size)
# Project back to h.
self.dense_4h_to_h = torch.nn.Linear(4 * hidden_size, hidden_size)
self.dropout = torch.nn.Dropout(output_dropout_prob)
def forward(self, hidden_states):
# [b, s, 4hp]
intermediate_parallel = self.dense_h_to_4h(hidden_states)
intermediate_parallel = gelu(intermediate_parallel)
# [b, s, h]
output = self.dense_4h_to_h(intermediate_parallel)
output = self.dropout(output)
return output
class GPT2TransformerLayer(torch.nn.Module):
"""A single layer transformer for GPT2.
We use the following notation:
h: hidden size
n: number of attention heads
b: batch size
s: sequence length
    The transformer layer takes input of size [b, s, h] and returns an
output of the same size.
Arguments:
hidden_size: The hidden size of the self attention.
num_attention_heads: number of attention head in the self
attention.
attention_dropout_prob: dropout probability of the attention
score in self attention.
output_dropout_prob: dropout probability for the outputs
after self attention and final output.
layernorm_epsilon: epsilon used in layernorm to avoid
division by zero.
init_method: initialization method used for the weights. Note
that all biases are initialized to zero and
layernorm weight are initialized to one.
output_layer_init_method: output layers (attention output and
mlp output) initialization. If None,
use `init_method`.
"""
def __init__(self,
hidden_size,
num_attention_heads,
attention_dropout_prob,
output_dropout_prob,
layernorm_epsilon,
init_method,
output_layer_init_method=None,
relative_encoding=False):
super(GPT2TransformerLayer, self).__init__()
# Set output layer initialization if not provided.
if output_layer_init_method is None:
output_layer_init_method = init_method
# Layernorm on the input data.
self.input_layernorm = torch.nn.LayerNorm(hidden_size, eps=layernorm_epsilon)
# Self attention.
self.attention = GPT2SelfAttention(
hidden_size,
num_attention_heads,
attention_dropout_prob,
output_dropout_prob,
init_method,
output_layer_init_method=output_layer_init_method,
relative_encoding=relative_encoding)
# Layernorm on the input data.
self.post_attention_layernorm = torch.nn.LayerNorm(hidden_size,
eps=layernorm_epsilon)
# MLP
self.mlp = GPT2MLP(
hidden_size,
output_dropout_prob,
init_method,
output_layer_init_method=output_layer_init_method)
def forward(self, hidden_states, ltor_mask, position_embeddings=None, r_w_bias=None, r_r_bias=None, mem=None):
# hidden_states: [b, s, h]
# ltor_mask: [1, 1, s, s]
# Layer norm at the begining of the transformer layer.
layernorm_output = self.input_layernorm(hidden_states)
mem = self.input_layernorm(mem) if mem is not None else None
# Self attention.
attention_output = self.attention(
layernorm_output, ltor_mask, position_embeddings, r_w_bias, r_r_bias, mem)
# Residual connection.
# print(f'hz {hidden_states.shape}, attn {attention_output.shape}')
layernorm_input = hidden_states + attention_output
# Layer norm post the self attention.
layernorm_output = self.post_attention_layernorm(layernorm_input)
# MLP.
mlp_output = self.mlp(layernorm_output)
# Second residual connection.
output = layernorm_input + mlp_output
return output
class GPT2Transformer(torch.nn.Module):
"""GPT-2 transformer.
This module takes input from embedding layer and it's output can
be used directly by a logit layer. It consists of L (num-layers)
blocks of:
layer norm
self attention
residual connection
layer norm
mlp
residual connection
followed by a final layer norm.
Arguments:
num_layers: Number of transformer layers.
hidden_size: The hidden size of the self attention.
num_attention_heads: number of attention head in the self
attention.
attention_dropout_prob: dropout probability of the attention
score in self attention.
output_dropout_prob: dropout probability for the outputs
after self attention and final output.
checkpoint_activations: if True, checkpoint activations.
checkpoint_num_layers: number of layers to checkpoint. This
                               is basically the chunk size in checkpointing.
layernorm_epsilon: epsilon used in layernorm to avoid
division by zero.
init_method_std: standard deviation of the init method which has
the form N(0, std).
        use_scaled_init_for_output_weights: If True, use 1/sqrt(2*num_layers)
scaling for the output weights (
output of self attention and mlp).
"""
def __init__(self,
num_layers,
hidden_size,
num_attention_heads,
max_sequence_length,
max_memory_length,
embedding_dropout_prob,
attention_dropout_prob,
output_dropout_prob,
checkpoint_activations,
checkpoint_num_layers=1,
layernorm_epsilon=1.0e-5,
init_method_std=0.02,
use_scaled_init_for_output_weights=True,
relative_encoding=False):
super(GPT2Transformer, self).__init__()
        # Store activation checkpointing flag.
self.checkpoint_activations = checkpoint_activations
self.checkpoint_num_layers = checkpoint_num_layers
self.max_memory_length = max_memory_length
output_layer_init_method = None
if use_scaled_init_for_output_weights:
output_layer_init_method = scaled_init_method(init_method_std,
num_layers)
# Embeddings dropout
self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)
self.relative_encoding = relative_encoding
if relative_encoding:
# Relative position embedding
self.position_embeddings = PositionalEmbedding(hidden_size)
# Per attention head and per partition values.
self.hidden_size_per_attention_head = divide(hidden_size,
num_attention_heads)
self.num_attention_heads_per_partition = num_attention_heads
self.r_w_bias = torch.nn.Parameter(
torch.Tensor(self.num_attention_heads_per_partition, self.hidden_size_per_attention_head))
self.r_r_bias = torch.nn.Parameter(
torch.Tensor(self.num_attention_heads_per_partition, self.hidden_size_per_attention_head))
# Always initialize bias to zero.
with torch.no_grad():
self.r_w_bias.zero_()
self.r_r_bias.zero_()
else:
# Position embedding (serial).
self.position_embeddings = torch.nn.Embedding(max_sequence_length,
hidden_size)
# Initialize the position embeddings.
torch.nn.init.normal_(self.position_embeddings.weight, mean=0.0, std=init_method_std)
def get_layer():
return GPT2TransformerLayer(
hidden_size,
num_attention_heads,
attention_dropout_prob,
output_dropout_prob,
layernorm_epsilon,
unscaled_init_method(init_method_std),
output_layer_init_method=output_layer_init_method,
relative_encoding=relative_encoding)
# Transformer layers.
self.layers = torch.nn.ModuleList(
[get_layer() for _ in range(num_layers)])
# Final layer norm before output.
self.final_layernorm = torch.nn.LayerNorm(hidden_size, eps=layernorm_epsilon)
def forward(self, hidden_states, position_ids, attention_mask, *mems):
batch_size, query_length = hidden_states.size()[:2]
memory_length = mems[0].size(1) if mems else 0
key_length = query_length + memory_length
attention_mask = attention_mask[:, :, :, -query_length - memory_length:]
if self.relative_encoding:
# why drop twice here
# hidden_states = self.embedding_dropout(hidden_states)
position_sequence = torch.arange(key_length - 1, -1, -1.0, device=hidden_states.device,
dtype=hidden_states.dtype)
position_embeddings = self.position_embeddings(position_sequence)
# Apply dropout
position_embeddings = self.embedding_dropout(position_embeddings)
hidden_states = self.embedding_dropout(hidden_states)
else:
position_embeddings = self.position_embeddings(position_ids)
hidden_states = hidden_states + position_embeddings
hidden_states = self.embedding_dropout(hidden_states)
if self.max_memory_length > 0:
mem_layers = [hidden_states.detach()]
else:
mem_layers = []
def custom(start, end):
def custom_forward(*inputs):
layers_ = self.layers[start:end]
x_, inputs = inputs[0], inputs[1:]
if self.relative_encoding:
inputs, mems_ = inputs[:4], inputs[4:]
else:
inputs, mems_ = inputs[:1], inputs[1:]
for i, layer in enumerate(layers_):
mem_i_ = mems_[i] if mems_ else None
x_ = layer(x_, *inputs, mem=mem_i_)
if self.max_memory_length > 0:
mem_layers.append(x_.detach())
return x_
return custom_forward
if self.checkpoint_activations:
la = 0
num_layers = len(self.layers)
chunk_length = self.checkpoint_num_layers
while la < num_layers:
args = [hidden_states, attention_mask]
if self.relative_encoding:
args += [position_embeddings, self.r_w_bias, self.r_r_bias]
if mems:
args += mems[la: la + chunk_length]
hidden_states = checkpoint(custom(la, la + chunk_length), *args)
la += chunk_length
else:
for i, layer in enumerate(self.layers):
args = [hidden_states, attention_mask]
if self.relative_encoding:
args += [position_embeddings, self.r_w_bias, self.r_r_bias]
mem_i = mems[i] if mems else None
hidden_states = layer(*args, mem=mem_i)
if self.max_memory_length > 0:
mem_layers.append(hidden_states.detach())
# Final layer norm.
output = self.final_layernorm(hidden_states)
if self.max_memory_length > 0:
mem_layers = self.update_mems(mem_layers, mems)
return (output, *mem_layers)
def update_mems(self, hiddens, mems):
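        # Keep at most `max_memory_length` of the most recent hidden states per layer:
        # either the tail of the freshly computed states, or the old memory concatenated
        # with the new states and then truncated from the left.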
memory_length = mems[0].size(1) if mems else 0
query_length = hiddens[0].size(1)
new_memory_length = min(self.max_memory_length, memory_length + query_length)
new_mems = []
with torch.no_grad():
for i in range(len(hiddens)):
if new_memory_length <= query_length:
new_mems.append(hiddens[i][:, -new_memory_length:])
else:
new_mems.append(
torch.cat(
(mems[i][:, -new_memory_length + query_length:], hiddens[i]), dim=1))
return new_mems
class TransfoXLDenoisePreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = TransfoXLDenoiseConfig
base_model_prefix = "transfo_xl_denoise"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights """
        pass  # to bypass the NotImplementedError from the base class
class TransfoXLDenoiseModel(TransfoXLDenoisePreTrainedModel):
"""GPT-2 Language model.
    The output of the forward method is the logits (parallel or
    serial depending on the `parallel_output` flag).
"""
def __init__(self, config: TransfoXLDenoiseConfig):
super().__init__(config)
self.config = config
# Word embeddings (parallel).
self.word_embeddings = torch.nn.Embedding(config.vocab_size, config.hidden_size)
# Transformer
self.transformer = GPT2Transformer(config.num_layers,
config.hidden_size,
config.num_attention_heads,
config.max_sequence_length,
config.max_memory_length,
config.embedding_dropout_prob,
config.attention_dropout_prob,
config.output_dropout_prob,
config.checkpoint_activations,
config.checkpoint_num_layers,
relative_encoding=config.relative_encoding)
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
hidden_states=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**unused,
):
r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Required.
        attention_mask (`torch.FloatTensor` of shape `(batch_size, 1, query_length, key_length)`):
            Left-to-right attention mask such as the one produced by
            `fengshen.utils.get_masks_and_position_ids`. Required.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of positions of each input sequence token. Required.
        hidden_states (`list` of `torch.FloatTensor`, *optional*):
            Transformer-XL style per-layer memories returned by a previous forward pass. Pass them
            back in to reuse cached context, or leave empty to start from scratch.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`TransfoXLDenoiseModelOutput`] instead of a plain tuple.
"""
# Embeddings.
# one-hot batch_size * seq_len * vocab_size, can use gradient
# if input_ids.shape[-1] == self.word_embeddings.weight.shape[0]:
# words_embeddings = torch.einsum("ijk,kl->ijl", input_ids, self.word_embeddings.weight)
# else:
# print(f'input_ids {input_ids.device}, word_embedding {self.word_embeddings.weight.device}')
# words_embeddings = self.word_embeddings(input_ids)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
assert input_ids is not None and attention_mask is not None and position_ids is not None, \
"You have to specify input_ids, attention_mask, and position_ids. Check tokenizer.encode_plus for details"
if not hidden_states:
hidden_states = []
embeddings = self.word_embeddings(input_ids)
# Transformer.
transformer_output = self.transformer(
embeddings, position_ids, attention_mask, *hidden_states)
logits, *hidden_states = transformer_output
logits = F.linear(logits, self.word_embeddings.weight)
if not return_dict:
return logits, hidden_states
return TransfoXLDenoiseModelOutput(
logits=logits,
hidden_states=hidden_states
)
| 33,182 | 42.094805 | 120 |
py
|
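Putting the pieces together: `forward` requires `input_ids`, a 4-D left-to-right `attention_mask`, and `position_ids`, and returns logits plus per-layer memories that can be fed back in as `hidden_states` on the next call, which is exactly how `denoise_generate` above uses it. A condensed sketch along those lines (it assumes network access for the checkpoint and reuses `get_masks_and_position_ids` from `fengshen.utils`, as `generate.py` does):

```python
import torch
from fengshen.utils import get_masks_and_position_ids
from fengshen.models.transfo_xl_denoise.tokenization_transfo_xl_denoise import TransfoXLDenoiseTokenizer
from fengshen.models.transfo_xl_denoise.modeling_transfo_xl_denoise import TransfoXLDenoiseModel

tokenizer = TransfoXLDenoiseTokenizer.from_pretrained("IDEA-CCNL/Bigan-Transformer-XL-denoise-1.1B")
model = TransfoXLDenoiseModel.from_pretrained("IDEA-CCNL/Bigan-Transformer-XL-denoise-1.1B").eval()

tokens = torch.LongTensor(tokenizer.encode("凡是有成就的人")).view(1, -1)
attention_mask, position_ids = get_masks_and_position_ids(tokens, mem_length=512)

with torch.no_grad():
    out = model(input_ids=tokens, attention_mask=attention_mask,
                position_ids=position_ids, hidden_states=[])

next_token_logits = out.logits[:, -1]  # [1, vocab_size], ready for top-p sampling
mems = out.hidden_states               # per-layer memories for the next decoding step
```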
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/zen1/tokenization.py
|
# coding=utf-8
# This file is derived from the code at
# https://github.com/huggingface/transformers/blob/master/transformers/tokenization_bert.py
#
# Original copyright notice:
#
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from transformers import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt",
'IDEA-CCNL/Erlangshen-ZEN1-224M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN1-224M-Chinese/resolve/main/vocab.txt',
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
}
VOCAB_NAME = 'vocab.txt'
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BertTokenizer.
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input
Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to;
Effective maximum length is always the minimum of this
value (if specified) and the underlying BERT model's
sequence length.
never_split: List of tokens which will never be split during tokenization.
Only has an effect when do_wordpiece_only=False
"""
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
"you may want to check this behavior.")
kwargs['do_lower_case'] = False
elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is an uncased model but you have set "
"`do_lower_case` to False. We are setting `do_lower_case=True` for you "
"but you may want to check this behavior.")
kwargs['do_lower_case'] = True
else:
vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
logger.error(
"Couldn't reach server at '{}' to download vocabulary.".format(
vocab_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in self.never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| 18,927 | 42.116173 | 179 |
py
|
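The `WordpieceTokenizer` docstring above describes the greedy longest-match-first algorithm; a toy vocabulary makes the behaviour concrete. A minimal sketch (the vocabulary entries are made up for illustration, and the import path mirrors the file path shown above):

```python
import collections

from fengshen.models.zen1.tokenization import WordpieceTokenizer

# Hypothetical toy vocabulary; a real ZEN/BERT vocabulary is loaded from vocab.txt.
toy_vocab = collections.OrderedDict(
    (tok, i) for i, tok in enumerate(["[UNK]", "un", "##aff", "##able", "runn", "##ing"])
)

wp = WordpieceTokenizer(vocab=toy_vocab)
print(wp.tokenize("unaffable"))  # ['un', '##aff', '##able']
print(wp.tokenize("running"))    # ['runn', '##ing']
print(wp.tokenize("xyz"))        # ['[UNK]'] -- no match at all, so the whole word falls back to UNK
```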
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/zen1/modeling.py
|
# coding: utf-8
# Copyright 2019 Sinovation Ventures AI Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is partially derived from the code at
# https://github.com/huggingface/transformers/tree/master/transformers
#
# Original copyright notice:
#
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ZEN model classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import logging
import math
import os
import sys
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import PreTrainedModel
from .configuration_zen1 import ZenConfig
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'IDEA-CCNL/Erlangshen-ZEN1-224M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN1-224M-Chinese/resolve/main/pytorch_model.bin',
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
'IDEA-CCNL/Erlangshen-ZEN1-224M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN1-224M-Chinese/resolve/main/config.json',
}
BERT_CONFIG_NAME = 'bert_config.json'
TF_WEIGHTS_NAME = 'model.ckpt'
def prune_linear_layer(layer, index, dim=0):
""" Prune a linear layer (a model parameters) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
lname = re.split(r'_(\d+)', m_name)
else:
lname = [m_name]
if lname[0] == 'kernel' or lname[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif lname[0] == 'output_bias' or lname[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif lname[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif lname[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, lname[0])
except AttributeError:
print("Skipping {}".format("/".join(name)))
continue
if len(lname) >= 2:
num = int(lname[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
try:
# from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
from torch.nn import LayerNorm as BertLayerNorm
except ImportError:
logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertWordEmbeddings(nn.Module):
"""Construct the embeddings from ngram, position and token_type embeddings.
"""
def __init__(self, config):
super(BertWordEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.word_size, config.hidden_size, padding_idx=0)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = output_attentions
self.keep_multihead_output = keep_multihead_output
self.multihead_output = None
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
if self.keep_multihead_output:
self.multihead_output = context_layer
self.multihead_output.retain_grad()
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
if self.output_attentions:
return attention_probs, context_layer
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertAttention, self).__init__()
self.output_attentions = output_attentions
self.self = BertSelfAttention(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.output = BertSelfOutput(config)
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
for head in heads:
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
def forward(self, input_tensor, attention_mask, head_mask=None):
self_output = self.self(input_tensor, attention_mask, head_mask)
if self.output_attentions:
attentions, self_output = self_output
attention_output = self.output(self_output, input_tensor)
if self.output_attentions:
return attentions, attention_output
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
# if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertLayer, self).__init__()
self.output_attentions = output_attentions
self.attention = BertAttention(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None):
attention_output = self.attention(hidden_states, attention_mask, head_mask)
if self.output_attentions:
attentions, attention_output = attention_output
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
if self.output_attentions:
return attentions, layer_output
return layer_output
class ZenEncoder(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(ZenEncoder, self).__init__()
self.output_attentions = output_attentions
layer = BertLayer(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
self.word_layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_word_layers)])
self.num_hidden_word_layers = config.num_hidden_word_layers
def forward(self, hidden_states, ngram_hidden_states, ngram_position_matrix, attention_mask,
ngram_attention_mask,
output_all_encoded_layers=True, head_mask=None):
# Need to check what is the attention masking doing here
all_encoder_layers = []
all_attentions = []
num_hidden_ngram_layers = self.num_hidden_word_layers
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states, attention_mask, head_mask[i])
if i < num_hidden_ngram_layers:
ngram_hidden_states = self.word_layers[i](ngram_hidden_states, ngram_attention_mask, head_mask[i])
if self.output_attentions:
ngram_attentions, ngram_hidden_states = ngram_hidden_states
if self.output_attentions:
attentions, hidden_states = hidden_states
all_attentions.append(attentions)
hidden_states += torch.bmm(ngram_position_matrix.float(), ngram_hidden_states.float())
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if self.output_attentions:
return all_attentions, all_encoder_layers
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
# if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class ZenOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(ZenOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class ZenOnlyNSPHead(nn.Module):
def __init__(self, config):
super(ZenOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class ZenPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(ZenPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class ZenPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
config_class = ZenConfig
base_model_prefix = "IDEA-CCNL/Erlangshen-ZEN1-224M-Chinese"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(
mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(
mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
class ZenModel(ZenPreTrainedModel):
"""ZEN model ("BERT-based Chinese (Z) text encoder Enhanced by N-gram representations").
Params:
`config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.
`input_ngram_ids`: input_ids of ngrams.
`ngram_token_type_ids`: token_type_ids of ngrams.
`ngram_attention_mask`: attention_mask of ngrams.
`ngram_position_matrix`: position matrix of ngrams.
Outputs: Tuple of (encoded_layers, pooled_output)
`encoded_layers`: controled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
classifier pretrained on top of the hidden state associated to the first character of the
input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
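    Example usage (a minimal, illustrative sketch; the tensors below are placeholders that
    would normally be produced by `BertTokenizer` and `ZenNgramDict`, and `vocab_size` /
    `word_size` are illustrative values forwarded to the config through its **kwargs):
        input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
        input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
        token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
        input_ngram_ids = torch.LongTensor([[7, 0], [3, 0]])
        ngram_position_matrix = torch.zeros(2, 3, 2, dtype=torch.long)
        config = ZenConfig(vocab_size=21128, word_size=104089)
        model = ZenModel(config)
        sequence_output, pooled_output = model(input_ids, input_ngram_ids, ngram_position_matrix,
                                                token_type_ids=token_type_ids,
                                                attention_mask=input_mask,
                                                output_all_encoded_layers=False)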
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(ZenModel, self).__init__(config)
self.output_attentions = output_attentions
self.embeddings = BertEmbeddings(config)
self.word_embeddings = BertWordEmbeddings(config)
self.encoder = ZenEncoder(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.pooler = BertPooler(config)
self.init_weights()
def prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
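            A minimal sketch (layer and head indices are illustrative):
                model.prune_heads({0: [0, 1], 5: [2]})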
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def get_multihead_outputs(self):
""" Gather all multi-head outputs.
Return: list (layers) of multihead module outputs with gradients
"""
return [layer.attention.self.multihead_output for layer in self.encoder.layer]
def forward(self, input_ids,
input_ngram_ids,
ngram_position_matrix,
token_type_ids=None,
ngram_token_type_ids=None,
attention_mask=None,
ngram_attention_mask=None,
output_all_encoded_layers=True,
head_mask=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
if ngram_attention_mask is None:
ngram_attention_mask = torch.ones_like(input_ngram_ids)
if ngram_token_type_ids is None:
ngram_token_type_ids = torch.zeros_like(input_ngram_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_ngram_attention_mask = ngram_attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
extended_ngram_attention_mask = extended_ngram_attention_mask.to(dtype=next(self.parameters()).dtype)
extended_ngram_attention_mask = (1.0 - extended_ngram_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(
-1) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids, token_type_ids)
ngram_embedding_output = self.word_embeddings(input_ngram_ids, ngram_token_type_ids)
encoded_layers = self.encoder(embedding_output,
ngram_embedding_output,
ngram_position_matrix,
extended_attention_mask,
extended_ngram_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
head_mask=head_mask)
if self.output_attentions:
all_attentions, encoded_layers = encoded_layers
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
if self.output_attentions:
return all_attentions, encoded_layers, pooled_output
return encoded_layers, pooled_output
class ZenForPreTraining(ZenPreTrainedModel):
"""ZEN model with pre-training heads.
This module comprises the ZEN model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.
`input_ngram_ids`: input_ids of ngrams.
`ngram_token_type_ids`: token_type_ids of ngrams.
`ngram_attention_mask`: attention_mask of ngrams.
`ngram_position_matrix`: position matrix of ngrams.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
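    Example usage (a minimal sketch; the inputs are built exactly as in `ZenModel` above and
    the label tensors are placeholders):
        total_loss = model(input_ids, input_ngram_ids, ngram_position_matrix,
                           attention_mask=input_mask,
                           masked_lm_labels=masked_lm_labels,
                           next_sentence_label=next_sentence_label)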
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(ZenForPreTraining, self).__init__(config)
self.output_attentions = output_attentions
self.bert = ZenModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.cls = ZenPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.init_weights()
def forward(self, input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids=None,
ngram_token_type_ids=None,
attention_mask=None,
ngram_attention_mask=None,
masked_lm_labels=None,
next_sentence_label=None, head_mask=None):
outputs = self.bert(input_ids,
input_ngram_ids,
ngram_position_matrix,
token_type_ids,
ngram_token_type_ids,
attention_mask,
ngram_attention_mask,
output_all_encoded_layers=False, head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, pooled_output = outputs
else:
sequence_output, pooled_output = outputs
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
elif self.output_attentions:
return all_attentions, prediction_scores, seq_relationship_score
return prediction_scores, seq_relationship_score
class ZenForMaskedLM(ZenPreTrainedModel):
"""ZEN model with the masked language modeling head.
This module comprises the ZEN model followed by the masked language modeling head.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.
`input_ngram_ids`: input_ids of ngrams.
`ngram_token_type_ids`: token_type_ids of ngrams.
`ngram_attention_mask`: attention_mask of ngrams.
`ngram_position_matrix`: position matrix of ngrams.
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
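    Example usage (a minimal sketch; inputs as in `ZenModel` above, `masked_lm_labels` is a placeholder):
        masked_lm_loss = model(input_ids, input_ngram_ids, ngram_position_matrix,
                               attention_mask=input_mask, masked_lm_labels=masked_lm_labels)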
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(ZenForMaskedLM, self).__init__(config)
self.output_attentions = output_attentions
self.bert = ZenModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.cls = ZenOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.init_weights()
def forward(self, input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids=None, attention_mask=None, masked_lm_labels=None, head_mask=None):
        outputs = self.bert(input_ids, input_ngram_ids, ngram_position_matrix,
                            token_type_ids=token_type_ids, attention_mask=attention_mask,
                            output_all_encoded_layers=False,
                            head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, _ = outputs
else:
sequence_output, _ = outputs
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return masked_lm_loss
elif self.output_attentions:
return all_attentions, prediction_scores
return prediction_scores
class ZenForNextSentencePrediction(ZenPreTrainedModel):
"""ZEN model with next sentence prediction head.
This module comprises the ZEN model followed by the next sentence classification head.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.
`input_ngram_ids`: input_ids of ngrams.
`ngram_token_type_ids`: token_type_ids of ngrams.
`ngram_attention_mask`: attention_mask of ngrams.
`ngram_position_matrix`: position matrix of ngrams.
Outputs:
if `next_sentence_label` is not `None`:
            Outputs the next sentence classification loss.
if `next_sentence_label` is `None`:
Outputs the next sentence classification logits of shape [batch_size, 2].
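    Example usage (a minimal sketch; inputs as in `ZenModel` above):
        seq_relationship_logits = model(input_ids, input_ngram_ids, ngram_position_matrix,
                                        attention_mask=input_mask)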
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(ZenForNextSentencePrediction, self).__init__(config)
self.output_attentions = output_attentions
self.bert = ZenModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.cls = ZenOnlyNSPHead(config)
self.init_weights()
def forward(self, input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids=None, attention_mask=None, next_sentence_label=None, head_mask=None):
        outputs = self.bert(input_ids, input_ngram_ids, ngram_position_matrix,
                            token_type_ids=token_type_ids, attention_mask=attention_mask,
                            output_all_encoded_layers=False,
                            head_mask=head_mask)
if self.output_attentions:
all_attentions, _, pooled_output = outputs
else:
_, pooled_output = outputs
seq_relationship_score = self.cls(pooled_output)
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
return next_sentence_loss
elif self.output_attentions:
return all_attentions, seq_relationship_score
return seq_relationship_score
class ZenForSequenceClassification(ZenPreTrainedModel):
"""ZEN model for classification.
This module is composed of the ZEN model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary. Items in the batch should begin with the special "CLS" token. (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.
`input_ngram_ids`: input_ids of ngrams.
`ngram_token_type_ids`: token_type_ids of ngrams.
`ngram_attention_mask`: attention_mask of ngrams.
`ngram_position_matrix`: position matrix of ngrams.
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
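    Example usage (a minimal sketch; inputs as in `ZenModel` above, `labels` is a placeholder
    LongTensor of shape [batch_size]):
        loss, logits = model(input_ids, input_ngram_ids, ngram_position_matrix,
                             attention_mask=input_mask, labels=labels)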
"""
def __init__(self, config, num_labels=2, output_attentions=False, keep_multihead_output=False):
# super().__init__(config, num_labels, output_attentions, keep_multihead_output)
super().__init__(config)
self.config = config
self.output_attentions = output_attentions
self.num_labels = config.num_labels
self.bert = ZenModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.num_labels)
self.init_weights()
def forward(self, input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
        outputs = self.bert(input_ids, input_ngram_ids, ngram_position_matrix,
                            token_type_ids=token_type_ids, attention_mask=attention_mask,
                            output_all_encoded_layers=False,
                            head_mask=head_mask)
if self.output_attentions:
all_attentions, _, pooled_output = outputs
else:
_, pooled_output = outputs
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss, logits
elif self.output_attentions:
return all_attentions, logits
return loss, logits
class ZenForTokenClassification(ZenPreTrainedModel):
"""ZEN model for token-level classification.
This module is composed of the ZEN model with a linear layer on top of
the full hidden state of the last layer.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, ..., num_labels].
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.
`input_ngram_ids`: input_ids of ngrams.
`ngram_token_type_ids`: token_type_ids of ngrams.
`ngram_attention_mask`: attention_mask of ngrams.
`ngram_position_matrix`: position matrix of ngrams.
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
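    Example usage (a minimal sketch; note that this head takes the ngram inputs as
    `ngram_ids` / `ngram_positions`, and `labels` is a placeholder LongTensor of shape
    [batch_size, sequence_length]):
        loss, logits = model(input_ids, attention_mask=input_mask, labels=labels,
                             ngram_ids=input_ngram_ids, ngram_positions=ngram_position_matrix)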
"""
def __init__(self, config, num_labels=2, output_attentions=False, keep_multihead_output=False):
super().__init__(config)
self.output_attentions = output_attentions
self.num_labels = config.num_labels
self.bert = ZenModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, valid_ids=None,
attention_mask_label=None, ngram_ids=None, ngram_positions=None, head_mask=None):
        outputs = self.bert(input_ids, ngram_ids, ngram_positions,
                            token_type_ids=token_type_ids, attention_mask=attention_mask,
                            output_all_encoded_layers=False, head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, _ = outputs
else:
sequence_output, _ = outputs
batch_size, max_len, feat_dim = sequence_output.shape
valid_output = torch.zeros(batch_size, max_len, feat_dim, dtype=torch.float32, device=input_ids.device)
if self.num_labels == 38:
# just for POS to filter/mask input_ids=0
for i in range(batch_size):
temp = sequence_output[i][valid_ids[i] == 1]
valid_output[i][:temp.size(0)] = temp
else:
valid_output = sequence_output
sequence_output = self.dropout(valid_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=0)
# Only keep active parts of the loss
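            # NOTE: the next line overrides the `attention_mask_label` argument, so the
            # masked branch below is never taken and the loss is computed over all positions.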
attention_mask_label = None
if attention_mask_label is not None:
active_loss = attention_mask_label.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss, logits
else:
return loss, logits
| 54,948 | 49.597606 | 171 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/zen1/ngram_utils.py
|
# coding: utf-8
# Copyright 2019 Sinovation Ventures AI Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""utils for ngram for ZEN model."""
import os
import logging
from transformers import cached_path
NGRAM_DICT_NAME = 'ngram.txt'
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {'IDEA-CCNL/Erlangshen-ZEN1-224M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN1-224M-Chinese/resolve/main/ngram.txt'}
class ZenNgramDict(object):
"""
    Dict class to store the ngrams and their frequencies.
"""
def __init__(self, ngram_freq_path, tokenizer, max_ngram_in_seq=128):
"""Constructs ZenNgramDict
:param ngram_freq_path: ngrams with frequency
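        A minimal usage sketch (the path is illustrative; any tokenizer exposing a
        `tokenize` method can be passed):
            ngram_dict = ZenNgramDict("./ngram.txt", tokenizer=tokenizer)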
"""
if os.path.isdir(ngram_freq_path):
ngram_freq_path = os.path.join(ngram_freq_path, NGRAM_DICT_NAME)
self.ngram_freq_path = ngram_freq_path
self.max_ngram_in_seq = max_ngram_in_seq
self.id_to_ngram_list = ["[pad]"]
self.ngram_to_id_dict = {"[pad]": 0}
self.ngram_to_freq_dict = {}
logger.info("loading ngram frequency file {}".format(ngram_freq_path))
with open(ngram_freq_path, "r", encoding="utf-8") as fin:
for i, line in enumerate(fin):
ngram, freq = line.split(",")
tokens = tuple(tokenizer.tokenize(ngram))
self.ngram_to_freq_dict[ngram] = freq
self.id_to_ngram_list.append(tokens)
self.ngram_to_id_dict[tokens] = i + 1
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, **kwargs):
"""
        Instantiate a ZenNgramDict from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
ngram_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
"you may want to check this behavior.")
kwargs['do_lower_case'] = False
elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is an uncased model but you have set "
"`do_lower_case` to False. We are setting `do_lower_case=True` for you "
"but you may want to check this behavior.")
kwargs['do_lower_case'] = True
else:
ngram_file = pretrained_model_name_or_path
if os.path.isdir(ngram_file):
ngram_file = os.path.join(ngram_file, NGRAM_DICT_NAME)
# redirect to the cache, if necessary
try:
resolved_ngram_file = cached_path(ngram_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
logger.error(
"Couldn't reach server at '{}' to download vocabulary.".format(
ngram_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
ngram_file))
return None
if resolved_ngram_file == ngram_file:
logger.info("loading vocabulary file {}".format(ngram_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
ngram_file, resolved_ngram_file))
# Instantiate ngram.
ngram_dict = cls(resolved_ngram_file, **kwargs)
return ngram_dict
def save(self, ngram_freq_path):
with open(ngram_freq_path, "w", encoding="utf-8") as fout:
for ngram, freq in self.ngram_to_freq_dict.items():
fout.write("{},{}\n".format(ngram, freq))
| 4,929 | 45.074766 | 161 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/zen1/__init__.py
|
from .ngram_utils import ZenNgramDict, NGRAM_DICT_NAME
from .modeling import ZenConfig, ZenModel, ZenForPreTraining, ZenForTokenClassification, ZenForSequenceClassification
from .tokenization import BertTokenizer, BasicTokenizer, WordpieceTokenizer
version = "0.1.0"
__all__ = ['ZenNgramDict', 'NGRAM_DICT_NAME', "ZenConfig", "ZenModel", "ZenForPreTraining", "ZenForTokenClassification",
"ZenForSequenceClassification", "BertTokenizer", "BasicTokenizer", "WordpieceTokenizer"]
| 488 | 68.857143 | 120 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/zen1/configuration_zen1.py
|
# coding=utf-8
# Copyright 2022 IDEA-CCNL and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TransfoXLDenoise model configuration """
from transformers.configuration_utils import PretrainedConfig
class ZenConfig(PretrainedConfig):
"""Configuration class to store the configuration of a `ZenModel`.
"""
def __init__(self,
# vocab_size_or_config_json_file,
# word_vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
num_hidden_word_layers=6,
**kwargs):
"""Constructs ZenConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
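        A minimal usage sketch (sizes are illustrative; `vocab_size` and `word_size` are not
        explicit parameters here and are forwarded to the base config via **kwargs):
            config = ZenConfig(vocab_size=21128, word_size=104089, num_hidden_layers=12)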
"""
# self.vocab_size = vocab_size_or_config_json_file
# self.word_size = word_vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.num_hidden_word_layers = num_hidden_word_layers
super().__init__(**kwargs)
| 3,783 | 45.716049 | 91 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/tcbert/modeling_tcbert.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import basicConfig
import torch
from torch import nn
import json
from tqdm import tqdm
import os
import numpy as np
from transformers import BertTokenizer
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import trainer, loggers
from torch.utils.data import Dataset, DataLoader
from transformers.optimization import get_linear_schedule_with_warmup
from transformers import BertForMaskedLM
from transformers import AutoConfig
from transformers.pipelines.base import Pipeline
from transformers import MegatronBertForMaskedLM
import argparse
import copy
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
import warnings
from transformers import TextClassificationPipeline as HuggingfacePipe
class TCBertDataset(Dataset):
def __init__(self, data, tokenizer, args, prompt, label_classes):
super().__init__()
self.tokenizer = tokenizer
self.max_length = args.max_length
self.num_labels = args.num_labels
self.data = data
self.args = args
self.label_classes = label_classes
self.prompt = prompt
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.encode(self.data[index])
def encode(self, item, labeled=True):
if labeled:
ori_texta = self.prompt.format(item['label']) + item['content']
mask_texta = self.prompt.format("[MASK]" * len(item['label'])) + item['content']
labels = self.label_classes[item['label']]
ori_encode_dict = self.tokenizer.encode_plus(ori_texta,
max_length=self.max_length,
padding="longest",
truncation=True
)
mask_encode_dict = self.tokenizer.encode_plus(mask_texta,
max_length=self.max_length,
padding="longest",
truncation=True
)
ori_input_ids = torch.tensor(ori_encode_dict['input_ids']).long()
token_type_ids = torch.tensor(ori_encode_dict['token_type_ids']).long()
attention_mask = torch.tensor(ori_encode_dict['attention_mask']).float()
mask_input_ids = torch.tensor(mask_encode_dict['input_ids']).long()
mlmlabels = torch.where(mask_input_ids == self.tokenizer.mask_token_id, ori_input_ids, -100)
labels = torch.tensor(labels).long()
            mlmlabels = mlmlabels.long()  # already a tensor from torch.where; no need to re-wrap
encoded = {
"sentence": item["content"],
"input_ids": mask_input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
"labels": labels,
"mlmlabels": mlmlabels,
}
else:
texta = self.prompt.format("[MASK]" * self.args.fixed_lablen) + item['content']
encode_dict = self.tokenizer.encode_plus(texta,
max_length=self.max_length,
padding="longest",
truncation=True
)
input_ids = encode_dict['input_ids']
token_type_ids = encode_dict['token_type_ids']
attention_mask = encode_dict['attention_mask']
encoded = {
"sentence": item["content"],
"input_ids": torch.tensor(input_ids).long(),
"token_type_ids": torch.tensor(token_type_ids).long(),
"attention_mask": torch.tensor(attention_mask).float(),
}
return encoded
class TCBertDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TASK NAME DataModel')
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--batchsize', default=16, type=int)
parser.add_argument('--max_length', default=512, type=int)
parser.add_argument('--fixed_lablen', default=2, type=int)
return parent_args
def __init__(self, train_data, val_data, tokenizer, args, prompt, prompt_label):
super().__init__()
self.batchsize = args.batchsize
self.label_classes = self.get_label_classes(prompt_label)
args.num_labels = len(self.label_classes)
self.train_data = TCBertDataset(train_data, tokenizer, args, prompt, self.label_classes)
self.valid_data = TCBertDataset(val_data, tokenizer, args, prompt, self.label_classes)
def get_label_classes(self, prompt_label):
        # map each label name to a contiguous integer id, preserving insertion order
        label_classes = {key: i for i, key in enumerate(prompt_label.keys())}
        print("label_classes:", label_classes)
return label_classes
def train_dataloader(self):
return DataLoader(self.train_data, shuffle=True, collate_fn=self.collate_fn, batch_size=self.batchsize, pin_memory=False)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False, collate_fn=self.collate_fn, batch_size=self.batchsize, pin_memory=False)
def collate_fn(self, batch):
'''
Aggregate a batch data.
batch = [ins1_dict, ins2_dict, ..., insN_dict]
batch_data = {'sentence':[ins1_sentence, ins2_sentence...], 'input_ids':[ins1_input_ids, ins2_input_ids...], ...}
'''
batch_data = {}
for key in batch[0]:
batch_data[key] = [example[key] for example in batch]
input_ids = batch_data['input_ids']
attention_mask = batch_data['attention_mask']
token_type_ids = batch_data["token_type_ids"]
labels = None
if 'labels' in batch_data:
labels = torch.LongTensor(batch_data['labels'])
mlmlabels = None
if 'mlmlabels' in batch_data:
mlmlabels = nn.utils.rnn.pad_sequence(batch_data['mlmlabels'],
batch_first=True,
padding_value=-100)
input_ids = nn.utils.rnn.pad_sequence(input_ids,
batch_first=True,
padding_value=0)
token_type_ids = nn.utils.rnn.pad_sequence(token_type_ids,
batch_first=True,
padding_value=0)
attention_mask = nn.utils.rnn.pad_sequence(attention_mask,
batch_first=True,
padding_value=0)
batch_data = {
"sentence":batch_data["sentence"],
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"labels": labels,
"mlmlabels":mlmlabels
}
return batch_data
class TCBertModel(nn.Module):
def __init__(self, pre_train_dir, nlabels):
super().__init__()
self.config = AutoConfig.from_pretrained(pre_train_dir)
print("pre_train_dir", pre_train_dir)
        # Checkpoints whose path contains "1.3B" are assumed to use the Megatron-BERT architecture;
        # a config-based alternative would be checking `self.config.model_type == 'megatron-bert'`.
        if "1.3B" in pre_train_dir:
self.bert = MegatronBertForMaskedLM.from_pretrained(pre_train_dir)
else:
self.bert = BertForMaskedLM.from_pretrained(pre_train_dir)
self.dropout = nn.Dropout(0.1)
self.nlabels = nlabels
self.linear_classifier = nn.Linear(self.config.hidden_size, self.nlabels)
def forward(self, input_ids, attention_mask, token_type_ids, mlmlabels=None):
outputs = self.bert(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=mlmlabels,
output_hidden_states=True) # (bsz, seq, dim)
mlm_logits = outputs.logits
hidden_states = outputs.hidden_states[-1]
cls_logits = hidden_states[:,0]
cls_logits = self.dropout(cls_logits)
logits = self.linear_classifier(cls_logits)
return outputs.loss, logits, mlm_logits
class TCBertLitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--learning_rate', default=1e-5, type=float)
parser.add_argument('--weight_decay', default=0.1, type=float)
parser.add_argument('--warmup', default=0.01, type=float)
parser.add_argument('--num_labels', default=2, type=int)
return parent_args
def __init__(self, args, model_path, nlabels):
super().__init__()
self.args = args
self.loss_fn = torch.nn.CrossEntropyLoss()
self.model = TCBertModel(model_path, nlabels)
def setup(self, stage) -> None:
if stage == 'fit':
num_gpus = self.trainer.gpus if self.trainer.gpus is not None else 0
self.total_step = int(self.trainer.max_epochs * self.num_data /
(max(1, num_gpus) * self.trainer.accumulate_grad_batches))
print('Total training step:', self.total_step)
def train_inputs(self, batch):
inputs = {
'input_ids': batch['input_ids'],
'attention_mask': batch['attention_mask'],
'token_type_ids': batch['token_type_ids'],
'mlmlabels': batch['mlmlabels']
}
return inputs
def training_step(self, batch, batch_idx):
labels = batch['labels']
batch = self.train_inputs(batch)
mlm_loss, logits, _= self.model(**batch)
if labels is not None:
cls_loss = self.loss_fn(logits, labels.view(-1))
loss = cls_loss + mlm_loss
ntotal = logits.size(0)
ncorrect = (logits.argmax(dim=-1) == labels).long().sum()
acc = ncorrect / ntotal
self.log('train_loss', loss, on_step=True, prog_bar=True)
self.log("train_acc", acc, on_step=True, prog_bar=True)
return loss
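    # Note: the loss optimized above is the sum of the prompt MLM loss returned by the backbone and the
    # [CLS]-based classification cross-entropy computed on `logits`.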
def validation_step(self, batch, batch_idx):
labels = batch['labels']
batch = self.train_inputs(batch)
mlm_loss, logits, _ = self.model(**batch)
predict = logits.argmax(dim=-1).cpu().tolist()
if labels is not None:
cls_loss = self.loss_fn(logits, labels.view(-1))
loss = cls_loss + mlm_loss
ntotal = logits.size(0)
ncorrect = int((logits.argmax(dim=-1) == labels).long().sum())
acc = ncorrect / ntotal
self.log('valid_loss', loss, on_step=True, prog_bar=True)
self.log("valid_acc", acc, on_step=True, prog_bar=True)
return int(ncorrect), int(ntotal)
def configure_optimizers(self):
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
paras = list(
filter(lambda p: p[1].requires_grad, self.named_parameters()))
paras = [{
'params':
[p for n, p in paras if not any(nd in n for nd in no_decay)],
'weight_decay': self.args.weight_decay
}, {
'params': [p for n, p in paras if any(nd in n for nd in no_decay)],
'weight_decay': 0.0
}]
optimizer = torch.optim.AdamW(paras, lr=self.args.learning_rate)
scheduler = get_linear_schedule_with_warmup(
optimizer, int(self.total_step * self.args.warmup),
self.total_step)
return [{
'optimizer': optimizer,
'lr_scheduler': {
'scheduler': scheduler,
'interval': 'step',
'frequency': 1
}
}]
class TCBertPredict:
def __init__(self, model, tokenizer, args, prompt, prompt_label):
self.tokenizer = tokenizer
self.args = args
self.data_model = TCBertDataModel(
[], [], tokenizer, args, prompt, prompt_label)
self.model = model
def predict_inputs(self, batch):
        # Filter redundant information (for example: 'sentence') that will be passed to model.forward()
inputs = {
'input_ids': batch['input_ids'].cuda(),
'attention_mask': batch['attention_mask'].cuda(),
'token_type_ids': batch['token_type_ids'].cuda(),
}
return inputs
def predict(self, batch_data):
batch = [self.data_model.train_data.encode(
sample, labeled=False) for sample in batch_data]
batch = self.data_model.collate_fn(batch)
batch = self.predict_inputs(batch)
_, logits, _ = self.model.model(**batch)
probs = torch.nn.functional.softmax(logits, dim=-1)
predicts = torch.argmax(probs, dim=-1).detach().cpu().numpy()
return predicts
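    # A minimal usage sketch (assumes `lit_model` is a trained TCBertLitModel already moved to GPU and that
    # `tokenizer`, `args`, `prompt`, `prompt_label` match those used for training):
    #   predictor = TCBertPredict(lit_model, tokenizer, args, prompt, prompt_label)
    #   preds = predictor.predict([{"content": "一段待分类的文本"}])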
| 13,919 | 36.929155 | 130 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/tcbert/__init__.py
| 0 | 0 | 0 |
py
|
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/albert/modeling_albert.py
|
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ALBERT model. """
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers import AlbertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "albert-base-v2"
_CONFIG_FOR_DOC = "AlbertConfig"
_TOKENIZER_FOR_DOC = "AlbertTokenizer"
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"albert-base-v1",
"albert-large-v1",
"albert-xlarge-v1",
"albert-xxlarge-v1",
"albert-base-v2",
"albert-large-v2",
"albert-xlarge-v2",
"albert-xxlarge-v2",
# See all ALBERT models at https://huggingface.co/models?filter=albert
]
def load_tf_weights_in_albert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
print(name)
for name, array in zip(names, arrays):
original_name = name
# If saved from the TF HUB module
name = name.replace("module/", "")
# Renaming and simplifying
name = name.replace("ffn_1", "ffn")
name = name.replace("bert/", "albert/")
name = name.replace("attention_1", "attention")
name = name.replace("transform/", "")
name = name.replace("LayerNorm_1", "full_layer_layer_norm")
name = name.replace("LayerNorm", "attention/LayerNorm")
name = name.replace("transformer/", "")
# The feed forward layer had an 'intermediate' step which has been abstracted away
name = name.replace("intermediate/dense/", "")
name = name.replace("ffn/intermediate/output/dense/", "ffn_output/")
# ALBERT attention was split between self and output which have been abstracted away
name = name.replace("/output/", "/")
name = name.replace("/self/", "/")
# The pooler is a linear layer
name = name.replace("pooler/dense", "pooler")
# The classifier was simplified to predictions from cls/predictions
name = name.replace("cls/predictions", "predictions")
name = name.replace("predictions/attention", "predictions")
# Naming was changed to be more explicit
name = name.replace("embeddings/attention", "embeddings")
name = name.replace("inner_group_", "albert_layers/")
name = name.replace("group_", "albert_layer_groups/")
# Classifier
if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name):
name = "classifier/" + name
# No ALBERT model currently handles the next sentence prediction task
if "seq_relationship" in name:
name = name.replace("seq_relationship/output_", "sop_classifier/classifier/")
name = name.replace("weights", "weight")
name = name.split("/")
# Ignore the gradients applied by the LAMB/ADAM optimizers.
if (
"adam_m" in name
or "adam_v" in name
or "AdamWeightDecayOptimizer" in name
or "AdamWeightDecayOptimizer_1" in name
or "global_step" in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except ValueError as e:
e.args += (pointer.shape, array.shape)
raise
print(f"Initialize PyTorch weight {name} from {original_name}")
pointer.data = torch.from_numpy(array)
return model
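# A usage sketch for the TF-to-PyTorch conversion above (the checkpoint path is hypothetical):
#   config = AlbertConfig.from_pretrained("albert-base-v2")
#   model = AlbertForPreTraining(config)
#   model = load_tf_weights_in_albert(model, config, "/path/to/tf_checkpoint/model.ckpt-best")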
class AlbertEmbeddings(nn.Module):
"""
Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
persistent=False,
)
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
        # If token_type_ids is not passed, fall back to the registered buffer from the constructor, which is all
        # zeros. This usually happens when it is auto-generated; the registered buffer lets users trace the model
        # without passing token_type_ids and solves issue #5664.
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class AlbertAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads}"
)
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pruned_heads = set()
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.query = prune_linear_layer(self.query, index)
self.key = prune_linear_layer(self.key, index)
self.value = prune_linear_layer(self.value, index)
self.dense = prune_linear_layer(self.dense, index, dim=1)
# Update hyper params and store pruned heads
self.num_attention_heads = self.num_attention_heads - len(heads)
self.all_head_size = self.attention_head_size * self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.attention_dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.transpose(2, 1).flatten(2)
projected_context_layer = self.dense(context_layer)
projected_context_layer_dropout = self.output_dropout(projected_context_layer)
layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout)
return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,)
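    # Shape note for the relative-position branches above: the einsum "bhld,lrd->bhlr" scores each query
    # position l against every key position r through a learned distance embedding of size
    # attention_head_size, producing a (batch, heads, seq, seq) bias that is added to the attention scores.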
class AlbertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attention = AlbertAttention(config)
self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)
self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)
self.activation = ACT2FN[config.hidden_act]
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False
):
attention_output = self.attention(hidden_states, attention_mask, head_mask, output_attentions)
ffn_output = apply_chunking_to_forward(
self.ff_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output[0],
)
hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])
return (hidden_states,) + attention_output[1:] # add attentions if we output them
def ff_chunk(self, attention_output):
ffn_output = self.ffn(attention_output)
ffn_output = self.activation(ffn_output)
ffn_output = self.ffn_output(ffn_output)
return ffn_output
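    # Note: `apply_chunking_to_forward` above applies `ff_chunk` to slices of the sequence dimension of size
    # `config.chunk_size_feed_forward` (0 means no chunking), trading extra forward passes for lower peak
    # memory in the feed-forward block.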
class AlbertLayerGroup(nn.Module):
def __init__(self, config):
super().__init__()
self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])
def forward(
self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False
):
layer_hidden_states = ()
layer_attentions = ()
for layer_index, albert_layer in enumerate(self.albert_layers):
layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index], output_attentions)
hidden_states = layer_output[0]
if output_attentions:
layer_attentions = layer_attentions + (layer_output[1],)
if output_hidden_states:
layer_hidden_states = layer_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if output_hidden_states:
outputs = outputs + (layer_hidden_states,)
if output_attentions:
outputs = outputs + (layer_attentions,)
return outputs # last-layer hidden state, (layer hidden states), (layer attentions)
class AlbertTransformer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
all_hidden_states = (hidden_states,) if output_hidden_states else None
all_attentions = () if output_attentions else None
head_mask = [None] * self.config.num_hidden_layers if head_mask is None else head_mask
for i in range(self.config.num_hidden_layers):
# Number of layers in a hidden group
layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
# Index of the hidden group
group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
layer_group_output = self.albert_layer_groups[group_idx](
hidden_states,
attention_mask,
head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
output_attentions,
output_hidden_states,
)
hidden_states = layer_group_output[0]
if output_attentions:
all_attentions = all_attentions + layer_group_output[-1]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
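    # Worked example for the group indexing above: with num_hidden_layers=12 and num_hidden_groups=1 (the
    # standard ALBERT setting), layers_per_group == 12 and group_idx == 0 for every i, so the single
    # parameter-shared layer group is applied 12 times; with num_hidden_groups=2, layers 0-5 use group 0 and
    # layers 6-11 use group 1.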
class AlbertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = AlbertConfig
load_tf_weights = load_tf_weights_in_albert
base_model_prefix = "albert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@dataclass
class AlbertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.AlbertForPreTraining`.
Args:
loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
sop_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
sop_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
ALBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Args:
config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ALBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.AlbertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.__call__` and :meth:`transformers.PreTrainedTokenizer.encode` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.",
ALBERT_START_DOCSTRING,
)
class AlbertModel(AlbertPreTrainedModel):
config_class = AlbertConfig
base_model_prefix = "albert"
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = AlbertEmbeddings(config)
self.encoder = AlbertTransformer(config)
if add_pooling_layer:
self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
self.pooler_activation = nn.Tanh()
else:
self.pooler = None
self.pooler_activation = None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. ALBERT
        has a different architecture in that its layers are shared across hidden groups, each of which contains inner
        groups. If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there is a total
        of 4 different layers.
        These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden group,
        while [2,3] correspond to the two inner groups of the second hidden group.
        Any layer with an index other than [0,1,2,3] will result in an error. See the base class PreTrainedModel for
        more information about head pruning.
"""
for layer, heads in heads_to_prune.items():
group_idx = int(layer / self.config.inner_group_num)
inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
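    # Example: with the 12-layer / 2-group / 2-inner-group configuration described in the docstring above,
    # pruning head 0 of flattened layer 0 and heads 1 and 2 of flattened layer 3 would be requested as
    # (hypothetical indices): model._prune_heads({0: [0], 3: [1, 2]})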
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPooling,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # This variant expects a 3D attention mask of shape (batch, seq, seq); the standard 2D mask of shape
        # (batch, seq), e.g. the all-ones default created above, is broadcast instead.
        if attention_mask.dim() == 2:
            extended_attention_mask = attention_mask[:, None, None, :]
        else:
            extended_attention_mask = attention_mask[:, None, :, :]
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"""
Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
`sentence order prediction (classification)` head.
""",
ALBERT_START_DOCSTRING,
)
class AlbertForPreTraining(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.albert = AlbertModel(config)
self.predictions = AlbertMLMHead(config)
self.sop_classifier = AlbertSOPHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.predictions.decoder = new_embeddings
def get_input_embeddings(self):
return self.albert.embeddings.word_embeddings
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
sentence_order_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
sentence_order_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``. ``0`` indicates original order (sequence
A, then sequence B), ``1`` indicates switched order (sequence B, then sequence A).
Returns:
Example::
>>> from transformers import AlbertTokenizer, AlbertForPreTraining
>>> import torch
>>> tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
>>> model = AlbertForPreTraining.from_pretrained('albert-base-v2')
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_logits = outputs.prediction_logits
>>> sop_logits = outputs.sop_logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores = self.predictions(sequence_output)
sop_scores = self.sop_classifier(pooled_output)
total_loss = None
if labels is not None and sentence_order_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1))
total_loss = masked_lm_loss + sentence_order_loss
if not return_dict:
output = (prediction_scores, sop_scores) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return AlbertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
sop_logits=sop_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class AlbertMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.LayerNorm = nn.LayerNorm(config.embedding_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.dense = nn.Linear(config.hidden_size, config.embedding_size)
self.decoder = nn.Linear(config.embedding_size, config.vocab_size)
self.activation = ACT2FN[config.hidden_act]
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.decoder(hidden_states)
prediction_scores = hidden_states
return prediction_scores
def _tie_weights(self):
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias
class AlbertSOPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, pooled_output):
dropout_pooled_output = self.dropout(pooled_output)
logits = self.classifier(dropout_pooled_output)
return logits
@add_start_docstrings(
"Albert Model with a `language modeling` head on top.",
ALBERT_START_DOCSTRING,
)
class AlbertForMaskedLM(AlbertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.albert = AlbertModel(config, add_pooling_layer=False)
self.predictions = AlbertMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.predictions.decoder = new_embeddings
def get_input_embeddings(self):
return self.albert.embeddings.word_embeddings
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_outputs = outputs[0]
prediction_scores = self.predictions(sequence_outputs)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
ALBERT_START_DOCSTRING,
)
class AlbertForSequenceClassification(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.albert = AlbertModel(config)
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ...,
config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ALBERT_START_DOCSTRING,
)
class AlbertForTokenClassification(AlbertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config, add_pooling_layer=False)
classifier_dropout_prob = (
config.classifier_dropout_prob
if config.classifier_dropout_prob is not None
else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ALBERT_START_DOCSTRING,
)
class AlbertForQuestionAnswering(AlbertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds an extra dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ALBERT_START_DOCSTRING,
)
class AlbertForMultipleChoice(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.albert = AlbertModel(config)
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where `num_choices` is the size of the second dimension of the input tensors. (see
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.albert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| 56,887 | 40.706745 | 168 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/transfo_xl_paraphrase/generate.py
|
import torch
import torch.nn.functional as F
from fengshen.models.transfo_xl_paraphrase import TransfoXLModel
from fengshen.utils import top_k_logits, get_masks_and_position_ids
from transformers import T5Tokenizer
def get_batch(context_tokens, mem_length, batch_size=1):
tokens = context_tokens
tokens = tokens.view(batch_size, -1).contiguous()
    # Get the masks and position ids.
attention_mask, position_ids = get_masks_and_position_ids(tokens, mem_length=mem_length)
return tokens, attention_mask, position_ids
def paraphrase_generate(model,
tokenizer,
input_text,
device=0,
mem_length=512,
temperature=1.,
top_p=0.9,
eod_token=50000):
    '''Generate a paraphrase with the fixed prompt template the model was trained on.'''
prompt = f"“{input_text}”的相似句是“"
counter = 0
prompt_tokens = tokenizer.encode(prompt)[:-1]
tokens, attention_mask, position_ids = get_batch(
torch.LongTensor(prompt_tokens), mem_length, batch_size=1)
tokens, attention_mask, position_ids = tokens.cuda(
device), attention_mask.cuda(device), position_ids.cuda(device)
org_context_length = tokens.shape[-1]
model = model.cuda(device)
while counter < 100:
if counter == 0:
            mems = []  # memories are empty at the beginning
output = model(input_ids=tokens, attention_mask=attention_mask,
position_ids=position_ids, hidden_states=mems)
logits, mems = output.logits, output.hidden_states
else:
index = org_context_length + counter
output = model(input_ids=tokens[:, index - 1: index], position_ids=tokens.new_ones((1, 1)) * (index - 1),
attention_mask=tokens.new_ones(1, 1, 1, mem_length + 1, device=device,
dtype=torch.float), hidden_states=mems)
logits, mems = output.logits, output.hidden_states
logits = logits[:, -1]
logits /= temperature
logits = top_k_logits(logits, top_k=0, top_p=top_p)
log_probs = F.softmax(logits, dim=-1)
prev = torch.multinomial(log_probs, num_samples=1)[0]
is_end = prev == eod_token
if is_end:
break
tokens = torch.cat((tokens, prev.view(1, 1)), dim=1)
counter += 1
out_tokens = tokens.view(-1).contiguous().tolist()[len(prompt_tokens):]
res = tokenizer.decode(out_tokens).split('”')[0]
return res
if __name__ == "__main__":
device = 0
tokenizer = T5Tokenizer.from_pretrained('IDEA-CCNL/Randeng-TransformerXL-1.1B-Paraphrasing-Chinese',
eos_token='<|endoftext|>',
extra_ids=0)
model = TransfoXLModel.from_pretrained('IDEA-CCNL/Randeng-TransformerXL-1.1B-Paraphrasing-Chinese')
input_text = "年轻教师选择农村学校,还是县城学校?"
res = paraphrase_generate(model, tokenizer, input_text, device=device)
print(res)
| 3,069 | 42.857143 | 117 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/transfo_xl_paraphrase/__init__.py
|
from fengshen.models.transfo_xl_denoise.modeling_transfo_xl_denoise import TransfoXLDenoiseModel as TransfoXLModel
from .generate import paraphrase_generate
| 157 | 51.666667 | 114 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/auto/tokenization_auto.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Tokenizer class."""
import importlib
import json
import os
from collections import OrderedDict
from pathlib import Path
from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union
from transformers.configuration_utils import PretrainedConfig
from transformers.file_utils import (
cached_path,
get_list_of_files,
hf_bucket_url,
is_offline_mode,
is_sentencepiece_available,
is_tokenizers_available,
)
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
from transformers.utils import logging
# from ..encoder_decoder import EncoderDecoderConfig
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
config_class_to_model_type,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
from .dynamic import get_class_from_dynamic_module
logger = logging.get_logger(__name__)
if TYPE_CHECKING:
# This significantly improves completion suggestion performance when
# the transformers package is used with Microsoft's Pylance language server.
TOKENIZER_MAPPING_NAMES: OrderedDict[str,
Tuple[Optional[str], Optional[str]]] = OrderedDict()
else:
TOKENIZER_MAPPING_NAMES = OrderedDict(
[
("roformer", ("RoFormerTokenizer", None)),
("longformer", ("LongformerTokenizer", None)),
]
)
TOKENIZER_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, TOKENIZER_MAPPING_NAMES)
CONFIG_TO_TYPE = {v: k for k, v in CONFIG_MAPPING_NAMES.items()}
def tokenizer_class_from_name(class_name: str):
if class_name == "PreTrainedTokenizerFast":
return PreTrainedTokenizerFast
for module_name, tokenizers in TOKENIZER_MAPPING_NAMES.items():
if class_name in tokenizers:
module_name = model_type_to_module_name(module_name)
module = importlib.import_module(
f".{module_name}", "transformers.models")
return getattr(module, class_name)
for config, tokenizers in TOKENIZER_MAPPING._extra_content.items():
for tokenizer in tokenizers:
if getattr(tokenizer, "__name__", None) == class_name:
return tokenizer
return None
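# --- Hedged illustration (added; not part of the original module) ---
# `tokenizer_class_from_name` only resolves names registered in TOKENIZER_MAPPING_NAMES
# above (plus anything registered at runtime) and falls back to None otherwise.
def _example_tokenizer_class_lookup():
    """Illustrative only: look up one registered name and one unknown name."""
    assert tokenizer_class_from_name("UnknownTokenizer") is None
    return tokenizer_class_from_name("LongformerTokenizer")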
def get_tokenizer_config(
pretrained_model_name_or_path: Union[str, os.PathLike],
cache_dir: Optional[Union[str, os.PathLike]] = None,
force_download: bool = False,
resume_download: bool = False,
proxies: Optional[Dict[str, str]] = None,
use_auth_token: Optional[Union[bool, str]] = None,
revision: Optional[str] = None,
local_files_only: bool = False,
**kwargs,
):
"""
Loads the tokenizer configuration from a pretrained model tokenizer configuration.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced
under a user or organization name, like `dbmdz/bert-base-german-cased`.
- a path to a *directory* containing a configuration file saved using the
[`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
            Whether or not to force a (re-)download of the configuration files and override the cached versions if they
exist.
resume_download (`bool`, *optional*, defaults to `False`):
            Whether or not to delete incompletely received files. Attempts to resume the download if such a file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
            'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`).
revision(`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
local_files_only (`bool`, *optional*, defaults to `False`):
If `True`, will only try to load the tokenizer configuration from local files.
<Tip>
Passing `use_auth_token=True` is required when you want to use a private model.
</Tip>
Returns:
`Dict`: The configuration of the tokenizer.
Examples:
```python
# Download configuration from huggingface.co and cache.
tokenizer_config = get_tokenizer_config("bert-base-uncased")
# This model does not have a tokenizer config so the result will be an empty dict.
tokenizer_config = get_tokenizer_config("xlm-roberta-base")
# Save a pretrained tokenizer locally and you can reload its config
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
tokenizer.save_pretrained("tokenizer-test")
tokenizer_config = get_tokenizer_config("tokenizer-test")
```"""
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
# Will raise a ValueError if `pretrained_model_name_or_path` is not a valid path or model identifier
repo_files = get_list_of_files(
pretrained_model_name_or_path,
revision=revision,
use_auth_token=use_auth_token,
local_files_only=local_files_only,
)
if TOKENIZER_CONFIG_FILE not in [Path(f).name for f in repo_files]:
return {}
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(
pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE)
else:
config_file = hf_bucket_url(
pretrained_model_name_or_path, filename=TOKENIZER_CONFIG_FILE, revision=revision, mirror=None
)
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
)
except EnvironmentError:
logger.info(
"Could not locate the tokenizer configuration file, will try to use the model config instead.")
return {}
with open(resolved_config_file, encoding="utf-8") as reader:
return json.load(reader)
class AutoTokenizer:
r"""
This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when
created with the [`AutoTokenizer.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoTokenizer is designed to be instantiated "
"using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method."
)
@classmethod
@replace_list_option_in_docstrings(TOKENIZER_MAPPING_NAMES)
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
r"""
Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary.
The tokenizer class to instantiate is selected based on the `model_type` property of the config object (either
passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Params:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
- A path or url to a single saved vocabulary file if and only if the tokenizer only requires a
single vocabulary file (like Bert or XLNet), e.g.: `./my_model_directory/vocab.txt`. (Not
applicable to all derived classes)
inputs (additional positional arguments, *optional*):
Will be passed along to the Tokenizer `__init__()` method.
config ([`PretrainedConfig`], *optional*)
                The configuration object used to determine the tokenizer class to instantiate.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files and override the
cached versions if they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
revision(`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
subfolder (`str`, *optional*):
In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
facebook/rag-token-base), specify it here.
use_fast (`bool`, *optional*, defaults to `True`):
Whether or not to try to load the fast version of the tokenizer.
tokenizer_type (`str`, *optional*):
Tokenizer type to be loaded.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (additional keyword arguments, *optional*):
Will be passed to the Tokenizer `__init__()` method. Can be used to set special tokens like
`bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`,
`additional_special_tokens`. See parameters in the `__init__()` for more details.
Examples:
```python
>>> from transformers import AutoTokenizer
>>> # Download vocabulary from huggingface.co and cache.
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> # Download vocabulary from huggingface.co (user-uploaded) and cache.
>>> tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-base-german-cased")
>>> # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*)
>>> tokenizer = AutoTokenizer.from_pretrained("./test/bert_saved_model/")
```"""
config = kwargs.pop("config", None)
kwargs["_from_auto"] = True
use_fast = kwargs.pop("use_fast", True)
tokenizer_type = kwargs.pop("tokenizer_type", None)
trust_remote_code = kwargs.pop("trust_remote_code", False)
# First, let's see whether the tokenizer_type is passed so that we can leverage it
if tokenizer_type is not None:
tokenizer_class = None
tokenizer_class_tuple = TOKENIZER_MAPPING_NAMES.get(
tokenizer_type, None)
if tokenizer_class_tuple is None:
raise ValueError(
f"Passed `tokenizer_type` {tokenizer_type} does not exist. `tokenizer_type` should be one of "
f"{', '.join(c for c in TOKENIZER_MAPPING_NAMES.keys())}."
)
tokenizer_class_name, tokenizer_fast_class_name = tokenizer_class_tuple
if use_fast and tokenizer_fast_class_name is not None:
tokenizer_class = tokenizer_class_from_name(
tokenizer_fast_class_name)
if tokenizer_class is None:
tokenizer_class = tokenizer_class_from_name(
tokenizer_class_name)
if tokenizer_class is None:
raise ValueError(
f"Tokenizer class {tokenizer_class_name} is not currently imported.")
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
# Next, let's try to use the tokenizer_config file to get the tokenizer class.
tokenizer_config = get_tokenizer_config(
pretrained_model_name_or_path, **kwargs)
config_tokenizer_class = tokenizer_config.get("tokenizer_class")
tokenizer_auto_map = tokenizer_config.get("auto_map")
# If that did not work, let's try to use the config.
if config_tokenizer_class is None:
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(
pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
)
config_tokenizer_class = config.tokenizer_class
if hasattr(config, "auto_map") and "AutoTokenizer" in config.auto_map:
tokenizer_auto_map = config.auto_map["AutoTokenizer"]
# If we have the tokenizer class from the tokenizer config or the model config we're good!
if config_tokenizer_class is not None:
tokenizer_class = None
if tokenizer_auto_map is not None:
if not trust_remote_code:
raise ValueError(
f"Loading {pretrained_model_name_or_path} requires you to execute the tokenizer file in that repo "
"on your local machine. Make sure you have read the code there to avoid malicious use, then set "
"the option `trust_remote_code=True` to remove this error."
)
if kwargs.get("revision", None) is None:
logger.warn(
"Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure "
"no malicious code has been contributed in a newer revision."
)
if use_fast and tokenizer_auto_map[1] is not None:
class_ref = tokenizer_auto_map[1]
else:
class_ref = tokenizer_auto_map[0]
module_file, class_name = class_ref.split(".")
tokenizer_class = get_class_from_dynamic_module(
pretrained_model_name_or_path, module_file + ".py", class_name, **kwargs
)
elif use_fast and not config_tokenizer_class.endswith("Fast"):
tokenizer_class_candidate = f"{config_tokenizer_class}Fast"
tokenizer_class = tokenizer_class_from_name(
tokenizer_class_candidate)
if tokenizer_class is None:
tokenizer_class_candidate = config_tokenizer_class
tokenizer_class = tokenizer_class_from_name(
tokenizer_class_candidate)
if tokenizer_class is None:
raise ValueError(
f"Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported."
)
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
model_type = config_class_to_model_type(type(config).__name__)
if model_type is not None:
tokenizer_class_py, tokenizer_class_fast = TOKENIZER_MAPPING[type(
config)]
if tokenizer_class_fast and (use_fast or tokenizer_class_py is None):
return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
if tokenizer_class_py is not None:
return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
raise ValueError(
"This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed "
"in order to use this tokenizer."
)
raise ValueError(
f"Unrecognized configuration class {config.__class__} to build an AutoTokenizer.\n"
f"Model type should be one of {', '.join(c.__name__ for c in TOKENIZER_MAPPING.keys())}."
)
    @staticmethod
    def register(config_class, slow_tokenizer_class=None, fast_tokenizer_class=None):
"""
Register a new tokenizer in this mapping.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
slow_tokenizer_class ([`PretrainedTokenizer`], *optional*):
The slow tokenizer to register.
            fast_tokenizer_class ([`PretrainedTokenizerFast`], *optional*):
The fast tokenizer to register.
"""
if slow_tokenizer_class is None and fast_tokenizer_class is None:
raise ValueError(
"You need to pass either a `slow_tokenizer_class` or a `fast_tokenizer_class")
if slow_tokenizer_class is not None and issubclass(slow_tokenizer_class, PreTrainedTokenizerFast):
raise ValueError(
"You passed a fast tokenizer in the `slow_tokenizer_class`.")
if fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizer):
raise ValueError(
"You passed a slow tokenizer in the `fast_tokenizer_class`.")
if (
slow_tokenizer_class is not None
and fast_tokenizer_class is not None
and issubclass(fast_tokenizer_class, PreTrainedTokenizerFast)
and fast_tokenizer_class.slow_tokenizer_class != slow_tokenizer_class
):
raise ValueError(
"The fast tokenizer class you are passing has a `slow_tokenizer_class` attribute that is not "
"consistent with the slow tokenizer class you passed (fast tokenizer has "
f"{fast_tokenizer_class.slow_tokenizer_class} and you passed {slow_tokenizer_class}. Fix one of those "
"so they match!"
)
# Avoid resetting a set slow/fast tokenizer if we are passing just the other ones.
if config_class in TOKENIZER_MAPPING._extra_content:
existing_slow, existing_fast = TOKENIZER_MAPPING[config_class]
if slow_tokenizer_class is None:
slow_tokenizer_class = existing_slow
if fast_tokenizer_class is None:
fast_tokenizer_class = existing_fast
TOKENIZER_MAPPING.register(
config_class, (slow_tokenizer_class, fast_tokenizer_class))
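# --- Hedged usage sketch (added; not part of the original module) ---
# Call pattern for wiring a project-specific tokenizer into the auto classes above.
# `custom_config_class` / `custom_tokenizer_class` are hypothetical placeholders
# supplied by the caller; they are not defined in this repository.
def _example_register_custom_tokenizer(custom_config_class, custom_tokenizer_class):
    """Illustrative only: register a slow tokenizer for a custom config type."""
    AutoConfig.register(custom_config_class.model_type, custom_config_class)
    AutoTokenizer.register(custom_config_class, slow_tokenizer_class=custom_tokenizer_class)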
| 21,569 | 46.933333 | 126 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/auto/configuration_auto.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Config class."""
import importlib
import re
import warnings
from collections import OrderedDict
from typing import List, Union
from transformers.configuration_utils import PretrainedConfig
from transformers.file_utils import CONFIG_NAME
from transformers.utils import logging
from .dynamic import get_class_from_dynamic_module
logger = logging.get_logger(__name__)
CONFIG_MAPPING_NAMES = OrderedDict(
[
# Add configs here
("roformer", "RoFormerConfig"),
("longformer", "LongformerConfig"),
]
)
CONFIG_ARCHIVE_MAP_MAPPING_NAMES = OrderedDict(
[
# Add archive maps here
("roformer", "ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
("longformer", "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
]
)
MODEL_NAMES_MAPPING = OrderedDict(
[
# Add full (and cased) model names here
("roformer", "Roformer"),
("longformer", "Longformer"),
]
)
SPECIAL_MODEL_TYPE_TO_MODULE_NAME = OrderedDict([("openai-gpt", "openai")])
def model_type_to_module_name(key):
"""Converts a config key to the corresponding module."""
# Special treatment
if key in SPECIAL_MODEL_TYPE_TO_MODULE_NAME:
return SPECIAL_MODEL_TYPE_TO_MODULE_NAME[key]
return key.replace("-", "_")
def config_class_to_model_type(config):
"""Converts a config class name to the corresponding model type"""
for key, cls in CONFIG_MAPPING_NAMES.items():
if cls == config:
return key
return None
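# --- Hedged illustration (added; not part of the original module) ---
# Small self-check of the two helpers above, using only entries actually registered
# in this file.
def _mapping_helpers_selftest():
    assert model_type_to_module_name("roformer") == "roformer"
    assert model_type_to_module_name("openai-gpt") == "openai"
    assert config_class_to_model_type("LongformerConfig") == "longformer"
    assert config_class_to_model_type("BertConfig") is None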
class _LazyConfigMapping(OrderedDict):
"""
A dictionary that lazily load its values when they are requested.
"""
def __init__(self, mapping):
self._mapping = mapping
self._extra_content = {}
self._modules = {}
def __getitem__(self, key):
if key in self._extra_content:
return self._extra_content[key]
if key not in self._mapping:
raise KeyError(key)
value = self._mapping[key]
module_name = model_type_to_module_name(key)
if module_name not in self._modules:
self._modules[module_name] = importlib.import_module(f".{module_name}", "fengshen.models")
return getattr(self._modules[module_name], value)
def keys(self):
return list(self._mapping.keys()) + list(self._extra_content.keys())
def values(self):
return [self[k] for k in self._mapping.keys()] + list(self._extra_content.values())
def items(self):
return [(k, self[k]) for k in self._mapping.keys()] + list(self._extra_content.items())
def __iter__(self):
return iter(list(self._mapping.keys()) + list(self._extra_content.keys()))
def __contains__(self, item):
return item in self._mapping or item in self._extra_content
def register(self, key, value):
"""
Register a new configuration in this mapping.
"""
if key in self._mapping.keys():
raise ValueError(f"'{key}' is already used by a Transformers config, pick another name.")
self._extra_content[key] = value
CONFIG_MAPPING = _LazyConfigMapping(CONFIG_MAPPING_NAMES)
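# --- Hedged illustration (added; not part of the original module) ---
# First access to CONFIG_MAPPING["roformer"] lazily imports `fengshen.models.roformer`
# and returns its `RoFormerConfig` attribute; entries added via `register()` are served
# from `_extra_content` without any import.
def _example_lazy_config_lookup(model_type="roformer"):
    """Illustrative only: resolve a config class through the lazy mapping."""
    return CONFIG_MAPPING[model_type]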
class _LazyLoadAllMappings(OrderedDict):
"""
A mapping that will load all pairs of key values at the first access (either by indexing, requestions keys, values,
etc.)
Args:
mapping: The mapping to load.
"""
def __init__(self, mapping):
self._mapping = mapping
self._initialized = False
self._data = {}
def _initialize(self):
if self._initialized:
return
warnings.warn(
"ALL_PRETRAINED_CONFIG_ARCHIVE_MAP is deprecated and will be removed in v5 of Transformers. "
"It does not contain all available model checkpoints, far from it. Checkout hf.co/models for that.",
FutureWarning,
)
for model_type, map_name in self._mapping.items():
module_name = model_type_to_module_name(model_type)
module = importlib.import_module(f".{module_name}", "transformers.models")
mapping = getattr(module, map_name)
self._data.update(mapping)
self._initialized = True
def __getitem__(self, key):
self._initialize()
return self._data[key]
def keys(self):
self._initialize()
return self._data.keys()
def values(self):
self._initialize()
return self._data.values()
def items(self):
self._initialize()
        return self._data.items()
def __iter__(self):
self._initialize()
return iter(self._data)
def __contains__(self, item):
self._initialize()
return item in self._data
ALL_PRETRAINED_CONFIG_ARCHIVE_MAP = _LazyLoadAllMappings(CONFIG_ARCHIVE_MAP_MAPPING_NAMES)
def _get_class_name(model_class: Union[str, List[str]]):
if isinstance(model_class, (list, tuple)):
return " or ".join([f"[`{c}`]" for c in model_class if c is not None])
return f"[`{model_class}`]"
def _list_model_options(indent, config_to_class=None, use_model_types=True):
if config_to_class is None and not use_model_types:
raise ValueError("Using `use_model_types=False` requires a `config_to_class` dictionary.")
if use_model_types:
if config_to_class is None:
model_type_to_name = {model_type: f"[`{config}`]" for model_type, config in CONFIG_MAPPING_NAMES.items()}
else:
model_type_to_name = {
model_type: _get_class_name(model_class)
for model_type, model_class in config_to_class.items()
if model_type in MODEL_NAMES_MAPPING
}
lines = [
f"{indent}- **{model_type}** -- {model_type_to_name[model_type]} ({MODEL_NAMES_MAPPING[model_type]} model)"
for model_type in sorted(model_type_to_name.keys())
]
else:
config_to_name = {
CONFIG_MAPPING_NAMES[config]: _get_class_name(clas)
for config, clas in config_to_class.items()
if config in CONFIG_MAPPING_NAMES
}
config_to_model_name = {
config: MODEL_NAMES_MAPPING[model_type] for model_type, config in CONFIG_MAPPING_NAMES.items()
}
lines = [
f"{indent}- [`{config_name}`] configuration class: {config_to_name[config_name]} ({config_to_model_name[config_name]} model)"
for config_name in sorted(config_to_name.keys())
]
return "\n".join(lines)
def replace_list_option_in_docstrings(config_to_class=None, use_model_types=True):
def docstring_decorator(fn):
docstrings = fn.__doc__
lines = docstrings.split("\n")
i = 0
while i < len(lines) and re.search(r"^(\s*)List options\s*$", lines[i]) is None:
i += 1
if i < len(lines):
indent = re.search(r"^(\s*)List options\s*$", lines[i]).groups()[0]
if use_model_types:
indent = f"{indent} "
lines[i] = _list_model_options(indent, config_to_class=config_to_class, use_model_types=use_model_types)
docstrings = "\n".join(lines)
else:
raise ValueError(
f"The function {fn} should have an empty 'List options' in its docstring as placeholder, current docstring is:\n{docstrings}"
)
fn.__doc__ = docstrings
return fn
return docstring_decorator
class AutoConfig:
r"""
This is a generic configuration class that will be instantiated as one of the configuration classes of the library
when created with the [`~AutoConfig.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoConfig is designed to be instantiated "
"using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method."
)
@classmethod
def for_model(cls, model_type: str, *args, **kwargs):
if model_type in CONFIG_MAPPING:
config_class = CONFIG_MAPPING[model_type]
return config_class(*args, **kwargs)
raise ValueError(
f"Unrecognized model identifier: {model_type}. Should contain one of {', '.join(CONFIG_MAPPING.keys())}"
)
@classmethod
@replace_list_option_in_docstrings()
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
r"""
Instantiate one of the configuration classes of the library from a pretrained model configuration.
The configuration class to instantiate is selected based on the `model_type` property of the config object that
is loaded, or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or
namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing a configuration file saved using the
[`~PretrainedConfig.save_pretrained`] method, or the [`~PreTrainedModel.save_pretrained`] method,
e.g., `./my_model_directory/`.
- A path or url to a saved configuration JSON *file*, e.g.,
`./my_model_directory/configuration.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files and override the
cached versions if they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
revision(`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final configuration object.
If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
part of `kwargs` which has not been used to update `config` and is otherwise ignored.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs(additional keyword arguments, *optional*):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
Examples:
```python
>>> from transformers import AutoConfig
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained("bert-base-uncased")
>>> # Download configuration from huggingface.co (user-uploaded) and cache.
>>> config = AutoConfig.from_pretrained("dbmdz/bert-base-german-cased")
>>> # If configuration file is in a directory (e.g., was saved using *save_pretrained('./test/saved_model/')*).
>>> config = AutoConfig.from_pretrained("./test/bert_saved_model/")
>>> # Load a specific configuration file.
>>> config = AutoConfig.from_pretrained("./test/bert_saved_model/my_configuration.json")
>>> # Change some config attributes when loading a pretrained config.
>>> config = AutoConfig.from_pretrained("bert-base-uncased", output_attentions=True, foo=False)
>>> config.output_attentions
True
>>> config, unused_kwargs = AutoConfig.from_pretrained(
... "bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
... )
>>> config.output_attentions
True
        >>> unused_kwargs
{'foo': False}
```"""
kwargs["_from_auto"] = True
kwargs["name_or_path"] = pretrained_model_name_or_path
trust_remote_code = kwargs.pop("trust_remote_code", False)
config_dict, _ = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
if "auto_map" in config_dict and "AutoConfig" in config_dict["auto_map"]:
if not trust_remote_code:
raise ValueError(
f"Loading {pretrained_model_name_or_path} requires you to execute the configuration file in that repo "
"on your local machine. Make sure you have read the code there to avoid malicious use, then set "
"the option `trust_remote_code=True` to remove this error."
)
if kwargs.get("revision", None) is None:
logger.warn(
"Explicitly passing a `revision` is encouraged when loading a configuration with custom code to "
"ensure no malicious code has been contributed in a newer revision."
)
class_ref = config_dict["auto_map"]["AutoConfig"]
module_file, class_name = class_ref.split(".")
config_class = get_class_from_dynamic_module(
pretrained_model_name_or_path, module_file + ".py", class_name, **kwargs
)
return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs)
elif "model_type" in config_dict:
config_class = CONFIG_MAPPING[config_dict["model_type"]]
return config_class.from_dict(config_dict, **kwargs)
else:
# Fallback: use pattern matching on the string.
for pattern, config_class in CONFIG_MAPPING.items():
if pattern in str(pretrained_model_name_or_path):
return config_class.from_dict(config_dict, **kwargs)
raise ValueError(
f"Unrecognized model in {pretrained_model_name_or_path}. "
f"Should have a `model_type` key in its {CONFIG_NAME}, or contain one of the following strings "
f"in its name: {', '.join(CONFIG_MAPPING.keys())}"
)
@staticmethod
def register(model_type, config):
"""
Register a new configuration for this class.
Args:
model_type (`str`): The model type like "bert" or "gpt".
config ([`PretrainedConfig`]): The config to register.
"""
if issubclass(config, PretrainedConfig) and config.model_type != model_type:
raise ValueError(
"The config you are passing has a `model_type` attribute that is not consistent with the model type "
f"you passed (config has {config.model_type} and you passed {model_type}. Fix one of those so they "
"match!"
)
CONFIG_MAPPING.register(model_type, config)
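# --- Hedged usage sketch (added; not part of the original module) ---
# Registering a project-specific config so AutoConfig / CONFIG_MAPPING can resolve it.
# `custom_config_class` is a hypothetical PretrainedConfig subclass supplied by the caller.
def _example_register_config(custom_config_class):
    """Illustrative only: register `custom_config_class` under its own model_type."""
    AutoConfig.register(custom_config_class.model_type, custom_config_class)
    return CONFIG_MAPPING[custom_config_class.model_type]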
| 17,004 | 41.091584 | 141 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/auto/modeling_auto.py
|
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class."""
import warnings
from collections import OrderedDict
from transformers.utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("roformer", "RoFormerModel"),
("longformer", "LongformerModel"),
]
)
MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("longformer", "LongformerForMaskedLM"),
]
)
MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
[
# Model with LM heads mapping
("roformer", "RoFormerForMaskedLM"),
("longformer", "LongformerForMaskedLM"),
]
)
MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("roformer", "RoFormerForCausalLM"),
]
)
MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("roformer", "RoFormerForMaskedLM"),
("longformer", "LongformerForMaskedLM"),
]
)
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("t5", "T5ForConditionalGeneration"),
]
)
MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "SpeechEncoderDecoderModel"),
("speech_to_text", "Speech2TextForConditionalGeneration"),
]
)
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("roformer", "RoFormerForSequenceClassification"),
("longformer", "LongformerForSequenceClassification"),
]
)
MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("roformer", "RoFormerForQuestionAnswering"),
("longformer", "LongformerForQuestionAnswering"),
]
)
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Table Question Answering mapping
("tapas", "TapasForQuestionAnswering"),
]
)
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("roformer", "RoFormerForTokenClassification"),
("longformer", "LongformerForTokenClassification"),
]
)
MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("roformer", "RoFormerForMultipleChoice"),
("longformer", "LongformerForMultipleChoice"),
]
)
MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_MAPPING_NAMES)
MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES)
MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_WITH_LM_HEAD_MAPPING_NAMES)
MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES)
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES
)
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES)
MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES)
class AutoModel(_BaseAutoModelClass):
_model_mapping = MODEL_MAPPING
AutoModel = auto_class_update(AutoModel)
class AutoModelForPreTraining(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_PRETRAINING_MAPPING
AutoModelForPreTraining = auto_class_update(AutoModelForPreTraining, head_doc="pretraining")
# Private on purpose, the public class will add the deprecation warnings.
class _AutoModelWithLMHead(_BaseAutoModelClass):
_model_mapping = MODEL_WITH_LM_HEAD_MAPPING
_AutoModelWithLMHead = auto_class_update(_AutoModelWithLMHead, head_doc="language modeling")
class AutoModelForCausalLM(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
AutoModelForCausalLM = auto_class_update(AutoModelForCausalLM, head_doc="causal language modeling")
class AutoModelForMaskedLM(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_MASKED_LM_MAPPING
AutoModelForMaskedLM = auto_class_update(AutoModelForMaskedLM, head_doc="masked language modeling")
class AutoModelForSeq2SeqLM(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
AutoModelForSeq2SeqLM = auto_class_update(
AutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class AutoModelForSequenceClassification(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
AutoModelForSequenceClassification = auto_class_update(
AutoModelForSequenceClassification, head_doc="sequence classification"
)
class AutoModelForQuestionAnswering(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING
AutoModelForQuestionAnswering = auto_class_update(AutoModelForQuestionAnswering, head_doc="question answering")
class AutoModelForTableQuestionAnswering(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
AutoModelForTableQuestionAnswering = auto_class_update(
AutoModelForTableQuestionAnswering,
head_doc="table question answering",
checkpoint_for_example="google/tapas-base-finetuned-wtq",
)
class AutoModelForTokenClassification(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
AutoModelForTokenClassification = auto_class_update(AutoModelForTokenClassification, head_doc="token classification")
class AutoModelForMultipleChoice(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_MULTIPLE_CHOICE_MAPPING
AutoModelForMultipleChoice = auto_class_update(AutoModelForMultipleChoice, head_doc="multiple choice")
class AutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
AutoModelForSpeechSeq2Seq = auto_class_update(
    AutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
class AutoModelWithLMHead(_AutoModelWithLMHead):
@classmethod
def from_config(cls, config):
warnings.warn(
"The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
"`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
"`AutoModelForSeq2SeqLM` for encoder-decoder models.",
FutureWarning,
)
return super().from_config(config)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
warnings.warn(
"The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
"`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
"`AutoModelForSeq2SeqLM` for encoder-decoder models.",
FutureWarning,
)
return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
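# --- Hedged usage sketch (added; not part of the original module) ---
# `checkpoint_path` is a placeholder for a local directory or hub id whose config
# declares one of the model types mapped above (currently "roformer" or "longformer").
def _example_load_auto_models(checkpoint_path):
    """Illustrative only: resolve concrete model classes through the auto classes."""
    backbone = AutoModel.from_pretrained(checkpoint_path)
    classifier = AutoModelForSequenceClassification.from_pretrained(checkpoint_path)
    return backbone, classifier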
| 8,518 | 30.205128 | 119 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/auto/__init__.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from transformers.file_utils import _LazyModule, is_torch_available
_import_structure = {
"auto_factory": ["get_values"],
"configuration_auto": ["ALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CONFIG_MAPPING", "MODEL_NAMES_MAPPING", "AutoConfig"],
"tokenization_auto": ["TOKENIZER_MAPPING", "AutoTokenizer"],
}
if is_torch_available():
_import_structure["modeling_auto"] = [
"AutoModel",
"AutoModelForMaskedLM",
"AutoModelForMultipleChoice",
"AutoModelForPreTraining",
"AutoModelForQuestionAnswering",
"AutoModelForSequenceClassification",
"AutoModelForTokenClassification",
]
if TYPE_CHECKING:
from .auto_factory import get_values
from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, MODEL_NAMES_MAPPING, AutoConfig
from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer
if is_torch_available():
from .modeling_auto import (
AutoModel,
AutoModelForMaskedLM,
AutoModelForMultipleChoice,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
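# --- Hedged note (added; not part of the original file) ---
# With the lazy-module pattern above, `import fengshen.models.auto` stays cheap:
# submodules such as `modeling_auto` are only imported when an attribute like
# `AutoModel` or `AutoTokenizer` is first accessed, e.g.
#   from fengshen.models.auto import AutoConfig, AutoTokenizer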
| 2,003 | 34.157895 | 119 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/auto/auto_factory.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory function to build auto-model classes."""
import importlib
from collections import OrderedDict
from transformers.configuration_utils import PretrainedConfig
from transformers.file_utils import copy_func
from transformers.utils import logging
from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
from .dynamic import get_class_from_dynamic_module
logger = logging.get_logger(__name__)
CLASS_DOCSTRING = """
This is a generic model class that will be instantiated as one of the model classes of the library when created
with the [`~BaseAutoModelClass.from_pretrained`] class method or the [`~BaseAutoModelClass.from_config`] class
method.
This class cannot be instantiated directly using `__init__()` (throws an error).
"""
FROM_CONFIG_DOCSTRING = """
Instantiates one of the model classes of the library from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights. It only affects the
model's configuration. Use [`~BaseAutoModelClass.from_pretrained`] to load the model weights.
Args:
config ([`PretrainedConfig`]):
The model class to instantiate is selected based on the configuration class:
List options
Examples:
```python
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained("checkpoint_placeholder")
>>> model = BaseAutoModelClass.from_config(config)
```
"""
FROM_PRETRAINED_TORCH_DOCSTRING = """
Instantiate one of the model classes of the library from a pretrained model.
The model class to instantiate is selected based on the `model_type` property of the config object (either
passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
The model is set in evaluation mode by default using `model.eval()` (so for instance, dropout modules are
deactivated). To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
this case, `from_tf` should be set to `True` and a configuration object should be provided as
`config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args (additional positional arguments, *optional*):
Will be passed along to the underlying model `__init__()` method.
config ([`PretrainedConfig`], *optional*):
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the *model id* string of a pretrained
model).
- The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
save directory.
- The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
configuration JSON file named *config.json* is found in the directory.
state_dict (*Dict[str, torch.Tensor]*, *optional*):
A state dictionary to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own
weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and
[`~PreTrainedModel.from_pretrained`] is not a simpler option.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_tf (`bool`, *optional*, defaults to `False`):
Load the model weights from a TensorFlow checkpoint save file (see docstring of
`pretrained_model_name_or_path` argument).
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(`bool`, *optional*, defaults to `False`):
            Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(`bool`, *optional*, defaults to `False`):
Whether or not to only look at local files (e.g., not try downloading the model).
revision(`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (additional keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`). Behaves differently depending on whether a `config` is provided or
automatically loaded:
- If a configuration is provided with `config`, `**kwargs` will be directly passed to the
underlying model's `__init__` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, `kwargs` will be first passed to the configuration class
initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
corresponds to a configuration attribute will be used to override said attribute with the
supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
will be passed to the underlying model's `__init__` function.
Examples:
```python
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download model and configuration from huggingface.co and cache.
>>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")
>>> # Update configuration during loading
>>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_pretrained("./tf_model/shortcut_placeholder_tf_model_config.json")
>>> model = BaseAutoModelClass.from_pretrained(
... "./tf_model/shortcut_placeholder_tf_checkpoint.ckpt.index", from_tf=True, config=config
... )
```
"""
FROM_PRETRAINED_TF_DOCSTRING = """
Instantiate one of the model classes of the library from a pretrained model.
The model class to instantiate is selected based on the `model_type` property of the config object (either
passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *PyTorch state_dict save file* (e.g, `./pt_model/pytorch_model.bin`). In this
case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
argument. This loading path is slower than converting the PyTorch model in a TensorFlow model
using the provided conversion scripts and loading the TensorFlow model afterwards.
model_args (additional positional arguments, *optional*):
Will be passed along to the underlying model `__init__()` method.
config ([`PretrainedConfig`], *optional*):
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the *model id* string of a pretrained
model).
- The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
save directory.
- The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
configuration JSON file named *config.json* is found in the directory.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_pt (`bool`, *optional*, defaults to `False`):
Load the model weights from a PyTorch checkpoint save file (see docstring of
`pretrained_model_name_or_path` argument).
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(`bool`, *optional*, defaults to `False`):
            Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(`bool`, *optional*, defaults to `False`):
Whether or not to only look at local files (e.g., not try downloading the model).
revision(`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (additional keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`). Behaves differently depending on whether a `config` is provided or
automatically loaded:
- If a configuration is provided with `config`, `**kwargs` will be directly passed to the
underlying model's `__init__` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, `kwargs` will be first passed to the configuration class
initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
corresponds to a configuration attribute will be used to override said attribute with the
supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
will be passed to the underlying model's `__init__` function.
Examples:
```python
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download model and configuration from huggingface.co and cache.
>>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")
>>> # Update configuration during loading
>>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json")
>>> model = BaseAutoModelClass.from_pretrained(
... "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config
... )
```
"""
FROM_PRETRAINED_FLAX_DOCSTRING = """
Instantiate one of the model classes of the library from a pretrained model.
The model class to instantiate is selected based on the `model_type` property of the config object (either
passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *PyTorch state_dict save file* (e.g, `./pt_model/pytorch_model.bin`). In this
case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
                      argument. This loading path is slower than converting the PyTorch model to a Flax model
                      using the provided conversion scripts and loading the Flax model afterwards.
model_args (additional positional arguments, *optional*):
Will be passed along to the underlying model `__init__()` method.
config ([`PretrainedConfig`], *optional*):
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the *model id* string of a pretrained
model).
- The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
save directory.
- The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
configuration JSON file named *config.json* is found in the directory.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_pt (`bool`, *optional*, defaults to `False`):
Load the model weights from a PyTorch checkpoint save file (see docstring of
`pretrained_model_name_or_path` argument).
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(`bool`, *optional*, defaults to `False`):
            Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(`bool`, *optional*, defaults to `False`):
Whether or not to only look at local files (e.g., not try downloading the model).
revision(`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (additional keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`). Behaves differently depending on whether a `config` is provided or
automatically loaded:
- If a configuration is provided with `config`, `**kwargs` will be directly passed to the
underlying model's `__init__` method (we assume all relevant updates to the configuration have
                      already been done).
- If a configuration is not provided, `kwargs` will be first passed to the configuration class
initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
corresponds to a configuration attribute will be used to override said attribute with the
supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
will be passed to the underlying model's `__init__` function.
Examples:
```python
>>> from transformers import AutoConfig, BaseAutoModelClass
>>> # Download model and configuration from huggingface.co and cache.
>>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")
>>> # Update configuration during loading
>>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json")
>>> model = BaseAutoModelClass.from_pretrained(
... "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config
... )
```
"""
def _get_model_class(config, model_mapping):
supported_models = model_mapping[type(config)]
if not isinstance(supported_models, (list, tuple)):
return supported_models
name_to_model = {model.__name__: model for model in supported_models}
architectures = getattr(config, "architectures", [])
for arch in architectures:
if arch in name_to_model:
return name_to_model[arch]
elif f"TF{arch}" in name_to_model:
return name_to_model[f"TF{arch}"]
elif f"Flax{arch}" in name_to_model:
return name_to_model[f"Flax{arch}"]
    # If no architecture is set in the config, or none of them matches the supported models, the first element of
    # the tuple is the default.
return supported_models[0]
class _BaseAutoModelClass:
# Base class for auto models.
_model_mapping = None
def __init__(self, *args, **kwargs):
raise EnvironmentError(
f"{self.__class__.__name__} is designed to be instantiated "
f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
f"`{self.__class__.__name__}.from_config(config)` methods."
)
@classmethod
def from_config(cls, config, **kwargs):
trust_remote_code = kwargs.pop("trust_remote_code", False)
if hasattr(config, "auto_map") and cls.__name__ in config.auto_map:
if not trust_remote_code:
raise ValueError(
"Loading this model requires you to execute the modeling file in that repo "
"on your local machine. Make sure you have read the code there to avoid malicious use, then set "
"the option `trust_remote_code=True` to remove this error."
)
if kwargs.get("revision", None) is None:
                logger.warning(
"Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure "
"no malicious code has been contributed in a newer revision."
)
class_ref = config.auto_map[cls.__name__]
module_file, class_name = class_ref.split(".")
model_class = get_class_from_dynamic_module(
config.name_or_path, module_file + ".py", class_name, **kwargs)
return model_class._from_config(config, **kwargs)
elif type(config) in cls._model_mapping.keys():
model_class = _get_model_class(config, cls._model_mapping)
return model_class._from_config(config, **kwargs)
raise ValueError(
f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
config = kwargs.pop("config", None)
trust_remote_code = kwargs.pop("trust_remote_code", False)
kwargs["_from_auto"] = True
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, trust_remote_code=trust_remote_code, **kwargs
)
if hasattr(config, "auto_map") and cls.__name__ in config.auto_map:
if not trust_remote_code:
raise ValueError(
f"Loading {pretrained_model_name_or_path} requires you to execute the modeling file in that repo "
"on your local machine. Make sure you have read the code there to avoid malicious use, then set "
"the option `trust_remote_code=True` to remove this error."
)
if kwargs.get("revision", None) is None:
                logger.warning(
"Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure "
"no malicious code has been contributed in a newer revision."
)
class_ref = config.auto_map[cls.__name__]
module_file, class_name = class_ref.split(".")
model_class = get_class_from_dynamic_module(
pretrained_model_name_or_path, module_file + ".py", class_name, **kwargs
)
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
elif type(config) in cls._model_mapping.keys():
model_class = _get_model_class(config, cls._model_mapping)
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
)
@classmethod
def register(cls, config_class, model_class):
"""
Register a new model for this class.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
model_class ([`PreTrainedModel`]):
The model to register.
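        Example (a minimal sketch; `MyAutoModel` stands for any concrete auto class built on this base, and
        `NewModelConfig`/`NewModelModel` are hypothetical config/model classes subclassing [`PretrainedConfig`]
        and [`PreTrainedModel`]):
        ```python
        >>> MyAutoModel.register(NewModelConfig, NewModelModel)
        >>> model = MyAutoModel.from_config(NewModelConfig())
        ```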
"""
if hasattr(model_class, "config_class") and model_class.config_class != config_class:
            raise ValueError(
                "The model class you are passing has a `config_class` attribute that is not consistent with the "
                f"config class you passed (model has {model_class.config_class} and you passed {config_class}). Fix "
                "one of those so they match!"
            )
cls._model_mapping.register(config_class, model_class)
def insert_head_doc(docstring, head_doc=""):
if len(head_doc) > 0:
return docstring.replace(
"one of the model classes of the library ",
f"one of the model classes of the library (with a {head_doc} head) ",
)
return docstring.replace(
"one of the model classes of the library ", "one of the base model classes of the library "
)
def auto_class_update(cls, checkpoint_for_example="bert-base-cased", head_doc=""):
# Create a new class with the right name from the base class
model_mapping = cls._model_mapping
name = cls.__name__
class_docstring = insert_head_doc(CLASS_DOCSTRING, head_doc=head_doc)
cls.__doc__ = class_docstring.replace("BaseAutoModelClass", name)
# Now we need to copy and re-register `from_config` and `from_pretrained` as class methods otherwise we can't
# have a specific docstrings for them.
from_config = copy_func(_BaseAutoModelClass.from_config)
from_config_docstring = insert_head_doc(
FROM_CONFIG_DOCSTRING, head_doc=head_doc)
from_config_docstring = from_config_docstring.replace(
"BaseAutoModelClass", name)
from_config_docstring = from_config_docstring.replace(
"checkpoint_placeholder", checkpoint_for_example)
from_config.__doc__ = from_config_docstring
from_config = replace_list_option_in_docstrings(
model_mapping._model_mapping, use_model_types=False)(from_config)
cls.from_config = classmethod(from_config)
if name.startswith("TF"):
from_pretrained_docstring = FROM_PRETRAINED_TF_DOCSTRING
elif name.startswith("Flax"):
from_pretrained_docstring = FROM_PRETRAINED_FLAX_DOCSTRING
else:
from_pretrained_docstring = FROM_PRETRAINED_TORCH_DOCSTRING
from_pretrained = copy_func(_BaseAutoModelClass.from_pretrained)
from_pretrained_docstring = insert_head_doc(
from_pretrained_docstring, head_doc=head_doc)
from_pretrained_docstring = from_pretrained_docstring.replace(
"BaseAutoModelClass", name)
from_pretrained_docstring = from_pretrained_docstring.replace(
"checkpoint_placeholder", checkpoint_for_example)
shortcut = checkpoint_for_example.split("/")[-1].split("-")[0]
from_pretrained_docstring = from_pretrained_docstring.replace(
"shortcut_placeholder", shortcut)
from_pretrained.__doc__ = from_pretrained_docstring
from_pretrained = replace_list_option_in_docstrings(
model_mapping._model_mapping)(from_pretrained)
cls.from_pretrained = classmethod(from_pretrained)
return cls
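# Example of how the helpers above are typically wired together (a minimal sketch; `AutoModel` and
# `MODEL_MAPPING` are hypothetical names following the transformers convention):
#
#   class AutoModel(_BaseAutoModelClass):
#       _model_mapping = MODEL_MAPPING
#
#   AutoModel = auto_class_update(AutoModel)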
def get_values(model_mapping):
result = []
for model in model_mapping.values():
if isinstance(model, (list, tuple)):
result += list(model)
else:
result.append(model)
return result
def getattribute_from_module(module, attr):
if attr is None:
return None
if isinstance(attr, tuple):
return tuple(getattribute_from_module(module, a) for a in attr)
if hasattr(module, attr):
return getattr(module, attr)
# Some of the mappings have entries model_type -> object of another model type. In that case we try to grab the
# object at the top level.
transformers_module = importlib.import_module("fengshen")
return getattribute_from_module(transformers_module, attr)
class _LazyAutoMapping(OrderedDict):
    """
    A mapping from config class to object (model or tokenizer class, for instance) that loads keys and values
    lazily, only when they are accessed.
    Args:
        - config_mapping: The map from model type to config class
        - model_mapping: The map from model type to model (or tokenizer) class
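    Example (a minimal sketch; the mapping contents are hypothetical and only illustrate the expected
    `model_type -> class name` format):
    ```python
    CONFIG_MAPPING_NAMES = OrderedDict([("new-model", "NewModelConfig")])
    MODEL_MAPPING_NAMES = OrderedDict([("new-model", "NewModelModel")])
    MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_MAPPING_NAMES)
    ```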
"""
def __init__(self, config_mapping, model_mapping):
self._config_mapping = config_mapping
self._reverse_config_mapping = {
v: k for k, v in config_mapping.items()}
self._model_mapping = model_mapping
self._extra_content = {}
self._modules = {}
def __getitem__(self, key):
if key in self._extra_content:
return self._extra_content[key]
model_type = self._reverse_config_mapping[key.__name__]
if model_type not in self._model_mapping:
raise KeyError(key)
model_name = self._model_mapping[model_type]
return self._load_attr_from_module(model_type, model_name)
def _load_attr_from_module(self, model_type, attr):
module_name = model_type_to_module_name(model_type)
if module_name not in self._modules:
self._modules[module_name] = importlib.import_module(
f".{module_name}", "fengshen.models")
return getattribute_from_module(self._modules[module_name], attr)
def keys(self):
mapping_keys = [
self._load_attr_from_module(key, name)
for key, name in self._config_mapping.items()
if key in self._model_mapping.keys()
]
return mapping_keys + list(self._extra_content.keys())
def get(self, key, default):
try:
return self.__getitem__(key)
except KeyError:
return default
def __bool__(self):
return bool(self.keys())
def values(self):
mapping_values = [
self._load_attr_from_module(key, name)
for key, name in self._model_mapping.items()
if key in self._config_mapping.keys()
]
return mapping_values + list(self._extra_content.values())
def items(self):
mapping_items = [
(
self._load_attr_from_module(key, self._config_mapping[key]),
self._load_attr_from_module(key, self._model_mapping[key]),
)
for key in self._model_mapping.keys()
if key in self._config_mapping.keys()
]
return mapping_items + list(self._extra_content.items())
def __iter__(self):
return iter(self.keys())
def __contains__(self, item):
if item in self._extra_content:
return True
if not hasattr(item, "__name__") or item.__name__ not in self._reverse_config_mapping:
return False
model_type = self._reverse_config_mapping[item.__name__]
return model_type in self._model_mapping
def register(self, key, value):
"""
Register a new model in this mapping.
"""
if hasattr(key, "__name__") and key.__name__ in self._reverse_config_mapping:
model_type = self._reverse_config_mapping[key.__name__]
if model_type in self._model_mapping.keys():
raise ValueError(
f"'{key}' is already used by a Transformers model.")
self._extra_content[key] = value
| 35,460 | 53.978295 | 119 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/auto/dynamic.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to dynamically load model and tokenizer from the Hub."""
import importlib
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from transformers.file_utils import (
HF_MODULES_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
cached_path,
hf_bucket_url,
is_offline_mode,
)
from transformers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def init_hf_modules():
"""
Creates the cache directory for modules with an init, and adds it to the Python path.
"""
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(HF_MODULES_CACHE)
os.makedirs(HF_MODULES_CACHE, exist_ok=True)
init_path = Path(HF_MODULES_CACHE) / "__init__.py"
if not init_path.exists():
init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
"""
Creates a dynamic module in the cache directory for modules.
"""
init_hf_modules()
dynamic_module_path = Path(HF_MODULES_CACHE) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent)
os.makedirs(dynamic_module_path, exist_ok=True)
init_path = dynamic_module_path / "__init__.py"
if not init_path.exists():
init_path.touch()
def check_imports(filename):
"""
Check if the current Python environment contains all the libraries that are imported in a file.
"""
with open(filename, "r", encoding="utf-8") as f:
content = f.read()
# Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
# Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
# Only keep the top-level module
imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
# Unique-ify and test we got them all
imports = list(set(imports))
missing_packages = []
for imp in imports:
try:
importlib.import_module(imp)
except ImportError:
missing_packages.append(imp)
if len(missing_packages) > 0:
raise ImportError(
"This modeling file requires the following packages that were not found in your environment: "
f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
)
def get_class_in_module(class_name, module_path):
"""
    Import a module from the cache directory for modules and extract a class from it.
"""
module_path = module_path.replace(os.path.sep, ".")
module = importlib.import_module(module_path)
return getattr(module, class_name)
def get_class_from_dynamic_module(
pretrained_model_name_or_path: Union[str, os.PathLike],
module_file: str,
class_name: str,
cache_dir: Optional[Union[str, os.PathLike]] = None,
force_download: bool = False,
resume_download: bool = False,
proxies: Optional[Dict[str, str]] = None,
use_auth_token: Optional[Union[bool, str]] = None,
revision: Optional[str] = None,
local_files_only: bool = False,
**kwargs,
):
"""
Extracts a class from a module file, present in the local folder or repository of a model.
<Tip warning={true}>
Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should
therefore only be called on trusted repos.
</Tip>
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced
under a user or organization name, like `dbmdz/bert-base-german-cased`.
- a path to a *directory* containing a configuration file saved using the
[`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
module_file (`str`):
The name of the module file containing the class to look for.
class_name (`str`):
The name of the class to import in the module.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if they
exist.
resume_download (`bool`, *optional*, defaults to `False`):
            Whether or not to delete incompletely received files. Attempts to resume the download if such a file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`).
revision(`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
local_files_only (`bool`, *optional*, defaults to `False`):
If `True`, will only try to load the tokenizer configuration from local files.
<Tip>
Passing `use_auth_token=True` is required when you want to use a private model.
</Tip>
Returns:
`type`: The class, dynamically imported from the module.
Examples:
```python
# Download module *modeling.py* from huggingface.co and cache then extract the class *MyBertModel* from this
# module.
cls = get_class_from_dynamic_module("sgugger/my-bert-model", "modeling.py", "MyBertModel")
```"""
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
submodule = "local"
else:
module_file_or_url = hf_bucket_url(
pretrained_model_name_or_path, filename=module_file, revision=revision, mirror=None
)
submodule = pretrained_model_name_or_path.replace("/", os.path.sep)
try:
# Load from URL or cache if already cached
resolved_module_file = cached_path(
module_file_or_url,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
)
except EnvironmentError:
logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
raise
# Check we have all the requirements in our environment
check_imports(resolved_module_file)
# Now we move the module inside our cached dynamic modules.
full_submodule = TRANSFORMERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(full_submodule)
submodule_path = Path(HF_MODULES_CACHE) / full_submodule
if submodule == "local":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
module_name = module_file
shutil.copy(resolved_module_file, submodule_path / module_file)
else:
# The module file will end up being named module_file + the etag. This way we get the benefit of versioning.
resolved_module_file_name = Path(resolved_module_file).name
module_name_parts = [module_file.replace(".py", "")] + resolved_module_file_name.split(".")
module_name = "_".join(module_name_parts) + ".py"
if not (submodule_path / module_name).exists():
shutil.copy(resolved_module_file, submodule_path / module_name)
# And lastly we get the class inside our newly created module
final_module = os.path.join(full_submodule, module_name.replace(".py", ""))
return get_class_in_module(class_name, final_module)
| 9,861 | 40.788136 | 119 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron_t5/configuration_megatron_t5.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" T5 model configuration """
from collections import OrderedDict
from typing import Any, Dict, Iterable, Mapping, Optional
from transformers import PreTrainedTokenizer, TensorType
from transformers import is_torch_available
from transformers.configuration_utils import PretrainedConfig
from transformers.onnx import OnnxConfigWithPast
from transformers.utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"T5-small": "https://huggingface.co/T5-small/resolve/main/config.json",
"T5-base": "https://huggingface.co/T5-base/resolve/main/config.json",
"T5-large": "https://huggingface.co/T5-large/resolve/main/config.json",
"T5-3b": "https://huggingface.co/T5-3b/resolve/main/config.json",
"T5-11b": "https://huggingface.co/T5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.T5Model` or a
:class:`~transformers.TFT5Model`. It is used to instantiate a T5 model according to the specified arguments,
defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration
to that of the T5 `T5-small <https://huggingface.co/T5-small>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Arguments:
vocab_size (:obj:`int`, `optional`, defaults to 32128):
Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed when calling :class:`~transformers.T5Model` or :class:`~transformers.TFT5Model`.
d_model (:obj:`int`, `optional`, defaults to 512):
Size of the encoder layers and the pooler layer.
d_kv (:obj:`int`, `optional`, defaults to 64):
Size of the key, query, value projections per attention head. :obj:`d_kv` has to be equal to :obj:`d_model
// num_heads`.
d_ff (:obj:`int`, `optional`, defaults to 2048):
Size of the intermediate feed forward layer in each :obj:`T5Block`.
num_layers (:obj:`int`, `optional`, defaults to 6):
Number of hidden layers in the Transformer encoder.
num_decoder_layers (:obj:`int`, `optional`):
Number of hidden layers in the Transformer decoder. Will use the same value as :obj:`num_layers` if not
set.
num_heads (:obj:`int`, `optional`, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
relative_attention_num_buckets (:obj:`int`, `optional`, defaults to 32):
The number of buckets to use for each attention layer.
dropout_rate (:obj:`float`, `optional`, defaults to 0.1):
The ratio for all dropout layers.
        layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
initializer_factor (:obj:`float`, `optional`, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
        feed_forward_proj (:obj:`string`, `optional`, defaults to :obj:`"gelu"`):
            Type of feed forward layer to be used. Should be one of :obj:`"relu"` or :obj:`"gelu"`. The original T5
            uses :obj:`"relu"`; this adaptation defaults to :obj:`"gelu"`.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models).
gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
If True, use gradient checkpointing to save memory at the expense of slower backward pass.
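
    A minimal usage sketch (the imports follow this package's ``__init__`` exports; all arguments keep the
    defaults listed above)::

        >>> from fengshen.models.megatron_t5 import T5Config, T5Model

        >>> # Initializing a configuration with default values
        >>> configuration = T5Config()

        >>> # Initializing a model from the configuration
        >>> model = T5Model(configuration)

        >>> # Accessing the model configuration
        >>> configuration = model.config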
"""
model_type = "T5"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=32128,
d_model=512,
d_kv=64,
d_ff=2048,
num_layers=6,
num_decoder_layers=None,
num_heads=8,
relative_attention_num_buckets=32,
dropout_rate=0.1,
layer_norm_epsilon=1e-5,
initializer_factor=1.0,
feed_forward_proj="gelu",
is_encoder_decoder=True,
use_cache=True,
pad_token_id=0,
eos_token_id=1,
gradient_checkpointing=False,
**kwargs
):
super().__init__(
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
**kwargs,
)
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
@property
def hidden_size(self):
return self.d_model
@property
def num_attention_heads(self):
return self.num_heads
@property
def num_hidden_layers(self):
return self.num_layers
class T5OnnxConfig(OnnxConfigWithPast):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
common_inputs = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch"}),
("decoder_attention_mask", {0: "batch"}),
]
)
if self.use_past:
for i in range(0, self._config.num_layers):
common_inputs[f"past_key_values.{i}.decoder.key"] = {
0: "batch", 2: "past_sequence"}
common_inputs[f"past_key_values.{i}.decoder.value"] = {
0: "batch", 2: "past_sequence"}
common_inputs[f"past_key_values.{i}.encoder.key"] = {
0: "batch", 2: "past_sequence"}
common_inputs[f"past_key_values.{i}.encoder.value"] = {
0: "batch", 2: "past_sequence"}
return common_inputs
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
common_outputs = super().outputs
if "last_hidden_state" in common_outputs:
common_outputs["last_hidden_state"] = {
0: "batch", 1: "decoder_sequence"}
if self.use_past:
for i in range(self._config.num_layers):
common_outputs[f"present.{i}.decoder.key"] = {
0: "batch", 2: "decoder_sequence"}
common_outputs[f"present.{i}.decoder.value"] = {
0: "batch", 2: "decoder_sequence"}
common_outputs[f"present.{i}.encoder.key"] = {
0: "batch", 2: "encoder_sequence"}
common_outputs[f"present.{i}.encoder.value"] = {
0: "batch", 2: "encoder_sequence"}
if self.task == "default":
common_outputs["encoder_last_hidden_state"] = {
0: "batch", 2: "encoder_sequence"}
return common_outputs
def generate_dummy_inputs(
self,
tokenizer: PreTrainedTokenizer,
batch_size: int = -1,
seq_length: int = -1,
is_pair: bool = False,
framework: Optional[TensorType] = None,
) -> Mapping[str, Any]:
# Generate encoder inputs
encoder_inputs = super().generate_dummy_inputs(
tokenizer, batch_size, seq_length, is_pair, framework)
# Generate decoder inputs
decoder_inputs = super().generate_dummy_inputs(
tokenizer, batch_size, 1, is_pair, framework)
decoder_inputs = {f"decoder_{name}": tensor for name,
tensor in decoder_inputs.items()}
ordered_inputs = dict(**encoder_inputs, **decoder_inputs)
if self.use_past:
if not is_torch_available():
raise ValueError(
"Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
batch = encoder_inputs["input_ids"].shape[0]
encoder_seq_length = encoder_inputs["input_ids"].shape[1]
encoder_shape = (
batch,
self._config.num_heads,
encoder_seq_length,
self._config.hidden_size // self._config.num_heads,
)
decoder_shape = (batch, self._config.num_heads, 1,
self._config.hidden_size // self._config.num_heads)
ordered_inputs["past_key_values"] = []
for _ in range(self._config.num_layers):
ordered_inputs["past_key_values"].append(
(
torch.zeros(decoder_shape),
torch.zeros(decoder_shape),
torch.zeros(encoder_shape),
torch.zeros(encoder_shape),
)
)
return ordered_inputs
    @classmethod
    def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> Dict[str, Any]:
if name in ["present", "past_key_values"]:
flatten_output = {}
for idx, t in enumerate(field):
flatten_output[f"{name}.{idx}.decoder.key"] = t[0]
flatten_output[f"{name}.{idx}.decoder.value"] = t[1]
flatten_output[f"{name}.{idx}.encoder.key"] = t[2]
flatten_output[f"{name}.{idx}.encoder.value"] = t[3]
return flatten_output
return super().flatten_output_collection_property(name, field)
| 10,955 | 41.796875 | 119 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron_t5/tokenization_megatron_t5.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" T5Tokenizer """
from transformers import BertTokenizer
class T5Tokenizer():
def __init__(self, extra_id_num=118):
self.extra_id_num = extra_id_num
@classmethod
    def from_pretrained(cls, vocab_path):
        cls.extra_id_num = 118
        cls.T5_special_tokens = ['[BOS]', '[EOS]']
        for i in range(cls.extra_id_num):
            cls.T5_special_tokens.append(f'<extra_id_{str(i)}>')
        tokenizer = BertTokenizer.from_pretrained(vocab_path, additional_special_tokens=cls.T5_special_tokens)
return tokenizer
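if __name__ == "__main__":
    # Minimal usage sketch: "path/to/vocab" is a placeholder for a local directory (or hub id)
    # containing a BERT-style vocab.txt; replace it with a real path before running.
    tokenizer = T5Tokenizer.from_pretrained("path/to/vocab")
    print(tokenizer.tokenize("今天天气真好 <extra_id_0>"))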
| 1,172 | 34.545455 | 111 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron_t5/__init__.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from transformers.file_utils import _LazyModule, is_torch_available
_import_structure = {
"configuration_megatron_t5": ["T5Config"],
"tokenization_megatron_t5": ["T5Tokenizer"],
}
if is_torch_available():
_import_structure["modeling_megatron_t5"] = [
"T5Model",
"T5EncoderModel",
"T5ForConditionalGeneration"
]
if TYPE_CHECKING:
from .configuration_megatron_t5 import T5Config
from .tokenization_megatron_t5 import T5Tokenizer
if is_torch_available():
from .modeling_megatron_t5 import (
T5Model,
T5EncoderModel,
T5ForConditionalGeneration
)
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__, globals()["__file__"], _import_structure)
| 1,425 | 27.52 | 74 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron_t5/modeling_megatron_t5.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch T5 model. """
import copy
import math
import os
import warnings
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.checkpoint import checkpoint
from transformers.activations import ACT2FN
from transformers.file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from transformers.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from .configuration_megatron_t5 import T5Config
import numpy as np
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
_CHECKPOINT_FOR_DOC = "T5-small"
####################################################
# This dict contains ids and associated url
# for the pretrained weights provided with the models
####################################################
T5_PRETRAINED_MODEL_ARCHIVE_LIST = [
"T5-small",
"T5-base",
"T5-large",
"T5-3b",
"T5-11b",
# See all T5 models at https://huggingface.co/models?filter=T5
]
####################################################
# This is a conversion method from TF 1.0 to PyTorch
# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28
####################################################
def load_tf_weights_in_T5(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
tf_weights = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
tf_weights[name] = array
for txt_name in names:
name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer",
"AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
if "_slot_" in name[-1]:
logger.info(f"Skipping {'/'.join(name)}")
tf_weights.pop(txt_name, None)
continue
pointer = model
array = tf_weights[txt_name]
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
elif scope_names[0] == "self_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[0]
elif scope_names[0] == "enc_dec_attention":
pointer = getattr(pointer, "layer")
pointer = pointer[1]
elif scope_names[0] == "dense_relu_dense":
pointer = getattr(pointer, "layer")
pointer = pointer[2]
elif scope_names[0] == "rms_norm":
if hasattr(pointer, "layer_norm"):
pointer = getattr(pointer, "layer_norm")
elif hasattr(pointer, "final_layer_norm"):
pointer = getattr(pointer, "final_layer_norm")
elif scope_names[0] == "scale":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
elif scope_names[0] == "decoder" and name[1] == "logits":
continue
elif scope_names[0] == "logits":
pointer = getattr(pointer, "lm_head")
elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit():
pointer = getattr(pointer, f"wi_{scope_names[1]}")
continue
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if scope_names[0] not in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
if scope_names[0] != "embedding":
logger.info(
f"Transposing numpy weight of shape {array.shape} for {name}")
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array.astype(np.float32))
tf_weights.pop(txt_name, None)
logger.info(
f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
return model
####################################################
# PyTorch Models are constructed by sub-classing
# - torch.nn.Module for the layers and
# - PreTrainedModel for the models (it-self a sub-class of nn.Module)
####################################################
PARALLELIZE_DOCSTRING = r"""
    This is an experimental feature and is subject to change at a moment's notice.
Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
it will evenly distribute blocks across all devices.
Args:
device_map (:obj:`Dict[int, list]`, optional, defaults to None):
A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
automatically mapped to the first device (for esoteric reasons). That means that the first device should
have fewer attention modules mapped to it than other devices. For reference, the T5 models have the
following number of attention modules:
- T5-small: 6
- T5-base: 12
- T5-large: 24
- T5-3b: 24
- T5-11b: 24
Example::
# Here is an example of a device map on a machine with 4 GPUs using T5-3b,
# which has a total of 24 attention modules:
model = T5ForConditionalGeneration.from_pretrained('T5-3b')
device_map = {0: [0, 1, 2],
1: [3, 4, 5, 6, 7, 8, 9],
2: [10, 11, 12, 13, 14, 15, 16],
3: [17, 18, 19, 20, 21, 22, 23]}
model.parallelize(device_map)
"""
DEPARALLELIZE_DOCSTRING = r"""
Moves the model to cpu from a model parallel state.
Example::
# On a 4 GPU machine with T5-3b:
model = T5ForConditionalGeneration.from_pretrained('T5-3b')
device_map = {0: [0, 1, 2],
1: [3, 4, 5, 6, 7, 8, 9],
2: [10, 11, 12, 13, 14, 15, 16],
3: [17, 18, 19, 20, 21, 22, 23]}
model.parallelize(device_map) # Splits the model across several devices
        model.deparallelize() # Puts the model back on CPU and cleans memory by calling torch.cuda.empty_cache()
"""
class T5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
        Construct a layernorm module in the T5 style: no bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
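        # Computes y = weight * x / sqrt(mean(x**2) + eps), i.e. an RMSNorm without bias or mean subtraction.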
# layer norm should always be calculated in float32
variance = hidden_states.to(torch.float32).pow(
2).mean(-1, keepdim=True)
hidden_states = hidden_states * \
torch.rsqrt(variance + self.variance_epsilon)
# convert into float16 if necessary
if self.weight.dtype == torch.float16:
hidden_states = hidden_states.to(torch.float16)
return self.weight * hidden_states
class T5DenseReluDense(nn.Module):
def __init__(self, config):
super().__init__()
# @IDEA modified -> bias=False -> bias=True
self.wi = nn.Linear(config.d_model, config.d_ff, bias=True)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=True)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = nn.functional.relu(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
class T5DenseGeluDense(nn.Module):
def __init__(self, config):
super().__init__()
# @IDEA modified -> bias=False -> bias=True
self.wi = nn.Linear(config.d_model, config.d_ff, bias=True)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=True)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = nn.functional.gelu(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
class T5DenseGatedGeluDense(nn.Module):
def __init__(self, config):
super().__init__()
# @IDEA modified -> bias=False -> bias=True
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=True)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=True)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=True)
self.dropout = nn.Dropout(config.dropout_rate)
self.gelu_act = ACT2FN["gelu_new"]
def forward(self, hidden_states):
hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
class T5LayerFF(nn.Module):
def __init__(self, config):
super().__init__()
# @IDEA modified -> T5LayerNorm -> nn.LayerNorm
# self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.layer_norm = nn.LayerNorm(
config.d_model, eps=config.layer_norm_epsilon)
if config.feed_forward_proj == "relu":
self.DenseReluDense = T5DenseReluDense(config)
elif config.feed_forward_proj == "gelu":
self.DenseReluDense = T5DenseGeluDense(config)
else:
            raise ValueError(
                f"{config.feed_forward_proj} is not supported. Choose between `relu` and `gelu`"
            )
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
class T5Attention(nn.Module):
def __init__(self, config: T5Config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
# Mesh TensorFlow initialization to avoid scaling before softmax
# @IDEA modified -> bias=False -> bias=True
self.q = nn.Linear(self.d_model, self.inner_dim, bias=True)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=True)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=True)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=True)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(
self.relative_attention_num_buckets, self.n_heads)
self.pruned_heads = set()
self.gradient_checkpointing = False
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
)
# Prune linear layers
self.q = prune_linear_layer(self.q, index)
self.k = prune_linear_layer(self.k, index)
self.v = prune_linear_layer(self.v, index)
self.o = prune_linear_layer(self.o, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.inner_dim = self.key_value_proj_dim * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
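        Example (a quick sanity check of the bucketing above): with bidirectional=True and num_buckets=32,
        a relative position of -3 lands in bucket 3, while +2 lands in bucket 16 + 2 = 18, because the upper
        half of the buckets is reserved for positive offsets.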
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position >
0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = - \
torch.min(relative_position,
torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(
                relative_position_if_large, num_buckets - 1)
        )
        relative_buckets += torch.where(is_small,
                                        relative_position, relative_position_if_large)
return relative_buckets
def compute_bias(self, query_length, key_length):
"""Compute binned relative position bias"""
context_position = torch.arange(
query_length, dtype=torch.long, device=self.relative_attention_bias.weight.device
)[:, None]
memory_position = torch.arange(
key_length, dtype=torch.long, device=self.relative_attention_bias.weight.device
)[None, :]
relative_position = memory_position - \
context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
)
# shape (query_length, key_length, num_heads)
values = self.relative_attention_bias(relative_position_bucket)
# shape (1, num_heads, query_length, key_length)
values = values.permute([2, 0, 1]).unsqueeze(0)
return values
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_value=None,
layer_head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
real_seq_length = seq_length
if past_key_value is not None:
assert (
len(past_key_value) == 2
            ), f"past_key_value should have 2 past states: keys and values. Got {len(past_key_value)} past states"
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
key_length = real_seq_length if key_value_states is None else key_value_states.shape[
1]
def shape(states):
"""projection"""
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
def unshape(states):
"""reshape"""
return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
def project(hidden_states, proj_layer, key_value_states, past_key_value):
"""projects hidden states correctly to key/query states"""
if key_value_states is None:
# self-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(hidden_states))
elif past_key_value is None:
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(key_value_states))
if past_key_value is not None:
if key_value_states is None:
# self-attn
# (batch_size, n_heads, key_length, dim_per_head)
hidden_states = torch.cat(
[past_key_value, hidden_states], dim=2)
else:
# cross-attn
hidden_states = past_key_value
return hidden_states
# get query states
# (batch_size, n_heads, seq_length, dim_per_head)
query_states = shape(self.q(hidden_states))
# get key/value states
key_states = project(
hidden_states, self.k, key_value_states, past_key_value[
0] if past_key_value is not None else None
)
value_states = project(
hidden_states, self.v, key_value_states, past_key_value[
1] if past_key_value is not None else None
)
# compute scores
scores = torch.matmul(
query_states, key_states.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(real_seq_length, key_length)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -hidden_states.size(1):, :]
if mask is not None:
# (batch_size, n_heads, seq_length, key_length)
position_bias = position_bias + mask
# @IDEA modified -> delete scores += position_bias, use absolute positional
# scores += position_bias
scores = scores / math.sqrt(self.key_value_proj_dim)
if mask is not None:
scores = scores + mask
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(
scores
) # (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.dropout(
attn_weights, p=0, training=self.training
) # (batch_size, n_heads, seq_length, key_length)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
# (batch_size, seq_length, dim)
attn_output = unshape(torch.matmul(attn_weights, value_states))
attn_output = self.o(attn_output)
present_key_value_state = (key_states, value_states) if (
self.is_decoder and use_cache) else None
outputs = (attn_output,) + \
(present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
class T5LayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
# @IDEA modified -> T5LayerNorm -> nn.LayerNorm
# self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.layer_norm = nn.LayerNorm(
config.d_model, eps=config.layer_norm_epsilon)
self.SelfAttention = T5Attention(
config, has_relative_attention_bias=has_relative_attention_bias)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
normed_hidden_states,
mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
# add attentions if we output them
outputs = (hidden_states,) + attention_output[1:]
return outputs
class T5LayerCrossAttention(nn.Module):
def __init__(self, config):
super().__init__()
# @IDEA modified -> T5LayerNorm -> nn.LayerNorm
# self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.layer_norm = nn.LayerNorm(
config.d_model, eps=config.layer_norm_epsilon)
self.EncDecAttention = T5Attention(
config, has_relative_attention_bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
key_value_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
query_length=None,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
normed_hidden_states,
mask=attention_mask,
key_value_states=key_value_states,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
query_length=query_length,
output_attentions=output_attentions,
)
layer_output = hidden_states + self.dropout(attention_output[0])
# add attentions if we output them
outputs = (layer_output,) + attention_output[1:]
return outputs
class T5Block(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
# @IDEA modified ->
# self.layer = nn.ModuleList()
# self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))
# if self.is_decoder:
# self.layer.append(T5LayerCrossAttention(config))
# self.layer.append(T5LayerFF(config))
self.T5LayerSelfAttention = T5LayerSelfAttention(
config, has_relative_attention_bias=has_relative_attention_bias)
if self.is_decoder:
self.T5LayerCrossAttention = T5LayerCrossAttention(
config)
self.T5LayerFF = T5LayerFF(config)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
layer_head_mask=None,
cross_attn_layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
return_dict=True,
):
if past_key_value is not None:
assert self.is_decoder, "Only decoder can use `past_key_values`"
expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
if len(past_key_value) != expected_num_past_key_values:
raise ValueError(
f"There should be {expected_num_past_key_values} past states. "
f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
f"Got {len(past_key_value)} past key / value states"
)
self_attn_past_key_value = past_key_value[:2]
cross_attn_past_key_value = past_key_value[2:]
else:
self_attn_past_key_value, cross_attn_past_key_value = None, None
# @IDEA modified -> self.layer[0] -> self.T5LayerSelfAttention
self_attention_outputs = self.T5LayerSelfAttention(
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=self_attn_past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states, present_key_value_state = self_attention_outputs[:2]
# Keep self-attention outputs and relative position weights
attention_outputs = self_attention_outputs[2:]
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(
hidden_states, min=-clamp_value, max=clamp_value)
do_cross_attention = self.is_decoder and encoder_hidden_states is not None
if do_cross_attention:
# the actual query length is unknown for cross attention
# if using past key value states. Need to inject it here
if present_key_value_state is not None:
query_length = present_key_value_state[0].shape[2]
else:
query_length = None
# @IDEA modified -> self.layer[1] -> self.T5LayerCrossAttention
cross_attention_outputs = self.T5LayerCrossAttention(
hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
query_length=query_length,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = cross_attention_outputs[0]
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(
hidden_states, min=-clamp_value, max=clamp_value)
# Combine self attn and cross attn key value states
if present_key_value_state is not None:
present_key_value_state = present_key_value_state + \
cross_attention_outputs[1]
# Keep cross-attention outputs and relative position weights
attention_outputs = attention_outputs + cross_attention_outputs[2:]
# Apply Feed Forward layer
# @IDEA modified -> self.layer[-1] -> self.T5LayerFF
hidden_states = self.T5LayerFF(hidden_states)
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(
hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if use_cache:
outputs = outputs + (present_key_value_state,) + attention_outputs
else:
outputs = outputs + attention_outputs
# hidden-states, present_key_value_states, (self-attention position bias),
# (self-attention weights), (cross-attention position bias), (cross-attention weights)
return outputs
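# Added note (illustrative, not part of the original file): for a decoder block called with
# `use_cache=True` and `output_attentions=True`, the tuple above unpacks as
#   hidden_states, present_key_value = outputs[:2]
#   self_attn_position_bias, self_attn_weights = outputs[2], outputs[3]
#   cross_attn_position_bias, cross_attn_weights = outputs[4], outputs[5]
# With `use_cache=False` the present_key_value entry is absent and the later indices shift left by one
# (T5Stack.forward below re-inserts a `None` placeholder so it can index the tuple uniformly).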
class T5PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = T5Config
load_tf_weights = load_tf_weights_in_T5
base_model_prefix = "transformer"
is_parallelizable = True
supports_gradient_checkpointing = True
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {
"decoder_input_ids": input_ids,
"input_ids": input_ids,
"decoder_attention_mask": input_mask,
}
return dummy_inputs
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor # Used for testing weights initialization
if isinstance(module, T5LayerNorm):
module.weight.data.fill_(factor * 1.0)
elif isinstance(module, (T5Model, T5ForConditionalGeneration, T5EncoderModel)):
# Mesh TensorFlow embeddings initialization
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d
# /mesh_tensorflow/layers.py#L1624
# @IDEA modified -> module.shared.weight -> module.shared.word_embeddings.weight
# module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
module.shared.word_embeddings.weight.data.normal_(
mean=0.0, std=factor * 1.0)
module.shared.position_embeddings.weight.data.normal_(
mean=0.0, std=factor * 1.0)
elif isinstance(module, T5DenseReluDense):
# Mesh TensorFlow FF initialization
# See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow
# /transformer/transformer_layers.py#L56
# and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/
# mesh_tensorflow/layers.py#L89
module.wi.weight.data.normal_(
mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi, "bias") and module.wi.bias is not None:
module.wi.bias.data.zero_()
module.wo.weight.data.normal_(
mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, T5DenseGeluDense):
module.wi_0.weight.data.normal_(
mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
module.wi_0.bias.data.zero_()
module.wi_1.weight.data.normal_(
mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
            if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
                module.wi_1.bias.data.zero_()
module.wo.weight.data.normal_(
mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, T5Attention):
# Mesh TensorFlow attention initialization to avoid scaling before softmax
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d
# /mesh_tensorflow/transformer/attention.py#L136
d_model = self.config.d_model
key_value_proj_dim = self.config.d_kv
n_heads = self.config.num_heads
module.q.weight.data.normal_(
mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
module.k.weight.data.normal_(
mean=0.0, std=factor * (d_model ** -0.5))
module.v.weight.data.normal_(
mean=0.0, std=factor * (d_model ** -0.5))
module.o.weight.data.normal_(
mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
if module.has_relative_attention_bias:
module.relative_attention_bias.weight.data.normal_(
mean=0.0, std=factor * ((d_model) ** -0.5))
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (T5Attention, T5Stack)):
module.gradient_checkpointing = value
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
assert (
decoder_start_token_id is not None
), "self.model.config.decoder_start_token_id has to be defined. "\
"In T5 it is usually set to the pad_token_id. See T5 docs for more information"
# shift inputs to the right
if is_torch_fx_proxy(input_ids):
# Item assignment is not supported natively for proxies.
shifted_input_ids = torch.full(
input_ids.shape[:-1] + (1,), decoder_start_token_id)
shifted_input_ids = torch.cat(
[shifted_input_ids, input_ids[..., :-1]], dim=-1)
else:
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
assert torch.all(shifted_input_ids >= 0).item(
), "Verify that `shifted_input_ids` has only positive values"
return shifted_input_ids
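def _shift_right_example(model: T5PreTrainedModel) -> torch.Tensor:
    """Illustrative sketch added for documentation; not used by the model code in this file.
    Shows the effect of :meth:`T5PreTrainedModel._shift_right` on a toy label tensor: the sequence is
    shifted one position to the right, ``decoder_start_token_id`` is prepended, and any ``-100`` that
    survives the shift is replaced by ``pad_token_id``.
    """
    labels = torch.tensor([[5, 6, -100, 7]])
    # e.g. with decoder_start_token_id=0 and pad_token_id=0 this returns tensor([[0, 5, 6, 0]])
    return model._shift_right(labels)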
class T5Embeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
# In Megatron, layer-norm is applied after the 1st dropout.
# self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.dropout_rate)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(
config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(
config, "position_embedding_type", "absolute")
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:,
past_key_values_length: seq_length + past_key_values_length]
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
embeddings = inputs_embeds
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
# Megatron BERT moves that layer norm after the drop-out (and to each layer).
# embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
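def _t5_embeddings_example(embeddings: T5Embeddings) -> torch.Size:
    """Illustrative sketch added for documentation; not used by the model code in this file.
    With ``position_embedding_type="absolute"`` the position embeddings are simply added to the word
    embeddings (BERT-style), so the output keeps the ``(batch_size, seq_len, hidden_size)`` shape.
    """
    input_ids = torch.zeros((2, 8), dtype=torch.long)
    return embeddings(input_ids=input_ids).shape  # e.g. torch.Size([2, 8, config.hidden_size])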
class T5Stack(T5PreTrainedModel):
def __init__(self, config, embed_tokens=None):
super().__init__(config)
self.embed_tokens = embed_tokens
self.is_decoder = config.is_decoder
# @IDEA modified -> has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)
# -> has_relative_attention_bias=False
self.block = nn.ModuleList(
[T5Block(config, has_relative_attention_bias=False)
for _ in range(config.num_layers)]
)
# @IDEA modified -> T5LayerNorm -> nn.LayerNorm
# self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.final_layer_norm = nn.LayerNorm(
config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
self.gradient_checkpointing = False
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
# Check validity of device_map
self.device_map = (
get_device_map(len(self.block), range(
torch.cuda.device_count())) if device_map is None else device_map
)
assert_device_map(self.device_map, len(self.block))
self.model_parallel = True
self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + \
str(min(self.device_map.keys()))
self.last_device = "cuda:" + str(max(self.device_map.keys()))
# Load onto devices
for k, v in self.device_map.items():
for layer in v:
cuda_device = "cuda:" + str(k)
self.block[layer] = self.block[layer].to(cuda_device)
# Set embed_tokens to first layer
self.embed_tokens = self.embed_tokens.to(self.first_device)
# Set final layer norm to last device
self.final_layer_norm = self.final_layer_norm.to(self.last_device)
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def deparallelize(self):
self.model_parallel = False
self.device_map = None
self.first_device = "cpu"
self.last_device = "cpu"
for i in range(len(self.block)):
self.block[i] = self.block[i].to("cpu")
self.embed_tokens = self.embed_tokens.to("cpu")
self.final_layer_norm = self.final_layer_norm.to("cpu")
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(
self,
input_ids=None,
position_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
# Model parallel
if self.model_parallel:
torch.cuda.set_device(self.first_device)
self.embed_tokens = self.embed_tokens.to(self.first_device)
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")
if inputs_embeds is None:
assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
            # @IDEA modified -> self.embed_tokens(input_ids=input_ids) ->
            # self.embed_tokens(input_ids=input_ids, position_ids=position_ids)
            # inputs_embeds = self.embed_tokens(input_ids=input_ids)
            inputs_embeds = self.embed_tokens(
                input_ids=input_ids, position_ids=position_ids)
batch_size, seq_length = input_shape
# required mask seq length can be calculated via length of past
mask_seq_length = past_key_values[0][0].shape[2] + \
seq_length if past_key_values is not None else seq_length
if use_cache is True:
assert self.is_decoder, f":obj:`use_cache` can only be set to `True` if {self} is used as a decoder"
if attention_mask is None:
attention_mask = torch.ones(
batch_size, mask_seq_length).to(inputs_embeds.device)
if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = torch.ones(
batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
)
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.block)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(
attention_mask, input_shape, inputs_embeds.device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (
encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(
encoder_hidden_shape, device=inputs_embeds.device)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
cross_attn_head_mask = self.get_head_mask(
cross_attn_head_mask, self.config.num_layers)
present_key_value_states = () if use_cache else None
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_cross_attentions = () if (output_attentions and self.is_decoder) else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
layer_head_mask = head_mask[i]
cross_attn_layer_head_mask = cross_attn_head_mask[i]
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if position_bias is not None:
position_bias = position_bias.to(hidden_states.device)
if encoder_hidden_states is not None:
encoder_hidden_states = encoder_hidden_states.to(
hidden_states.device)
if encoder_extended_attention_mask is not None:
encoder_extended_attention_mask = encoder_extended_attention_mask.to(
hidden_states.device)
if encoder_decoder_position_bias is not None:
encoder_decoder_position_bias = encoder_decoder_position_bias.to(
hidden_states.device)
if layer_head_mask is not None:
layer_head_mask = layer_head_mask.to(hidden_states.device)
if cross_attn_layer_head_mask is not None:
cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(
hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
if use_cache:
                    logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return tuple(module(*inputs, use_cache, output_attentions))
return custom_forward
layer_outputs = checkpoint(
create_custom_forward(layer_module),
hidden_states,
extended_attention_mask,
position_bias,
encoder_hidden_states,
encoder_extended_attention_mask,
encoder_decoder_position_bias,
layer_head_mask,
cross_attn_layer_head_mask,
None, # past_key_value is always None with gradient checkpointing
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
layer_head_mask=layer_head_mask,
cross_attn_layer_head_mask=cross_attn_layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, (self-attention position bias), (self-attention weights),
# (cross-attention position bias), (cross-attention weights)
if use_cache is False:
layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
hidden_states, present_key_value_state = layer_outputs[:2]
# We share the position biases between the layers - the first layer store them
# layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
# (cross-attention position bias), (cross-attention weights)
position_bias = layer_outputs[2]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
# append next layer key value states
if use_cache:
present_key_value_states = present_key_value_states + \
(present_key_value_state,)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[3],)
if self.is_decoder:
all_cross_attentions = all_cross_attentions + \
(layer_outputs[5],)
# Model Parallel: If it's the last layer for that device, put things on the next device
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and "cuda:" + str(k) != self.last_device:
hidden_states = hidden_states.to("cuda:" + str(k + 1))
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
present_key_value_states,
all_hidden_states,
all_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=present_key_value_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
cross_attentions=all_cross_attentions,
)
T5_START_DOCSTRING = r"""
The T5 model was proposed in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer
<https://arxiv.org/abs/1910.10683>`__ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang,
Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text
denoising generative setting.
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
T5_INPUTS_DOCSTRING = """
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using :class:`~transformers.T5Tokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
detail.
`What are input IDs? <../glossary.html#input-ids>`__
To know more on how to prepare :obj:`input_ids` for pretraining take a look a `T5 Training
<./T5.html#training>`__.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.T5Tokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are decoder input IDs? <../glossary.html#decoder-input-ids>`__
T5 uses the :obj:`pad_token_id` as the starting token for :obj:`decoder_input_ids` generation. If
:obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
To know more on how to prepare :obj:`decoder_input_ids` for pretraining take a look at `T5 Training
<./T5.html#training>`__.
decoder_attention_mask (:obj:`torch.BoolTensor` of shape
:obj:`(batch_size, target_sequence_length)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in ``[0,
1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or
:obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in ``[0,
1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_heads,)` or
:obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, :obj:`optional`: `hidden_states`, :obj:`optional`:
`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)` is a
sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of
the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having
4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)
`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`
have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert
:obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`
takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
T5_ENCODER_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using :class:`~transformers.T5Tokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
detail.
To know more on how to prepare :obj:`input_ids` for pretraining take a look a `T5 Training
<./T5.html#training>`__.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
# Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
__HEAD_MASK_WARNING_MSG = """
The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,
`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,
num_heads)`.
"""
class T5LMHead(nn.Module):
"""Masked LM head for T5
Arguments:
mpu_vocab_size: model parallel size of vocabulary.
hidden_size: hidden size
init_method: init method for weight initialization
layernorm_epsilon: tolerance for layer norm divisions
parallel_output: wether output logits being distributed or not.
"""
def __init__(self, config):
super(T5LMHead, self).__init__()
self.bias = torch.nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states, word_embeddings_weight):
output = torch.nn.functional.linear(hidden_states,
word_embeddings_weight,
bias=self.bias)
return output
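# Usage note (added comment, not part of the original file): with tied input/output embeddings this head
# would typically be called as
#   logits = lm_head(decoder_hidden_states, model.shared.word_embeddings.weight)
# giving logits of shape (batch_size, seq_len, vocab_size). T5ForConditionalGeneration below computes the
# same projection inline via torch.nn.functional.linear(..., bias=self.lm_head_bias) instead of
# instantiating this module.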
@add_start_docstrings(
    "The bare T5 Model transformer outputting raw hidden-states without any specific head on top.",
    T5_START_DOCSTRING,
)
class T5Model(T5PreTrainedModel):
_keys_to_ignore_on_load_missing = [
r"encoder\.embed_tokens\.weight",
r"decoder\.embed_tokens\.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight",
]
def __init__(self, config: T5Config):
super().__init__(config)
# @IDEA modified -> nn.Embedding -> T5Embeddings
# self.shared = nn.Embedding(config.vocab_size, config.d_model)
self.shared = T5Embeddings(config)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.encoder.block),
range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.decoder.parallelize(self.device_map)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.encoder.deparallelize()
self.decoder.deparallelize()
self.encoder = self.encoder.to("cpu")
self.decoder = self.decoder.to("cpu")
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example::
>>> from transformers import T5Tokenizer, T5Model
>>> tokenizer = T5Tokenizer.from_pretrained('T5-small')
>>> model = T5Model.from_pretrained('T5-small')
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you",
return_tensors="pt").input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> # forward pass
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(
encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(
encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
hidden_states = hidden_states.to(self.decoder.first_device)
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.to(
self.decoder.first_device)
if attention_mask is not None:
attention_mask = attention_mask.to(self.decoder.first_device)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.to(
self.decoder.first_device)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings("""T5 Model with a `language modeling` head on top. """, T5_START_DOCSTRING)
class T5ForConditionalGeneration(T5PreTrainedModel):
_keys_to_ignore_on_load_missing = [
r"encoder\.embed_tokens\.weight",
r"decoder\.embed_tokens\.weight",
r"lm_head\.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight",
]
def __init__(self, config):
super().__init__(config)
self.model_dim = config.d_model
# @IDEA modified -> nn.Embedding -> T5Embeddings
# self.shared = nn.Embedding(config.vocab_size, config.d_model)
self.shared = T5Embeddings(config)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
# @IDEA modified -> add self.lm_head_bias
self.lm_head_bias = torch.nn.Parameter(torch.zeros(config.vocab_size))
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.encoder.block),
range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.decoder.parallelize(self.device_map)
        self.lm_head_bias.data = self.lm_head_bias.data.to(self.decoder.first_device)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.encoder.deparallelize()
self.decoder.deparallelize()
self.encoder = self.encoder.to("cpu")
self.decoder = self.decoder.to("cpu")
        self.lm_head_bias.data = self.lm_head_bias.data.to("cpu")
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def get_output_embeddings(self):
return self.lm_head_bias
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
    def generate(self, input_ids=None, max_length=512):
        # Simple sampling-based decoding: re-run the model each step and sample the next token from the
        # softmax distribution until [EOS] is produced or max_length is reached.
        input_ids = torch.tensor(input_ids)
        if len(input_ids.shape) < 2:
            input_ids = input_ids.unsqueeze(0)
        decode_input_id = [21128]  # token id of [BOS] is 21128
        for _ in range(max_length):
            tensor_decode_input_id = torch.tensor([decode_input_id])
            forward_output = self.forward(input_ids=input_ids,
                                          decoder_input_ids=tensor_decode_input_id)
            logits = forward_output.logits
            logits = torch.nn.functional.softmax(
                logits, dim=-1).cpu().detach().numpy()[0]
            last_output_id = int(np.random.choice(
                logits.shape[1], p=logits[-1]))
            if last_output_id == 21129:  # token id of [EOS] is 21129
                break
            else:
                decode_input_id.append(last_output_id)
        return decode_input_id
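    # Usage sketch (added comment, not part of the original file): `generate` expects raw token ids from
    # the tokenizer associated with the checkpoint (whose vocabulary maps [BOS]->21128 and [EOS]->21129),
    # for example:
    #   input_ids = tokenizer.encode(text)                # `tokenizer` is a hypothetical variable
    #   output_ids = model.generate(input_ids, max_length=64)
    #   text = tokenizer.decode(output_ids[1:])           # drop the leading [BOS] id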
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ...,
config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for
labels in ``[0, ..., config.vocab_size]``
Returns:
Examples::
>>> from transformers import T5Tokenizer, T5ForConditionalGeneration
>>> tokenizer = T5Tokenizer.from_pretrained('T5-small')
>>> model = T5ForConditionalGeneration.from_pretrained('T5-small')
>>> # training
>>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids
>>> labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2>', return_tensors='pt').input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> # inference
>>> input_ids = tokenizer("summarize: studies have shown that owning a dog is good for you",
return_tensors="pt").input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
>>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
>>> # studies have shown that owning a dog is good for you.
"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(
encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(
encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
hidden_states = hidden_states.to(self.decoder.first_device)
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.to(
self.decoder.first_device)
if attention_mask is not None:
attention_mask = attention_mask.to(self.decoder.first_device)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.to(
self.decoder.first_device)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
        sequence_output = decoder_outputs[0]
# Set device for model parallelism
# if self.model_parallel:
# torch.cuda.set_device(self.encoder.first_device)
# self.lm_head = self.lm_head.to(self.encoder.first_device)
# sequence_output = sequence_output.to(self.lm_head.weight.device)
# if self.config.tie_word_embeddings:
# # Rescale output before projecting on vocab
# # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/
# mesh_tensorflow/transformer/transformer.py#L586
# sequence_output = sequence_output * (self.model_dim ** -0.5)
lm_logits = torch.nn.functional.linear(
sequence_output, self.shared.word_embeddings.weight, bias=self.lm_head_bias)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(
lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
# @IDEA modified(thom): Add z_loss https://github.com/tensorflow/mesh/blob/
# fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
if not return_dict:
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past=None,
attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"decoder_input_ids": input_ids,
"past_key_values": past,
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache,
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
def _reorder_cache(self, past, beam_idx):
# if decoder past is not included in output
# speedy decoding is disabled and no need to reorder
if past is None:
logger.warning(
"You might want to consider setting `use_cache=True` to speed up decoding")
return past
reordered_decoder_past = ()
for layer_past_states in past:
# get the correct batch idx from layer past batch dim
# batch dim of `past` is at 2nd position
reordered_layer_past_states = ()
for layer_past_state in layer_past_states:
# need to set correct `past` for each of the four key / value states
reordered_layer_past_states = reordered_layer_past_states + (
layer_past_state.index_select(
0, beam_idx.to(layer_past_state.device)),
)
assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
assert len(reordered_layer_past_states) == len(layer_past_states)
reordered_decoder_past = reordered_decoder_past + \
(reordered_layer_past_states,)
return reordered_decoder_past
@add_start_docstrings(
"The bare T5 Model transformer outputting encoder's raw hidden-states without any specific head on top.",
T5_START_DOCSTRING,
)
class T5EncoderModel(T5PreTrainedModel):
authorized_missing_keys = [
r"encoder\.embed_tokens\.weight",
]
def __init__(self, config: T5Config):
super().__init__(config)
# @IDEA modified -> nn.Embedding -> T5Embeddings
# self.shared = nn.Embedding(config.vocab_size, config.d_model)
self.shared = T5Embeddings(config)
encoder_config = copy.deepcopy(config)
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.encoder.block),
range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.encoder.deparallelize()
self.encoder = self.encoder.to("cpu")
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(T5_ENCODER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example::
>>> from transformers import T5Tokenizer, T5EncoderModel
>>> tokenizer = T5Tokenizer.from_pretrained('T5-small')
>>> model = T5EncoderModel.from_pretrained('T5-small')
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you",
return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids)
>>> last_hidden_states = outputs.last_hidden_state
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return encoder_outputs
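# End-to-end usage sketch (added comment, not part of the original file). The checkpoint path below is a
# placeholder: any directory holding a config and weights compatible with this modified T5 (BERT-style
# vocabulary and absolute position embeddings) would work.
#   >>> from transformers import BertTokenizer
#   >>> tokenizer = BertTokenizer.from_pretrained("path/to/checkpoint")
#   >>> model = T5ForConditionalGeneration.from_pretrained("path/to/checkpoint")
#   >>> input_ids = tokenizer("input text", return_tensors="pt").input_ids
#   >>> labels = tokenizer("target text", return_tensors="pt").input_ids
#   >>> loss = model(input_ids=input_ids, labels=labels).loss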
| 90,226 | 42.23287 | 143 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/roformer/configuration_roformer.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" RoFormer model configuration """
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
RoFormer_PRETRAINED_CONFIG_ARCHIVE_MAP = {
# See all RoFormer models at https://huggingface.co/models?filter=bert
}
class RoFormerConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.RoFormerModel`. It is
used to instantiate a RoFormer model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the RoFormer
`megatron-bert-uncased-345m <https://huggingface.co/nvidia/megatron-bert-uncased-345m>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 29056):
Vocabulary size of the RoFormer model. Defines the number of different tokens that can be represented
by the :obj:`inputs_ids` passed when calling :class:`~transformers.RoFormerModel`.
hidden_size (:obj:`int`, `optional`, defaults to 1024):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, `optional`, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, `optional`, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, `optional`, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, `optional`, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (:obj:`int`, `optional`, defaults to 2):
The vocabulary size of the :obj:`token_type_ids` passed when calling
:class:`~transformers.RoFormerModel`.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
The epsilon used by the layer normalization layers.
gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
If True, use gradient checkpointing to save memory at the expense of slower backward pass.
position_embedding_type (:obj:`str`, `optional`, defaults to :obj:`"absolute"`):
Type of position embedding. Choose one of :obj:`"absolute"`, :obj:`"relative_key"`,
:obj:`"relative_key_query"`. For positional embeddings use :obj:`"absolute"`. For more information on
:obj:`"relative_key"`, please refer to `Self-Attention with Relative Position Representations (Shaw et al.)
<https://arxiv.org/abs/1803.02155>`__. For more information on :obj:`"relative_key_query"`, please refer to
`Method 4` in `Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)
<https://arxiv.org/abs/2009.13658>`__.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if ``config.is_decoder=True``.
Examples::
>>> from transformers import RoFormerModel, RoFormerConfig
>>> # Initializing a RoFormer bert-base-uncased style configuration
>>> configuration = RoFormerConfig()
>>> # Initializing a model from the bert-base-uncased style configuration
>>> model = RoFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "roformer"
def __init__(
self,
vocab_size=29056,
hidden_size=1024,
num_hidden_layers=24,
num_attention_heads=16,
intermediate_size=4096,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
gradient_checkpointing=False,
position_embedding_type="absolute",
use_cache=True,
**kwargs
):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.gradient_checkpointing = gradient_checkpointing
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
| 6,869 | 50.268657 | 119 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/roformer/tokenization_roformer.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import BertTokenizer as RoFormerTokenizer
| 675 | 38.764706 | 74 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/roformer/__init__.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from transformers.file_utils import _LazyModule, is_torch_available
_import_structure = {
"configuration_roformer": ["RoFormerConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
if is_torch_available():
_import_structure["modeling_roformer"] = [
"RoFormerModel",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerPreTrainedModel",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
]
if TYPE_CHECKING:
from .configuration_roformer import RoFormerConfig
from .tokenization_roformer import RoFormerTokenizer
if is_torch_available():
from .modeling_roformer import (
RoFormerModel,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerPreTrainedModel,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
)
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__, globals()["__file__"], _import_structure)
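# Usage sketch (added comment, not part of the original file): with the lazy-module setup above the public
# classes can be imported directly from this package, assuming `fengshen` is importable, e.g.
#   >>> from fengshen.models.roformer import RoFormerConfig, RoFormerModel
#   >>> model = RoFormerModel(RoFormerConfig())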
| 1,785 | 29.793103 | 74 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/roformer/modeling_roformer.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch RoFormer model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from .configuration_roformer import RoFormerConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "RoFormerConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
_CHECKPOINT_FOR_DOC = "nvidia/megatron-bert-cased-345m"
RoFormer_PRETRAINED_MODEL_ARCHIVE_LIST = [
"nvidia/megatron-bert-cased-345m",
# See all RoFormer models at https://huggingface.co/models?filter=RoFormer
]
def load_tf_weights_in_RoFormer(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer",
"AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
class RoFormerEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        # @IDEA modified -> roformer removed the position_embedding, and adds the rotary position embedding in the self_attention_layer
# self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
# In Megatron, layer-norm is applied after the 1st dropout.
# self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(
config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(
config, "position_embedding_type", "absolute")
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:,
past_key_values_length: seq_length + past_key_values_length]
if token_type_ids is None:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
# @IDEA modified -> roformer removed the position_embedding
# if self.position_embedding_type == "absolute":
# position_embeddings = self.position_embeddings(position_ids)
# embeddings += position_embeddings
# Megatron BERT moves that layer norm after the drop-out (and to each layer).
# embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class RoPEmbedding(nn.Module):
def __init__(self, d_model):
super(RoPEmbedding, self).__init__()
self.d_model = d_model
div_term = torch.exp(torch.arange(
0, d_model, 2).float() * (-math.log(10000.0) / d_model))
self.register_buffer('div_term', div_term)
def forward(self, x, seq_dim=0):
        # x arrives as [b, np, s, hn] (e.g. query or key from transpose_for_scores);
        # the permute below moves the sequence dimension to the front.
x = x.permute(2, 1, 0, 3)
t = torch.arange(x.size(seq_dim), device=x.device).type_as(
self.div_term)
sinusoid_inp = torch.outer(t, self.div_term)
        sin, cos = sinusoid_inp.sin(), sinusoid_inp.cos()  # [s, hn/2]
        o_shape = (sin.size(0), 1, 1, sin.size(1))
        sin, cos = sin.view(*o_shape), cos.view(*o_shape)  # [s, 1, 1, hn/2]
sin = torch.repeat_interleave(sin, 2, dim=-1)
cos = torch.repeat_interleave(cos, 2, dim=-1)
x2 = torch.stack([-x[..., 1::2], x[..., ::2]], dim=-1).reshape_as(x)
x = cos * x + sin * x2
return x.permute(2, 1, 0, 3)
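def _rope_embedding_usage_sketch():
    # Illustrative sketch only, not part of the original file: it shows how the
    # RoPEmbedding module above is applied to a query/key tensor shaped
    # [batch, num_heads, seq_len, head_dim] (the layout produced by
    # transpose_for_scores). The tensor sizes here are made-up toy values.
    rope = RoPEmbedding(d_model=8)  # d_model is the per-head hidden size
    q = torch.randn(2, 4, 5, 8)     # [b, np, s, hn]
    # The output keeps the input shape; each (even, odd) feature pair is rotated by a
    # position-dependent angle, so rotated query/key dot products encode relative position.
    q_rot = rope(q)
    return q_rot.shape              # torch.Size([2, 4, 5, 8])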
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->RoFormer
class RoFormerSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(
config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(
config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(
2 * config.max_position_embeddings - 1, self.attention_head_size)
# @IDEA modified -> add rope positional embedding
self.rope_emb = RoPEmbedding(self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[
:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(
self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(
self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
# @IDEA modified -> add rope positional embedding
# print('query_layer.shape')
# print(query_layer.shape)
# query_layer.hsape -> [batch_size,num_head,seq_len,per_head_hidden_size]
query_layer = self.rope_emb(query_layer)
key_layer = self.rope_emb(key_layer)
attention_scores = torch.matmul(
query_layer, key_layer.transpose(-1, -2))
""" @IDEA modified -> removed the megatron positional
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
"""
attention_scores = attention_scores / \
math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in RoFormerModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[
:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (
context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
# Based on transformers.models.bert.modeling_bert.BertSelfOutput. Moved LayerNorm to RoFormerAttention below.
class RoFormerSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, residual):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return residual + hidden_states
# Based on transformers.models.bert.modeling_bert.BertAttention. Added LayerNorm.
class RoFormerAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.self = RoFormerSelfAttention(config)
self.output = RoFormerSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - \
len(heads)
self.self.all_head_size = self.self.attention_head_size * \
self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
ln_outputs = self.ln(hidden_states)
self_outputs = self.self(
ln_outputs,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
# add attentions if we output them
outputs = (attention_output,) + self_outputs[1:]
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->RoFormer
class RoFormerIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Based on transformers.models.bert.modeling_bert.BertOutput. Moved LayerNorm to RoFormerLayer below.
class RoFormerOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return input_tensor + hidden_states
# Based on transformers.models.bert.modeling_bert.BertLayer. Added LayerNorm.
class RoFormerLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = RoFormerAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = RoFormerAttention(config)
self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.intermediate = RoFormerIntermediate(config)
self.output = RoFormerOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
# add self attentions if we output attention weights
outputs = self_attention_outputs[1:]
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
# add cross attentions if we output attention weights
outputs = outputs + cross_attention_outputs[1:-1]
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
ln_output = self.ln(attention_output)
intermediate_output = self.intermediate(ln_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def roformer_extended_attention_mask(attention_mask, tokentype_ids):
# copy from bert_model.py and
# https://github.com/bojone/bert4keras/blob/8836dc01fa99aa54947a15db5aa60a0ab6c0c036/bert4keras/models.py#L382
# We create a 3D attention mask from a 2D tensor mask.
# [b, 1, s]
attention_mask_b1s = attention_mask.unsqueeze(1)
# [b, s, 1]
attention_mask_bs1 = attention_mask.unsqueeze(2)
# [b, s, s]
padding_mask_bss = attention_mask_b1s * attention_mask_bs1
# Convert attention mask to binary:
padding_mask_bss = (padding_mask_bss < 0.5)
    # Build the bidirectional/unidirectional (UniLM-style) mask according to tokentype_ids.
    # Note that the less-than-or-equal sign of the original implementation is flipped here,
    # because in Megatron's mask convention non-masked positions are 0 and masked positions are 1.
idx = torch.cumsum(tokentype_ids, dim=1)
causal_mask = idx[:, None, :] > idx[:, :, None]
    # Merge the two masks
mask = torch.logical_or(causal_mask, padding_mask_bss)
mask = mask.unsqueeze(1) # [b, 1, s, s]
return mask
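def _extended_attention_mask_sketch():
    # Illustrative sketch only, not part of the original file: it builds the
    # [b, 1, s, s] boolean mask returned by roformer_extended_attention_mask for a
    # toy batch. With token_type_ids = [0, 0, 1, 1], segment-A positions attend only
    # within segment A, while segment-B positions attend to A and to earlier B tokens
    # (the UniLM-style scheme referenced above); True marks blocked positions.
    attention_mask = torch.ones(1, 4)
    tokentype_ids = torch.tensor([[0, 0, 1, 1]])
    mask = roformer_extended_attention_mask(attention_mask, tokentype_ids)
    return mask.shape  # torch.Size([1, 1, 4, 4])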
class RoFormerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([RoFormerLayer(config)
for _ in range(config.num_hidden_layers)])
# The final layer norm. We removed the 1st LN, moved LN to each hidden layer and this one
# is simply the final LN (Transformer's BERT has it attached to each hidden layer).
self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
                    logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
            # Because we moved the layer-norm to the end of the hidden layer, we have
            # non-normalized data here. If that's really needed, we must apply LN to match Transformer's BERT.
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + \
(layer_outputs[2],)
# Finalize the hidden states.
hidden_states = self.ln(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->RoFormer
class RoFormerPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->RoFormer
class RoFormerPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(
config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->RoFormer
class RoFormerLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = RoFormerPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(
config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->RoFormer
class RoFormerOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = RoFormerLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->RoFormer
class RoFormerOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->RoFormer
class RoFormerPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = RoFormerLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class RoFormerPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RoFormerConfig
load_tf_weights = load_tf_weights_in_RoFormer
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(
mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@dataclass
# Copied from transformers.models.bert.modeling_bert.BertForPreTrainingOutput with Bert->RoFormer
class RoFormerForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.RoFormerForPreTraining`.
Args:
loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
seq_relationship_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
RoFormer_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.RoFormerConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
RoFormer_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top.",
RoFormer_START_DOCSTRING,
)
class RoFormerModel(RoFormerPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both the :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = RoFormerEmbeddings(config)
self.encoder = RoFormerEncoder(config)
self.pooler = RoFormerPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(RoFormer_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError(
"You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(
((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=device)
# @IDEA modified -> get_extended_attention_mask -> roformer_extended_attention_mask
extended_attention_mask = roformer_extended_attention_mask(
attention_mask, token_type_ids)
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
"""
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (
encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(
encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(
head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(
sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
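def _roformer_model_usage_sketch():
    # Illustrative sketch only, not part of the original file: it runs a small,
    # randomly initialized RoFormerModel on dummy ids just to show the expected
    # input/output shapes. The config values are made-up toy sizes and assume
    # RoFormerConfig accepts the standard BERT-style keyword arguments set in its
    # __init__ (vocab_size, hidden_size, ...).
    config = RoFormerConfig(vocab_size=100, hidden_size=64, num_hidden_layers=2,
                            num_attention_heads=4, intermediate_size=128)
    model = RoFormerModel(config)
    input_ids = torch.randint(0, 100, (2, 8))    # [batch_size, seq_len]
    outputs = model(input_ids=input_ids)
    return outputs.last_hidden_state.shape       # torch.Size([2, 8, 64])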
@add_start_docstrings(
"""
RoFormer Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
`next sentence prediction (classification)` head.
""",
RoFormer_START_DOCSTRING,
)
class RoFormerForPreTraining(RoFormerPreTrainedModel):
def __init__(self, config, add_binary_head=True):
super().__init__(config)
self.bert = RoFormerModel(config)
self.cls = RoFormerPreTrainingHeads(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(RoFormer_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=RoFormerForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Example::
>>> from transformers import BertTokenizer, RoFormerForPreTraining
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('nvidia/megatron-bert-cased-345m')
>>> model = RoFormerForPreTraining.from_pretrained('nvidia/megatron-bert-cased-345m')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(
sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(
seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return RoFormerForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""RoFormer Model with a `language modeling` head on top for CLM fine-tuning. """,
RoFormer_START_DOCSTRING,
)
class RoFormerForCausalLM(RoFormerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [
r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning(
"If you want to use `RoFormerForCausalLM` as a standalone, add `is_decoder=True.`")
self.bert = RoFormerModel(config, add_pooling_layer=False)
self.cls = RoFormerOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(RoFormer_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import BertTokenizer, RoFormerForCausalLM, RoFormerConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('nvidia/megatron-bert-cased-345m')
            >>> model = RoFormerForCausalLM.from_pretrained('nvidia/megatron-bert-cased-345m', is_decoder=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(
shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx)
for past_state in layer_past),)
return reordered_past
@add_start_docstrings("""RoFormer Model with a `language modeling` head on top. """, RoFormer_START_DOCSTRING)
class RoFormerForMaskedLM(RoFormerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler", r"seq_relationship"]
_keys_to_ignore_on_load_missing = [
r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RoFormerForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.bert = RoFormerModel(config, add_pooling_layer=False)
self.cls = RoFormerOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(RoFormer_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
attention_mask = torch.cat(
[attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
"""RoFormer Model with a `next sentence prediction (classification)` head on top. """,
RoFormer_START_DOCSTRING,
)
class RoFormerForNextSentencePrediction(RoFormerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"predictions"]
def __init__(self, config):
super().__init__(config)
self.bert = RoFormerModel(config)
self.cls = RoFormerOnlyNSPHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(RoFormer_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see ``input_ids`` docstring). Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Returns:
Example::
>>> from transformers import BertTokenizer, RoFormerForNextSentencePrediction
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('nvidia/megatron-bert-cased-345m')
>>> model = RoFormerForNextSentencePrediction.from_pretrained('nvidia/megatron-bert-cased-345m')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
"""
if "next_sentence_label" in kwargs:
warnings.warn(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("next_sentence_label")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(
seq_relationship_scores.view(-1, 2), labels.view(-1))
if not return_dict:
output = (seq_relationship_scores,) + outputs[2:]
return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
return NextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
RoFormer_START_DOCSTRING,
)
class RoFormerForSequenceClassification(RoFormerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = RoFormerModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(RoFormer_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss);
            if :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(
logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
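# Minimal sketch (illustrative tensors only) of the two label regimes handled by the
# forward above: num_labels == 1 switches to MSE regression on float labels, while
# num_labels > 1 uses cross-entropy on integer class indices in [0, num_labels - 1].
def _example_sequence_classification_losses():
    # regression head: num_labels == 1, float labels of shape (batch_size,)
    regression_logits = torch.randn(4, 1)
    regression_labels = torch.randn(4)
    mse = MSELoss()(regression_logits.view(-1), regression_labels.view(-1))
    # classification head: num_labels > 1, class indices of shape (batch_size,)
    classification_logits = torch.randn(4, 3)
    classification_labels = torch.tensor([0, 2, 1, 1])
    ce = CrossEntropyLoss()(classification_logits.view(-1, 3), classification_labels.view(-1))
    return mse, ce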
@add_start_docstrings(
"""
RoFormer Model with a multiple choice classification head on top (a linear layer on top of the pooled output
and a softmax) e.g. for RocStories/SWAG tasks.
""",
RoFormer_START_DOCSTRING,
)
class RoFormerForMultipleChoice(RoFormerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = RoFormerModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(
RoFormer_INPUTS_DOCSTRING.format(
"batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
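# Minimal sketch (illustrative shapes only) of the flattening convention used by the
# multiple-choice head above: inputs of shape (batch_size, num_choices, seq_len) are
# flattened to (batch_size * num_choices, seq_len) before the encoder, and the single
# score per choice is reshaped back to (batch_size, num_choices) for the cross-entropy.
def _example_multiple_choice_reshape():
    batch_size, num_choices, seq_len = 2, 4, 8
    input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))
    flat_input_ids = input_ids.view(-1, input_ids.size(-1))     # (8, 8)
    per_choice_scores = torch.randn(flat_input_ids.size(0), 1)  # one score per choice
    reshaped_logits = per_choice_scores.view(-1, num_choices)   # (2, 4)
    labels = torch.tensor([1, 3])                               # index of the correct choice
    return CrossEntropyLoss()(reshaped_logits, labels)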
@add_start_docstrings(
"""
RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
for Named-Entity-Recognition (NER) tasks.
""",
RoFormer_START_DOCSTRING,
)
class RoFormerForTokenClassification(RoFormerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = RoFormerModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(RoFormer_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(
loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(
logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
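# Minimal sketch (illustrative values only) of the active-loss masking used in the token
# classification forward above: padded positions (attention_mask == 0) have their labels
# replaced by the loss function's ignore_index so they do not contribute to the loss.
def _example_token_classification_masking(num_labels=3):
    logits = torch.randn(2, 4, num_labels)                  # (batch, seq_len, num_labels)
    labels = torch.tensor([[0, 1, 2, 2], [1, 0, 2, 1]])
    attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
    loss_fct = CrossEntropyLoss()
    active_loss = attention_mask.view(-1) == 1
    active_labels = torch.where(
        active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
    )
    return loss_fct(logits.view(-1, num_labels), active_labels)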
@add_start_docstrings(
"""
    RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
    linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
RoFormer_START_DOCSTRING,
)
class RoFormerForQuestionAnswering(RoFormerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = RoFormerModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(RoFormer_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| 82,128 | 41.009719 | 213 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/layers/flash_attention.py
|
# Based on: https://github.com/HazyResearch/flash-attention/blob/4a6eaa9f27df6fff7ffb2c24e894938a687dd870/flash_attn/flash_attn_interface.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import flash_attn_cuda
def _flash_attn_forward(
q,
k,
v,
out,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
dropout_p,
softmax_scale,
causal,
return_softmax,
num_splits=0,
generator=None,
):
"""
num_splits: how much to parallelize over the seqlen_q dimension. num_splits=0 means
it will be set by an internal heuristic. We're exposing num_splits mostly for benchmarking.
Don't change it unless you know what you're doing.
"""
softmax_lse, *rest = flash_attn_cuda.fwd(
q,
k,
v,
out,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
dropout_p,
softmax_scale,
False,
causal,
return_softmax,
num_splits,
generator,
)
# if out.isnan().any() or softmax_lse.isnan().any():
# breakpoint()
S_dmask = rest[0] if return_softmax else None
return out, softmax_lse, S_dmask
def _flash_attn_backward(
dout,
q,
k,
v,
out,
softmax_lse,
dq,
dk,
dv,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
dropout_p,
softmax_scale,
causal,
num_splits=0,
generator=None,
):
"""
num_splits: whether to parallelize over the seqlen_k dimension (num_splits > 1) or
not (num_splits = 1). num_splits=0 means it will be set by an internal heuristic.
Any value above 1 will call the same kernel (i.e. num_splits=2 would call the same kernel
as num_splits=3), so effectively the choices are 0, 1, and 2.
    This hyperparameter can be tuned for performance, but the default value (heuristic) should work fine.
"""
_, _, _, softmax_d = flash_attn_cuda.bwd(
dout,
q,
k,
v,
out,
softmax_lse,
dq,
dk,
dv,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
dropout_p,
softmax_scale,
False,
causal,
num_splits,
generator,
)
# if dk.isnan().any() or dk.isnan().any() or dv.isnan().any() or softmax_d.isnan().any():
# breakpoint()
return dq, dk, dv, softmax_d
class FlashAttnQKVPackedFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
qkv,
cu_seqlens,
max_seqlen,
dropout_p,
softmax_scale,
causal,
return_softmax,
):
# Save rng_state because the backward pass will regenerate the dropout mask
rng_state = torch.cuda.get_rng_state() if dropout_p > 0 else None
if softmax_scale is None:
softmax_scale = qkv.shape[-1] ** (-0.5)
out, softmax_lse, S_dmask = _flash_attn_forward(
qkv[:, 0],
qkv[:, 1],
qkv[:, 2],
torch.empty_like(qkv[:, 0]),
cu_seqlens,
cu_seqlens,
max_seqlen,
max_seqlen,
dropout_p,
softmax_scale,
causal=causal,
return_softmax=return_softmax,
)
ctx.save_for_backward(qkv, out, softmax_lse, cu_seqlens, rng_state)
ctx.dropout_p = dropout_p
ctx.max_seqlen = max_seqlen
ctx.softmax_scale = softmax_scale
ctx.causal = causal
return out if not return_softmax else (out, softmax_lse, S_dmask)
@staticmethod
def backward(ctx, dout, *args):
qkv, out, softmax_lse, cu_seqlens, rng_state = ctx.saved_tensors
if rng_state is not None:
cur_rng_state = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(rng_state)
dqkv = torch.empty_like(qkv)
_flash_attn_backward(
dout,
qkv[:, 0],
qkv[:, 1],
qkv[:, 2],
out,
softmax_lse,
dqkv[:, 0],
dqkv[:, 1],
dqkv[:, 2],
cu_seqlens,
cu_seqlens,
ctx.max_seqlen,
ctx.max_seqlen,
ctx.dropout_p,
ctx.softmax_scale,
ctx.causal,
)
if rng_state is not None:
torch.cuda.set_rng_state(cur_rng_state)
return dqkv, None, None, None, None, None, None
def flash_attn_unpadded_qkvpacked_func(
qkv,
cu_seqlens,
max_seqlen,
dropout_p,
softmax_scale=None,
causal=False,
return_attn_probs=False,
):
return FlashAttnQKVPackedFunc.apply(
qkv, cu_seqlens, max_seqlen, dropout_p, softmax_scale, causal, return_attn_probs
)
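# Minimal usage sketch for the packed-QKV wrapper above (illustrative shapes only).
# Assumes a CUDA device with the flash_attn_cuda extension built; qkv follows the
# "unpadded" convention (total_tokens, 3, num_heads, head_dim) in fp16/bf16, and
# cu_seqlens holds the cumulative sequence lengths of the packed batch.
def _example_flash_attn_qkvpacked(batch_size=2, seqlen=128, num_heads=16, head_dim=64):
    device = torch.device("cuda")
    qkv = torch.randn(batch_size * seqlen, 3, num_heads, head_dim,
                      dtype=torch.float16, device=device)
    cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen,
                              dtype=torch.int32, device=device)
    return flash_attn_unpadded_qkvpacked_func(
        qkv, cu_seqlens, seqlen, dropout_p=0.0, causal=True
    )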
| 4,804 | 24.833333 | 140 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/layers/positional_embeddings.py
|
# Copyright (c) 2021, EleutherAI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import math
class SinusoidalPositionalEmbedding(torch.nn.Module):
def __init__(self, dim, base=10000, precision=torch.half):
super().__init__()
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
self.precision = precision
def forward(self, x, seq_dim=1):
t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq)
sinusoid_inp = torch.einsum("i,j->ij", t, self.inv_freq)
if self.precision == torch.bfloat16:
sinusoid_inp = sinusoid_inp.float()
sin, cos = sinusoid_inp.sin(), sinusoid_inp.cos()
if self.precision == torch.bfloat16:
sin, cos = sin.bfloat16(), cos.bfloat16()
emb = torch.cat((sin, cos), dim=-1)
return emb[None, :, :]
class RotaryEmbedding(torch.nn.Module):
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
super().__init__()
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
self.register_buffer("inv_freq", inv_freq)
# Build here to make `torch.jit.trace` work.
self.max_seq_len_cached = max_position_embeddings
t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device,
dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
def forward(self, x, seq_len=None):
# x: [seq_len, bs, num_attention_heads, head_size]
# This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
if seq_len > self.max_seq_len_cached:
self.max_seq_len_cached = seq_len
t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
return (
self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
# q: [seq_len, bs, num_attention_heads, head_size]
gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1]
gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices).contiguous()
sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices).contiguous()
cos, sin = cos.permute(2, 0, 1, 3).contiguous(), sin.permute(2, 0, 1, 3).contiguous()
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
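# Minimal usage sketch (illustrative shapes only): rotary embeddings applied to
# query/key tensors in the [seq_len, batch, num_heads, head_dim] layout that
# apply_rotary_pos_emb above expects.
def _example_rotary(seq_len=16, batch=2, heads=4, head_dim=32):
    rotary = RotaryEmbedding(head_dim, max_position_embeddings=seq_len)
    q = torch.randn(seq_len, batch, heads, head_dim)
    k = torch.randn(seq_len, batch, heads, head_dim)
    cos, sin = rotary(k, seq_len=seq_len)
    position_ids = torch.arange(seq_len).unsqueeze(0).expand(batch, -1)
    return apply_rotary_pos_emb(q, k, cos, sin, position_ids)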
class AliBi(torch.nn.Module):
def __init__(self, num_heads, mp_size=1, mp_rank=1):
super().__init__()
# megatron splits across heads, so we need to make sure each
# head receives the correct matrix
assert mp_size <= num_heads and mp_rank <= mp_size
self.mp_size = mp_size
self.mp_rank = mp_rank
self.num_heads = num_heads
self.slice_size = num_heads // mp_size
self.cached_matrix = None
self.cached_seq_len = None
slopes = torch.Tensor(self._get_slopes(num_heads))[
mp_rank * self.slice_size: (mp_rank + 1) * self.slice_size
]
self.register_buffer("slopes", slopes)
def _get_slopes(self, n):
"""
Get slopes for Alibi positional embedding
n : int = number of heads.
For best performance, restrict n to a power of 2.
"""
def get_slopes_power_of_2(n):
start = 2 ** (-(2 ** -(math.log2(n) - 3)))
ratio = start
return [start * ratio**i for i in range(n)]
if math.log2(n).is_integer():
return get_slopes_power_of_2(n)
else:
closest_power_of_2 = 2 ** math.floor(math.log2(n))
return (
get_slopes_power_of_2(closest_power_of_2)
+ self._get_slopes(2 * closest_power_of_2)[0::2][
: n - closest_power_of_2
]
)
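    # Worked example: for n = 8 heads, _get_slopes returns the geometric sequence
    # 2**-1, 2**-2, ..., 2**-8, i.e. [0.5, 0.25, ..., 0.00390625]. For a head count
    # that is not a power of 2, the sequence for the closest lower power of 2 is
    # interleaved with every other slope of the next power of 2, as in the ALiBi paper.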
def forward(self, x):
# [b, np, sq, sk]
seq_len_q = x.shape[-2]
seq_len_k = x.shape[-1]
# Initialize the AliBi matrix to match the first provided key length; grow it exponentially
# afterwards if longer inputs are provided. This is important for inference, where we will
# encounter progressively longer samples; it should have no effect at training time.
if self.cached_seq_len is not None and self.cached_seq_len >= seq_len_k:
a = self.cached_matrix
else:
target_seq_len = (
seq_len_k if self.cached_seq_len is None else self.cached_seq_len * 4
)
a = -torch.tril(
torch.arange(target_seq_len)
.view(target_seq_len, 1)
.repeat(1, target_seq_len)
+ torch.arange(0, -target_seq_len, -1)
)
a = a.to(x.device).to(x.dtype)
slopes = self.slopes.to(a.device).to(a.dtype)
a = a * slopes.view(self.slopes.shape[0], 1, 1)
self.cached_seq_len = target_seq_len
self.cached_matrix = a
# If the AliBi matrix is larger than the key length, clip it.
if self.cached_seq_len > seq_len_k:
a = self.cached_matrix[:, :seq_len_k, :seq_len_k]
if seq_len_q != seq_len_k:
# In the train case x has dimensionality [b, np, sq, sk] with sq == sk
# The number of query tokens is equal to the number of key tokens
# At inference time with cache in layer_past sq is not equal to sk. sq only contains one token (the last one in the full sequence)
# In this case we use the appropriate token index of the cache matrix.
            # Since the cached matrix can be larger than the current sequence (from a previous, longer
            # inference), row `seq_len_k - 1` is used rather than the last row of the cache.
assert (
seq_len_q == 1
), "assumption sq == sk unless at inference time with cache in layer_past with sq == 1"
a = a[:, seq_len_k - 1, :].view(
a.shape[0], 1, a.shape[2]
) # seq_len_k - 1 points to the last token index in the current inference batch.
return x + a
| 7,894 | 44.373563 | 142 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/layers/fused_bias_dropout.py
|
# Copyright (c) 2021, EleutherAI contributors
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from typing import Optional
from torch import Tensor
# flags required to enable jit fusion kernels
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
def bias_dropout_add(
x: Tensor, bias: Tensor, residual: Optional[Tensor], prob: float, training: bool
) -> Tensor:
out = torch.nn.functional.dropout(x + bias, p=prob, training=training)
if residual is not None:
out = residual + out
return out
def get_bias_dropout_add(training):
def _bias_dropout_add(x, bias, residual, prob):
return bias_dropout_add(x, bias, residual, prob, training)
return _bias_dropout_add
@torch.jit.script
def bias_dropout_add_fused_train(
x: Tensor, bias: Tensor, residual: Optional[Tensor], prob: float
) -> Tensor:
return bias_dropout_add(x, bias, residual, prob, True)
@torch.jit.script
def bias_dropout_add_fused_inference(
x: Tensor, bias: Tensor, residual: Optional[Tensor], prob: float
) -> Tensor:
return bias_dropout_add(x, bias, residual, prob, False)
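# Minimal sketch (illustrative tensors only): the fused functions above compute the
# same dropout(x + bias) + residual as the unfused closure returned by
# get_bias_dropout_add; in inference mode both reduce to x + bias + residual.
def _example_bias_dropout_add():
    x = torch.randn(4, 8)
    bias = torch.randn(8)
    residual = torch.randn(4, 8)
    unfused = get_bias_dropout_add(training=False)(x, bias, residual, 0.1)
    fused = bias_dropout_add_fused_inference(x, bias.expand_as(x), residual, 0.1)
    return torch.allclose(unfused, fused)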
| 1,871 | 32.428571 | 106 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/layers/utils.py
|
# Copyright (c) 2021 EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for models."""
import torch
from .norms import LayerNorm, RMSNorm, ScaleNorm
from .fused_softmax import SoftmaxFusionTypes
from types import GeneratorType
def get_attn_mask(seq_length, device):
"""
Get triangular attention mask for a given sequence length / device.
"""
# lower triangular attention mask
mask = torch.tril(torch.ones((1, seq_length, seq_length), device=device)).view(
1, 1, seq_length, seq_length
)
# convert to binary
return mask < 0.5
def get_ltor_masks_and_position_ids(
data,
eod_token,
eod_mask_loss=False,
):
"""Build masks and position id for left to right model."""
# Extract batch size and sequence length.
batch_size, seq_length = data.size()
# Attention mask (lower triangular).
attention_mask = get_attn_mask(
seq_length=seq_length,
device=data.device,
)
# Loss mask.
loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
if eod_mask_loss:
loss_mask[data == eod_token] = 0.0
# Position ids.
position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
position_ids = position_ids.unsqueeze(0).expand_as(data)
return attention_mask, loss_mask, position_ids
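# Minimal usage sketch (illustrative values only): building the causal mask, loss mask
# and position ids for a toy batch; with eod_mask_loss=True the loss mask is zeroed
# wherever the end-of-document token appears.
def _example_ltor_masks(eod_token=0):
    data = torch.tensor([[5, 7, 0, 9], [3, 0, 4, 2]])
    attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
        data, eod_token, eod_mask_loss=True
    )
    # attention_mask: (1, 1, 4, 4) boolean, True above the diagonal (positions to mask)
    # loss_mask:      zeros at the eod positions, ones elsewhere
    # position_ids:   [[0, 1, 2, 3], [0, 1, 2, 3]]
    return attention_mask, loss_mask, position_ids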
def exists(x):
return x is not None
class Lambda(torch.nn.Module):
def __init__(self, func):
super().__init__()
self.func = func
def forward(self, x):
return self.func(x)
class SequentialWrapper(torch.nn.Module):
"""
Used to convert a deepspeed PipelineModule to an nn.Sequential like model whilst retaining
activation checkpointing.
"""
def __init__(
self,
layers,
activation_checkpoint_interval,
activation_checkpoint_func,
parent_class_name=None,
):
super().__init__()
self.sequential = torch.nn.Sequential(*layers)
self.activation_checkpoint_interval = activation_checkpoint_interval
self.parent_class_name = parent_class_name
self.activation_checkpoint_func = activation_checkpoint_func
def _is_checkpointable(self, funcs):
params = [f.parameters() for f in funcs if isinstance(f, torch.nn.Module)]
return any(len(list(p)) > 0 for p in params)
def inference_mode(self, use_cache=True):
"""
        Sets up the model for inference by turning on k/v caching (if specified) and setting `parallel output` of the final layer to false,
        so logits are gathered across model parallel ranks.
        :param use_cache: (bool) True if you want to use caching during inference, False otherwise
        """
"""
_set_use_cache(self.sequential, use_cache)
def train_mode(self):
"""
Sets up the model for training by turning off k/v caching.
"""
_set_use_cache(self.sequential, False)
def forward(self, forward_input):
def exec_range_func(start, end):
"""Helper function to be used with checkpoint()
Adapted from torch.utils.checkpoint:checkpoint_sequential()
"""
def exec_func(*inputs):
# Single tensor inputs need to be unwrapped
if len(inputs) == 1:
inputs = inputs[0]
for idx, layer in enumerate(self.sequential[start:end]):
inputs = layer(inputs)
return inputs
return exec_func
if self.activation_checkpoint_interval == 0:
func = exec_range_func(0, len(self.sequential))
x = func(forward_input)
else:
num_layers = len(self.sequential)
x = forward_input
for start_idx in range(0, num_layers, self.activation_checkpoint_interval):
end_idx = min(
start_idx + self.activation_checkpoint_interval, num_layers
)
funcs = self.sequential[start_idx:end_idx]
# Since we either pass tensors or tuples of tensors without unpacking, we
# need to be careful not to double-wrap tensors with tuple.
if not isinstance(x, tuple):
x = (x,)
if self._is_checkpointable(funcs):
x = self.activation_checkpoint_func(
exec_range_func(start_idx, end_idx), *x
)
else:
x = exec_range_func(start_idx, end_idx)(*x)
return x
def recursive_setattr(m, attr, value, assert_type=None, type_filter=None):
"""
Recursively set attributes on a pytorch module or an iterable of modules.
If an assert_type is provided, it will assert that the type of the value is the same as the assert_type.
If a type_filter is provided, it will only set attributes on modules that match that type.
"""
if assert_type is not None:
assert isinstance(value, assert_type), "Value is not the correct type."
# if m is a list or a generator, iterate over the elements
if isinstance(m, (list, GeneratorType)):
for i in m:
recursive_setattr(i, attr, value, assert_type, type_filter)
elif isinstance(m, torch.nn.Module):
if hasattr(m, attr):
if type_filter is None or isinstance(m, type_filter):
setattr(m, attr, value)
if hasattr(m, "children"):
recursive_setattr(m.children(), attr, value, assert_type, type_filter)
def _set_use_cache(modules, value: bool):
"""
    Recursively sets `use_cache` to `value` on a list of pytorch modules, if they have a `use_cache` attribute.
use_cache is used to decide whether we cache past key value activations or not in inference.
"""
recursive_setattr(modules, "use_cache", value, assert_type=bool)
def configure_sparse_attention(config, attention_type, num_attention_heads, mpu):
from deepspeed.ops.sparse_attention import (
SparseSelfAttention,
VariableSparsityConfig,
FixedSparsityConfig,
BigBirdSparsityConfig,
BSLongformerSparsityConfig,
)
from deepspeed.ops.sparse_attention.sparsity_config import (
LocalSlidingWindowSparsityConfig,
)
if attention_type == "sparse_fixed":
# you can think of local window size as `block_size` * `num_local_blocks`.
# so if you wanted to set a local window size of 256, set block size to 16 and `num_local_blocks` to 16
sparsity_config = FixedSparsityConfig(
num_heads=num_attention_heads,
block=config.sparsity_config.get("block", 16),
different_layout_per_head=config.sparsity_config.get(
"different_layout_per_head", False
),
num_local_blocks=config.sparsity_config.get("num_local_blocks", 4),
num_global_blocks=config.sparsity_config.get("num_global_blocks", 1),
num_different_global_patterns=config.sparsity_config.get(
"num_different_global_patterns", 1
),
attention="unidirectional",
horizontal_global_attention=False,
)
elif attention_type == "sparse_variable":
sparsity_config = VariableSparsityConfig(
num_heads=num_attention_heads,
block=config.sparsity_config.get("block", 16),
different_layout_per_head=config.sparsity_config.get(
"different_layout_per_head", False
),
num_random_blocks=config.sparsity_config.get("num_random_blocks", 0),
local_window_blocks=config.sparsity_config.get(
"local_window_blocks", [4]
),
global_block_indices=config.sparsity_config.get(
"global_block_indices", [0]
),
global_block_end_indices=config.sparsity_config.get(
"global_block_end_indices", None
),
attention="unidirectional",
horizontal_global_attention=False,
)
elif attention_type == "local":
# can configure with `num_local_blocks` or `num_sliding_window_blocks`
num_local_blocks = config.sparsity_config.get(
"num_local_blocks",
config.sparsity_config.get("num_sliding_window_blocks", 4),
)
sparsity_config = LocalSlidingWindowSparsityConfig(
num_heads=num_attention_heads,
block=config.sparsity_config.get("block", 16),
num_sliding_window_blocks=num_local_blocks,
attention="unidirectional",
)
elif attention_type == "bigbird":
sparsity_config = BigBirdSparsityConfig(
num_heads=num_attention_heads,
block=config.sparsity_config.get("block", 16),
different_layout_per_head=config.sparsity_config.get(
"different_layout_per_head", False
),
num_random_blocks=config.sparsity_config.get("num_random_blocks", 1),
num_sliding_window_blocks=config.sparsity_config.get(
"num_sliding_window_blocks", 3
),
num_global_blocks=config.sparsity_config.get("num_global_blocks", 1),
attention="unidirectional",
)
elif attention_type == "bslongformer":
sparsity_config = BSLongformerSparsityConfig(
num_heads=num_attention_heads,
block=config.sparsity_config.get("block", 16),
different_layout_per_head=config.sparsity_config.get(
"different_layout_per_head", False
),
num_sliding_window_blocks=config.sparsity_config.get(
"num_sliding_window_blocks", 3
),
global_block_indices=config.sparsity_config.get(
"global_block_indices", [0]
),
global_block_end_indices=config.sparsity_config.get(
"global_block_end_indices", None
),
attention="unidirectional",
)
else:
raise ValueError(f"Attention type {attention_type} not recognized")
return SparseSelfAttention(
sparsity_config=sparsity_config,
max_seq_length=config.max_position_embeddings,
attn_mask_mode="add",
mpu=mpu,
)
def get_fusion_type(config):
fusion_type = SoftmaxFusionTypes.none
if config.scaled_upper_triang_masked_softmax_fusion:
fusion_type = SoftmaxFusionTypes.upper_triang
elif config.scaled_masked_softmax_fusion:
fusion_type = SoftmaxFusionTypes.general
return fusion_type
| 11,316 | 37.104377 | 139 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/layers/norms.py
|
# Copyright (c) 2021, EleutherAI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.nn import LayerNorm as LayerNorm
from torch import nn
def get_norm(config):
if config.norm == "rmsnorm":
norm = RMSNorm
eps = config.rms_norm_epsilon
elif config.norm == "layernorm":
eps = config.layer_norm_eps
norm = LayerNorm
elif config.norm == "scalenorm":
eps = config.scalenorm_epsilon
norm = ScaleNorm
else:
raise ValueError(f"norm {config.norm} not recognized")
return norm, eps
class RMSNorm(torch.nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
LlamaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.scale = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.scale.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.scale.dtype)
return self.scale * hidden_states
class ScaleNorm(torch.nn.Module):
def __init__(self, dim, eps=1e-5):
super().__init__()
self.g = torch.nn.Parameter(torch.ones(1))
self.eps = eps
def forward(self, x):
n = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps)
return x / n * self.g
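# Minimal sketch (illustrative tensors only): the three norms returned by get_norm are
# interchangeable at call time -- each is constructed with (size, eps) and normalizes
# over the last dimension.
def _example_norms(hidden_size=16):
    x = torch.randn(2, 4, hidden_size)
    rms_out = RMSNorm(hidden_size)(x)
    scale_out = ScaleNorm(hidden_size)(x)
    layer_out = LayerNorm(hidden_size)(x)
    return rms_out.shape == scale_out.shape == layer_out.shape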
| 2,075 | 31.4375 | 85 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/layers/init_functions.py
|
# Copyright (c) 2021, EleutherAI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
def init_method_normal(sigma):
"""Init method based on N(0, sigma)."""
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
return init_
def scaled_init_method_normal(sigma, num_layers):
"""Init method based on N(0, sigma/sqrt(2*num_layers)."""
std = sigma / math.sqrt(2.0 * num_layers)
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
# orthogonal init does not support fp16, so have to patch it
def _orthogonal(tensor, gain=1):
if tensor.ndimension() < 2:
raise ValueError("Only tensors with 2 or more dimensions are supported")
rows = tensor.size(0)
cols = tensor.numel() // rows
flattened = tensor.new(rows, cols).normal_(0, 1)
if rows < cols:
flattened.t_()
# Compute the qr factorization
dt = flattened.dtype
flattened = flattened.to(torch.float32) # orthogonal init does not support fp16
q, r = torch.qr(flattened)
q, r = q.to(dtype=dt), r.to(dtype=dt)
# Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
d = torch.diag(r, 0)
ph = d.sign()
q *= ph
if rows < cols:
q.t_()
with torch.no_grad():
tensor.view_as(q).copy_(q)
tensor.mul_(gain)
return tensor
def orthogonal_init_method(n_layers=1):
"""Fills the input Tensor with a (semi) orthogonal matrix, as described in
Exact solutions to the nonlinear dynamics of learning in deep linear neural networks - Saxe, A. et al. (2013)
    Optional scaling by the number of layers is possible, as introduced in OBST - Nestler et al. (2021, to be released)"""
def init_(tensor):
return _orthogonal(tensor, math.sqrt(2 / n_layers))
return init_
def xavier_uniform_init_method():
"""Fills the input Tensor with values according to the method described in Understanding the difficulty of
training deep feedforward neural networks - Glorot, X. & Bengio, Y. (2010), using a uniform distribution."""
def init_(tensor):
return torch.nn.init.xavier_uniform_(tensor)
return init_
def xavier_normal_init_method():
"""Fills the input Tensor with values according to the method described in Understanding the difficulty of
training deep feedforward neural networks - Glorot, X. & Bengio, Y. (2010), using a normal distribution."""
def init_(tensor):
return torch.nn.init.xavier_normal_(tensor)
return init_
def small_init_init_method(dim):
"""Fills the input Tensor with values according to the method described in Transformers without Tears: Improving
    the Normalization of Self-Attention - Nguyen, T. & Salazar, J. (2019), using a normal distribution."""
std = math.sqrt(2 / (5 * dim))
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
def wang_init_method(n_layers, dim):
std = 2 / n_layers / math.sqrt(dim)
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
def get_init_methods(args):
def _get(name):
if name == "normal":
return init_method_normal(args.init_method_std)
elif name == "scaled_normal":
return scaled_init_method_normal(args.init_method_std, args.num_hidden_layers)
elif name == "orthogonal":
return orthogonal_init_method()
elif name == "scaled_orthogonal":
return orthogonal_init_method(args.num_hidden_layers)
elif name == "xavier_uniform":
return xavier_uniform_init_method()
elif name == "xavier_normal":
return xavier_normal_init_method()
elif name == "wang_init":
return wang_init_method(args.num_hidden_layers, args.hidden_size)
elif name == "small_init":
return small_init_init_method(args.hidden_size)
else:
raise NotImplementedError(f"Unknown init method {name}")
return _get(args.init_method), _get(args.output_layer_init_method)
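# Worked example (illustrative numbers): "small_init" uses std = sqrt(2 / (5 * dim)),
# e.g. ~0.0228 for hidden_size = 768, while "scaled_normal" divides the base std by
# sqrt(2 * num_layers), e.g. 0.02 / sqrt(24) ~ 0.0041 for a 12-layer model.
def _example_init(hidden_size=768):
    weight = torch.empty(hidden_size, hidden_size)
    small_init_init_method(hidden_size)(weight)       # in-place normal init, std ~ 0.0228
    scaled_init_method_normal(0.02, 12)(weight)       # overwrites with std ~ 0.0041
    return weight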
| 4,653 | 31.545455 | 118 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/layers/transformer.py
|
# Copyright (c) 2021 EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer."""
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
from .norms import get_norm
from .. import mpu
from .fused_softmax import FusedScaleMaskSoftmax
from .activations import get_activation
from .utils import exists, get_fusion_type
from .positional_embeddings import (
RotaryEmbedding,
apply_rotary_pos_emb,
AliBi,
)
from .fused_bias_dropout import (
get_bias_dropout_add,
bias_dropout_add_fused_train,
bias_dropout_add_fused_inference,
)
from .utils import configure_sparse_attention
# flags required to enable jit fusion kernels
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
""" We use the following notation throughout this file:
h: hidden size
n: number of attention heads
p: number of model parallel partitions
np: n/p
hp: h/p
hn: h/n
b: batch size
s: sequence length
l: number of layers
Transformer takes input of size [s, b, h] and returns a
tensor of the same size. We use the following arguments:
hyperparameters: transformer hyperparameters
attention_mask_func: a function that takes `unmasked-attention-scores`
with size [b, np, s, s] and an `attention-mask` and will apply
the masking. The function should return a masked score of the
same size [b, np, s, s].
masked-attention-scores = attention_mask_func(
unmasked-attention-scores, attention-mask)
"""
class ParallelMLP(nn.Module):
"""MLP.
MLP will take the input with h hidden state, project it to 4*h
hidden dimension, perform nonlinear transformation, and project the
state back into h hidden dimension. At the end, dropout is also
applied.
"""
def __init__(
self, config, init_method, output_layer_init_method, parallel_output=False
):
super().__init__()
self.activation_func = get_activation(config)
self.activation_type = config.hidden_act
self.bias_gelu_fusion = config.bias_gelu_fusion
# auto scale so geglu has equal parameters
ff_mult = 4 * 2 / 3 if self.activation_type == "geglu" else 4
ff_dim = (
int(ff_mult * config.hidden_size) * 2
if self.activation_type == "geglu"
else ff_mult * config.hidden_size
)
self.dense_h_to_4h = mpu.ColumnParallelLinear(
config=config,
input_size=config.hidden_size,
output_size=ff_dim,
gather_output=False,
init_method=init_method,
skip_bias_add=True,
)
ff_dim_in = ff_dim // 2 if self.activation_type == "geglu" else ff_dim
# Project back to h.
self.dense_4h_to_h = mpu.RowParallelLinear(
config=config,
input_size=ff_dim_in,
output_size=config.hidden_size,
input_is_parallel=True,
init_method=output_layer_init_method,
skip_bias_add=True,
parallel_output=parallel_output,
)
def forward(self, hidden_states):
# [s, b, 4hp]
intermediate_parallel, bias_parallel = self.dense_h_to_4h(hidden_states)
if (
self.activation_type == "gelu" and self.bias_gelu_fusion
) or self.activation_type == "geglu":
intermediate_parallel = self.activation_func(
intermediate_parallel, bias_parallel
)
else:
intermediate_parallel = self.activation_func(
intermediate_parallel + bias_parallel
)
# [s, b, h]
output, output_bias = self.dense_4h_to_h(intermediate_parallel)
return output, output_bias
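# Worked example (illustrative numbers): the geglu auto-scaling above keeps parameter
# counts close to a plain 4x MLP. For hidden_size = 4096:
#   gelu : ff_dim = 4 * 4096 = 16384, ff_dim_in = 16384
#   geglu: ff_mult = 8/3,  ff_dim = int(8/3 * 4096) * 2 = 21844, ff_dim_in = 10922
# so after the gating only half of the dense_h_to_4h width feeds dense_4h_to_h.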
class ParallelLinear(nn.Module):
"""
A Parallel Linear Layer transforming the transformer outputs from hidden_size -> vocab_size
"""
def __init__(
self,
config,
parallel_output=True,
init_method=nn.init.xavier_normal_,
):
super().__init__()
parallelism = config.output_layer_parallelism
if parallelism == "column":
self.final_linear = mpu.ColumnParallelLinear(
config=config,
input_size=config.hidden_size,
output_size=config.vocab_size,
bias=False,
init_method=init_method,
gather_output=not parallel_output,
skip_bias_add=False,
)
else:
self.final_linear = mpu.RowParallelLinear(
config=config,
input_size=config.hidden_size,
output_size=config.vocab_size,
bias=False,
input_is_parallel=False,
init_method=init_method,
parallel_output=parallel_output,
skip_bias_add=False,
)
def forward(self, hidden_states):
return self.final_linear(hidden_states)
class ParallelSelfAttention(nn.Module):
"""Parallel self-attention layer abstract class.
Self-attention layer takes input with size [b, s, h]
and returns output of the same size.
"""
def __init__(
self,
config,
attention_mask_func,
init_method,
output_layer_init_method,
layer_number,
rpe=None,
rotary=False,
parallel_output=False,
):
super().__init__()
self.fp16 = config.torch_dtype == torch.half
self.bf16 = config.torch_dtype == torch.bfloat16
self.attention_mask_func = attention_mask_func
self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling
self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
if self.apply_query_key_layer_scaling:
self.attention_softmax_in_fp32 = True
self.layer_number = layer_number
# Per attention head and per partition values.
world_size = mpu.get_model_parallel_world_size()
self.hidden_size_per_partition = mpu.divide(config.hidden_size, world_size)
self.hidden_size_per_attention_head = mpu.divide(
config.hidden_size, config.num_attention_heads
)
self.num_attention_heads_per_partition = mpu.divide(
config.num_attention_heads, world_size
)
self.pos_emb = config.pos_emb
# Strided linear layer.
self.query_key_value = mpu.ColumnParallelLinear(
config=config,
input_size=config.hidden_size,
output_size=3 * config.hidden_size,
gather_output=False,
init_method=init_method,
bias=config.use_bias_in_attn_linear,
)
coeff = None
self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
if self.apply_query_key_layer_scaling:
coeff = max(1, self.layer_number)
self.norm_factor *= coeff
self.rpe = rpe
if self.pos_emb == "alibi":
self.alibi_embed = AliBi(
config.num_attention_heads,
config.model_parallel_size,
mpu.get_model_parallel_rank(),
)
# TODO: this arg shouldn't need to be passed in - get from config
if rotary:
if config.rotary_pct == 1:
self.rotary_ndims = None
else:
assert config.rotary_pct < 1
self.rotary_ndims = int(
self.hidden_size_per_attention_head * config.rotary_pct
)
dim = (
self.rotary_ndims
if self.rotary_ndims is not None
else self.hidden_size_per_attention_head
)
self.rotary_emb = RotaryEmbedding(
dim, base=config.rotary_emb_base, max_position_embeddings=config.max_position_embeddings
)
else:
self.rotary_emb = None
self.attention_type = config.attention_config[layer_number]
self.use_flash_attention = self.attention_type == "flash"
self.sparse = self.attention_type != "global" and not self.use_flash_attention
if self.sparse:
self.sparse_attn = configure_sparse_attention(
config,
self.attention_type,
self.num_attention_heads_per_partition,
mpu=mpu,
)
else:
if self.use_flash_attention:
from .flash_attention import (
flash_attn_unpadded_qkvpacked_func,
)
self.flash_attention_function = flash_attn_unpadded_qkvpacked_func
if self.pos_emb == "alibi":
raise ValueError(
"Flash attention is currently not compatible with AliBi positional embeddings. Use sinuisoidal, learned, or rotary embeddings instead."
)
self.scale_mask_softmax = FusedScaleMaskSoftmax(
input_in_fp16=self.fp16,
input_in_bf16=self.bf16,
fusion_type=get_fusion_type(config),
mask_func=self.attention_mask_func,
softmax_in_fp32=self.attention_softmax_in_fp32,
scale=coeff,
)
# Dropout. Note that for a single iteration, this layer will generate
# different outputs on different number of parallel partitions but
# on average it should not be partition dependent.
self.dropout_p = config.attention_dropout
self.attention_dropout = nn.Dropout(self.dropout_p)
# Output.
self.dense = mpu.RowParallelLinear(
config=config,
input_size=config.hidden_size,
output_size=config.hidden_size,
input_is_parallel=True,
init_method=output_layer_init_method,
skip_bias_add=True,
parallel_output=parallel_output,
bias=config.use_bias_in_attn_linear,
)
def attention(
self, query_layer, key_layer, value_layer, layer_past, attention_mask, use_cache=False
):
# ===================================
# Raw attention scores. [b, np, s, s]
# ===================================
# [b, np, sq, sk]
output_size = (
query_layer.size(1),
query_layer.size(2),
query_layer.size(0),
key_layer.size(0),
)
# [sq, b, np, hn] -> [sq, b * np, hn]
query_layer = query_layer.view(
output_size[2], output_size[0] * output_size[1], -1
)
key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)
# preallocating result tensor: [b * np, sq, sk]
matmul_result = torch.empty(
output_size[0] * output_size[1],
output_size[2],
output_size[3],
dtype=query_layer.dtype,
device=torch.cuda.current_device(),
)
# Raw attention scores. [b * np, sq, sk]
matmul_result = torch.baddbmm(
matmul_result,
query_layer.transpose(0, 1), # [b * np, sq, hn]
key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
beta=0.0,
alpha=(1.0 / self.norm_factor),
)
# change view to [b, np, sq, sk]
attention_scores = matmul_result.view(*output_size)
# ==================================================
# Update attention mask for inference. [b, np, sq, sk]
# ==================================================
if use_cache:
with torch.no_grad():
attention_mask = attention_mask[
..., : attention_scores.size(3), : attention_scores.size(3)
]
# ===========================
# Attention probs and dropout
# ===========================
if exists(self.rpe):
rpe = self.rpe(query_layer.size(0), key_layer.size(0))
attention_scores += rpe # [1, np, sq, sk]
if self.pos_emb == "alibi":
attention_scores = self.alibi_embed(attention_scores)
# attention scores and attention mask [b, np, sq, sk]
attention_probs = self.scale_mask_softmax(attention_scores, attention_mask)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
with mpu.get_cuda_rng_tracker().fork():
attention_probs = self.attention_dropout(attention_probs)
# =========================
# Context layer. [sq, b, hp]
# =========================
# value_layer -> context layer.
# [sk, b, np, hn] --> [b, np, sq, hn]
# context layer shape: [b, np, sq, hn]
output_size = (
value_layer.size(1),
value_layer.size(2),
query_layer.size(0),
value_layer.size(3),
)
# change view [sk, b * np, hn]
value_layer = value_layer.view(
value_layer.size(0), output_size[0] * output_size[1], -1
)
# change view [b * np, sq, sk]
attention_probs = attention_probs.view(
output_size[0] * output_size[1], output_size[2], -1
)
# matmul: [b * np, sq, hn]
context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
# change view [b, np, sq, hn]
context_layer = context_layer.view(*output_size)
return context_layer
def flash_attention(self, query_layer, key_layer, value_layer):
# [b, np, sq, sk]
output_size = (
query_layer.size(1),
query_layer.size(2),
query_layer.size(0),
key_layer.size(0),
)
# [s, b, np, hn] -> [b, s, np, hn] -> [b * s, 1, np, hn]
query_layer = query_layer.transpose(0, 1).reshape(
output_size[0] * output_size[2], 1, output_size[1], -1
)
key_layer = key_layer.transpose(0, 1).reshape(
output_size[0] * output_size[3], 1, output_size[1], -1
)
value_layer = value_layer.transpose(0, 1).reshape(
output_size[0] * output_size[3], 1, output_size[1], -1
)
# Combined q/k/v into [b * s, 3, np, hn].
qkv = torch.concat([query_layer, key_layer, value_layer], dim=1)
batch_size = output_size[0]
seqlen = output_size[2]
max_s = seqlen
cu_seqlens = torch.arange(
0,
(batch_size + 1) * seqlen,
step=seqlen,
dtype=torch.int32,
device=qkv.device,
)
output = self.flash_attention_function(
qkv,
cu_seqlens,
max_s,
self.dropout_p if self.training else 0.0,
softmax_scale=None,
causal=True,
)
# [b * sq, np, hn] -> [b, sq, np, hn]
matmul_result = output.view(
output_size[0], output_size[2], output.shape[1], output.shape[2]
)
# [b, sq, np, hn] -> [b, np, sq, hn]
matmul_result = matmul_result.transpose(1, 2)
return matmul_result
def sparse_attention(self, query_layer, key_layer, value_layer, attention_mask):
# TODO: sparse attn dropout?
# TODO: pad to block size
# shape of q/k/v is [sq, b, np, hn] and needs to be transposed to [b, np, sq, hn]
query_layer, key_layer, value_layer = map(
lambda t: t.permute(1, 2, 0, 3).contiguous(),
(query_layer, key_layer, value_layer),
)
# output shape [b, np(heads), sq, hn]
attn_mask = attention_mask.to(query_layer.dtype) * -10000
if exists(self.rpe):
rpe = self.rpe(query_layer.size(0), key_layer.size(0))
else:
rpe = None
return self.sparse_attn(
query_layer, key_layer, value_layer, attn_mask=attn_mask, rpe=rpe
)
def forward(self, hidden_states, attention_mask, position_ids, layer_past=None, use_cache=False):
# hidden_states: [sq, b, h]
# =====================
# Query, Key, and Value
# =====================
# Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
mixed_x_layer, _ = self.query_key_value(hidden_states)
# [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
new_tensor_shape = mixed_x_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
3 * self.hidden_size_per_attention_head,
)
mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
# [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
(query_layer, key_layer, value_layer) = mpu.split_tensor_along_last_dim(
mixed_x_layer, 3
)
if exists(self.rotary_emb):
if exists(self.rotary_ndims):
# partial rotary
query_rot, query_pass = (
query_layer[..., : self.rotary_ndims],
query_layer[..., self.rotary_ndims:],
)
key_rot, key_pass = (
key_layer[..., : self.rotary_ndims],
key_layer[..., self.rotary_ndims:],
)
else:
# full rotary
query_rot, key_rot = query_layer, key_layer
apply_rotary_fn = apply_rotary_pos_emb
seq_len = key_layer.shape[0]
if exists(layer_past) and layer_past.numel() > 0:
seq_len += layer_past[0].shape[0]
cos, sin = self.rotary_emb(value_layer, seq_len=seq_len)
query_layer, key_layer = apply_rotary_fn(
query_rot, key_rot, cos, sin, position_ids=position_ids
)
if exists(self.rotary_ndims):
query_layer = torch.cat((query_layer, query_pass), dim=-1)
key_layer = torch.cat((key_layer, key_pass), dim=-1)
# ==================================
# Cache key and value for inference
# ==================================
if exists(layer_past) and layer_past.numel() > 0:
past_key, past_value = layer_past
key_layer = torch.cat((past_key.type_as(key_layer), key_layer), dim=0)
value_layer = torch.cat(
(past_value.type_as(value_layer), value_layer), dim=0
)
if use_cache:
present = torch.stack((key_layer, value_layer))
if self.use_flash_attention and not use_cache:
context_layer = self.flash_attention(query_layer, key_layer, value_layer)
elif not self.sparse:
context_layer = self.attention(
query_layer, key_layer, value_layer, layer_past, attention_mask, use_cache
)
else:
context_layer = self.sparse_attention(
query_layer, key_layer, value_layer, attention_mask
)
# [b, np, sq, hn] --> [sq, b, np, hn]
context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
# [sq, b, np, hn] --> [sq, b, hp]
new_context_layer_shape = context_layer.size()[:-2] + (
self.hidden_size_per_partition,
)
context_layer = context_layer.view(*new_context_layer_shape)
# =================
# Output. [sq, b, h]
# =================
output, bias = self.dense(context_layer)
if use_cache:
output = [output, present]
return output, bias
class LLaMAParallelMLP(nn.Module):
"""LLaMA's MLP.
    MLP takes the input with h hidden size, projects it to an intermediate
    dimension of roughly 8/3 * h (rounded up to a multiple of `llama_mlp_multiple_of`),
    applies a gated activation (act(w1(x)) * w3(x), SwiGLU-style), and projects
    the result back to h. No bias terms and no dropout are used.
"""
def __init__(
self, config, init_method, output_layer_init_method, parallel_output=False
):
super().__init__()
self.activation_func = get_activation(config)
self.activation_type = config.hidden_act
self.multiple_of = config.llama_mlp_multiple_of
ff_dim = int(2 * config.hidden_size * 4 / 3)
ff_dim = self.multiple_of * ((ff_dim + self.multiple_of - 1) // self.multiple_of)
self.w1 = mpu.ColumnParallelLinear(
config=config,
input_size=config.hidden_size,
output_size=ff_dim,
gather_output=False,
init_method=init_method,
skip_bias_add=True,
bias=False,
)
self.w3 = mpu.ColumnParallelLinear(
config=config,
input_size=config.hidden_size,
output_size=ff_dim,
gather_output=False,
init_method=init_method,
skip_bias_add=True,
bias=False,
)
self.w2 = mpu.RowParallelLinear(
config=config,
input_size=ff_dim,
output_size=config.hidden_size,
input_is_parallel=True,
init_method=output_layer_init_method,
skip_bias_add=True,
parallel_output=parallel_output,
bias=False,
)
def forward(self, hidden_states):
w1_out, _ = self.w1(hidden_states)
w3_out, _ = self.w3(hidden_states)
return self.w2(self.activation_func(w1_out) * w3_out)
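# Worked example (illustrative numbers): the rounding above reproduces LLaMA's hidden
# sizes. For hidden_size = 4096 and llama_mlp_multiple_of = 256:
#   ff_dim = int(2 * 4096 * 4 / 3) = 10922
#   ff_dim = 256 * ((10922 + 255) // 256) = 11008
# which matches the 11008 intermediate size of the 7B model.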
class ParallelTransformerLayer(nn.Module):
"""A single transformer layer.
Transformer layer takes input with size [b, s, h] and returns an
output of the same size.
"""
def __init__(
self,
config,
attention_mask_func,
init_method,
output_layer_init_method,
layer_number,
rpe=None,
rotary=False
):
super().__init__()
self.layer_number = layer_number
norm, eps = get_norm(config)
# Layernorm on the input data.
self.input_layernorm = norm(config.hidden_size, eps=eps)
self.hidden_dropout = config.hidden_dropout
self.gpt_j_residual = config.gpt_j_residual
self.gpt_j_tied = config.gpt_j_tied
if self.gpt_j_residual:
self.reduce = mpu.mappings.reduce_from_model_parallel_region
# Self attention.
self.attention = ParallelSelfAttention(
config=config,
attention_mask_func=attention_mask_func,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
rpe=rpe,
rotary=rotary,
parallel_output=self.gpt_j_residual,
)
# Layernorm on the output of the attention layer.
        # If GPT-J residuals are used, this is superfluous, but leaving it in
        # leads to cleaner code
self.post_attention_layernorm = norm(config.hidden_size, eps=eps)
# MLP
self.mlp_type = "regular" if config.mlp_type is None else config.mlp_type
if self.mlp_type == "regular":
self.mlp = ParallelMLP(
config=config,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
parallel_output=self.gpt_j_residual,
)
elif self.mlp_type == "llama":
self.mlp = LLaMAParallelMLP(
config=config,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
parallel_output=self.gpt_j_residual,
)
else:
raise KeyError(self.mlp_type)
self.bias_dropout_fusion = config.bias_dropout_fusion
def _get_bias_dropout(self):
if self.bias_dropout_fusion:
fn = (
bias_dropout_add_fused_train
if self.training
else bias_dropout_add_fused_inference
)
else:
fn = get_bias_dropout_add(self.training)
return fn
def forward(self, x, attention_mask, position_ids, layer_past=None, use_cache=False):
bias_dropout_fn = self._get_bias_dropout()
# x: [b, s, h]
if self.gpt_j_residual:
# pseudocode:
# x = x + attn(ln(x)) + mlp(ln(x))
# this means we can avoid doing the allreduce in the attn / mlp outputs
# to save communication time (we can do a single allreduce after we add mlp / attn outputs).
            # due to a bug, the two layernorms are not tied in GPT-NeoX-20B. This is not desirable, but
            # we preserve the behaviour for backwards compatibility
residual = x
# applies the correct normalization depending on if the norms are tied
if self.gpt_j_tied:
x = self.input_layernorm(x)
x1, x2 = x, x
else:
x1, x2 = self.input_layernorm(x), self.post_attention_layernorm(x)
# attention operator
attention_output, attention_bias = self.attention(
x1, attention_mask, position_ids=position_ids, layer_past=layer_past, use_cache=use_cache
)
if use_cache:
attention_output, presents = attention_output
with torch.enable_grad():
attention_output = bias_dropout_fn(
attention_output,
bias=attention_bias.expand_as(attention_output),
residual=None,
prob=self.hidden_dropout,
)
# mlp operator
mlp_output, mlp_bias = self.mlp(x2)
with torch.enable_grad():
output = bias_dropout_fn(
mlp_output,
bias=mlp_bias.expand_as(mlp_output),
residual=attention_output,
prob=self.hidden_dropout,
)
            # output = x + attn(ln(x)) + mlp(ln(x))
output = residual + self.reduce(output)
else:
# pseudocode:
# x = x + attn(ln1(x))
# x = x + mlp(ln2(x))
residual = x
# x = x + attn(ln1(x))
attention_output, attention_bias = self.attention(
self.input_layernorm(x), attention_mask, position_ids=position_ids, layer_past=layer_past, use_cache=use_cache
)
if use_cache:
attention_output, presents = attention_output
with torch.enable_grad():
if attention_bias is not None:
# Use special bias_dropout_fn if we have a bias term from the above attention layer
attention_output = bias_dropout_fn(
attention_output,
bias=attention_bias.expand_as(residual),
residual=residual,
prob=self.hidden_dropout,
)
else:
attention_output = torch.nn.functional.dropout(
attention_output, p=self.hidden_dropout, training=self.training
) + residual
# output = x + mlp(ln2(x))
mlp_output, mlp_bias = self.mlp(
self.post_attention_layernorm(attention_output)
)
with torch.enable_grad():
if self.mlp_type == "llama":
# No dropout either
assert mlp_bias is None
output = mlp_output + attention_output
else:
output = bias_dropout_fn(
mlp_output,
bias=mlp_bias.expand_as(attention_output),
residual=attention_output,
prob=self.hidden_dropout,
)
return output if not use_cache else (output, presents)
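# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the two residual
# layouts implemented in ParallelTransformerLayer.forward, written with plain
# callables so the dataflow is easy to see. `ln1`, `ln2`, `attn` and `mlp`
# here are stand-in functions, not the parallel layers defined above.
def _residual_layouts_example(x, ln1, ln2, attn, mlp, gpt_j_residual: bool):
    if gpt_j_residual:
        # "parallel" (GPT-J style) residual: attention and MLP both read the
        # normed input and their outputs are summed into one residual update.
        return x + attn(ln1(x)) + mlp(ln2(x))
    # classic sequential residual: the MLP sees the attention output.
    x = x + attn(ln1(x))
    x = x + mlp(ln2(x))
    return x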
def parallel_lm_logits(input_, word_embeddings_weight, parallel_output, bias=None):
"""LM logits using word embedding weights."""
# Parallel logits.
input_parallel = mpu.copy_to_model_parallel_region(input_)
# Matrix multiply.
if bias is None:
logits_parallel = F.linear(input_parallel, word_embeddings_weight)
else:
logits_parallel = F.linear(input_parallel, word_embeddings_weight, bias)
# Gather if needed.
if parallel_output:
return logits_parallel
return mpu.gather_from_model_parallel_region(logits_parallel)
| 29,401 | 35.031863 | 159 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/layers/gmlp.py
|
# Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from .fused_softmax import FusedScaleMaskSoftmax
from .activations import get_activation
from .norms import get_norm
from .utils import get_fusion_type
from fengshen.models.gpt_neox import mpu
class TinyAttention(nn.Module):
def __init__(self, config, d_attn, d_ff, mask_fn):
super().__init__()
self.proj_qkv = nn.Linear(d_ff * 2, 3 * d_attn)
self.scale = d_attn**-0.5
self.proj_ffn = nn.Linear(d_attn, d_ff)
self.softmax = FusedScaleMaskSoftmax(
input_in_fp16=config.torch_dtype == "float16",
input_in_bf16=config.torch_dtype == "bfloat16",
fusion_type=get_fusion_type(config),
mask_func=mask_fn,
softmax_in_fp32=config.attention_softmax_in_fp32,
scale=None,
)
def forward(self, x, attention_mask):
q, k, v = torch.chunk(self.proj_qkv(x), 3, dim=-1)
w = torch.einsum("bnd,bmd->bnm", q, k).unsqueeze(1) * self.scale
a = self.softmax(
w, mask=attention_mask[..., : w.size(-2), : w.size(-1)]
).squeeze(1)
x = torch.einsum("bnm,bmd->bnd", a, v)
return self.proj_ffn(x)
class SpatialGatingUnit(nn.Module):
def __init__(self, config, d_ff, d_attn=None, causal=True, mask_fn=None):
super().__init__()
self.causal = causal
self.use_attn = d_attn is not None
norm, eps = get_norm(config)
self.norm = norm(d_ff, eps=eps)
self.proj = nn.Linear(config.max_position_embeddings, config.max_position_embeddings)
if self.use_attn:
assert mask_fn is not None
self.attn = TinyAttention(
config, d_attn=d_attn, d_ff=d_ff, mask_fn=mask_fn
)
nn.init.zeros_(self.proj.weight)
nn.init.constant_(self.proj.bias, 1.0)
def forward(self, x, attention_mask):
        device, n = x.device, x.shape[0]  # n = sequence length (x is [s, b, d] here)
x = x.transpose(0, 1) # [s, b, d] -> [b, s, d]
res, gate = x.chunk(2, dim=-1) # split along dim
gate = self.norm(gate)
weight, bias = self.proj.weight, self.proj.bias
if self.causal:
weight, bias = weight[:n, :n], bias[:n]
mask = torch.ones(weight.shape[:2], device=device).triu_(1).bool()
weight = weight.masked_fill(mask, 0.0)
        gate = F.linear(gate.transpose(2, 1), weight, bias).transpose(2, 1)
if self.use_attn:
gate = gate + self.attn(x, attention_mask)
return (gate * res).transpose(0, 1) # [b, s, d] -> [s, b, d]
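# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the causal branch
# above restricts the spatial projection. Zeroing the strict upper triangle of
# the [n, n] projection weight means position t in the gate can only mix
# information from positions <= t.
def _causal_spatial_weight_example(n: int = 4):
    w = torch.ones(n, n)
    mask = torch.ones(n, n).triu_(1).bool()  # True strictly above the diagonal
    w = w.masked_fill(mask, 0.0)
    # row t of `w` now has non-zero entries only for columns 0..t, i.e. a
    # lower-triangular "mixing" matrix over the sequence dimension.
    return w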
class GMLPBlock(nn.Module):
def __init__(
self,
config,
init_method,
output_layer_init_method,
layer_number,
ff_mult=4,
mask_fn=None,
):
super().__init__()
self.layer_number = layer_number
ff_dim = config.intermediate_size
norm, eps = get_norm(config)
self.norm = norm(config.hidden_size, eps=eps)
self.input_linear = mpu.ColumnParallelLinear(
config=config,
input_size=config.hidden_size,
output_size=ff_dim * 2,
gather_output=False,
init_method=init_method,
skip_bias_add=True,
)
self.activation_func = get_activation(config)
ff_dim_parallel = mpu.divide(ff_dim, mpu.get_model_parallel_world_size())
if config.attention_config[layer_number] == "amlp":
d_attn = config.gmlp_attn_dim
else:
d_attn = None
self.sgu = SpatialGatingUnit(
config, ff_dim_parallel, d_attn, causal=True, mask_fn=mask_fn
)
self.output_linear = mpu.RowParallelLinear(
config=config,
input_size=ff_dim,
output_size=config.hidden_size,
input_is_parallel=True,
init_method=output_layer_init_method,
skip_bias_add=True,
)
def forward(self, args):
assert len(args) == 2, "GMLPBlock expects 2 arguments"
x, attention_mask = args
x = self.norm(x)
x, _ = self.input_linear(x)
x = self.activation_func(x)
x = self.sgu(x, attention_mask)
x, _ = self.output_linear(x)
return x, attention_mask
| 4,997 | 34.197183 | 106 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/layers/fused_softmax.py
|
# Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import enum
from ..fused_kernels import load_fused_kernels
class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
"""
    Fused operation which performs the following three operations in sequence
1. Scale the tensor.
2. Apply upper triangular mask (typically used in gpt models).
3. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs, scale):
import scaled_upper_triang_masked_softmax_cuda
scale_t = torch.tensor([scale])
softmax_results = scaled_upper_triang_masked_softmax_cuda.forward(
inputs, scale_t[0]
)
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads):
import scaled_upper_triang_masked_softmax_cuda
softmax_results, scale_t = ctx.saved_tensors
input_grads = scaled_upper_triang_masked_softmax_cuda.backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None
class ScaledMaskedSoftmax(torch.autograd.Function):
"""
    Fused operation which performs the following three operations in sequence
1. Scale the tensor.
2. Apply the mask.
3. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs, mask, scale):
import scaled_masked_softmax_cuda
scale_t = torch.tensor([scale])
softmax_results = scaled_masked_softmax_cuda.forward(inputs, mask, scale_t[0])
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads):
import scaled_masked_softmax_cuda
softmax_results, scale_t = ctx.saved_tensors
input_grads = scaled_masked_softmax_cuda.backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None, None
class SoftmaxFusionTypes(enum.Enum):
upper_triang = 1 # causal mask
general = 2 # general mask
none = 3 # no fusion
class FusedScaleMaskSoftmax(nn.Module):
"""
fused operation: scaling + mask + softmax
Arguments:
input_in_fp16: flag to indicate if input in fp16 data format.
input_in_bf16: flag to indicate if input in bf16 data format.
fusion_type: type of fusion to perform, should be either upper_triang, general or none. None will perform a regular torch softmax.
mask_func: mask function to be applied.
        softmax_in_fp32: if true, softmax is performed in fp32 precision.
scale: scaling factor used in input tensor scaling.
"""
def __init__(
self,
input_in_fp16,
input_in_bf16,
fusion_type,
mask_func,
softmax_in_fp32,
scale,
):
super().__init__()
self.input_in_fp16 = input_in_fp16
self.input_in_bf16 = input_in_bf16
self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
assert fusion_type in [
SoftmaxFusionTypes.upper_triang,
SoftmaxFusionTypes.general,
SoftmaxFusionTypes.none,
], f"Invalid fusion type {fusion_type}"
if fusion_type != SoftmaxFusionTypes.none:
load_fused_kernels() # check fused kernels are installed
self.upper_triang_mask_fusion = fusion_type == SoftmaxFusionTypes.upper_triang
self.general_mask_fusion = fusion_type == SoftmaxFusionTypes.general
self.fusion = fusion_type != SoftmaxFusionTypes.none
self.mask_func = mask_func
self.softmax_in_fp32 = softmax_in_fp32
self.scale = scale
assert (
self.scale is None or softmax_in_fp32
), "softmax should be in fp32 when scaled"
def forward(self, input, mask):
# [b, np, sq, sk]
assert input.dim() == 4
if self.is_kernel_available(mask, *input.size()):
return self.forward_fused_softmax(input, mask)
else:
return self.forward_torch_softmax(input, mask)
def is_kernel_available(self, mask, b, np, sq, sk):
attn_batches = b * np
if (
self.fusion # user wants to fuse
            and self.input_in_float16  # input must be fp16 or bf16
            and mask is not None  # mask tensor must not be None
            and 16 < sk <= 2048  # sk must be 16 ~ 2048
            and sq % 4 == 0  # sq must be a multiple of 4
            and attn_batches % 4 == 0  # np * b must be a multiple of 4
):
if 0 <= sk <= 2048:
batch_per_block = self.get_batch_per_block(sq, sk, b, np)
if self.upper_triang_mask_fusion:
if attn_batches % batch_per_block == 0:
return True
else:
if sq % batch_per_block == 0:
return True
return False
def forward_fused_softmax(self, input, mask):
b, np, sq, sk = input.size()
scale = self.scale if self.scale is not None else 1.0
if self.upper_triang_mask_fusion:
assert sq == sk, "causal mask is only for self attention"
# input is 3D tensor (attn_batches, sq, sk)
input = input.view(-1, sq, sk)
probs = ScaledUpperTriangMaskedSoftmax.apply(input, scale)
return probs.view(b, np, sq, sk)
else:
# input is 4D tensor (b, np, sq, sk)
return ScaledMaskedSoftmax.apply(input, mask, scale)
def forward_torch_softmax(self, input, mask):
if self.input_in_float16 and self.softmax_in_fp32:
input = input.float()
if self.scale is not None:
input = input * self.scale
mask_output = self.mask_func(input, mask) if mask is not None else input
probs = torch.nn.Softmax(dim=-1)(mask_output)
if self.input_in_float16 and self.softmax_in_fp32:
if self.input_in_fp16:
probs = probs.half()
else:
probs = probs.bfloat16()
return probs
@staticmethod
def get_batch_per_block(sq, sk, b, np):
import scaled_masked_softmax_cuda
return scaled_masked_softmax_cuda.get_batch_per_block(sq, sk, b, np)
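# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): what the unfused
# fallback path (forward_torch_softmax) computes, with a simple masking
# function standing in for the configured mask_func (an assumption; the real
# mask_func is supplied by the caller). Shapes follow the [b, np, sq, sk]
# convention used above.
def _torch_softmax_fallback_example():
    def mask_func(scores, mask):
        # a common choice: fill masked positions with a large negative value
        return scores.masked_fill(mask, -10000.0)

    scores = torch.randn(2, 4, 8, 8)  # [b, np, sq, sk]
    causal_mask = torch.ones(8, 8, dtype=torch.bool).triu(1)[None, None]
    probs = torch.nn.Softmax(dim=-1)(mask_func(scores, causal_mask))
    return probs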
| 6,992 | 32.946602 | 138 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/layers/activations.py
|
# Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
def get_activation(config):
"""retrieves the activation function specified in config"""
if config.hidden_act == "geglu":
activation_func = GEGLU(config=config)
elif config.hidden_act == "gelu":
if config.bias_gelu_fusion:
activation_func = bias_gelu_impl
else:
activation_func = F.gelu
elif config.hidden_act == "relu":
activation_func = F.relu
elif config.hidden_act == "softsign":
activation_func = F.softsign
elif config.hidden_act == "swish":
activation_func = swish
elif config.hidden_act == "mish":
activation_func = mish
elif config.hidden_act == "silu":
activation_func = F.silu
else:
raise ValueError(f"Activation function {config.activation} not recognized")
return activation_func
###### BIAS GELU FUSION/ NO AUTOGRAD ################
# 1/sqrt(2*pi)-> 0.3989423
# 1/sqrt(2) -> 0.70710678
# sqrt(2/pi) -> 0.79788456
# this function is tanh approximation of gelu
# actual gelu is:
# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
@torch.jit.script
def bias_gelu(bias, y):
x = bias + y
return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
# gradient of tanh approximation of gelu
# gradient of actual gelu is:
# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
@torch.jit.script
def bias_gelu_back(g, bias, y):
x = bias + y
tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
# sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
ff = 0.5 * x * (
(1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)
) + 0.5 * (1 + tanh_out)
return ff * g
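# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a quick numerical
# comparison of the tanh approximation used by bias_gelu against the exact
# erf-based GELU. The tolerance is deliberately loose; the approximation is
# close but not bit-exact.
def _gelu_approximation_check():
    x = torch.linspace(-6.0, 6.0, steps=1001)
    approx = bias_gelu(torch.zeros_like(x), x)  # zero bias -> plain approximate gelu(x)
    exact = x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
    return torch.allclose(approx, exact, atol=1e-2)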
class GeLUFunction(torch.autograd.Function):
@staticmethod
# bias is an optional argument
def forward(ctx, input, bias):
ctx.save_for_backward(input, bias)
return bias_gelu(bias, input)
@staticmethod
def backward(ctx, grad_output):
input, bias = ctx.saved_tensors
tmp = bias_gelu_back(grad_output, bias, input)
return tmp, tmp
bias_gelu_impl = GeLUFunction.apply
# This is actually the Python equivalent of torch.nn.functional.gelu(), with type hints for the ONNX exporter
@torch.jit.script
def erf_gelu(x):
return (
x
* 0.5
* (
torch.erf(x / 1.41421).to(dtype=x.dtype)
+ torch.ones_like(x).to(dtype=x.dtype)
)
)
@torch.jit.script
def swish(x, beta: float = 1.0):
return x * torch.sigmoid(beta * x)
@torch.jit.script
def mish(x):
return x * torch.tanh(F.softplus(x))
class GEGLU(torch.nn.Module):
def __init__(self, config):
super(GEGLU, self).__init__()
self.activation_func = F.gelu
def forward(self, x, bias=None):
x, gate = x.chunk(2, dim=-1)
if bias is not None:
bias_1, bias_2 = bias.chunk(2, dim=-1)
x = x + bias_1
gate = gate + bias_2
intermediate_parallel = self.activation_func(gate)
return intermediate_parallel * x
| 4,034 | 29.338346 | 106 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/layers/word_embeddings.py
|
# Copyright (c) 2021, EleutherAI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import math
from .. import mpu
from .positional_embeddings import SinusoidalPositionalEmbedding
from .utils import exists
class Embedding(torch.nn.Module):
"""Language model embeddings.
Arguments:
hidden_size: hidden size
vocab_size: vocabulary size
max_sequence_length: maximum size of sequence. This
is used for positional embedding
embedding_dropout_prob: dropout probability for embeddings
init_method: weight initialization method
num_tokentypes: size of the token-type embeddings. 0 value
will ignore this embedding
"""
def __init__(
self,
config,
hidden_size,
vocab_size,
max_sequence_length,
embedding_dropout_prob,
init_method,
num_tokentypes=0,
use_pos_emb=True,
):
super(Embedding, self).__init__()
self.hidden_size = hidden_size
self.init_method = init_method
self.num_tokentypes = num_tokentypes
# Word embeddings (parallel).
self.word_embeddings = mpu.VocabParallelEmbedding(
config=config,
num_embeddings=vocab_size,
embedding_dim=self.hidden_size,
init_method=self.init_method,
)
self._word_embeddings_key = "word_embeddings"
self.embedding_module = torch.nn.Embedding
# Position embedding (serial).
self.use_pos_emb = use_pos_emb
if self.use_pos_emb:
self.embedding_type = config.pos_emb
if self.embedding_type == "learned":
self.position_embeddings = self.embedding_module(
max_sequence_length, self.hidden_size
)
self._position_embeddings_key = "position_embeddings"
# Initialize the position embeddings.
self.init_method(self.position_embeddings.weight)
elif self.embedding_type == "sinusoidal":
self.position_embeddings = SinusoidalPositionalEmbedding(
self.hidden_size
)
# Token type embedding.
# Add this as an optional field that can be added through
        # method call so we can load a pretrained model without
# token types and add them as needed.
self._tokentype_embeddings_key = "tokentype_embeddings"
if self.num_tokentypes > 0:
self.tokentype_embeddings = self.embedding_module(
self.num_tokentypes, self.hidden_size
)
# Initialize the token-type embeddings.
self.init_method(self.tokentype_embeddings.weight)
else:
self.tokentype_embeddings = None
# Embeddings dropout
self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)
# For ticking position ids forward
self.layer_past = None
def add_tokentype_embeddings(self, num_tokentypes):
"""Add token-type embedding. This function is provided so we can add
token-type embeddings in case the pretrained model does not have it.
This allows us to load the model normally and then add this embedding.
"""
if self.tokentype_embeddings is not None:
raise Exception("tokentype embeddings is already initialized")
if torch.distributed.get_rank() == 0:
print(
"adding embedding for {} tokentypes".format(num_tokentypes), flush=True
)
self.num_tokentypes = num_tokentypes
self.tokentype_embeddings = self.embedding_module(
num_tokentypes, self.hidden_size
)
# Initialize the token-type embeddings.
self.init_method(self.tokentype_embeddings.weight)
def forward(self, input_ids, position_ids, tokentype_ids=None):
# Embeddings.
words_embeddings = self.word_embeddings(input_ids)
if self.use_pos_emb and self.embedding_type in ["learned", "sinusoidal"]:
position_embeddings = self.position_embeddings(position_ids)
embeddings = words_embeddings + position_embeddings
else:
embeddings = words_embeddings
if tokentype_ids is not None:
assert self.tokentype_embeddings is not None
embeddings = embeddings + self.tokentype_embeddings(tokentype_ids)
else:
assert self.tokentype_embeddings is None
# Dropout.
embeddings = self.embedding_dropout(embeddings)
return embeddings
class EmbeddingPipe(Embedding):
"""Extends Embedding to forward attention_mask through the pipeline."""
@property
def word_embeddings_weight(self):
"""Easy accessory for the pipeline engine to tie embeddings across stages."""
return self.word_embeddings.weight
def forward(self, args):
assert (
len(args) == 3
), f"Expected 3 arguments (input_ids, position_ids, attention_mask), but got {len(args)}."
input_ids = args[0]
position_ids = args[1]
attention_mask = args[2]
embeddings = super().forward(input_ids, position_ids)
return embeddings, attention_mask
class SoftEmbedding(torch.nn.Module):
def __init__(
self,
config,
wte,
n_tokens: int = 10,
init_range: float = 0.5,
init_string: str = "",
tokenizer=None,
):
super(SoftEmbedding, self).__init__()
self.n_tokens = n_tokens
self.config = config
self.init_range = init_range
self.init_string = init_string
        self.tokenizer = tokenizer
        # the tokenizer must be set before initialize_embedding, which may use it
        self.soft_embedding_weight = torch.nn.parameter.Parameter(
            self.initialize_embedding(wte)
        )
    def initialize_embedding(self, wte):
        # `wte` is the model's word-embedding module; the soft prompt is either
        # seeded from the embeddings of `init_string` or drawn uniformly.
        if self.init_string:
            embeds = torch.LongTensor(
                self.tokenizer(self.init_string)
            ).to(wte.weight.device)
            embeds = wte(embeds)
            if embeds.shape[0] >= self.n_tokens:
                embeds = embeds[: self.n_tokens, :]  # slice
            else:
                embeds = embeds.repeat(math.ceil(self.n_tokens / embeds.shape[0]), 1)[
                    : self.n_tokens, :
                ]  # pad up to n_tokens
            return embeds
        return torch.Tensor(self.n_tokens, self.config.hidden_size).uniform_(
            -self.init_range, self.init_range
        )
def forward(self, args: tuple):
in_inference = len(args) == 3 # embeddings, layer_past, attention_mask
in_train = len(args) == 2 # embeddings, attention_mask
if in_train:
embedding, attention_mask = args
else:
embedding, layer_past, attention_mask = args
soft_embedding = self.soft_embedding_weight.repeat(
embedding.shape[0], 1, 1
) # repeat batch_size times
if in_train:
# append soft embedding at the beginning in training
embedding = torch.cat((soft_embedding, embedding), dim=1)
embedding = embedding[:, : self.config.max_position_embeddings, ...]
return embedding, attention_mask
else:
if not (exists(layer_past) and layer_past.numel() > 0):
# if in inference, on the first forward pass, we want to do the same as in training (append soft embedding)
embedding = torch.cat((soft_embedding, embedding), dim=1)
embedding = embedding[:, : self.config.max_position_embeddings, ...]
# otherwise, we're in incremental mode, and just want to forward the single embedding (since the soft prompt has already been cached)
return embedding, layer_past, attention_mask
| 8,382 | 37.810185 | 145 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/mpu/mappings.py
|
# Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from .initialize import (
get_model_parallel_group,
get_model_parallel_world_size,
get_model_parallel_rank,
get_fp32_allreduce,
)
from .utils import split_tensor_along_last_dim
def _reduce(input_):
"""All-reduce the the input tensor across model parallel group."""
# Bypass the function if we are using only 1 GPU.
if get_model_parallel_world_size() == 1:
return input_
# Bf16 convert
dt = input_.dtype
if dt == torch.bfloat16 and get_fp32_allreduce():
input_ = input_.float()
# All-reduce.
torch.distributed.all_reduce(input_, group=get_model_parallel_group())
# Bf16 convert
if dt == torch.bfloat16 and get_fp32_allreduce():
input_ = input_.bfloat16()
return input_
def _split(input_):
"""Split the tensor along its last dimension and keep the
corresponding slice."""
world_size = get_model_parallel_world_size()
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_
# Bf16 convert
dt = input_.dtype
if dt == torch.bfloat16 and get_fp32_allreduce():
input_ = input_.float()
# Split along last dimension.
input_list = split_tensor_along_last_dim(input_, world_size)
# Note: torch.split does not create contiguous tensors by default.
rank = get_model_parallel_rank()
output = input_list[rank].contiguous()
# Bf16 convert
if dt == torch.bfloat16 and get_fp32_allreduce():
output = output.bfloat16()
return output
def _gather(input_):
"""Gather tensors and concatinate along the last dimension."""
world_size = get_model_parallel_world_size()
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_
# Bf16 convert
dt = input_.dtype
if dt == torch.bfloat16 and get_fp32_allreduce():
input_ = input_.float()
# Size and dimension.
last_dim = input_.dim() - 1
rank = get_model_parallel_rank()
tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
tensor_list[rank] = input_
torch.distributed.all_gather(tensor_list, input_, group=get_model_parallel_group())
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=last_dim).contiguous()
# Bf16 convert
if dt == torch.bfloat16 and get_fp32_allreduce():
output = output.bfloat16()
return output
class _CopyToModelParallelRegion(torch.autograd.Function):
"""Pass the input to the model parallel region."""
@staticmethod
def symbolic(graph, input_):
return input_
@staticmethod
def forward(ctx, input_):
return input_
@staticmethod
def backward(ctx, grad_output):
return _reduce(grad_output)
class _ReduceFromModelParallelRegion(torch.autograd.Function):
"""All-reduce the input from the model parallel region."""
@staticmethod
def symbolic(graph, input_):
return _reduce(input_)
@staticmethod
def forward(ctx, input_):
return _reduce(input_)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class _ScatterToModelParallelRegion(torch.autograd.Function):
"""Split the input and keep only the corresponding chuck to the rank."""
@staticmethod
def symbolic(graph, input_):
return _split(input_)
@staticmethod
def forward(ctx, input_):
return _split(input_)
@staticmethod
def backward(ctx, grad_output):
return _gather(grad_output)
class _GatherFromModelParallelRegion(torch.autograd.Function):
"""Gather the input from model parallel region and concatinate."""
@staticmethod
def symbolic(graph, input_):
return _gather(input_)
@staticmethod
def forward(ctx, input_):
return _gather(input_)
@staticmethod
def backward(ctx, grad_output):
return _split(grad_output)
# -----------------
# Helper functions.
# -----------------
def copy_to_model_parallel_region(input_):
return _CopyToModelParallelRegion.apply(input_)
def reduce_from_model_parallel_region(input_):
return _ReduceFromModelParallelRegion.apply(input_)
def scatter_to_model_parallel_region(input_):
return _ScatterToModelParallelRegion.apply(input_)
def gather_from_model_parallel_region(input_):
return _GatherFromModelParallelRegion.apply(input_)
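# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the single-process
# math behind _split / _gather. Splitting along the last dimension and
# concatenating the pieces back recovers the original tensor, which is why
# scatter_to_model_parallel_region and gather_from_model_parallel_region act
# as inverses of each other (up to communication).
def _split_gather_roundtrip_example(world_size: int = 4):
    x = torch.randn(2, 3, 8 * world_size)
    pieces = split_tensor_along_last_dim(x, world_size)            # what each rank keeps
    rebuilt = torch.cat([p.contiguous() for p in pieces], dim=-1)  # what _gather rebuilds
    return torch.equal(x, rebuilt)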
| 5,182 | 25.854922 | 106 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/mpu/initialize.py
|
# Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model and data parallel groups."""
import torch
from .utils import ensure_divisibility
# Model parallel group that the current rank belongs to.
_MODEL_PARALLEL_GROUP = None
# Data parallel group that the current rank belongs to.
_DATA_PARALLEL_GROUP = None
# Pipeline parallel group that the current rank belongs to.
_PIPE_PARALLEL_GROUP = None
# A group used to sync during the IO process. Usually this is data_parallel_group(),
# but with pipeline parallelism it must also involve the last stage (which is not in the
# DP group of rank 0)
_IO_PARALLEL_GROUP = None
# These values enable us to change the mpu sizes on the fly.
_MPU_WORLD_SIZE = None
_MPU_RANK = None
# Used to query 3D topology
_MPU_TOPOLOGY = None
# Get fp32_allreduce flag
_FP32_ALLREDUCE = None
_INIT_PARAMS_IN_CUDA=True
def set_init_params_in_cuda(status):
global _INIT_PARAMS_IN_CUDA
_INIT_PARAMS_IN_CUDA = status
def get_init_params_in_cuda():
return _INIT_PARAMS_IN_CUDA
def is_unitialized():
"""Useful for code segments that may be accessed with or without mpu initialization"""
return _DATA_PARALLEL_GROUP is None
def initialize_model_parallel(model_parallel_size, topology=None, fp32_allreduce=False):
"""
Initialize model data parallel groups.
Arguments:
model_parallel_size: number of GPUs used to parallelize model.
Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
use 2 GPUs to parallelize the model. The present function will
create 4 model parallel groups and 2 data parallel groups as:
4 model parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7]
2 data parallel groups:
[g0, g2, g4, g6], [g1, g3, g5, g7]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
if torch.distributed.get_rank() == 0:
print("> initializing model parallel with size {}".format(model_parallel_size))
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
world_size = torch.distributed.get_world_size()
if world_size < model_parallel_size:
raise ValueError("world size cannot be smaller than model parallel size")
ensure_divisibility(world_size, model_parallel_size)
rank = torch.distributed.get_rank()
global _MPU_TOPOLOGY
if topology:
_MPU_TOPOLOGY = topology
# Build the data parallel groups.
global _DATA_PARALLEL_GROUP
assert _DATA_PARALLEL_GROUP is None, "data parallel group is already initialized"
if topology:
for dp_group in topology.get_axis_comm_lists("data"):
group = torch.distributed.new_group(ranks=dp_group)
if rank == 0:
print(f"MPU DP:", dp_group)
if rank in dp_group:
_DATA_PARALLEL_GROUP = group
else:
for i in range(model_parallel_size):
ranks = range(i, world_size, model_parallel_size)
group = torch.distributed.new_group(ranks)
if i == (rank % model_parallel_size):
_DATA_PARALLEL_GROUP = group
# Build pipeline parallel group
if topology is not None:
global _PIPE_PARALLEL_GROUP
for pp_group in topology.get_axis_comm_lists("pipe"):
group = torch.distributed.new_group(ranks=pp_group)
if rank == 0:
print(f"MPU PP:", pp_group)
if rank in pp_group:
_PIPE_PARALLEL_GROUP = group
# Build IO group
global _IO_PARALLEL_GROUP
if topology and topology.get_dim("pipe") > 1:
io_stages = [0, topology.get_dim("pipe") - 1]
io_group = []
for stage in io_stages:
io_group.extend(topology.filter_match(pipe=stage, model=0))
if rank == 0:
print(f"MPU IO:", io_group)
group = torch.distributed.new_group(ranks=io_group)
if rank in io_group:
_IO_PARALLEL_GROUP = group
else:
_IO_PARALLEL_GROUP = get_data_parallel_group()
# Build the model parallel groups.
global _MODEL_PARALLEL_GROUP
assert _MODEL_PARALLEL_GROUP is None, "model parallel group is already initialized"
if topology:
# Short circuit case without model parallelism.
# TODO: it would be nice to avoid this branching case?
if model_parallel_size == 1:
for group_rank in range(world_size):
group = torch.distributed.new_group(ranks=[group_rank])
if rank == 0:
print(f"MPU MP:", [group_rank])
if rank == group_rank:
_MODEL_PARALLEL_GROUP = group
return
for mp_group in topology.get_axis_comm_lists("model"):
group = torch.distributed.new_group(ranks=mp_group)
if rank == 0:
print(f"MPU MP:", mp_group)
if rank in mp_group:
_MODEL_PARALLEL_GROUP = group
else:
for i in range(world_size // model_parallel_size):
ranks = range(i * model_parallel_size, (i + 1) * model_parallel_size)
group = torch.distributed.new_group(ranks)
if i == (rank // model_parallel_size):
_MODEL_PARALLEL_GROUP = group
global _FP32_ALLREDUCE
assert _FP32_ALLREDUCE is None, "fp32_allreduce is already initialized"
_FP32_ALLREDUCE = fp32_allreduce
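# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the group layout the
# non-topology branch above produces for the docstring's example of 8 GPUs
# with model_parallel_size=2 -- pure Python, no torch.distributed needed.
def _example_group_layout(world_size: int = 8, model_parallel_size: int = 2):
    data_parallel_groups = [
        list(range(i, world_size, model_parallel_size))
        for i in range(model_parallel_size)
    ]  # [[0, 2, 4, 6], [1, 3, 5, 7]]
    model_parallel_groups = [
        list(range(i * model_parallel_size, (i + 1) * model_parallel_size))
        for i in range(world_size // model_parallel_size)
    ]  # [[0, 1], [2, 3], [4, 5], [6, 7]]
    return data_parallel_groups, model_parallel_groups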
def model_parallel_is_initialized():
"""Check if model and data parallel groups are initialized."""
if _MODEL_PARALLEL_GROUP is None or _DATA_PARALLEL_GROUP is None:
return False
return True
def get_model_parallel_group():
"""Get the model parallel group the caller rank belongs to."""
assert _MODEL_PARALLEL_GROUP is not None, "model parallel group is not initialized"
return _MODEL_PARALLEL_GROUP
def get_data_parallel_group():
"""Get the data parallel group the caller rank belongs to."""
assert _DATA_PARALLEL_GROUP is not None, "data parallel group is not initialized"
return _DATA_PARALLEL_GROUP
def get_io_parallel_group():
"""Get the IO parallel group the caller rank belongs to."""
assert _IO_PARALLEL_GROUP is not None, "IO parallel group is not initialized"
return _IO_PARALLEL_GROUP
def set_model_parallel_world_size(world_size):
"""Set the model parallel size"""
global _MPU_WORLD_SIZE
_MPU_WORLD_SIZE = world_size
def get_model_parallel_world_size():
"""Return world size for the model parallel group."""
global _MPU_WORLD_SIZE
if _MPU_WORLD_SIZE is not None:
return _MPU_WORLD_SIZE
return torch.distributed.get_world_size(group=get_model_parallel_group())
def set_model_parallel_rank(rank):
"""Set model parallel rank."""
global _MPU_RANK
_MPU_RANK = rank
def get_model_parallel_rank():
"""Return my rank for the model parallel group."""
global _MPU_RANK
if _MPU_RANK is not None:
return _MPU_RANK
return torch.distributed.get_rank(group=get_model_parallel_group())
def get_model_parallel_src_rank():
"""Calculate the global rank corresponding to a local rank zero
in the model parallel group."""
global_rank = torch.distributed.get_rank()
local_world_size = get_model_parallel_world_size()
return (global_rank // local_world_size) * local_world_size
def get_data_parallel_src_rank():
"""Calculate the global rank corresponding to a local rank zero
in the data parallel group."""
global_rank = torch.distributed.get_rank()
topo = get_topology()
if topo is None:
# we are just using model parallel
return global_rank % get_model_parallel_world_size()
else:
# We are using pipeline parallel
d = topo.get_axis_comm_lists("data")
for l in d:
if global_rank in l:
return l[0]
def get_data_parallel_world_size():
"""Return world size for the data parallel group."""
return torch.distributed.get_world_size(group=get_data_parallel_group())
def get_data_parallel_rank():
"""Return my rank for the data parallel group."""
return torch.distributed.get_rank(group=get_data_parallel_group())
def get_topology():
return _MPU_TOPOLOGY
def get_pipe_parallel_group():
"""Get the pipe parallel group the caller rank belongs to."""
    assert _PIPE_PARALLEL_GROUP is not None, "pipe parallel group is not initialized"
return _PIPE_PARALLEL_GROUP
def get_pipe_parallel_rank():
"""Return my rank for the pipe parallel group."""
return torch.distributed.get_rank(group=get_pipe_parallel_group())
def get_pipe_parallel_world_size():
"""Return world size for the pipe parallel group."""
return torch.distributed.get_world_size(group=get_pipe_parallel_group())
def destroy_model_parallel():
"""Set the groups to none."""
global _MODEL_PARALLEL_GROUP
_MODEL_PARALLEL_GROUP = None
global _DATA_PARALLEL_GROUP
_DATA_PARALLEL_GROUP = None
global _PIPE_PARALLEL_GROUP
_PIPE_PARALLEL_GROUP = None
global _IO_PARALLEL_GROUP
_IO_PARALLEL_GROUP = None
global _MPU_WORLD_SIZE
global _MPU_RANK
_MPU_WORLD_SIZE = None
_MPU_RANK = None
global _MPU_TOPOLOGY
_MPU_TOPOLOGY = None
global _FP32_ALLREDUCE
_FP32_ALLREDUCE = None
def get_fp32_allreduce():
"""Get the fp32 allreduce flag"""
    assert _FP32_ALLREDUCE is not None, "fp32_allreduce is not initialized"
return _FP32_ALLREDUCE
| 10,377 | 33.478405 | 106 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/mpu/cross_entropy.py
|
# Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from .initialize import get_model_parallel_group
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .utils import VocabUtility
class _VocabParallelCrossEntropy(torch.autograd.Function):
@staticmethod
def forward(ctx, vocab_parallel_logits, target):
# Maximum value along vocab dimension across all GPUs.
logits_max = torch.max(vocab_parallel_logits, dim=-1)[0]
torch.distributed.all_reduce(
logits_max,
op=torch.distributed.ReduceOp.MAX,
group=get_model_parallel_group(),
)
# Subtract the maximum value.
vocab_parallel_logits.sub_(logits_max.unsqueeze(dim=-1))
# Get the partition's vocab indices
get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
partition_vocab_size = vocab_parallel_logits.size()[-1]
rank = get_model_parallel_rank()
world_size = get_model_parallel_world_size()
vocab_start_index, vocab_end_index = get_vocab_range(
partition_vocab_size, rank, world_size
)
# Create a mask of valid vocab ids (1 means it needs to be masked).
target_mask = (target < vocab_start_index) | (target >= vocab_end_index)
masked_target = target.clone() - vocab_start_index
masked_target[target_mask] = 0
# Get predicted-logits = logits[target].
# For Simplicity, we convert logits to a 2-D tensor with size
# [*, partition-vocab-size] and target to a 1-D tensor of size [*].
logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size)
masked_target_1d = masked_target.view(-1)
arange_1d = torch.arange(
start=0, end=logits_2d.size()[0], device=logits_2d.device
)
predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
predicted_logits_1d = predicted_logits_1d.clone().contiguous()
predicted_logits = predicted_logits_1d.view_as(target)
predicted_logits[target_mask] = 0.0
# All reduce is needed to get the chunks from other GPUs.
torch.distributed.all_reduce(
predicted_logits,
op=torch.distributed.ReduceOp.SUM,
group=get_model_parallel_group(),
)
# Sum of exponential of logits along vocab dimension across all GPUs.
exp_logits = vocab_parallel_logits
torch.exp(vocab_parallel_logits, out=exp_logits)
sum_exp_logits = exp_logits.sum(dim=-1)
torch.distributed.all_reduce(
sum_exp_logits,
op=torch.distributed.ReduceOp.SUM,
group=get_model_parallel_group(),
)
# Loss = log(sum(exp(logits))) - predicted-logit.
loss = torch.log(sum_exp_logits) - predicted_logits
# Store softmax, target-mask and masked-target for backward pass.
exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)
return loss
@staticmethod
def backward(ctx, grad_output):
# Retrieve tensors from the forward path.
softmax, target_mask, masked_target_1d = ctx.saved_tensors
# All the inputs have softmax as their gradient.
grad_input = softmax
# For simplicity, work with the 2D gradient.
partition_vocab_size = softmax.size()[-1]
grad_2d = grad_input.view(-1, partition_vocab_size)
# Add the gradient from matching classes.
arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=grad_2d.device)
grad_2d[arange_1d, masked_target_1d] -= 1.0 - target_mask.view(-1).float()
# Finally elementwise multiplication with the output gradients.
grad_input.mul_(grad_output.unsqueeze(dim=-1))
return grad_input, None
def vocab_parallel_cross_entropy(vocab_parallel_logits, target):
"""Helper function for the cross entropy."""
return _VocabParallelCrossEntropy.apply(vocab_parallel_logits, target)
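# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): with a single model
# parallel partition, the loss computed above reduces to the familiar
# log-sum-exp form, i.e. ordinary per-token cross entropy.
def _single_partition_cross_entropy_example():
    logits = torch.randn(4, 11)  # [tokens, vocab]
    target = torch.randint(0, 11, (4,))
    manual = torch.logsumexp(logits, dim=-1) - logits[torch.arange(4), target]
    reference = torch.nn.functional.cross_entropy(logits, target, reduction="none")
    return torch.allclose(manual, reference, atol=1e-5)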
| 4,799 | 39.677966 | 106 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/mpu/utils.py
|
# Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def ensure_divisibility(numerator, denominator):
"""Ensure that numerator is divisible by the denominator."""
assert numerator % denominator == 0, "{} is not divisible by {}".format(
numerator, denominator
)
def divide(numerator, denominator):
"""Ensure that numerator is divisible by the denominator and return
the division value."""
ensure_divisibility(numerator, denominator)
return numerator // denominator
def split_tensor_along_last_dim(tensor, num_partitions, contiguous_split_chunks=False):
"""Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
last_dim_size = divide(tensor.size()[last_dim], num_partitions)
# Split.
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
class VocabUtility:
"""Split the vocabulary into `world_size` chunks amd return the
first and last index of the vocabulary belonging to the `rank`
partition: Note that indices in [first, last]"""
@staticmethod
def vocab_range_from_per_partition_vocab_size(
per_partition_vocab_size, rank, world_size
):
index_f = rank * per_partition_vocab_size
index_l = index_f + per_partition_vocab_size
return index_f, index_l
@staticmethod
def vocab_range_from_global_vocab_size(global_vocab_size, rank, world_size):
per_partition_vocab_size = divide(global_vocab_size, world_size)
return VocabUtility.vocab_range_from_per_partition_vocab_size(
per_partition_vocab_size, rank, world_size
)
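# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how VocabUtility
# carves up a vocabulary. The numbers are an assumed example (vocab size
# 50304, 4-way model parallelism), not taken from any config.
def _vocab_partition_example():
    # per-partition size: 50304 / 4 = 12576
    # rank 0 -> [0, 12576), rank 1 -> [12576, 25152), rank 2 -> [25152, 37728), ...
    return [
        VocabUtility.vocab_range_from_global_vocab_size(50304, rank, 4)
        for rank in range(4)
    ]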
def params_dtype(precision):
"""
returns the datatype on the basis of configured precision
"""
if precision == "float16":
return torch.half
elif precision == "bfloat16":
return torch.bfloat16
else:
return torch.float
| 3,041 | 34.788235 | 106 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/mpu/data.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from .initialize import get_model_parallel_group
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_src_rank
_MAX_DATA_DIM = 4
def _check_data_types(keys, data, target_dtype):
"""Check that all the keys have the same target data type."""
for key in keys:
assert (
data[key].dtype == target_dtype
), "{} has data type {} which " "is different than {}".format(
key, data[key].dtype, target_dtype
)
def _build_key_size_numel_dictionaries(keys, data):
"""Build the size on rank 0 and broadcast."""
max_dim = _MAX_DATA_DIM
sizes = [0 for _ in range(max_dim) for _ in keys]
# Pack the sizes on rank zero.
if get_model_parallel_rank() == 0:
offset = 0
for key in keys:
assert data[key].dim() < max_dim, "you should increase MAX_DATA_DIM"
size = data[key].size()
for i, s in enumerate(size):
sizes[i + offset] = s
offset += max_dim
# Move to GPU and broadcast.
sizes_cuda = torch.cuda.LongTensor(sizes)
torch.distributed.broadcast(
sizes_cuda, get_model_parallel_src_rank(), group=get_model_parallel_group()
)
# Move back to cpu and unpack.
sizes_cpu = sizes_cuda.cpu()
key_size = {}
key_numel = {}
total_numel = 0
offset = 0
for key in keys:
i = 0
size = []
numel = 1
while sizes_cpu[offset + i] > 0:
this_size = sizes_cpu[offset + i]
size.append(this_size)
numel *= this_size
i += 1
key_size[key] = size
key_numel[key] = numel
total_numel += numel
offset += max_dim
return key_size, key_numel, total_numel
def broadcast_data(keys, data, datatype):
"""Broadcast data from rank zero of each model parallel group to the
members of the same model parallel group.
Arguments:
keys: list of keys in the data dictionary to be broadcasted
data: data dictionary of string keys and cpu tensor values.
datatype: torch data type of all tensors in data associated
with keys.
"""
# Build (key, size) and (key, number of elements) dictionaries along
# with the total number of elements on all ranks.
key_size, key_numel, total_numel = _build_key_size_numel_dictionaries(keys, data)
# Pack on rank zero.
if get_model_parallel_rank() == 0:
# Check that all keys have the same data type.
_check_data_types(keys, data, datatype)
# Flatten the data associated with the keys
flatten_data = torch.cat(
[data[key].contiguous().view(-1) for key in keys], dim=0
).cuda()
else:
flatten_data = torch.empty(
total_numel, device=torch.cuda.current_device(), dtype=datatype
)
# Broadcast
torch.distributed.broadcast(
flatten_data, get_model_parallel_src_rank(), group=get_model_parallel_group()
)
# Unpack
output = {}
offset = 0
for key in keys:
size = key_size[key]
numel = key_numel[key]
output[key] = flatten_data.narrow(0, offset, numel).view(size)
offset += numel
return output
| 3,886 | 31.123967 | 85 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/mpu/layers.py
|
# Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
import math
import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.parameter import Parameter
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_init_params_in_cuda
from .mappings import copy_to_model_parallel_region
from .mappings import gather_from_model_parallel_region
from .mappings import reduce_from_model_parallel_region
from .mappings import scatter_to_model_parallel_region
from .random import get_cuda_rng_tracker
from .utils import divide
from .utils import VocabUtility
def _initialize_affine_weight_gpu(weight, init_method, partition_dim, stride=1):
"""Initialize affine weight for model parallel on GPU."""
weight.model_parallel = True
weight.partition_dim = partition_dim
weight.partition_stride = stride
try:
with get_cuda_rng_tracker().fork():
init_method(weight)
    except Exception:
        # fall back to plain initialization when the CUDA RNG tracker is unavailable (e.g. CPU init)
        init_method(weight)
class VocabParallelEmbedding(torch.nn.Module):
"""Embedding parallelized in the vocabulary dimension.
This is mainly adapted from torch.nn.Embedding and all the default
values are kept.
Arguments:
num_embeddings: vocabulary size.
embedding_dim: size of hidden state.
init_method: method to initialize weights.
"""
def __init__(
self, config, num_embeddings, embedding_dim, init_method=init.xavier_normal_
):
super(VocabParallelEmbedding, self).__init__()
# Keep the input dimensions.
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
        # Set the defaults for compatibility.
self.padding_idx = None
self.max_norm = None
self.norm_type = 2.0
self.scale_grad_by_freq = False
self.sparse = False
self._weight = None
self.model_parallel_size = get_model_parallel_world_size()
# Divide the weight matrix along the vocabulary dimension.
(
self.vocab_start_index,
self.vocab_end_index,
) = VocabUtility.vocab_range_from_global_vocab_size(
self.num_embeddings, get_model_parallel_rank(), self.model_parallel_size
)
self.num_embeddings_per_partition = (
self.vocab_end_index - self.vocab_start_index
)
self.weight = Parameter(
torch.empty(
self.num_embeddings_per_partition,
self.embedding_dim,
device=torch.cuda.current_device() if get_init_params_in_cuda() else "cpu",
dtype=config.torch_dtype,
)
)
_initialize_affine_weight_gpu(
self.weight, init_method, partition_dim=0, stride=1
)
def forward(self, input_):
if self.model_parallel_size > 1:
# Build the mask.
input_mask = (input_ < self.vocab_start_index) | (
input_ >= self.vocab_end_index
)
# Mask the input.
masked_input = input_.clone() - self.vocab_start_index
masked_input[input_mask] = 0
else:
masked_input = input_
# Get the embeddings.
output_parallel = F.embedding(
masked_input,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
# Mask the output embedding.
if self.model_parallel_size > 1:
output_parallel[input_mask, :] = 0.0
# Reduce across all the model parallel GPUs.
output = reduce_from_model_parallel_region(output_parallel)
return output
class ParallelRelativePositionBias(torch.nn.Module):
"""T5 Relative Position Bias parallelized in the heads dimension
Based on https://github.com/lucidrains/x-transformers/blob/6b93c21be0d0a679da6f7b9621d9bb638ab18428/x_transformers/x_transformers.py#L106 (14.12.2021)
and adapted for megatron's model parallelism
Arguments:
scale: scaling factor for the bias
causal: flag for causal/non-causal language modelling.
num_buckets: number of rp buckets.
max_distance: max distance in sequence dim for each bucket.
heads: number of attention heads (total)
"""
def __init__(
self,
config,
scale,
causal=True,
num_buckets=32,
max_distance=128,
heads=8,
init_method=init.xavier_normal_,
):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.heads = heads
# Set the defaults for compatibility.
self.padding_idx = None
self.max_norm = None
self.norm_type = 2.0
self.scale_grad_by_freq = False
self.sparse = False
self._weight = None
self.model_parallel_size = get_model_parallel_world_size()
self.model_parallel_rank = get_model_parallel_rank()
# Divide the weight matrix along the heads dimension.
self.head_start_index, self.head_end_index = self.get_heads_range(
self.heads, self.model_parallel_rank, self.model_parallel_size
)
self.num_heads_per_partition = self.head_end_index - self.head_start_index
self.weight = Parameter(
torch.empty(
self.num_buckets,
self.num_heads_per_partition,
device=torch.cuda.current_device() if get_init_params_in_cuda() else "cpu",
dtype=config.torch_dtype,
)
)
_initialize_affine_weight_gpu(
self.weight, init_method, partition_dim=1, stride=1
)
self._q_len_cached = None
self._k_len_cached = None
self._rel_pos_bucket_cached = None
@staticmethod
def get_heads_range(global_n_heads, rank, world_size):
per_partition_n_heads = divide(global_n_heads, world_size)
index_f = rank * per_partition_n_heads
index_l = index_f + per_partition_n_heads
return index_f, index_l
def _relative_position_bucket(
self, relative_position, num_buckets=32, max_distance=128
):
ret = 0
n = -relative_position
if not self.causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = (
max_exact
+ (
torch.log(n.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).long()
)
val_if_large = torch.min(
val_if_large, torch.full_like(val_if_large, num_buckets - 1)
)
ret += torch.where(is_small, n, val_if_large)
self._rel_pos_bucket_cached = ret
return self._rel_pos_bucket_cached
def forward(self, q_len, k_len):
if self._q_len_cached != q_len or self._k_len_cached != k_len:
# cache bucket if first step seq len stays constant
self._q_len_cached, self._k_len_cached = q_len, k_len
q_pos = torch.arange(
q_len, dtype=torch.long, device=torch.cuda.current_device()
)
k_pos = torch.arange(
k_len, dtype=torch.long, device=torch.cuda.current_device()
)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(
rel_pos, num_buckets=self.num_buckets, max_distance=self.max_distance
)
else:
rp_bucket = self._rel_pos_bucket_cached
values = F.embedding(
rp_bucket,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
bias = values.movedim(2, 0).unsqueeze(0)
return bias * self.scale
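# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a standalone,
# non-parallel restatement of the causal bucketing rule used by
# _relative_position_bucket, for a single non-negative integer distance
# (how far the key position lies behind the query). With the default
# num_buckets=32 and max_distance=128, distances 0..15 each get their own
# bucket, while larger distances share logarithmically spaced buckets
# (e.g. a distance of 32 lands in bucket 21 and 64 in bucket 26).
def _relative_bucket_for_distance(distance: int, num_buckets: int = 32, max_distance: int = 128) -> int:
    max_exact = num_buckets // 2
    if distance < max_exact:
        return distance
    bucket = max_exact + int(
        math.log(distance / max_exact)
        / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    )
    return min(bucket, num_buckets - 1)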
class ColumnParallelLinear(torch.nn.Module):
"""Linear layer with column parallelism.
The linear layer is defined as Y = XA + b. A is parallelized along
its second dimension as A = [A_1, ..., A_p].
Arguments:
input_size: first dimension of matrix A.
output_size: second dimension of matrix A.
bias: If true, add bias
gather_output: If true, call all-gather on output and make Y available
to all GPUs, otherwise, every GPU will have its output
which is Y_i = XA_i
init_method: method to initialize weights. Note that bias is always set
to zero.
stride: For the strided linear layers.
keep_master_weight_for_test: This was added for testing and should be
set to False. It returns the master weights
used for initialization.
        skip_bias_add: This was added to enable performance optimizations where bias
can be fused with other elementwise operations. we skip
adding bias but instead return it.
"""
def __init__(
self,
config,
input_size,
output_size,
bias=True,
gather_output=True,
init_method=init.xavier_normal_,
stride=1,
keep_master_weight_for_test=False,
skip_bias_add=False,
):
super(ColumnParallelLinear, self).__init__()
# Keep input parameters
self.input_size = input_size
self.output_size = output_size
self.gather_output = gather_output
# Divide the weight matrix along the last dimension.
world_size = get_model_parallel_world_size()
self.output_size_per_partition = divide(output_size, world_size)
self.skip_bias_add = skip_bias_add
# Parameters.
# Note: torch.nn.functional.linear performs XA^T + b and as a result
# we allocate the transpose.
# Initialize weight.
self.weight = Parameter(
torch.empty(
self.output_size_per_partition,
self.input_size,
device=torch.cuda.current_device() if get_init_params_in_cuda() else "cpu",
dtype=config.torch_dtype,
)
)
_initialize_affine_weight_gpu(
self.weight, init_method, partition_dim=0, stride=stride
)
if bias:
self.bias = Parameter(
torch.empty(
self.output_size_per_partition,
device=torch.cuda.current_device() if get_init_params_in_cuda() else "cpu",
dtype=config.torch_dtype,
)
)
self.bias.model_parallel = True
self.bias.partition_dim = 0
self.bias.stride = stride
# Always initialize bias to zero.
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter("bias", None)
def set_parallel_output(self, value: bool):
assert isinstance(value, bool)
self.gather_output = (
not value
) # if gather_output is True, parallel output is False, so we set the opposite
def forward(self, input_):
# Set up backprop all-reduce.
input_parallel = copy_to_model_parallel_region(input_)
# Matrix multiply.
bias = self.bias if not self.skip_bias_add else None
output_parallel = F.linear(input_parallel, self.weight, bias)
if self.gather_output:
# All-gather across the partitions.
output = gather_from_model_parallel_region(output_parallel)
else:
output = output_parallel
output_bias = self.bias if self.skip_bias_add else None
return output, output_bias
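# A minimal usage sketch (not part of the original file). It assumes the model-parallel
# groups have already been set up (e.g. via initialize_model_parallel) and that `config`
# carries a valid `torch_dtype`; the sizes below are hypothetical.
def _example_column_parallel_linear(config):
    # Split a 1024 -> 4096 projection along the output dimension across ranks.
    layer = ColumnParallelLinear(config, input_size=1024, output_size=4096,
                                 gather_output=True)
    x = torch.randn(8, 1024, device=torch.cuda.current_device(), dtype=config.torch_dtype)
    # With gather_output=True the partial outputs Y_i = XA_i are all-gathered, so every
    # rank sees the full 4096-dim result.
    output, _ = layer(x)
    return output.shape  # expected: torch.Size([8, 4096])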
class RowParallelLinear(torch.nn.Module):
"""Linear layer with row parallelism.
The linear layer is defined as Y = XA + b. A is parallelized along
its first dimension and X along its second dimension as:
- -
| A_1 |
| . |
A = | . | X = [X_1, ..., X_p]
| . |
| A_p |
- -
Arguments:
input_size: first dimension of matrix A.
output_size: second dimension of matrix A.
bias: If true, add bias. Note that bias is not parallelized.
input_is_parallel: If true, we assume that the input is already
split across the GPUs and we do not split
again.
init_method: method to initialize weights. Note that bias is always set
to zero.
stride: For the strided linear layers.
keep_master_weight_for_test: This was added for testing and should be
set to False. It returns the master weights
used for initialization.
        skip_bias_add: This was added to enable performance optimizations where bias
                       can be fused with other elementwise operations. We skip
                       adding bias but instead return it.
"""
def __init__(
self,
config,
input_size,
output_size,
bias=True,
input_is_parallel=False,
init_method=init.xavier_normal_,
stride=1,
keep_master_weight_for_test=False,
skip_bias_add=False,
parallel_output=False,
):
super(RowParallelLinear, self).__init__()
# Keep input parameters
self.input_size = input_size
self.output_size = output_size
self.input_is_parallel = input_is_parallel
        # Divide the weight matrix along its first (input) dimension.
world_size = get_model_parallel_world_size()
self.input_size_per_partition = divide(input_size, world_size)
self.skip_bias_add = skip_bias_add
self.parallel_output = parallel_output
# Parameters.
# Note: torch.nn.functional.linear performs XA^T + b and as a result
# we allocate the transpose.
# Initialize weight.
self.weight = Parameter(
torch.empty(
self.output_size,
self.input_size_per_partition,
device=torch.cuda.current_device() if get_init_params_in_cuda() else "cpu",
dtype=config.torch_dtype,
)
)
_initialize_affine_weight_gpu(
self.weight, init_method, partition_dim=1, stride=stride
)
if bias:
self.bias = Parameter(
torch.empty(
self.output_size,
device=torch.cuda.current_device() if get_init_params_in_cuda() else "cpu",
dtype=config.torch_dtype,
)
)
# Always initialize bias to zero.
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter("bias", None)
def set_parallel_output(self, parallel_output: bool):
assert isinstance(parallel_output, bool)
self.parallel_output = parallel_output
def forward(self, input_):
# Set up backprop all-reduce.
if self.input_is_parallel:
input_parallel = input_
else:
input_parallel = scatter_to_model_parallel_region(input_)
# Matrix multiply.
output_parallel = F.linear(input_parallel, self.weight)
# All-reduce across all the partitions.
if not self.parallel_output:
output_ = reduce_from_model_parallel_region(output_parallel)
else:
output_ = output_parallel
if not self.skip_bias_add:
output = output_ + self.bias if self.bias is not None else output_
output_bias = None
else:
output = output_
output_bias = self.bias
return output, output_bias
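# A minimal usage sketch (not part of the original file). In Megatron-style MLPs a
# ColumnParallelLinear (with gather_output=False) is typically followed by a
# RowParallelLinear (with input_is_parallel=True), so the intermediate activation stays
# sharded and only the final output is all-reduced; `config` and the sizes are assumptions.
def _example_column_then_row_parallel(config, hidden_size=1024, ffn_size=4096):
    up = ColumnParallelLinear(config, hidden_size, ffn_size, gather_output=False)
    down = RowParallelLinear(config, ffn_size, hidden_size, input_is_parallel=True)
    x = torch.randn(8, hidden_size, device=torch.cuda.current_device(), dtype=config.torch_dtype)
    h, _ = up(x)        # sharded: last dim is ffn_size // model_parallel_world_size
    y, _ = down(h)      # all-reduced back to the full hidden_size on every rank
    return y.shape      # expected: torch.Size([8, 1024])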
| 17,089 | 35.284501 | 154 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/mpu/random.py
|
# Copyright (c) 2021, EleutherAI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# mostly moving to using checkpointing from deepspeed (identical code anyway) so currently this file is only imports
# TODO: should be able to get rid of this file entirely
import deepspeed
import deepspeed.runtime.activation_checkpointing.checkpointing as checkpointing
# Default name for the model parallel rng tracker.
_MODEL_PARALLEL_RNG_TRACKER_NAME = (
deepspeed.checkpointing._MODEL_PARALLEL_RNG_TRACKER_NAME
)
# Whether apply model parallelsim to checkpointed hidden states.
_CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER = None
# RNG tracker object.
_CUDA_RNG_STATE_TRACKER = deepspeed.checkpointing._CUDA_RNG_STATE_TRACKER
# Deepspeed checkpointing functions
# TODO: replace calls to these in our codebase with calls to the deepspeed ones
_set_cuda_rng_state = checkpointing._set_cuda_rng_state
checkpoint = checkpointing.checkpoint
model_parallel_cuda_manual_seed = checkpointing.model_parallel_cuda_manual_seed
get_cuda_rng_tracker = checkpointing.get_cuda_rng_tracker
| 1,563 | 40.157895 | 116 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/mpu/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model parallel utility interface."""
from .cross_entropy import vocab_parallel_cross_entropy
from .data import broadcast_data
from .initialize import is_unitialized
from .initialize import destroy_model_parallel
from .initialize import get_data_parallel_group
from .initialize import get_data_parallel_rank
from .initialize import get_data_parallel_world_size
from .initialize import get_model_parallel_group
from .initialize import get_model_parallel_rank, set_model_parallel_rank
from .initialize import get_model_parallel_src_rank, get_data_parallel_src_rank
from .initialize import get_model_parallel_world_size, set_model_parallel_world_size
from .initialize import get_topology
from .initialize import get_pipe_parallel_group
from .initialize import get_pipe_parallel_rank
from .initialize import get_pipe_parallel_world_size
from .initialize import get_io_parallel_group
from .initialize import initialize_model_parallel
from .initialize import model_parallel_is_initialized
from .initialize import set_init_params_in_cuda
from .layers import ColumnParallelLinear
from .layers import RowParallelLinear
from .layers import VocabParallelEmbedding
from .layers import ParallelRelativePositionBias
from .mappings import copy_to_model_parallel_region
from .mappings import gather_from_model_parallel_region
from .mappings import reduce_from_model_parallel_region
from .mappings import scatter_to_model_parallel_region
from .random import checkpoint
from .random import get_cuda_rng_tracker
from .random import model_parallel_cuda_manual_seed
from .utils import divide
from .utils import split_tensor_along_last_dim
| 2,238 | 39.709091 | 84 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/fused_kernels/setup.py
|
from setuptools import setup, find_packages
from torch.utils import cpp_extension
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
from pathlib import Path
import subprocess
def _get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output(
[cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True
)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
srcpath = Path(__file__).parent.absolute()
cc_flag = []
_, bare_metal_major, _ = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
if int(bare_metal_major) >= 11:
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
nvcc_flags = [
"-O3",
"-gencode",
"arch=compute_70,code=sm_70",
"--use_fast_math",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
]
cuda_ext_args = {"cxx": ["-O3"], "nvcc": nvcc_flags + cc_flag}
layernorm_cuda_args = {
"cxx": ["-O3"],
"nvcc": nvcc_flags + cc_flag + ["-maxrregcount=50"],
}
setup(
name="fused_kernels",
version="0.0.1",
author="Sid Black & Alejandro Molina et al.",
author_email="[email protected]",
include_package_data=False,
ext_modules=[
CUDAExtension(
"scaled_upper_triang_masked_softmax_cuda",
[
str(srcpath / "scaled_upper_triang_masked_softmax.cpp"),
str(srcpath / "scaled_upper_triang_masked_softmax_cuda.cu"),
],
extra_compile_args=cuda_ext_args,
),
CUDAExtension(
"scaled_masked_softmax_cuda",
[
str(srcpath / "scaled_masked_softmax.cpp"),
str(srcpath / "scaled_masked_softmax_cuda.cu"),
],
extra_compile_args=cuda_ext_args,
),
],
cmdclass={"build_ext": BuildExtension},
)
| 2,105 | 29.521739 | 78 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/fused_kernels/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import subprocess
from torch.utils import cpp_extension
from pathlib import Path
srcpath = Path(__file__).parent.absolute()
# Setting this param to a list has a problem of generating different
# compilation commands (with different order of architectures) and
# leading to recompilation of fused kernels. Set it to empty string
# to avoid recompilation and assign arch flags explicitly in
# extra_cuda_cflags below
os.environ["TORCH_CUDA_ARCH_LIST"] = ""
def load_fused_kernels():
try:
import scaled_upper_triang_masked_softmax_cuda
import scaled_masked_softmax_cuda
except (ImportError, ModuleNotFoundError):
print("\n")
print("=" * 100)
print(
f'ERROR: Fused kernels configured but not installed. Please run `python {str(srcpath / "setup.py")} install` to install them'
)
print("=" * 100)
exit()
return
| 1,534 | 33.111111 | 137 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/megatron/fused_kernels/tests/test_fused_kernels.py
|
import math
import torch
from torch.nn import LayerNorm
from megatron.model.fused_softmax import FusedScaleMaskSoftmax
from megatron.model.gpt2_model import gpt2_attention_mask_func as attention_mask_func
# NOTE: the tests below also reference AttnMaskType and MixedFusedLayerNorm, which are
# not imported in the original file; in upstream Megatron-LM they live in
# megatron.model.enums and megatron.model.fused_layer_norm respectively, and would need
# to be imported from this repo's equivalent modules for test_fused_softmax and
# test_layer_norm to run.
def test_load_fused_kernels():
try:
import scaled_masked_softmax_cuda
import scaled_upper_triang_masked_softmax_cuda
import torch
print("[Success] load_fused_kernels")
except ImportError as e:
print("[Fail] load_fused_kernels")
raise e
def test_fused_softmax():
bert = BertModel.from_pretrained("bert-base-cased").cuda().half()
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
test_text = (
"Hello. How are you? I am fine thank you and you? yes Good. "
"hi hi hi hi hi hi hi hi hi hi hi hi hi" # 32
)
tokens = tokenizer(
[test_text] * 4,
return_tensors="pt",
)
embedding_output = bert.embeddings(
input_ids=tokens["input_ids"].cuda(),
position_ids=None,
token_type_ids=tokens["token_type_ids"].cuda(),
inputs_embeds=None,
past_key_values_length=0,
)
# (bsz, 1, 1, seq_len)
mask = bert.get_extended_attention_mask(
attention_mask=tokens["attention_mask"].cuda(),
input_shape=tokens["input_ids"].shape,
device=bert.device,
)
# (bsz, 1, seq_len, seq_len)
mask = mask.repeat(1, 1, mask.size()[-1], 1)
attention = bert.encoder.layer[0].attention.self
key_layer = attention.transpose_for_scores(attention.key(embedding_output))
query_layer = attention.transpose_for_scores(attention.query(embedding_output))
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores /= math.sqrt(key_layer.size()[-1])
fused_softmax = (
FusedScaleMaskSoftmax(
input_in_fp16=True,
input_in_bf16=False,
mask_func=attention_mask_func,
scale=None,
softmax_in_fp32=False,
attn_mask_type=AttnMaskType.padding,
scaled_masked_softmax_fusion=True,
)
.cuda()
.half()
)
fused_softmax_output = fused_softmax(
attention_scores,
(mask != 0),
)
torch_softmax = (
FusedScaleMaskSoftmax(
input_in_fp16=True,
input_in_bf16=False,
mask_func=attention_mask_func,
scale=None,
softmax_in_fp32=False,
attn_mask_type=AttnMaskType.padding,
scaled_masked_softmax_fusion=False,
)
.cuda()
.half()
)
torch_softmax_output = torch_softmax(
attention_scores,
(mask != 0),
)
test_result = (fused_softmax_output - torch_softmax_output).abs()
while test_result.dim() != 1:
test_result = test_result.mean(dim=-1)
diff = test_result.mean(dim=-1)
if diff <= 1e-3:
print(
f"\n[Success] test_fused_softmax"
f"\n > mean_difference={diff}"
f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}"
f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}"
)
else:
print(
f"\n[Fail] test_fused_softmax"
f"\n > mean_difference={diff}, "
f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}, "
f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}"
)
def test_fused_upper_triangle_mask_softmax():
gpt = GPT2Model.from_pretrained("gpt2").cuda().half()
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
test_text = (
"Hello. How are you? I am fine thank you and you? yes Good. "
"hi hi hi hi hi hi hi" # 24
)
tokens = tokenizer(
[test_text] * 4,
return_tensors="pt",
)
attention_mask = tokens["attention_mask"].cuda()
attention_mask = attention_mask.view(attention_mask.size(0), -1)
attention_mask = attention_mask[:, None, None, :]
attention_mask = (1.0 - attention_mask) * -10000.0
attention_mask = attention_mask.repeat(1, 1, attention_mask.size()[-1], 1)
attn = gpt.h[0]
hidden_states = gpt.wte(tokens["input_ids"].cuda())
q, k, v = attn.attn.c_attn(hidden_states).split(768, dim=-1)
q = attn.attn._split_heads(q, attn.attn.num_heads, attn.attn.head_dim)
k = attn.attn._split_heads(k, attn.attn.num_heads, attn.attn.head_dim)
attn_weights = torch.matmul(q, k.transpose(-1, -2))
sq, sk = q.size(-2), k.size(-2)
causal_mask = attn.attn.bias[:, :, sk - sq : sk, :sk].bool()
total_mask = ~(causal_mask & (attention_mask == 0))
"""
tensor([[[[False, True, True, ..., True, True, True],
[False, False, True, ..., True, True, True],
[False, False, False, ..., True, True, True],
...,
[False, False, False, ..., False, True, True],
[False, False, False, ..., False, False, True],
[False, False, False, ..., False, False, False]]]
"""
fused_softmax = (
FusedScaleMaskSoftmax(
input_in_fp16=True,
input_in_bf16=False,
mask_func=attention_mask_func,
scale=None,
softmax_in_fp32=False,
attn_mask_type=AttnMaskType.causal,
scaled_masked_softmax_fusion=True,
)
.cuda()
.half()
)
fused_softmax_output = fused_softmax(
attn_weights,
total_mask,
)
torch_softmax = (
FusedScaleMaskSoftmax(
input_in_fp16=True,
input_in_bf16=False,
mask_func=attention_mask_func,
scale=None,
softmax_in_fp32=False,
attn_mask_type=AttnMaskType.causal,
scaled_masked_softmax_fusion=False,
)
.cuda()
.half()
)
torch_softmax_output = torch_softmax(
attn_weights,
total_mask,
)
test_result = (fused_softmax_output - torch_softmax_output).abs()
while test_result.dim() != 1:
test_result = test_result.mean(dim=-1)
diff = test_result.mean(dim=-1)
if diff <= 1e-3:
print(
f"\n[Success] test_fused_upper_triangle_mask_softmax"
f"\n > mean_difference={diff}"
f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}"
f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}"
)
else:
print(
f"\n[Fail] test_fused_upper_triangle_mask_softmax"
f"\n > mean_difference={diff}, "
f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}, "
f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}"
)
def test_layer_norm():
bert = BertModel.from_pretrained("bert-base-cased").cuda().half()
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
test_text = (
"Hello. How are you? I am fine thank you and you? yes Good. "
"hi hi hi hi hi hi hi hi hi hi hi hi hi" # 32
)
tokens = tokenizer(
[test_text] * 4,
return_tensors="pt",
)
# [bsz, seq_len, d_model]
embedding_output = (
bert.embeddings(
input_ids=tokens["input_ids"].cuda(),
position_ids=None,
token_type_ids=tokens["token_type_ids"].cuda(),
inputs_embeds=None,
past_key_values_length=0,
)
.cuda()
.half()
)
fused_layernorm_layer = (
MixedFusedLayerNorm(normalized_shape=embedding_output.size(-1)).cuda().half()
)
torch_layernorm_layer = (
LayerNorm(normalized_shape=embedding_output.size(-1)).cuda().half()
)
fused_output = fused_layernorm_layer(embedding_output)
torch_output = torch_layernorm_layer(embedding_output)
test_result = (fused_output - torch_output).abs()
while test_result.dim() != 1:
test_result = test_result.mean(dim=-1)
diff = test_result.mean(dim=-1)
if diff <= 1e-3:
print(
f"\n[Success] test_layer_norm"
f"\n > mean_difference={diff}"
f"\n > fused_values={fused_output[-1][-1][:5].tolist()}"
f"\n > torch_values={torch_output[-1][-1][:5].tolist()}"
)
else:
print(
f"\n[Fail] test_layer_norm"
f"\n > mean_difference={diff}, "
f"\n > fused_values={fused_output[-1][-1][:5].tolist()}, "
f"\n > torch_values={torch_output[-1][-1][:5].tolist()}"
)
if __name__ == "__main__":
try:
from transformers import BertTokenizer, GPT2Tokenizer
from transformers.models.bert.modeling_bert import BertModel
from transformers.models.gpt2.modeling_gpt2 import GPT2Model
import transformers
transformers.logging.set_verbosity(
transformers.logging.FATAL,
)
    except ImportError:
print("\n[Fail] Please install `transformers` package to test fused kernels\n")
exit(-1)
test_load_fused_kernels()
test_fused_softmax()
test_fused_upper_triangle_mask_softmax()
| 9,169 | 29.875421 | 87 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/ubert/modeling_ubert.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import basicConfig, setLogRecordFactory
import torch
from torch import nn
import json
from tqdm import tqdm
import os
import numpy as np
from transformers import (
AutoTokenizer,
AutoModelForSequenceClassification,
BertTokenizer,
file_utils
)
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import trainer, loggers
from torch.utils.data import Dataset, DataLoader
from transformers.optimization import get_linear_schedule_with_warmup
from transformers import BertForPreTraining, BertForMaskedLM, BertModel
from transformers import BertConfig, BertForTokenClassification, BertPreTrainedModel
import transformers
import unicodedata
import re
import argparse
transformers.logging.set_verbosity_error()
# os.environ["CUDA_VISIBLE_DEVICES"] = '6'
def search(pattern, sequence):
n = len(pattern)
res = []
for i in range(len(sequence)):
if sequence[i:i + n] == pattern:
res.append([i, i + n-1])
return res
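# Illustrative note (not part of the original file): search() returns the [start, end]
# index pairs of every occurrence of `pattern` inside `sequence`, e.g.
#   search([2, 3], [1, 2, 3, 2, 3]) -> [[1, 2], [3, 4]]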
class UbertDataset(Dataset):
def __init__(self, data, tokenizer, args, used_mask=True):
super().__init__()
self.tokenizer = tokenizer
self.max_length = args.max_length
self.num_labels = args.num_labels
self.used_mask = used_mask
self.data = data
self.args = args
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.encode(self.data[index], self.used_mask)
def encode(self, item, used_mask=False):
input_ids1 = []
attention_mask1 = []
token_type_ids1 = []
span_labels1 = []
span_labels_masks1 = []
input_ids0 = []
attention_mask0 = []
token_type_ids0 = []
span_labels0 = []
span_labels_masks0 = []
subtask_type = item['subtask_type']
for choice in item['choices']:
try:
texta = item['task_type'] + '[SEP]' + \
subtask_type + '[SEP]' + choice['entity_type']
textb = item['text']
encode_dict = self.tokenizer.encode_plus(texta, textb,
max_length=self.max_length,
padding='max_length',
truncation='longest_first')
encode_sent = encode_dict['input_ids']
encode_token_type_ids = encode_dict['token_type_ids']
encode_attention_mask = encode_dict['attention_mask']
span_label = np.zeros((self.max_length, self.max_length))
span_label_mask = np.zeros(
(self.max_length, self.max_length))-10000
if item['task_type'] == '分类任务':
span_label_mask[0, 0] = 0
span_label[0, 0] = choice['label']
else:
question_len = len(self.tokenizer.encode(texta))
span_label_mask[question_len:, question_len:] = np.zeros(
(self.max_length-question_len, self.max_length-question_len))
for entity in choice['entity_list']:
# if 'entity_name' in entity.keys() and entity['entity_name']=='':
# continue
entity_idx_list = entity['entity_idx']
if entity_idx_list == []:
continue
for entity_idx in entity_idx_list:
if entity_idx == []:
continue
start_idx_text = item['text'][:entity_idx[0]]
start_idx_text_encode = self.tokenizer.encode(
start_idx_text, add_special_tokens=False)
start_idx = question_len + \
len(start_idx_text_encode)
end_idx_text = item['text'][:entity_idx[1]+1]
end_idx_text_encode = self.tokenizer.encode(
end_idx_text, add_special_tokens=False)
end_idx = question_len + \
len(end_idx_text_encode) - 1
if start_idx < self.max_length and end_idx < self.max_length:
span_label[start_idx, end_idx] = 1
if np.sum(span_label) < 1:
input_ids0.append(encode_sent)
attention_mask0.append(encode_attention_mask)
token_type_ids0.append(encode_token_type_ids)
span_labels0.append(span_label)
span_labels_masks0.append(span_label_mask)
else:
input_ids1.append(encode_sent)
attention_mask1.append(encode_attention_mask)
token_type_ids1.append(encode_token_type_ids)
span_labels1.append(span_label)
span_labels_masks1.append(span_label_mask)
            except Exception:
print(item)
print(texta)
print(textb)
randomize = np.arange(len(input_ids0))
np.random.shuffle(randomize)
cur = 0
count = len(input_ids1)
while count < self.args.num_labels:
if cur < len(randomize):
input_ids1.append(input_ids0[randomize[cur]])
attention_mask1.append(attention_mask0[randomize[cur]])
token_type_ids1.append(token_type_ids0[randomize[cur]])
span_labels1.append(span_labels0[randomize[cur]])
span_labels_masks1.append(span_labels_masks0[randomize[cur]])
cur += 1
count += 1
while len(input_ids1) < self.args.num_labels:
input_ids1.append([0]*self.max_length)
attention_mask1.append([0]*self.max_length)
token_type_ids1.append([0]*self.max_length)
span_labels1.append(np.zeros((self.max_length, self.max_length)))
span_labels_masks1.append(
np.zeros((self.max_length, self.max_length))-10000)
input_ids = input_ids1[:self.args.num_labels]
attention_mask = attention_mask1[:self.args.num_labels]
token_type_ids = token_type_ids1[:self.args.num_labels]
span_labels = span_labels1[:self.args.num_labels]
span_labels_masks = span_labels_masks1[:self.args.num_labels]
span_labels = np.array(span_labels)
span_labels_masks = np.array(span_labels_masks)
if np.sum(span_labels) < 1:
span_labels[-1, -1, -1] = 1
span_labels_masks[-1, -1, -1] = 10000
sample = {
"input_ids": torch.tensor(input_ids).long(),
"token_type_ids": torch.tensor(token_type_ids).long(),
"attention_mask": torch.tensor(attention_mask).float(),
"span_labels": torch.tensor(span_labels).float(),
"span_labels_mask": torch.tensor(span_labels_masks).float()
}
return sample
class UbertDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TASK NAME DataModel')
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--batchsize', default=8, type=int)
parser.add_argument('--max_length', default=128, type=int)
return parent_args
def __init__(self, train_data, val_data, tokenizer, args):
super().__init__()
self.batchsize = args.batchsize
self.train_data = UbertDataset(train_data, tokenizer, args, True)
self.valid_data = UbertDataset(val_data, tokenizer, args, False)
def train_dataloader(self):
return DataLoader(self.train_data, shuffle=True, batch_size=self.batchsize, pin_memory=False)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False, batch_size=self.batchsize, pin_memory=False)
class biaffine(nn.Module):
def __init__(self, in_size, out_size, bias_x=True, bias_y=True):
super().__init__()
self.bias_x = bias_x
self.bias_y = bias_y
self.out_size = out_size
self.U = torch.nn.Parameter(torch.zeros(
in_size + int(bias_x), out_size, in_size + int(bias_y)))
torch.nn.init.normal_(self.U, mean=0, std=0.1)
def forward(self, x, y):
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), dim=-1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), dim=-1)
        bilinear_mapping = torch.einsum('bxi,ioj,byj->bxyo', x, self.U, y)
        return bilinear_mapping
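# A shape sketch (not part of the original file): with in_size=128 and out_size=1,
# inputs of shape (batch, seq_len, 128) yield pairwise span logits of shape
# (batch, seq_len, seq_len, 1), i.e. one score per (start, end) token pair.
def _example_biaffine_shapes():
    layer = biaffine(in_size=128, out_size=1)
    x = torch.randn(2, 64, 128)
    y = torch.randn(2, 64, 128)
    return layer(x, y).shape  # expected: torch.Size([2, 64, 64, 1])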
class MultilabelCrossEntropy(nn.Module):
def __init__(self):
super().__init__()
def forward(self, y_pred, y_true):
y_true = y_true.float()
y_pred = torch.mul((1.0 - torch.mul(y_true, 2.0)), y_pred)
y_pred_neg = y_pred - torch.mul(y_true, 1e12)
y_pred_pos = y_pred - torch.mul(1.0 - y_true, 1e12)
zeros = torch.zeros_like(y_pred[..., :1])
y_pred_neg = torch.cat([y_pred_neg, zeros], axis=-1)
y_pred_pos = torch.cat([y_pred_pos, zeros], axis=-1)
neg_loss = torch.logsumexp(y_pred_neg, axis=-1)
pos_loss = torch.logsumexp(y_pred_pos, axis=-1)
loss = torch.mean(neg_loss + pos_loss)
return loss
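# Illustrative note (not part of the original file): this multi-label loss pushes the
# logits of positive classes above 0 and those of negative classes below 0, which is
# why comput_metrix_span below thresholds span logits at 0 during evaluation.
def _example_multilabel_ce():
    loss_fn = MultilabelCrossEntropy()
    y_pred = torch.tensor([[3.0, -2.0, -4.0]])  # one confident positive, two negatives
    y_true = torch.tensor([[1.0, 0.0, 0.0]])
    return loss_fn(y_pred, y_true)  # a well-separated prediction gives a small loss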
class UbertModel(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.query_layer = torch.nn.Sequential(torch.nn.Linear(in_features=self.config.hidden_size,
out_features=self.config.biaffine_size),
torch.nn.GELU())
self.key_layer = torch.nn.Sequential(torch.nn.Linear(in_features=self.config.hidden_size, out_features=self.config.biaffine_size),
torch.nn.GELU())
self.biaffine_query_key_cls = biaffine(self.config.biaffine_size, 1)
self.loss_softmax = MultilabelCrossEntropy()
self.loss_sigmoid = torch.nn.BCEWithLogitsLoss(reduction='mean')
def forward(self,
input_ids,
attention_mask,
token_type_ids,
span_labels=None,
span_labels_mask=None):
batch_size, num_label, seq_len = input_ids.shape
input_ids = input_ids.view(-1, seq_len)
attention_mask = attention_mask.view(-1, seq_len)
token_type_ids = token_type_ids.view(-1, seq_len)
batch_size, seq_len = input_ids.shape
outputs = self.bert(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=True) # (bsz, seq, dim)
hidden_states = outputs[0]
batch_size, seq_len, hidden_size = hidden_states.shape
query = self.query_layer(hidden_states)
key = self.key_layer(hidden_states)
span_logits = self.biaffine_query_key_cls(
query, key).reshape(-1, num_label, seq_len, seq_len)
span_logits = span_logits + span_labels_mask
        if span_labels is None:
return 0, span_logits
else:
soft_loss1 = self.loss_softmax(
span_logits.reshape(-1, num_label, seq_len*seq_len), span_labels.reshape(-1, num_label, seq_len*seq_len))
soft_loss2 = self.loss_softmax(span_logits.permute(
0, 2, 3, 1), span_labels.permute(0, 2, 3, 1))
sig_loss = self.loss_sigmoid(span_logits, span_labels)
all_loss = 10*(100*sig_loss+soft_loss1+soft_loss2)
return all_loss, span_logits
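# Illustrative note (not part of the original file): forward() flattens the
# (batch, num_label, seq_len) inputs into (batch * num_label, seq_len) for BERT, then
# the biaffine head produces span_logits of shape (batch, num_label, seq_len, seq_len);
# span_labels_mask adds -10000 to positions that can never be valid spans for the task,
# so those positions are effectively excluded from both the loss and decoding.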
class UbertLitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--learning_rate', default=1e-5, type=float)
parser.add_argument('--weight_decay', default=0.1, type=float)
parser.add_argument('--warmup', default=0.01, type=float)
parser.add_argument('--num_labels', default=10, type=int)
return parent_args
def __init__(self, args, num_data=1):
super().__init__()
self.args = args
self.num_data = num_data
self.model = UbertModel.from_pretrained(
self.args.pretrained_model_path)
self.count = 0
def setup(self, stage) -> None:
if stage == 'fit':
num_gpus = self.trainer.gpus if self.trainer.gpus is not None else 0
self.total_step = int(self.trainer.max_epochs * self.num_data /
(max(1, num_gpus) * self.trainer.accumulate_grad_batches))
print('Total training step:', self.total_step)
def training_step(self, batch, batch_idx):
loss, span_logits = self.model(**batch)
span_acc, recall, precise = self.comput_metrix_span(
span_logits, batch['span_labels'])
self.log('train_loss', loss)
self.log('train_span_acc', span_acc)
self.log('train_span_recall', recall)
self.log('train_span_precise', precise)
return loss
def validation_step(self, batch, batch_idx):
loss, span_logits = self.model(**batch)
span_acc, recall, precise = self.comput_metrix_span(
span_logits, batch['span_labels'])
self.log('val_loss', loss)
self.log('val_span_acc', span_acc)
self.log('val_span_recall', recall)
self.log('val_span_precise', precise)
def predict_step(self, batch, batch_idx):
loss, span_logits = self.model(**batch)
span_acc = self.comput_metrix_span(span_logits, batch['span_labels'])
return span_acc.item()
def configure_optimizers(self):
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
paras = list(
filter(lambda p: p[1].requires_grad, self.named_parameters()))
paras = [{
'params':
[p for n, p in paras if not any(nd in n for nd in no_decay)],
'weight_decay': self.args.weight_decay
}, {
'params': [p for n, p in paras if any(nd in n for nd in no_decay)],
'weight_decay': 0.0
}]
optimizer = torch.optim.AdamW(paras, lr=self.args.learning_rate)
scheduler = get_linear_schedule_with_warmup(
optimizer, int(self.total_step * self.args.warmup),
self.total_step)
return [{
'optimizer': optimizer,
'lr_scheduler': {
'scheduler': scheduler,
'interval': 'step',
'frequency': 1
}
}]
def comput_metrix_span(self, logits, labels):
ones = torch.ones_like(logits)
zero = torch.zeros_like(logits)
logits = torch.where(logits < 0, zero, ones)
y_pred = logits.view(size=(-1,))
y_true = labels.view(size=(-1,))
corr = torch.eq(y_pred, y_true).float()
corr = torch.multiply(y_true, corr)
recall = torch.sum(corr.float())/(torch.sum(y_true.float())+1e-5)
precise = torch.sum(corr.float())/(torch.sum(y_pred.float())+1e-5)
f1 = 2*recall*precise/(recall+precise+1e-5)
return f1, recall, precise
class TaskModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--checkpoint_path',
default='./checkpoint/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_epochs', default=1, type=float)
parser.add_argument('--every_n_train_steps', default=100, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
save_last=True,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.checkpoint_path,
filename=args.filename)
class OffsetMapping:
def __init__(self):
self._do_lower_case = True
@staticmethod
def stem(token):
if token[:2] == '##':
return token[2:]
else:
return token
@staticmethod
def _is_control(ch):
return unicodedata.category(ch) in ('Cc', 'Cf')
@staticmethod
def _is_special(ch):
return bool(ch) and (ch[0] == '[') and (ch[-1] == ']')
def rematch(self, text, tokens):
if self._do_lower_case:
text = text.lower()
normalized_text, char_mapping = '', []
for i, ch in enumerate(text):
if self._do_lower_case:
ch = unicodedata.normalize('NFD', ch)
ch = ''.join(
[c for c in ch if unicodedata.category(c) != 'Mn'])
ch = ''.join([
c for c in ch
if not (ord(c) == 0 or ord(c) == 0xfffd or self._is_control(c))
])
normalized_text += ch
char_mapping.extend([i] * len(ch))
text, token_mapping, offset = normalized_text, [], 0
for token in tokens:
if self._is_special(token):
token_mapping.append([offset])
offset += 1
else:
token = self.stem(token)
start = text[offset:].index(token) + offset
end = start + len(token)
token_mapping.append(char_mapping[start:end])
offset = end
return token_mapping
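# Illustrative note (not part of the original file): rematch() aligns wordpiece tokens
# back to character offsets in the raw text, which extract_entity() relies on. Assuming
# a BERT-style tokenizer splits "playing football" into ["playing", "foot", "##ball"],
# the returned mapping is [[0, 1, 2, 3, 4, 5, 6], [8, 9, 10, 11], [12, 13, 14, 15]],
# i.e. each token maps to the indices of the characters it covers.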
class extractModel:
'''
    # In the version of the code submitted here, this method no longer needs to be called.
def get_actual_id(self, text, query_text, tokenizer, args):
text_encode = tokenizer.encode(text)
one_input_encode = tokenizer.encode(query_text)
text_start_id = search(text_encode[1:-1], one_input_encode)[0][0]
text_end_id = text_start_id+len(text_encode)-1
if text_end_id > args.max_length:
text_end_id = args.max_length
text_token = tokenizer.tokenize(text)
text_mapping = OffsetMapping().rematch(text, text_token)
return text_start_id, text_end_id, text_mapping, one_input_encode
'''
def extract_index(self, span_logits, sample_length, split_value=0.5):
result = []
sample_length = sample_length if sample_length < span_logits.shape[0] else span_logits.shape[0]
for i in range(sample_length):
for j in range(i, sample_length):
if span_logits[i, j] > split_value:
result.append((i, j, span_logits[i, j]))
return result
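    # Illustrative note (not part of the original file): extract_index walks the upper
    # triangle of a (seq_len, seq_len) span-score matrix and keeps every (start, end)
    # pair whose score exceeds split_value, e.g. for a 4x4 matrix whose only score above
    # threshold is 0.9 at position (1, 2), it returns [(1, 2, 0.9)].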
def extract_entity(self, text, entity_idx, text_start_id, text_mapping):
start_split = text_mapping[entity_idx[0]-text_start_id] if entity_idx[0] - \
text_start_id < len(text_mapping) and entity_idx[0]-text_start_id >= 0 else []
end_split = text_mapping[entity_idx[1]-text_start_id] if entity_idx[1] - \
text_start_id < len(text_mapping) and entity_idx[1]-text_start_id >= 0 else []
entity = ''
if start_split != [] and end_split != []:
entity = text[start_split[0]:end_split[-1]+1]
return entity
def extract(self, batch_data, model, tokenizer, args):
input_ids = []
attention_mask = []
token_type_ids = []
span_labels_masks = []
for item in batch_data:
input_ids0 = []
attention_mask0 = []
token_type_ids0 = []
span_labels_masks0 = []
for choice in item['choices']:
texta = item['task_type'] + '[SEP]' + \
item['subtask_type'] + '[SEP]' + choice['entity_type']
textb = item['text']
encode_dict = tokenizer.encode_plus(texta, textb,
max_length=args.max_length,
padding='max_length',
truncation='longest_first')
encode_sent = encode_dict['input_ids']
encode_token_type_ids = encode_dict['token_type_ids']
encode_attention_mask = encode_dict['attention_mask']
span_label_mask = np.zeros(
(args.max_length, args.max_length))-10000
if item['task_type'] == '分类任务':
span_label_mask[0, 0] = 0
else:
question_len = len(tokenizer.encode(texta))
span_label_mask[question_len:, question_len:] = np.zeros(
(args.max_length-question_len, args.max_length-question_len))
input_ids0.append(encode_sent)
attention_mask0.append(encode_attention_mask)
token_type_ids0.append(encode_token_type_ids)
span_labels_masks0.append(span_label_mask)
input_ids.append(input_ids0)
attention_mask.append(attention_mask0)
token_type_ids.append(token_type_ids0)
span_labels_masks.append(span_labels_masks0)
input_ids = torch.tensor(input_ids).to(model.device)
attention_mask = torch.tensor(attention_mask).to(model.device)
token_type_ids = torch.tensor(token_type_ids).to(model.device)
        # Changed as follows because the original code triggered a deprecation warning:
span_labels_mask = torch.tensor(np.array(span_labels_masks)).to(model.device)
_, span_logits = model.model(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
span_labels=None,
span_labels_mask=span_labels_mask)
        # Changed as follows because the original code triggered a deprecation warning:
span_logits = torch.sigmoid(span_logits)
span_logits = span_logits.cpu().detach().numpy()
for i, item in enumerate(batch_data):
if item['task_type'] == '分类任务':
cls_idx = 0
max_c = np.argmax(span_logits[i, :, cls_idx, cls_idx])
batch_data[i]['choices'][max_c]['label'] = 1
batch_data[i]['choices'][max_c]['score'] = span_logits[i,
max_c, cls_idx, cls_idx]
else:
'''
                Improved code efficiency and fixed several bugs:
                1. Reorganized the flow so that "text_start_id, text_end_id, offset_mapping, input_ids = self.get_actual_id(item['text'], texta+'[SEP]'+textb, tokenizer, args)" is no longer needed.
                2. item['text'] is now encoded/tokenized only once per item, instead of being repeated for every entry in item['choices'] as before.
                3. Fixed the '抽取式阅读理解' (extractive reading comprehension) subtask so that multiple entities in item['choices'] are extracted correctly and the extracted text is no longer misaligned.
                4. Added a top_k option for the '抽取式阅读理解' subtask: a "top_k" attribute can be set on each entry of "choices" in the prediction data, e.g. {"entity_type": "***", "top_k": 2}; if absent, it defaults to 1.
                5. For extraction subtasks other than '抽取式阅读理解', added filtering on "entity_name" so that each "entity_name" is unique.
'''
textb = item['text']
offset_mapping = OffsetMapping().rematch(textb, tokenizer.tokenize(textb))
input_ids = tokenizer.encode('[SEP]' + textb,
max_length=args.max_length,
truncation='longest_first')
for c in range(len(item['choices'])):
texta = item['task_type'] + '[SEP]' + item['subtask_type'] + \
'[SEP]' + item['choices'][c]['entity_type']
text_start_id = len(tokenizer.encode(texta))
logits = span_logits[i, c, :, :]
entity_name_list = []
entity_list = []
if item['subtask_type'] == '抽取式阅读理解':
try:
top_k = int(item['choices'][c]['top_k'])
except KeyError:
top_k = 1
                        if top_k <= 0:
top_k = 1
_, top_indices = torch.topk(torch.flatten(torch.tensor(logits)), top_k)
for top_idx in top_indices:
max_index = np.unravel_index(top_idx, logits.shape)
if logits[max_index] > args.threshold:
entity = self.extract_entity(
item['text'], (max_index[0], max_index[1]), text_start_id, offset_mapping)
entity = {
'entity_name': entity,
'score': logits[max_index]
}
entity_list.append(entity)
else:
sample_length = text_start_id + len(input_ids)
entity_idx_type_list = self.extract_index(
logits, sample_length, split_value=args.threshold)
for entity_idx in entity_idx_type_list:
entity = self.extract_entity(
item['text'], (entity_idx[0], entity_idx[1]), text_start_id, offset_mapping)
if entity not in entity_name_list:
entity_name_list.append(entity)
entity = {
'entity_name': entity,
'score': entity_idx[2]
}
entity_list.append(entity)
batch_data[i]['choices'][c]['entity_list'] = entity_list
return batch_data
class UbertPipelines:
@staticmethod
def pipelines_args(parent_args):
total_parser = parent_args.add_argument_group("pipelines args")
total_parser.add_argument(
'--pretrained_model_path', default='IDEA-CCNL/Erlangshen-Ubert-110M-Chinese', type=str)
total_parser.add_argument('--output_save_path',
default='./predict.json', type=str)
total_parser.add_argument('--load_checkpoints_path',
default='', type=str)
total_parser.add_argument('--max_extract_entity_number',
default=1, type=float)
total_parser.add_argument('--train', action='store_true')
total_parser.add_argument('--threshold',
default=0.5, type=float)
total_parser = UbertDataModel.add_data_specific_args(total_parser)
total_parser = TaskModelCheckpoint.add_argparse_args(total_parser)
total_parser = UbertLitModel.add_model_specific_args(total_parser)
total_parser = pl.Trainer.add_argparse_args(parent_args)
return parent_args
def __init__(self, args):
if args.load_checkpoints_path != '':
self.model = UbertLitModel.load_from_checkpoint(
args.load_checkpoints_path, args=args)
else:
self.model = UbertLitModel(args)
self.args = args
self.checkpoint_callback = TaskModelCheckpoint(args).callbacks
self.logger = loggers.TensorBoardLogger(save_dir=args.default_root_dir)
self.trainer = pl.Trainer.from_argparse_args(args,
logger=self.logger,
callbacks=[self.checkpoint_callback])
self.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path,
additional_special_tokens=['[unused'+str(i+1)+']' for i in range(99)])
self.em = extractModel()
def fit(self, train_data, dev_data):
data_model = UbertDataModel(
train_data, dev_data, self.tokenizer, self.args)
self.model.num_data = len(train_data)
self.trainer.fit(self.model, data_model)
'''
    By introducing the concept of "buckets", items in one prediction batch may contain different numbers of entities in their "choices".
'''
def predict(self, predict_data, cuda=True):
result = []
start = 0
if cuda:
self.model = self.model.cuda()
self.model.eval()
while start < len(predict_data):
batch_data = predict_data[start:start+self.args.batchsize]
start += self.args.batchsize
batch_data_bucket = {}
for item in batch_data:
choice_num = len(item['choices'])
try:
batch_data_bucket[choice_num].append(item)
except KeyError:
batch_data_bucket[choice_num] = []
batch_data_bucket[choice_num].append(item)
for k, batch_data in batch_data_bucket.items():
batch_result = self.em.extract(
batch_data, self.model, self.tokenizer, self.args)
result.extend(batch_result)
return result
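# A minimal prediction sketch (not part of the original file). The argument defaults
# come from pipelines_args above and the record layout mirrors what UbertDataset and
# extractModel consume; the sample text and entity type are made up, and cuda=False
# keeps the sketch CPU-only.
def _example_ubert_predict():
    parser = argparse.ArgumentParser("ubert example")
    parser = UbertPipelines.pipelines_args(parser)
    args = parser.parse_args([])
    pipeline = UbertPipelines(args)
    samples = [{
        "task_type": "抽取任务",
        "subtask_type": "实体识别",
        "text": "2022年世界杯在卡塔尔举行",
        "choices": [{"entity_type": "地点"}],
        "id": 0,
    }]
    return pipeline.predict(samples, cuda=False)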
| 30,911 | 39.673684 | 160 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/ubert/__init__.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .modeling_ubert import UbertPipelines, UbertModel, UbertDataset
| 685 | 37.111111 | 74 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/uniex/__init__.py
|
from .modeling_uniex import UniEXPipelines
| 42 | 42 | 42 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/uniex/modeling_uniex.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tarfile
import torch
from torch import nn
import json
from tqdm import tqdm
import os
import numpy as np
from transformers import AutoTokenizer,AutoConfig
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import trainer, loggers
from torch.utils.data import Dataset, DataLoader
from transformers.optimization import get_linear_schedule_with_warmup
from transformers import BertModel
from transformers import BertPreTrainedModel
import unicodedata
import re
import argparse
import copy
from typing import List, Optional
from torch import Tensor
import time
import gc
# os.environ["CUDA_VISIBLE_DEVICES"] = '7'
# pl.seed_everything(42)
def get_entity_f1(test_data,pred_data):
corr=0
y_true=0
y_pred=0
for i in range(len(test_data)):
tmp_corr=0
y_true_list=[]
for e in test_data[i]['entity_list']:
if (e['entity_type'],e['entity_index']) not in y_true_list:
y_true_list.append((e['entity_type'],e['entity_index']))
if y_true_list==[] and 'spo_list' in test_data[i].keys():
for spo in test_data[i]['spo_list']:
if (spo['subject']['entity_type'],spo['subject']['entity_index']) not in y_true_list:
y_true_list.append((spo['subject']['entity_type'],spo['subject']['entity_index']))
if (spo['object']['entity_type'],spo['object']['entity_index']) not in y_true_list:
y_true_list.append((spo['object']['entity_type'],spo['object']['entity_index']))
# y_true_list=list(set(y_true_list))
y_true+=len(y_true_list)
y_pred_list=[]
for e in pred_data[i]['entity_list']:
if (e['entity_type'],e['entity_index']) not in y_pred_list:
y_pred_list.append((e['entity_type'],e['entity_index']))
if y_pred_list==[] and 'spo_list' in pred_data[i].keys():
for spo in pred_data[i]['spo_list']:
if (spo['subject']['entity_type'],spo['subject']['entity_index']) not in y_pred_list:
y_pred_list.append((spo['subject']['entity_type'],spo['subject']['entity_index']))
if (spo['object']['entity_type'],spo['object']['entity_index']) not in y_pred_list:
y_pred_list.append((spo['object']['entity_type'],spo['object']['entity_index']))
y_pred+=len(y_pred_list)
for e in y_pred_list:
if e in y_true_list:
corr+=1
if y_pred<=0:
precise=0
else:
precise = corr/y_pred
if y_true<=0:
recall=0
else:
recall = corr/y_true
if precise+recall<=0:
f1=0
else:
f1=2*precise*recall/(precise+recall)
return f1,recall,precise
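# Illustrative note (not part of the original file): the F1 helpers above and below all
# follow the same pattern; e.g. with 8 predicted spans, 10 gold spans and 6 exact
# (entity_type, entity_index) matches, precision = 6/8, recall = 6/10 and
# f1 = 2 * 0.75 * 0.6 / (0.75 + 0.6) = 0.667 (rounded).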
def get_entity_f1_strict(test_data,pred_data, strict_f1=True):
corr=0
y_true=0
y_pred=0
for i in range(len(test_data)):
tmp_corr=0
y_true_list=[]
y_pred_list=[]
if strict_f1:
for spo in test_data[i]['spo_list']:
tmp=(spo['subject']['entity_type'],spo['subject']['entity_text'],spo['predicate'],spo['object']['entity_type'],spo['object']['entity_text'])
if tmp not in y_true_list:
y_true_list.append(tmp)
for spo in pred_data[i]['spo_list']:
tmp=(spo['subject']['entity_type'],spo['subject']['entity_text'],spo['predicate'],spo['object']['entity_type'],spo['object']['entity_text'])
if tmp not in y_pred_list:
y_pred_list.append(tmp)
else:
for spo in test_data[i]['spo_list']:
tmp=(spo['subject']['entity_text'],spo['predicate'],spo['object']['entity_text'])
if tmp not in y_true_list:
y_true_list.append(tmp)
for spo in pred_data[i]['spo_list']:
tmp=(spo['subject']['entity_text'],spo['predicate'],spo['object']['entity_text'])
if tmp not in y_pred_list:
y_pred_list.append(tmp)
y_true+=len(y_true_list)
y_pred+=len(y_pred_list)
for e in y_pred_list:
if e in y_true_list:
corr+=1
if y_pred<=0:
precise=0
else:
precise = corr/y_pred
if y_true<=0:
recall=0
else:
recall = corr/y_true
if precise+recall<=0:
f1=0
else:
f1=2*precise*recall/(precise+recall)
return f1,recall,precise
def get_rel_f1(test_data,pred_data):
entity_f1,_,_=get_entity_f1(test_data,pred_data)
rel_f1,_,_=get_entity_f1_strict(test_data,pred_data,strict_f1=False)
rel_f1_strict,rel_p,rel_r=get_entity_f1_strict(test_data,pred_data,strict_f1=True)
return rel_f1_strict,rel_p,rel_r
def get_event_f1(test_data,pred_data, strict_f1=True):
corr_trigger=0
y_true_trigger=0
y_pred_trigger=0
corr_args=0
y_true_args=0
y_pred_args=0
for i in range(len(test_data)):
tmp_corr=0
y_true_list_trigger=[]
y_pred_list_trigger=[]
y_true_list_args=[]
y_pred_list_args=[]
for event in test_data[i]['event_list']:
for a in event['args']:
if a['entity_type']=='触发词':
if (a['entity_index'],a['entity_type'],event['event_type']) not in y_true_list_trigger:
y_true_list_trigger.append((a['entity_index'],a['entity_type'],event['event_type']))
else:
if (a['entity_index'],a['entity_type'],event['event_type']) not in y_true_list_args:
y_true_list_args.append((a['entity_index'],a['entity_type'],event['event_type']))
for event in pred_data[i]['event_list']:
for a in event['args']:
if a['entity_type']=='触发词':
if (a['entity_index'],a['entity_type'],event['event_type']) not in y_pred_list_trigger:
y_pred_list_trigger.append((a['entity_index'],a['entity_type'],event['event_type']))
else:
if (a['entity_index'],a['entity_type'],event['event_type']) not in y_pred_list_args:
y_pred_list_args.append((a['entity_index'],a['entity_type'],event['event_type']))
y_true_trigger+=len(y_true_list_trigger)
y_pred_trigger+=len(y_pred_list_trigger)
for e in y_pred_list_trigger:
if e in y_true_list_trigger:
corr_trigger+=1
y_true_args+=len(y_true_list_args)
y_pred_args+=len(y_pred_list_args)
for e in y_pred_list_args:
if e in y_true_list_args:
corr_args+=1
if y_pred_trigger<=0:
precise_trigger=0
else:
precise_trigger = corr_trigger/y_pred_trigger
if y_true_trigger<=0:
recall_trigger=0
else:
recall_trigger = corr_trigger/y_true_trigger
if precise_trigger+recall_trigger<=0:
f1_trigger=0
else:
f1_trigger=2*precise_trigger*recall_trigger/(precise_trigger+recall_trigger)
if y_pred_args<=0:
precise_args=0
else:
precise_args = corr_args/y_pred_args
if y_true_args<=0:
recall_args=0
else:
recall_args = corr_args/y_true_args
if precise_args+recall_args<=0:
f1_args=0
else:
f1_args=2*precise_args*recall_args/(precise_args+recall_args)
return f1_trigger+f1_args, f1_trigger, f1_args
class UniEXDataEncode:
def __init__(self, tokenizer, args, used_mask=False):
super().__init__()
self.tokenizer = tokenizer
self.max_length = args.max_length
self.used_mask = used_mask
self.args = args
def search_index(self, entity_idx, text):
start_idx_text = text[:entity_idx[0]]
start_idx_text_encode = self.tokenizer.encode(
start_idx_text, add_special_tokens=False)
start_idx = len(start_idx_text_encode)
end_idx_text = text[:entity_idx[1]+1]
end_idx_text_encode = self.tokenizer.encode(
end_idx_text, add_special_tokens=False)
end_idx = len(end_idx_text_encode)-1
return start_idx, end_idx
def search(self, pattern, sequence):
n = len(pattern)
res = []
for i in range(len(sequence)):
if sequence[i:i + n] == pattern:
res.append(i)
return res
def get_token_type(self, sep_idx, max_length):
token_type_ids = np.zeros(shape=(max_length,))
for i in range(len(sep_idx)-1):
if i % 2 == 0:
ty = np.ones(shape=(sep_idx[i+1]-sep_idx[i],))
else:
ty = np.zeros(shape=(sep_idx[i+1]-sep_idx[i],))
token_type_ids[sep_idx[i]:sep_idx[i+1]] = ty
return token_type_ids
def get_position_ids(self, max_length, entity_labels_idx, relation_labels_idx):
query_length = entity_labels_idx[0]
query_position_ids = np.arange(query_length)
entity_position_ids = np.arange(query_length, entity_labels_idx[-1])
for i in range(len(entity_labels_idx)-1):
entity_position_ids[entity_labels_idx[i]-query_length:entity_labels_idx[i+1]-query_length] = np.arange(
query_length, query_length+entity_labels_idx[i+1]-entity_labels_idx[i])
if relation_labels_idx==[]:
cur_pid=max(entity_position_ids)+1
text_position_ids = np.arange(
cur_pid, max_length+cur_pid-entity_labels_idx[-1])
position_ids = list(query_position_ids) + \
list(entity_position_ids)+list(text_position_ids)
else:
sep_pid = [max(entity_position_ids)+1]
cur_pid = max(entity_position_ids)+2
relation_position_ids = np.arange(relation_labels_idx[0], relation_labels_idx[-1])
for i in range(len(relation_labels_idx)-1):
relation_position_ids[relation_labels_idx[i]-relation_labels_idx[0]:relation_labels_idx[i+1]-relation_labels_idx[0]] = np.arange(
cur_pid, cur_pid + relation_labels_idx[i+1]-relation_labels_idx[i])
cur_pid=max(relation_position_ids)+1
text_position_ids = np.arange(
cur_pid, max_length+cur_pid-relation_labels_idx[-1])
position_ids = list(query_position_ids) + \
list(entity_position_ids)+sep_pid +list(relation_position_ids)+list(text_position_ids)
if max_length <= 512:
return position_ids[:max_length]
else:
for i in range(512, max_length):
if position_ids[i] > 511:
position_ids[i] = 511
return position_ids[:max_length]
def get_att_mask(self, attention_mask, entity_labels_idx, relation_labels_idx, entity_type_list=None, relation_type_list=None, headtail2relation=None):
max_length = len(attention_mask)
attention_mask = np.array(attention_mask)
attention_mask = np.tile(attention_mask[None, :], (max_length, 1))
zeros = np.zeros(
shape=(entity_labels_idx[-1]-entity_labels_idx[0], entity_labels_idx[-1]-entity_labels_idx[0]))
attention_mask[entity_labels_idx[0]:entity_labels_idx[-1],
entity_labels_idx[0]:entity_labels_idx[-1]] = zeros
        attention_mask[0,1:entity_labels_idx[-1]] = np.zeros(shape=(entity_labels_idx[-1]-1,))  # do not let [CLS] attend to the option tokens
attention_mask[1:entity_labels_idx[-1],0] = np.zeros(shape=(entity_labels_idx[-1]-1,))
for i in range(len(entity_labels_idx)-1):
label_token_length = entity_labels_idx[i+1]-entity_labels_idx[i]
ones = np.ones(shape=(label_token_length, label_token_length))
attention_mask[entity_labels_idx[i]:entity_labels_idx[i+1],
entity_labels_idx[i]:entity_labels_idx[i+1]] = ones
if relation_labels_idx == []:
return attention_mask
else:
zeros = np.zeros(
shape=(relation_labels_idx[-1]-relation_labels_idx[0], relation_labels_idx[-1]-relation_labels_idx[0]))
attention_mask[relation_labels_idx[0]:relation_labels_idx[-1],
relation_labels_idx[0]:relation_labels_idx[-1]] = zeros
attention_mask[0,1:relation_labels_idx[-1]] = np.zeros(shape=(relation_labels_idx[-1]-1,))
attention_mask[1:relation_labels_idx[-1],0] = np.zeros(shape=(relation_labels_idx[-1]-1,))
for i in range(len(relation_labels_idx)-1):
label_token_length = relation_labels_idx[i+1]-relation_labels_idx[i]
ones = np.ones(shape=(label_token_length, label_token_length))
attention_mask[relation_labels_idx[i]:relation_labels_idx[i+1],
relation_labels_idx[i]:relation_labels_idx[i+1]] = ones
zeros = np.zeros(shape=(entity_labels_idx[-1]-entity_labels_idx[0], relation_labels_idx[-1]-relation_labels_idx[0]))
attention_mask[entity_labels_idx[0]:entity_labels_idx[-1],
relation_labels_idx[0]:relation_labels_idx[-1]] = zeros
zeros = np.zeros(shape=(relation_labels_idx[-1]-relation_labels_idx[0], entity_labels_idx[-1]-entity_labels_idx[0]))
attention_mask[relation_labels_idx[0]:relation_labels_idx[-1],
entity_labels_idx[0]:entity_labels_idx[-1]] = zeros
for headtail,relation_list in headtail2relation.items():
if '|' in headtail:
headtail=headtail.split('|')
else:
headtail=[headtail]
for entity_type in headtail:
entity_idx = entity_labels_idx[entity_type_list.index(entity_type)]
entity_last_token_idx = entity_labels_idx[entity_type_list.index(entity_type)+1]
for relation_type in relation_list:
relation_idx = relation_labels_idx[relation_type_list.index(relation_type)]
relation_last_token_idx = relation_labels_idx[relation_type_list.index(relation_type)+1]
ones = np.ones(shape=(entity_last_token_idx-entity_idx, relation_last_token_idx-relation_idx))
attention_mask[entity_idx:entity_last_token_idx,
relation_idx:relation_last_token_idx] = ones
ones = np.ones(shape=(relation_last_token_idx-relation_idx, entity_last_token_idx-entity_idx))
attention_mask[relation_idx:relation_last_token_idx,
entity_idx:entity_last_token_idx] = ones
return attention_mask
def process_relation_choice(self, choice):
head_type = []
tail_type = []
entity_type = []
relation_type = []
headtail2relation={}
for c in choice:
if c[0] not in head_type:
head_type.append(c[0])
if c[2] not in tail_type:
tail_type.append(c[2])
if c[0] not in entity_type:
entity_type.append(c[0])
if c[2] not in entity_type:
entity_type.append(c[2])
if c[1] not in relation_type:
relation_type.append(c[1])
if c[0]+'|'+c[2] not in headtail2relation.keys():
headtail2relation[c[0]+'|'+c[2]]=[]
if c[1] not in headtail2relation[c[0]+'|'+c[2]]:
headtail2relation[c[0]+'|'+c[2]].append(c[1])
return relation_type, entity_type, head_type, tail_type, headtail2relation
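    # Illustrative note (not part of the original file): for a single relation schema
    # such as choice = [['人物', '出生地', '地点']], process_relation_choice returns
    # relation_type=['出生地'], entity_type=['人物', '地点'], head_type=['人物'],
    # tail_type=['地点'] and headtail2relation={'人物|地点': ['出生地']}; the head/tail ->
    # relation map is what get_att_mask uses to let entity-type tokens attend only to
    # the relation-type tokens they can participate in.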
def process_event_choice(self, choice):
event_type_list=[]
entity_type_list=[]
args2event={}
for event_type,args in choice[0].items():
if event_type not in event_type_list:
event_type_list.append(event_type)
for arg in args:
if arg not in entity_type_list:
entity_type_list.append(arg)
if arg not in args2event.keys():
args2event[arg]=[]
if event_type not in args2event[arg]:
args2event[arg].append(event_type)
event_type_list.append('触发词与要素')
return event_type_list, entity_type_list, args2event
def encode_entity(self, text,entity_list,entity_type_list,span_labels,span_labels_mask,span_index_token_list,query_length):
for entity in entity_list:
entity_type, entity_idx_list = entity['entity_type'], entity['entity_index']
for entity_idx in entity_idx_list:
start_idx, end_idx = self.search_index(
entity_idx, text)
# print(start_idx,end_idx,flush=True)
if start_idx != None and end_idx != None:
start_idx, end_idx = start_idx + \
query_length+1, end_idx+query_length+1
if start_idx < span_labels.shape[0] and end_idx < span_labels.shape[0]:
span_index_token_list.append(start_idx)
span_index_token_list.append(end_idx)
span_labels[start_idx, end_idx, 0] = 1
span_labels_mask[start_idx, end_idx, 0:len(entity_type_list)+1] = np.zeros((len(entity_type_list)+1,))
if entity_type in entity_type_list:
label = entity_type_list.index(entity_type) + 1  # +1 because entity label ids start from 1; index 0 is the CLS/span-index channel
span_labels[start_idx, end_idx, label] = 1
return span_labels,span_labels_mask,span_index_token_list
def encode_relation(self, text,spo_list,entity_type_list,relation_type_list,span_labels,span_labels_mask,span_index_token_list,query_length):
sample_entity_idx_list=[]
for spo in spo_list:
for entity_idx_subject in spo['subject']['entity_index']:
for entity_idx_object in spo['object']['entity_index']:
sub_start_idx, sub_end_idx = self.search_index(entity_idx_subject, text)
if sub_start_idx !=None and sub_end_idx != None:
sub_start_idx, sub_end_idx = sub_start_idx + query_length+1, sub_end_idx + query_length+1
sub_label= entity_type_list.index(spo['subject']['entity_type'])+1
if sub_start_idx < span_labels.shape[0] and sub_end_idx < span_labels.shape[0]:
span_index_token_list.append(sub_start_idx)
span_index_token_list.append(sub_end_idx)
span_labels[sub_start_idx, sub_end_idx, 0] = 1
span_labels[sub_start_idx,sub_end_idx, sub_label] = 1
span_labels_mask[sub_start_idx, sub_end_idx, 0:len(entity_type_list)+1] = np.zeros((len(entity_type_list)+1,))
if (sub_start_idx, sub_end_idx) not in sample_entity_idx_list:
sample_entity_idx_list.append((sub_start_idx, sub_end_idx))
ob_start_idx, ob_end_idx = self.search_index(entity_idx_object, text)
if ob_start_idx !=None and ob_end_idx != None:
ob_start_idx, ob_end_idx = ob_start_idx + query_length+1, ob_end_idx + query_length+1
ob_label= entity_type_list.index(spo['object']['entity_type'])+1
if ob_start_idx < span_labels.shape[0] and ob_end_idx < span_labels.shape[0]:
span_index_token_list.append(ob_start_idx)
span_index_token_list.append(ob_end_idx)
span_labels[ob_start_idx, ob_end_idx, 0] = 1
span_labels[ob_start_idx, ob_end_idx, ob_label] = 1
span_labels_mask[ob_start_idx, ob_end_idx, 0:len(entity_type_list)+1] = np.zeros((len(entity_type_list)+1,))
if (ob_start_idx, ob_end_idx) not in sample_entity_idx_list:
sample_entity_idx_list.append((ob_start_idx, ob_end_idx))
if ob_start_idx !=None and ob_end_idx != None and sub_start_idx !=None and sub_end_idx != None:
if spo['predicate'] in relation_type_list:
rel_label = len(entity_type_list) + relation_type_list.index(spo['predicate'])+1
if sub_start_idx < self.max_length and ob_start_idx < self.max_length:
span_labels[sub_start_idx,ob_start_idx, rel_label] = 1
span_labels_mask[sub_start_idx, ob_start_idx, len(entity_type_list)+1:len(entity_type_list)+len(relation_type_list)+1] = np.zeros((len(relation_type_list),))
if sub_end_idx < self.max_length and ob_end_idx < self.max_length:
span_labels[sub_end_idx,ob_end_idx, rel_label] = 1
span_labels_mask[sub_end_idx, ob_end_idx, len(entity_type_list)+1:len(entity_type_list)+len(relation_type_list)+1] = np.zeros((len(relation_type_list),))
for head_idx in sample_entity_idx_list:
for tail_idx in sample_entity_idx_list:
if head_idx != tail_idx:
span_labels_mask[head_idx[0], tail_idx[0], len(entity_type_list)+1:len(entity_type_list)+len(relation_type_list)+1] = np.zeros((len(relation_type_list),))
span_labels_mask[head_idx[1], tail_idx[1], len(entity_type_list)+1:len(entity_type_list)+len(relation_type_list)+1] = np.zeros((len(relation_type_list),))
return span_labels,span_labels_mask,span_index_token_list
def encode_event(self, text,event_list,entity_type_list,event_type_list,span_labels,span_labels_mask,span_index_token_list,query_length,num_labels):
trigger_args_idx_list=[]
for event in event_list:
trigger_idx_list=[]
args_idx_list=[]
event_type = event['event_type']
for entity in event['args']:
entity_type, entity_idx_list = entity['entity_type'], entity['entity_index']
for entity_idx in entity_idx_list:
start_idx, end_idx = self.search_index(
entity_idx, text)
if start_idx != None and end_idx != None:
start_idx, end_idx = start_idx + \
query_length+1, end_idx+query_length+1
if start_idx < span_labels.shape[0] and end_idx < span_labels.shape[0]:
span_index_token_list.append(start_idx)
span_index_token_list.append(end_idx)
entity_type_label = entity_type_list.index(entity_type) + 1  # +1 because entity label ids start from 1; index 0 is the CLS/span-index channel
event_type_label = event_type_list.index(event_type) + len(entity_type_list) + 1  # event labels follow the entity labels; +1 for the same CLS offset
span_labels[start_idx, end_idx, 0] = 1
span_labels[start_idx, end_idx, entity_type_label] = 1
span_labels[start_idx, end_idx, event_type_label] = 1
span_labels_mask[start_idx, end_idx] = np.zeros((num_labels,))
if entity_type =='触发词':
trigger_idx_list.append((start_idx, end_idx))
else:
args_idx_list.append((start_idx, end_idx))
trigger_args_idx_list.append((trigger_idx_list,args_idx_list))
for trigger_idx in trigger_idx_list:
for args_idx in args_idx_list:
span_labels[trigger_idx[0], args_idx[0], -1] = 1
span_labels[trigger_idx[1], args_idx[1], -1] = 1
span_labels_mask[trigger_idx[0], args_idx[0], -1] = 0
span_labels_mask[trigger_idx[1], args_idx[1], -1] = 0
for i,(trigger_idx_list1, args_idx_list1) in enumerate(trigger_args_idx_list):
for j,(trigger_idx_list2, args_idx_list2) in enumerate(trigger_args_idx_list):
for trigger_idx1 in trigger_idx_list1:
for args_idx2 in args_idx_list2:
span_labels_mask[trigger_idx1[0], args_idx2[0], -1] = 0
span_labels_mask[trigger_idx1[1], args_idx2[1], -1] = 0
for trigger_idx2 in trigger_idx_list2:
for args_idx1 in args_idx_list1:
span_labels_mask[trigger_idx2[0], args_idx1[0], -1] = 0
span_labels_mask[trigger_idx2[1], args_idx1[1], -1] = 0
return span_labels,span_labels_mask,span_index_token_list
def encode(self, item,is_predict=False):
if isinstance(item['choice'][0], list):
relation_type_list, entity_type_list, _, _, headtail2relation = self.process_relation_choice(item['choice'])
event_type_list=[]
elif isinstance(item['choice'][0], dict): # event extraction task
event_type_list, entity_type_list, args2event = self.process_event_choice(item['choice'])
relation_type_list = []
else:
entity_type_list = item['choice']
relation_type_list = []
event_type_list = []
input_ids = []
entity_labels_idx = []
relation_labels_idx = []
event_labels_idx = []
sep_ids = [self.tokenizer.sep_token_id]
subtask_type_ids = self.tokenizer.encode(item['task_type'])
input_ids = subtask_type_ids
entity_labels_idx.append(len(input_ids))
entity_op_ids = self.tokenizer.encode(
'[unused1]', add_special_tokens=False)[0]
for c in entity_type_list:
input_ids = input_ids + \
[entity_op_ids]+self.tokenizer.encode(c, add_special_tokens=False)
entity_labels_idx.append(len(input_ids))
if relation_type_list != []:  # non-empty means the sample carries relation types, so append them to the label query
relation_op_ids = self.tokenizer.encode(
'[unused2]', add_special_tokens=False)[0]
input_ids = input_ids + sep_ids
relation_labels_idx.append(len(input_ids))
for c in relation_type_list:
input_ids = input_ids + \
[relation_op_ids]+self.tokenizer.encode(c, add_special_tokens=False)
relation_labels_idx.append(len(input_ids))
if event_type_list != []:  # non-empty means the sample carries event types, so append them to the label query
event_op_ids = self.tokenizer.encode(
'[unused1]', add_special_tokens=False)[0]
input_ids = input_ids + sep_ids
event_labels_idx.append(len(input_ids))
for c in event_type_list:
input_ids = input_ids + \
[event_op_ids]+self.tokenizer.encode(c, add_special_tokens=False)
event_labels_idx.append(len(input_ids))
if 'tokens' not in item.keys():
item['tokens'] = item['text'].split(' ')
if relation_labels_idx!=[]:
query_length=relation_labels_idx[-1]
elif event_labels_idx !=[]:
query_length=event_labels_idx[-1]
else:
query_length=entity_labels_idx[-1]
encode_dict = self.tokenizer(item['tokens'],
is_split_into_words=True,
max_length=self.max_length-query_length,
truncation=True,
add_special_tokens=False)
input_ids = input_ids+sep_ids+encode_dict['input_ids']
word_ids = encode_dict.word_ids()
input_ids = input_ids[:self.args.max_length-1]+sep_ids
sample_length = len(input_ids)
attention_mask = [1]*sample_length
if relation_labels_idx!=[]:
attention_mask = self.get_att_mask(
attention_mask, entity_labels_idx, relation_labels_idx, entity_type_list, relation_type_list, headtail2relation)
elif event_labels_idx!=[]:
attention_mask = self.get_att_mask(
attention_mask, entity_labels_idx, event_labels_idx, entity_type_list, event_type_list, args2event)
else:
attention_mask = self.get_att_mask(
attention_mask, entity_labels_idx, relation_labels_idx)
if relation_labels_idx !=[]:
position_ids = self.get_position_ids(
sample_length, entity_labels_idx, relation_labels_idx)
else:
position_ids = self.get_position_ids(
sample_length, entity_labels_idx, event_labels_idx)
if relation_type_list !=[]:
label_token_idx = entity_labels_idx[:-1]+relation_labels_idx[:-1]
num_labels = len(entity_type_list)+len(relation_type_list)+1  # +1 because entity label ids start from 1; index 0 is the CLS/span-index channel
elif event_labels_idx !=[]:
label_token_idx = entity_labels_idx[:-1]+event_labels_idx[:-1]
num_labels = len(entity_type_list)+len(event_type_list)+1  # one slot marks the trigger-argument relation (already appended to event_type_list), and the +1 keeps index 0 for CLS since entity label ids start from 1
else:
label_token_idx = entity_labels_idx[:-1]
num_labels = len(entity_type_list)+1  # +1 because entity label ids start from 1; index 0 is the CLS/span-index channel
span_labels = np.zeros(
(sample_length, sample_length, num_labels))
span_mask = True if 'span_mask' in item.keys() and item['span_mask']=='mask' else False
if not self.args.fast_ex_mode and not span_mask:
span_labels_mask = np.zeros(
(sample_length, sample_length, num_labels))
span_labels_mask[:query_length,:query_length, :] = np.zeros(
(query_length, query_length, num_labels))-10000
# span_labels_mask[0, 0, :] = np.zeros((num_labels,))-10000
else:
span_labels_mask = np.zeros(
(sample_length, sample_length, num_labels))-10000
span_labels_mask[query_length:, query_length:, 0] = np.zeros(
(sample_length-query_length, sample_length-query_length))
# span_labels_mask[0, 0, :] = np.zeros((num_labels,))
span_index_token_list=[query_length]
if 'entity_list' in item.keys():
span_labels,span_labels_mask,span_index_token_list = self.encode_entity(item['text'],
item['entity_list'],
entity_type_list,
span_labels,
span_labels_mask,
span_index_token_list,
query_length)
if 'spo_list' in item.keys() and relation_type_list != []:  # relation extraction task
span_labels,span_labels_mask,span_index_token_list = self.encode_relation(item['text'],
item['spo_list'],
entity_type_list,
relation_type_list,
span_labels,
span_labels_mask,
span_index_token_list,
query_length)
if 'event_list' in item.keys():  # event extraction task
span_labels,span_labels_mask,span_index_token_list = self.encode_event(item['text'],
item['event_list'],
entity_type_list,
event_type_list,
span_labels,
span_labels_mask,
span_index_token_list,
query_length,
num_labels)
token_type_ids = [0]*len(input_ids)
label_token_idx = [0] + label_token_idx  # prepend position 0 (CLS) so that entity label ids start from 1
text_token_idx = []
span_index_token_list=sorted(list(set(span_index_token_list)))
if is_predict:
text_token_idx.extend([query_length+idx for idx in range(len(encode_dict['input_ids']))])
else:
if self.args.fast_ex_mode and self.args.train:
text_token_idx.extend(span_index_token_list)
else:
text_token_idx.extend([query_length+idx for idx in range(len(encode_dict['input_ids']))])
return {
"input_ids": torch.tensor(input_ids).long(),
"token_type_ids": torch.tensor(token_type_ids).long(),
"attention_mask": torch.tensor(attention_mask).float(),
"position_ids": torch.tensor(position_ids).long(),
"span_labels": torch.tensor(span_labels).float(),
"span_labels_mask": torch.tensor(span_labels_mask).float(),
"label_token_idx": torch.tensor(label_token_idx).long(),
"text_token_idx": torch.tensor(text_token_idx).long(),
"query_length": torch.tensor(query_length).long(),
}
def collate_fn(self, batch):
'''
Aggregate a batch data.
batch = [ins1_dict, ins2_dict, ..., insN_dict]
batch_data = {'sentence':[ins1_sentence, ins2_sentence...], 'input_ids':[ins1_input_ids, ins2_input_ids...], ...}
'''
batch_data = {}
for key in batch[0]:
batch_data[key] = [example[key] for example in batch]
batch_data['input_ids'] = nn.utils.rnn.pad_sequence(batch_data['input_ids'],
batch_first=True,
padding_value=0)
batch_size, batch_max_length = batch_data['input_ids'].shape
batch_data['label_token_idx'] = nn.utils.rnn.pad_sequence(batch_data['label_token_idx'],
batch_first=True,
padding_value=0)
batch_data['text_token_idx'] = nn.utils.rnn.pad_sequence(batch_data['text_token_idx'],
batch_first=True,
padding_value=0)
batch_data['query_length'] = torch.tensor(batch_data['query_length']).long()
batch_size, batch_max_labels = batch_data['label_token_idx'].shape
for k, v in batch_data.items():
if k in ['input_ids', 'label_token_idx','text_token_idx','query_length']:
continue
if k in ['token_type_ids', 'position_ids']:
batch_data[k] = nn.utils.rnn.pad_sequence(v,
batch_first=True,
padding_value=0)
elif k == 'attention_mask':
attention_mask = torch.zeros(
(batch_size, batch_max_length, batch_max_length))
for i, att in enumerate(v):
sample_length, _ = att.shape
attention_mask[i, :sample_length, :sample_length] = att
batch_data[k] = attention_mask
elif k == 'span_labels':
span = torch.zeros(
(batch_size, batch_max_length, batch_max_length, batch_max_labels))
for i, s in enumerate(v):
sample_length, _, sample_num_labels = s.shape
span[i, :sample_length, :sample_length,
:sample_num_labels] = s
batch_data[k] = span
elif k == 'span_labels_mask':
span = torch.zeros(
(batch_size, batch_max_length, batch_max_length, batch_max_labels))-10000
for i, s in enumerate(v):
sample_length, _, sample_num_labels = s.shape
span[i, :sample_length, :sample_length,
:sample_num_labels] = s
batch_data[k] = span
return batch_data
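# A minimal sketch (not part of the original pipeline) of how the schema helpers above turn a
# label "choice" into the query vocabulary used by encode(). Both helpers ignore `self`, so they
# can be exercised unbound; the type names below are hypothetical placeholders.
def _demo_process_choice():
    rel_choice = [['person', 'born_in', 'place'], ['person', 'works_for', 'org']]
    relation_type, entity_type, head_type, tail_type, headtail2relation = \
        UniEXDataEncode.process_relation_choice(None, rel_choice)
    # relation_type == ['born_in', 'works_for'], entity_type == ['person', 'place', 'org']
    # headtail2relation == {'person|place': ['born_in'], 'person|org': ['works_for']}
    event_choice = [{'attack': ['触发词', 'attacker', 'target']}]
    event_type, event_entity_type, args2event = UniEXDataEncode.process_event_choice(None, event_choice)
    # event_type == ['attack', '触发词与要素'], event_entity_type == ['触发词', 'attacker', 'target']
    # args2event == {'触发词': ['attack'], 'attacker': ['attack'], 'target': ['attack']}
    return headtail2relation, args2event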
class UniEXDataset(Dataset):
def __init__(self, data, tokenizer, args, data_encode):
super().__init__()
self.tokenizer = tokenizer
self.max_length = args.max_length
self.data = data
self.args = args
self.data_encode = data_encode
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data_encode.encode(self.data[index])
class UniEXDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TASK NAME DataModel')
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--batchsize', default=16, type=int)
parser.add_argument('--max_length', default=512, type=int)
return parent_args
def __init__(self, train_data, dev_data, tokenizer, args):
super().__init__()
self.args = args
self.data_encode = UniEXDataEncode(tokenizer, args)
self.train_data = UniEXDataset(train_data, tokenizer, args, self.data_encode)
self.valid_data = UniEXDataset(dev_data, tokenizer, args, self.data_encode)
def train_dataloader(self):
return DataLoader(self.train_data, shuffle=True, collate_fn=self.data_encode.collate_fn ,num_workers=self.args.num_workers, batch_size=self.args.batchsize, pin_memory=False)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False, collate_fn=self.data_encode.collate_fn, num_workers=1, batch_size=self.args.batchsize, pin_memory=False)
class MultilabelCrossEntropy(nn.Module):
def __init__(self):
super().__init__()
def forward(self, y_pred, y_true):
y_true = y_true.float()
y_pred = torch.mul((1.0 - torch.mul(y_true, 2.0)), y_pred)
y_pred_neg = y_pred - torch.mul(y_true, 1e12)
y_pred_pos = y_pred - torch.mul(1.0 - y_true, 1e12)
zeros = torch.zeros_like(y_pred[..., :1])
y_pred_neg = torch.cat([y_pred_neg, zeros], axis=-1)
y_pred_pos = torch.cat([y_pred_pos, zeros], axis=-1)
neg_loss = torch.logsumexp(y_pred_neg, axis=-1)
pos_loss = torch.logsumexp(y_pred_pos, axis=-1)
loss = torch.mean(neg_loss + pos_loss)
return loss
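# A minimal sketch (illustrative only, not called anywhere) of MultilabelCrossEntropy: it is a
# multi-label softmax-style loss that pushes positive-class logits above 0 and negative-class
# logits below 0, so well-separated predictions give a small loss and flipped ones a large loss.
def _demo_multilabel_ce():
    loss_fn = MultilabelCrossEntropy()
    y_true = torch.tensor([[1.0, 0.0, 1.0, 0.0]])
    good_pred = torch.tensor([[5.0, -5.0, 5.0, -5.0]])  # positives high, negatives low -> loss ~ 0.03
    bad_pred = -good_pred                               # labels flipped -> loss ~ 11
    return loss_fn(good_pred, y_true), loss_fn(bad_pred, y_true)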
class Triaffine(nn.Module):
def __init__(self, triaffine_hidden_size):
super().__init__()
self.triaffine_hidden_size = triaffine_hidden_size
self.weight = torch.nn.Parameter(torch.zeros(
triaffine_hidden_size, triaffine_hidden_size, triaffine_hidden_size))
torch.nn.init.normal_(self.weight, mean=0, std=0.1)
def forward(self, start_logits, end_logits, cls_logits):
span_logits = torch.einsum(
'bxi,ioj,byj->bxyo', start_logits, self.weight, end_logits)
span_logits = torch.einsum(
'bxyo,bzo->bxyz', span_logits, cls_logits)
return span_logits
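# Shape sketch (illustrative only): Triaffine scores every (start token, end token, label) triple.
# Given start/end representations of shape (bsz, seq, hidden) and label representations of shape
# (bsz, n_labels, hidden), the output is (bsz, seq, seq, n_labels).
def _demo_triaffine_shapes():
    tri = Triaffine(triaffine_hidden_size=8)
    start = torch.randn(2, 5, 8)
    end = torch.randn(2, 5, 8)
    labels = torch.randn(2, 3, 8)
    return tri(start, end, labels).shape  # torch.Size([2, 5, 5, 3])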
class MLPLayer(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self.mlp = torch.nn.Sequential(torch.nn.Linear(
in_features=input_size, out_features=output_size), torch.nn.GELU())
def forward(self, hidden_state):
return self.mlp(hidden_state)
class UniEXBertModel(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.config = config
self.mlp_start = MLPLayer(
self.config.hidden_size, self.config.triaffine_hidden_size)
self.mlp_end = MLPLayer(self.config.hidden_size,
self.config.triaffine_hidden_size)
self.mlp_cls = MLPLayer(self.config.hidden_size,
self.config.triaffine_hidden_size)
self.triaffine = Triaffine(self.config.triaffine_hidden_size)
self.loss_softmax = MultilabelCrossEntropy()
self.loss_sigmoid = torch.nn.BCEWithLogitsLoss()
def span_gather(self,span_labels,text_token_idx):
"""从表为 【seq_len,seq_len】 的 span_labels 提取 只需要计算 loss 的 token 的labels,提取之后,size ->【token_len, token_len】
"""
try:
batch_size,seq_len,_,num_labels=span_labels.shape
_,text_len=text_token_idx.shape
e=torch.arange(seq_len)*seq_len
e=e.to(span_labels.device)
e=e.unsqueeze(0).unsqueeze(-1).repeat(batch_size,1, text_len)
e=e.gather(1, text_token_idx.unsqueeze(-1).repeat(1, 1, text_len))
text_token_idx = text_token_idx.unsqueeze(1).repeat(1, text_len, 1)
text_token_idx = text_token_idx+e
text_token_idx=text_token_idx.reshape(-1,text_len*text_len)
span_labels=span_labels.reshape(-1,seq_len*seq_len,num_labels)
span_labels=span_labels.gather(1, text_token_idx.unsqueeze(-1).repeat(1, 1, num_labels))
span_labels=span_labels.reshape(-1,text_len,text_len,num_labels)
except Exception:  # debugging fallback: log the offending shapes and fall through, returning span_labels un-gathered
print(span_labels.shape)
print(text_token_idx.shape)
return span_labels
def forward(self,
input_ids,
attention_mask,
token_type_ids,
query_length=None,
position_ids=None,
span_labels=None,
span_labels_mask=None,
label_token_idx=None,
text_token_idx=None,
fast_ex_mode=False,
task_type_list=None,
threshold=0.5):
outputs = self.bert(input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
token_type_ids=token_type_ids,
output_hidden_states=True) # (bsz, seq, dim)
hidden_states = outputs[0]
batch_size, seq_len, hidden_size = hidden_states.shape
if span_labels_mask != None:
start_logits = self.mlp_start(hidden_states)
end_logits = self.mlp_end(hidden_states)
cls_logits = hidden_states.gather(
1, label_token_idx.unsqueeze(-1).repeat(1, 1, hidden_size))
cls_logits = self.mlp_cls(cls_logits)
span_logits = self.triaffine(start_logits, end_logits, cls_logits[:,[0],:])
span_logits = span_logits + span_labels_mask[:,:,:,[0]]
index_loss_sigmoid = self.loss_sigmoid(span_logits, span_labels[:,:,:,[0]])
start_logits = start_logits.gather(
1, text_token_idx.unsqueeze(-1).repeat(1, 1, self.config.triaffine_hidden_size))
end_logits = end_logits.gather(
1, text_token_idx.unsqueeze(-1).repeat(1, 1, self.config.triaffine_hidden_size))
span_logits = self.triaffine(start_logits, end_logits, cls_logits[:,1:,:])
span_labels = self.span_gather(span_labels[:,:,:,1:],text_token_idx)
span_labels_mask = self.span_gather(span_labels_mask[:,:,:,1:],text_token_idx)
span_logits = span_logits + span_labels_mask
span_loss_sigmoid = self.loss_sigmoid(span_logits, span_labels)
all_loss = 100000*span_loss_sigmoid + 100000*index_loss_sigmoid
return all_loss, span_logits, span_labels
else:
if not fast_ex_mode:
text_logits = hidden_states.gather(
1, text_token_idx.unsqueeze(-1).repeat(1, 1, hidden_size))
start_logits = self.mlp_start(text_logits)
end_logits = self.mlp_end(text_logits)
cls_logits = hidden_states.gather(
1, label_token_idx.unsqueeze(-1).repeat(1, 1, hidden_size))
cls_logits = self.mlp_cls(cls_logits)
span_logits = self.triaffine(start_logits, end_logits, cls_logits)
span_logits = torch.sigmoid(span_logits)
return span_logits
else:
text_logits = hidden_states.gather(
1, text_token_idx.unsqueeze(-1).repeat(1, 1, hidden_size))
start_logits = self.mlp_start(text_logits)
end_logits = self.mlp_end(text_logits)
cls_logits = hidden_states.gather(
1, label_token_idx.unsqueeze(-1).repeat(1, 1, hidden_size))
cls_logits = self.mlp_cls(cls_logits)
index_logits=cls_logits[:,:1,:]
type_logits=cls_logits[:,1:,:]
span_index_logits = self.triaffine(start_logits, end_logits, index_logits).squeeze(-1)
span_index_logits = torch.sigmoid(span_index_logits)
token_index = torch.zeros(size=(batch_size,seq_len)).to(input_ids.device)
max_num_index = 0
span_index_list=[]
span_index_labels=self.span_gather(span_labels[:,:,:,[0]],text_token_idx).squeeze(-1)
for idx in range(batch_size):
if task_type_list is not None and task_type_list[idx] in ['分类任务']:
span_index = span_index_labels[idx] > threshold
else:
span_index = span_index_logits[idx] > threshold
span_index = span_index.nonzero()
span_index_list.append(span_index)
span_index = span_index.reshape((-1,))
span_index = torch.unique(span_index,sorted=True)
num_span_index = span_index.shape[0]
token_index[idx,:num_span_index]=span_index
max_num_index = num_span_index if max_num_index < num_span_index else max_num_index
token_index = token_index[:,:max_num_index].long()
start_logits = start_logits.gather(
1, token_index.unsqueeze(-1).repeat(1, 1, self.config.triaffine_hidden_size))
end_logits = end_logits.gather(
1, token_index.unsqueeze(-1).repeat(1, 1, self.config.triaffine_hidden_size))
span_type_logits = self.triaffine(start_logits, end_logits, type_logits)
span_type_logits = torch.sigmoid(span_type_logits)
return span_index_logits, span_type_logits, span_index_list, token_index
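# A minimal sketch (referenced from the span_gather docstring above, not used by the model) of the
# flattened-index gather performed there. span_gather never reads `self`, so it can be exercised
# unbound on a toy 4x4 grid: keeping tokens [1, 3] returns the 2x2 sub-grid of rows/columns 1 and 3.
def _demo_span_gather():
    span_labels = torch.arange(16, dtype=torch.float).reshape(1, 4, 4, 1)
    text_token_idx = torch.tensor([[1, 3]])
    gathered = UniEXBertModel.span_gather(None, span_labels, text_token_idx)
    # gathered[0, :, :, 0] == tensor([[ 5.,  7.], [13., 15.]])
    return gathered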
class UniEXLitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--learning_rate', default=1e-5, type=float)
parser.add_argument('--weight_decay', default=0.1, type=float)
parser.add_argument('--warmup', default=0.01, type=float)
return parent_args
def __init__(self, args, dev_data=[],test_data=[], num_data=1):
super().__init__()
self.args = args
self.num_data = num_data
self.config=AutoConfig.from_pretrained(args.pretrained_model_path)
self.model = UniEXBertModel.from_pretrained(args.pretrained_model_path)
self.computacc=ComputAcc(args)
self.dev_data=dev_data
self.test_data=test_data
self.corr_count=0
self.gold_count=0
self.pred_count=0
def setup(self, stage) -> None:
if stage == 'fit':
num_gpus = self.trainer.num_devices if self.trainer.num_devices is not None else 0
self.total_step = int(self.trainer.max_epochs * self.num_data /
(max(1, num_gpus) * self.trainer.accumulate_grad_batches))
print('Total training step:', self.total_step)
def training_step(self, batch, batch_idx):
loss, span_logits, span_labels = self.model(**batch)
f1, recall, precise, _, _, _ = self.comput_metrix(
span_logits, span_labels)
self.log('train_loss', loss)
self.log('train_f1', f1)
self.log('train_recall', recall)
self.log('train_precise', precise)
return loss
def training_epoch_end(self,batch):
f1, recall, precise=self.computacc.predict(self.dev_data,self.model)
self.log('val_f1', f1)
self.log('val_recall', recall)
self.log('val_precise', precise)
if self.test_data!=[]:
f1, recall, precise=self.computacc.predict(self.test_data,self.model)
else:
f1, recall, precise=0,0,0
self.log('test_f1', f1)
self.log('test_recall', recall)
self.log('test_precise', precise)
gc.collect()
def configure_optimizers(self):
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
paras = list(
filter(lambda p: p[1].requires_grad, self.named_parameters()))
paras = [{
'params':
[p for n, p in paras if not any(nd in n for nd in no_decay)],
'weight_decay': self.args.weight_decay
}, {
'params': [p for n, p in paras if any(nd in n for nd in no_decay)],
'weight_decay': 0.0
}]
optimizer = torch.optim.AdamW(paras, lr=self.args.learning_rate)
scheduler = get_linear_schedule_with_warmup(
optimizer, int(self.total_step * self.args.warmup),
self.total_step)
return [{
'optimizer': optimizer,
'lr_scheduler': {
'scheduler': scheduler,
'interval': 'step',
'frequency': 1
}
}]
def comput_metrix(self, logits, labels):
logits = torch.sigmoid(logits)  # [b, s, s]
ones = torch.ones_like(logits)
zero = torch.zeros_like(logits)
logits = torch.where(logits < 0.5, zero, ones)
y_pred = logits.reshape(shape=(-1,))
y_true = labels.reshape(shape=(-1,))
corr = torch.eq(y_pred, y_true).float()
corr = torch.multiply(y_true, corr)
if torch.sum(y_true.float()) <= 0:
recall = 0
else:
recall = torch.sum(corr.float())/(torch.sum(y_true.float()))
if torch.sum(y_pred.float()) <= 0:
precise = 0
else:
precise = torch.sum(corr.float())/(torch.sum(y_pred.float()))
if recall+precise <= 0:
f1 = 0
else:
f1 = 2*recall*precise/(recall+precise)
return f1, recall, precise, torch.sum(corr.float()), torch.sum(y_true.float()), torch.sum(y_pred.float())
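# A worked micro-example (illustrative only) of comput_metrix: logits are squashed with a sigmoid
# and thresholded at 0.5, then span-level precision/recall/F1 are computed against the binary
# labels. comput_metrix never reads `self`, so it can be exercised unbound.
def _demo_comput_metrix():
    logits = torch.tensor([[4.0, -4.0, 4.0]])   # predicts spans 0 and 2
    labels = torch.tensor([[1.0, 0.0, 0.0]])    # only span 0 is gold
    f1, recall, precise, n_corr, n_gold, n_pred = UniEXLitModel.comput_metrix(None, logits, labels)
    # recall == 1.0, precise == 0.5, f1 ~ 0.667 (1 correct span out of 2 predicted, 1 gold)
    return f1, recall, precise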
class TaskModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./log/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_epochs', default=1, type=float)
parser.add_argument('--every_n_train_steps', default=100, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
save_last=True,
every_n_epochs=args.every_n_epochs,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename)
class OffsetMapping:
def __init__(self):
self._do_lower_case = True
@staticmethod
def stem(token):
if token[:2] == '##':
return token[2:]
else:
return token
@staticmethod
def _is_control(ch):
return unicodedata.category(ch) in ('Cc', 'Cf')
@staticmethod
def _is_special(ch):
return bool(ch) and (ch[0] == '[') and (ch[-1] == ']')
def rematch(self, text, tokens):
if self._do_lower_case:
text = text.lower()
normalized_text, char_mapping = '', []
for i, ch in enumerate(text):
if self._do_lower_case:
ch = unicodedata.normalize('NFD', ch)
ch = ''.join([c for c in ch if unicodedata.category(c) != 'Mn'])
ch = ''.join([
c for c in ch
if not (ord(c) == 0 or ord(c) == 0xfffd or self._is_control(c))
])
normalized_text += ch
char_mapping.extend([i] * len(ch))
text, token_mapping, offset = normalized_text, [], 0
for token in tokens:
if self._is_special(token):
token_mapping.append([offset+oi for oi in range(len(token))])
offset+=1
else:
token = self.stem(token)
start = text[offset:].index(token) + offset
end = start + len(token)
token_mapping.append(char_mapping[start:end])
offset = end
return token_mapping
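# A minimal sketch (assuming WordPiece-style sub-tokens) of OffsetMapping.rematch: it maps each
# token back to the character positions it covers in the raw text, which is what extract_entity
# later uses to cut entity surface strings out of `text`.
def _demo_offset_mapping():
    mapping = OffsetMapping().rematch('unaffordable', ['un', '##afford', '##able'])
    # mapping == [[0, 1], [2, 3, 4, 5, 6, 7], [8, 9, 10, 11]]
    return mapping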
class FastExtractModel:
def __init__(self, tokenizer, args):
self.tokenizer = tokenizer
self.args = args
self.fast_ex_mode = True if args.fast_ex_mode else False
self.data_encode = UniEXDataEncode(tokenizer, args)
self.offset_mapping_model = OffsetMapping()
def extract_index(self, span_logits, sample_length, split_value=0.5):
result = []
for i in range(sample_length):
for j in range(i, sample_length):
if span_logits[i, j] > split_value:
result.append((i, j, span_logits[i, j]))
return result
def extract_entity(self, text, entity_idx, text_start_id, offset_mapping):
start_split=offset_mapping[entity_idx[0]-text_start_id] if entity_idx[0]-text_start_id<len(offset_mapping) and entity_idx[0]-text_start_id>=0 else []
end_split=offset_mapping[entity_idx[1]-text_start_id] if entity_idx[1]-text_start_id<len(offset_mapping) and entity_idx[1]-text_start_id>=0 else []
if start_split!=[] and end_split!=[]:
entity = text[start_split[0]:end_split[-1]+1]
return entity,start_split[0],end_split[-1]
else:
return '',0,0
def extract(self, batch_data, model):
batch = [self.data_encode.encode(
sample,is_predict=True) for sample in batch_data]
batch = self.data_encode.collate_fn(batch)
new_batch = {}
for k, v in batch.items():
if k not in ['span_labels_mask']:
new_batch[k]=v.cuda()
task_type_list=[item['task_type'] for item in batch_data]
span_index_logits, span_type_logits, span_index_list, token_index = model(**new_batch,fast_ex_mode=self.fast_ex_mode,task_type_list=task_type_list,threshold=self.args.threshold_index)
token_index=token_index.cpu().detach().numpy()
span_type_logits=span_type_logits.cpu().detach().numpy()
query_len = 1
for i, item in enumerate(batch_data):
if 'tokens' in batch_data[i].keys():
del batch_data[i]['tokens']
if isinstance(item['choice'][0], list):
relation_type_list, entity_type_list, head_type, tail_type, headtail2relation = self.data_encode.process_relation_choice(item['choice'])
else:
entity_type_list = item['choice']
relation_type_list = []
token_index2index={v:idx for idx,v in enumerate(token_index[i])}
tokens = self.tokenizer.tokenize(item['text'])
offset_mapping = self.offset_mapping_model.rematch(item['text'],tokens)
sample_length = min(query_len + len(tokens), self.args.max_length - 1)
# note: 'tokens' was dropped from the output dict above, so rebuild the word list from the raw text
encode_dict = self.tokenizer(item['text'].split(' '),
is_split_into_words=True,
max_length=self.args.max_length,
truncation=True,
add_special_tokens=False)
word_ids = encode_dict.word_ids()
if item['task_type'] in ['实体识别'] and isinstance(item['choice'][0], str):
"""
实体抽取解码过程如下:
1、抽取实体的位置,span_index_list
2、遍历span_index_list,识别每个span的类别
"""
entity_logits = span_type_logits[i]
entity_list = []
for entity_idx in span_index_list[i].cpu().detach().numpy():
entity_start = token_index2index[entity_idx[0]]
entity_end = token_index2index[entity_idx[1]]
entity_type_idx = np.argmax(
entity_logits[entity_start, entity_end])
entity_type_score = entity_logits[entity_start, entity_end, entity_type_idx]
if entity_type_score>self.args.threshold_entity*0:
entity_type = entity_type_list[entity_type_idx]
entity,start_idx,end_idx = self.extract_entity(item['text'],[entity_idx[0], entity_idx[1]],query_len,offset_mapping)
entity = {
'entity_text': entity,
'entity_type': entity_type,
'type_score': float(entity_type_score),
'entity_index': [[start_idx,end_idx]]
}
if entity not in entity_list:
entity_list.append(entity)
batch_data[i]['entity_list'] = entity_list
if item['task_type'] in ['分类任务'] and isinstance(item['choice'][0], str):
entity_logits = span_type_logits[i]
entity_list = []
for entity_idx in span_index_list[i].cpu().detach().numpy():
entity_start = token_index2index[entity_idx[0]]
entity_end = token_index2index[entity_idx[1]]
entity_type_idx = np.argmax(
entity_logits[entity_start, entity_end])
entity_type_score = entity_logits[entity_start, entity_end, entity_type_idx]
if entity_type_score>self.args.threshold_entity*0:
entity_type = entity_type_list[entity_type_idx]
entity,start_idx,end_idx = self.extract_entity(item['text'],[entity_idx[0], entity_idx[1]],query_len,offset_mapping)
entity = {
'entity_text': entity,
'entity_type': entity_type,
'type_score': float(entity_type_score),
'entity_index': [[start_idx,end_idx]]
}
if entity not in entity_list:
entity_list.append(entity)
batch_data[i]['entity_list'] = entity_list
elif item['task_type'] in ['关系抽取','指代消解']:
"""
实体抽取解码过程如下:
1、抽取实体的位置,span_index_list
2、遍历span_index_list,识别每个span的类别,并确定是head还是tail的实体。得到entity_idx_type_list_head,entity_idx_type_list_tail
3、遍历head和tail,判断一对<head,tail>是否构成一对关系。
"""
assert isinstance(item['choice'][0], list)
relation_type_list, entity_type_list, head_type, tail_type, headtail2relation = self.data_encode.process_relation_choice(item['choice'])
entity_logits = span_type_logits[i][:, :, :len(entity_type_list)] # 3
rel_logits = span_type_logits[i][:, :, len(entity_type_list):len(entity_type_list)+len(relation_type_list)] # 2
entity_idx_list = span_index_list[i].cpu().detach().numpy()
if entity_idx_list.shape[0]>sample_length/2:
batch_data[i]['entity_list'] = []
batch_data[i]['spo_list'] = []
else:
entity_list = []
entity_idx_type_list_head = []  # head candidates
entity_idx_type_list_tail = []  # tail candidates
for entity_idx in entity_idx_list:
entity_start = token_index2index[entity_idx[0]]
entity_end = token_index2index[entity_idx[1]]
entity_type_idx = np.argmax(
entity_logits[entity_start, entity_end])
entity_type_score = entity_logits[entity_start, entity_end,entity_type_idx]
entity_type = entity_type_list[entity_type_idx]
if entity_type_score>self.args.threshold_entity*0:
if entity_type in head_type:
entity_idx_type_list_head.append((entity_idx[0], entity_idx[1], entity_type, entity_type_score))
if entity_type in tail_type:
entity_idx_type_list_tail.append((entity_idx[0], entity_idx[1], entity_type, entity_type_score))
spo_list = []
entity_list = []
for entity_head in entity_idx_type_list_head:
for entity_tail in entity_idx_type_list_tail:
subject_type = entity_head[2]
object_type = entity_tail[2]
if subject_type + '|' + object_type not in headtail2relation.keys():
continue
so_rel=headtail2relation[subject_type + '|' + object_type]
so_rel_idx=[relation_type_list.index(r) for r in so_rel]
predicate=None
if len(so_rel_idx)>=1:
so_rel_logits = rel_logits[:, :, so_rel_idx]
hh = np.argmax(
so_rel_logits[token_index2index[entity_head[0]], token_index2index[entity_tail[0]]])
tt = np.argmax(
so_rel_logits[token_index2index[entity_head[1]], token_index2index[entity_tail[1]]])
hh_score=so_rel_logits[token_index2index[entity_head[0]], token_index2index[entity_tail[0]], hh]
tt_score=so_rel_logits[token_index2index[entity_head[1]], token_index2index[entity_tail[1]], tt]
if hh_score>tt_score:
idx=hh
ht_score = hh_score
else:
idx=tt
ht_score = tt_score
if ht_score>self.args.threshold_relation:
predicate = so_rel[idx]
predicate_score = ht_score
entity_subject,start_idx,end_idx = self.extract_entity(item['text'], [entity_head[0], entity_head[1]],query_len,offset_mapping)
subject_dict = {
'entity_text': entity_subject,
'entity_type': subject_type,
'score': float(entity_head[3]),
'entity_index': [[start_idx,end_idx]]}
entity_object,start_idx,end_idx = self.extract_entity(item['text'], [entity_tail[0], entity_tail[1]],query_len,offset_mapping)
object_dict = {
'entity_text': entity_object,
'entity_type': object_type,
'score': float(entity_tail[3]),
'entity_index': [[start_idx,end_idx]]}
entity_list.append(subject_dict)
entity_list.append(object_dict)
if predicate != None:
spo = {
'predicate': predicate,
'score': predicate_score,
'subject': subject_dict,
'object': object_dict,
}
spo_list.append(spo)
batch_data[i]['entity_list'] = entity_list
batch_data[i]['spo_list'] = spo_list
elif item['task_type'] in ['事件抽取'] and isinstance(item['choice'][0], dict):
"""
实体抽取解码过程如下:
1、抽取实体的位置,span_index_list
2、遍历span_index_list,同时识别该 span 的 event_type 和 entity_type 。取event_type_score+entity_type_score对应的类别
3、遍历head和tail,判断一对<head,tail>是否构成一对关系。
"""
event_type_list, entity_type_list, args2event = self.data_encode.process_event_choice(item['choice'])
event_args_type_list=[]
for at,et_list in args2event.items():
for et in et_list:
if (et,at) not in event_args_type_list:
event_args_type_list.append((et,at))
entity_logits = span_type_logits[i][:, :, :len(entity_type_list)]
event_logits = span_type_logits[i][:, :, len(entity_type_list): len(entity_type_list)+len(event_type_list)]
trigger_args_logits = span_type_logits[i][:,:,-1]
entity_logits = np.tile(entity_logits[:, :, np.newaxis, :],[1,1,len(event_type_list),1])
event_logits = np.tile(event_logits[:, :, :, np.newaxis],[1,1,1,len(entity_type_list)])
event_entity_logits = (event_logits+entity_logits)/2
seq_len,seq_len,etl,atl=event_entity_logits.shape
for ei,et in enumerate(event_type_list):
for ai,at in enumerate(entity_type_list):
if (et,at) not in event_args_type_list:
event_entity_logits[:,:,ei,ai]=np.zeros((seq_len,seq_len))
entity_idx_list = span_index_list[i].cpu().detach().numpy()
pred_event_type_list = []
args_list=[]
trigger_list=[]
for entity_idx in entity_idx_list:
entity_start = token_index2index[entity_idx[0]]
entity_end = token_index2index[entity_idx[1]]
event_entity_type_idx = np.unravel_index(np.argmax(event_entity_logits[entity_start, entity_end], axis=None), event_entity_logits[entity_start, entity_end].shape)
entity_type_score = entity_logits[entity_start, entity_end,event_entity_type_idx[0],event_entity_type_idx[1]]
entity_type = entity_type_list[event_entity_type_idx[1]]
event_type = event_type_list[event_entity_type_idx[0]]
entity,start_idx,end_idx = self.extract_entity(item['text'],[entity_idx[0], entity_idx[1]],query_len,offset_mapping)
if entity != '':
entity = {
'entity_text': entity,
'entity_type': entity_type,
'entity_score': float(entity_type_score),
'entity_index': [[start_idx,end_idx]]
}
event={}
event['event_type']=event_type
event['args']= [entity]
if event_type not in pred_event_type_list:
pred_event_type_list.append(event_type)
if entity_type == '触发词':
trigger_list.append((event,entity_start, entity_end))
else:
args_list.append((event,entity_start, entity_end))
if len(trigger_list)+len(args_list)>sample_length/4:
batch_data[i]['event_list'] = []
continue
event_list=[]
for event_type in pred_event_type_list:
tmp_e_list=[]
trigger_idx_list=[]
for e in trigger_list:
if e[0]['event_type']==event_type:
tmp={}
tmp['event_type']=event_type
tmp['args']=e[0]['args']
tmp_e_list.append(tmp)
trigger_idx_list.append(e)
if tmp_e_list==[]:
tmp={}
tmp['event_type']=event_type
tmp['args']=[]
tmp_e_list.append(tmp)
for e in args_list:
if e[0]['event_type']==event_type:
if trigger_idx_list==[]:
tmp_e_list[0]['args'].extend(e[0]['args'])
else:
scores=[]
for t in trigger_idx_list:
score=trigger_args_logits[e[1],t[1]]+trigger_args_logits[e[2],t[2]]
scores.append(score)
et=scores.index(max(scores))
tmp_e_list[et]['args'].extend(e[0]['args'])
event_list.extend(tmp_e_list)
batch_data[i]['event_list'] = event_list
return batch_data
class ExtractModel:
def __init__(self, tokenizer, args):
self.tokenizer = tokenizer
self.args = args
self.fast_ex_mode = True if args.fast_ex_mode else False
self.data_encode= UniEXDataEncode(tokenizer, args)
self.offset_mapping_model = OffsetMapping()
def extract_index(self, span_logits, sample_length, split_value=0.5):
result = []
for i in range(span_logits.shape[0]):
for j in range(i, span_logits.shape[1]):
c = np.argmax(span_logits[i, j])
# for c in range(span_logits.shape[2]):
if span_logits[i, j, c] > split_value:
result.append((i, j, c, span_logits[i, j, c]))
return result
def extract_entity(self, text,entity_idx,text_start_id,offset_mapping):
start_split=offset_mapping[entity_idx[0]-text_start_id] if entity_idx[0]-text_start_id<len(offset_mapping) and entity_idx[0]-text_start_id>=0 else []
end_split=offset_mapping[entity_idx[1]-text_start_id] if entity_idx[1]-text_start_id<len(offset_mapping) and entity_idx[1]-text_start_id>=0 else []
if start_split!=[] and end_split!=[]:
entity = text[start_split[0]:end_split[-1]+1]
return entity,start_split[0],end_split[-1]
else:
return '',0,0
def extract(self, batch_data, model):
batch = [self.data_encode.encode(
sample,is_predict=True) for sample in batch_data]
batch = self.data_encode.collate_fn(batch)
new_batch = {}
for k, v in batch.items():
if k not in ['span_labels','span_labels_mask']:
new_batch[k]=v.to(model.device)
span_logits = model(**new_batch, fast_ex_mode=self.fast_ex_mode)
span_logits = span_logits.cpu().detach().numpy()
span_logits = span_logits[:,:,:,1:]
query_len = 1
for i, item in enumerate(batch_data):
if 'tokens' in batch_data[i].keys():
del batch_data[i]['tokens']
if isinstance(item['choice'][0], list):
relation_type_list, entity_type_list, head_type, tail_type, headtail2relation = self.data_encode.process_relation_choice(item['choice'])
else:
entity_type_list = item['choice']
relation_type_list = []
tokens = self.tokenizer.tokenize(item['text'])
offset_mapping = self.offset_mapping_model.rematch(item['text'],tokens)
sample_length = min(query_len + len(tokens), self.args.max_length - 1)
if item['task_type'] in ['实体识别'] and isinstance(item['choice'][0], str):
entity_idx_type_list = self.extract_index(
span_logits[i], sample_length)
entity_list = []
for entity_idx_type in entity_idx_type_list:
entity_start, entity_end, entity_type, score = entity_idx_type
entity_type = entity_type_list[entity_type]
entity,start_idx,end_idx = self.extract_entity(item['text'],[entity_start, entity_end],query_len,offset_mapping)
if entity != '':
entity = {
'entity_text': entity,
'entity_type': entity_type,
'score': float(score),
'entity_index': [[start_idx,end_idx]]
}
if entity not in entity_list:
entity_list.append(entity)
batch_data[i]['entity_list'] = entity_list
elif item['task_type'] in ['抽取式阅读理解']:
entity_list = []
for c in range(len(item['choice'])):
logits = span_logits[i]
max_index = np.unravel_index(
np.argmax(logits, axis=None), logits.shape)
if logits[max_index] < self.args.threshold_entity:
entity = {
'entity_name': '',
'entity_type': item['choice'][c],
'score': float(logits[max_index]),
'entity_idx': [[]]
}
if entity not in entity_list:
entity_list.append(entity)
else:
entity,start_idx,end_idx = self.extract_entity(item['text'],max_index, query_len, offset_mapping)
entity = {
'entity_name': entity,
'entity_type': item['choice'][c],
'score': float(logits[max_index]),
'entity_idx': [[start_idx,end_idx]]
}
if entity not in entity_list:
entity_list.append(entity)
batch_data[i]['entity_list'] = entity_list
elif item['task_type'] in ['关系抽取','指代消解'] and isinstance(item['choice'][0], list):
assert isinstance(item['choice'][0], list)
relation_type_list, entity_type_list, head_type, tail_type, headtail2relation = self.data_encode.process_relation_choice(item['choice'])
head_type_idx=[entity_type_list.index(et) for et in head_type]
tail_type_idx=[entity_type_list.index(et) for et in tail_type]
head_logits = span_logits[i][:, :, head_type_idx]
tail_logits = span_logits[i][:, :, tail_type_idx]
rel_logits = span_logits[i][:, :, len(entity_type_list):len(entity_type_list)+len(relation_type_list)]
entity_idx_type_list_head = self.extract_index(
head_logits, sample_length, split_value=self.args.threshold_entity)
entity_idx_type_list_tail = self.extract_index(
tail_logits, sample_length, split_value=self.args.threshold_entity)
if len(entity_idx_type_list_head)+len(entity_idx_type_list_tail)>sample_length/2:
batch_data[i]['entity_list'] = []
batch_data[i]['spo_list'] = []
else:
spo_list = []
entity_list = []
for entity_head in entity_idx_type_list_head:
for entity_tail in entity_idx_type_list_tail:
subject_type = head_type[entity_head[2]]
object_type = tail_type[entity_tail[2]]
entity_subject,start_idx,end_idx = self.extract_entity(item['text'], [entity_head[0], entity_head[1]], query_len, offset_mapping)
subject_dict = {
'entity_text': entity_subject,
'entity_type': subject_type,
'score': float(entity_head[3]),
'entity_index': [[start_idx,end_idx]]}
entity_list.append(subject_dict)
entity_object,start_idx,end_idx = self.extract_entity(item['text'], [entity_tail[0], entity_tail[1]], query_len, offset_mapping)
object_dict = {
'entity_text': entity_object,
'entity_type': object_type,
'score': float(entity_tail[3]),
'entity_index': [[start_idx,end_idx]]
}
entity_list.append(object_dict)
if subject_type + '|' + object_type not in headtail2relation.keys():
continue
so_rel=headtail2relation[subject_type + '|' + object_type]
so_rel_idx=[relation_type_list.index(r) for r in so_rel]
predicate=None
if len(so_rel_idx)>=1:
so_rel_logits = rel_logits[:, :, so_rel_idx]
hh = np.argmax(
so_rel_logits[entity_head[0], entity_tail[0]])
tt = np.argmax(
so_rel_logits[entity_head[1], entity_tail[1]])
hh_score=so_rel_logits[entity_head[0], entity_tail[0], hh]+so_rel_logits[entity_head[1], entity_tail[1], hh]
tt_score=so_rel_logits[entity_head[0], entity_tail[0], tt]+so_rel_logits[entity_head[1], entity_tail[1], tt]
idx=hh if hh_score>tt_score else tt
ht_score = so_rel_logits[entity_head[0], entity_tail[0], idx]+so_rel_logits[entity_head[1], entity_tail[1], idx]
if ht_score/2>self.args.threshold_relation:
predicate=so_rel[idx]
predicate_score=ht_score/2
if predicate != None:
if entity_subject != '' and entity_object != '':
spo = {
'predicate': predicate,
'score': float(predicate_score),
'subject': subject_dict,
'object': object_dict,
}
spo_list.append(spo)
batch_data[i]['entity_list'] = entity_list
batch_data[i]['spo_list'] = spo_list
elif item['task_type'] in ['事件抽取'] and isinstance(item['choice'][0], dict):
event_type_list, entity_type_list, args2event = self.data_encode.process_event_choice(item['choice'])
event_args_type_list=[]
for at,et_list in args2event.items():
for et in et_list:
if (et,at) not in event_args_type_list:
event_args_type_list.append((et,at))
entity_logits = span_logits[i][:, :, :len(entity_type_list)]
event_logits = span_logits[i][:, :, len(entity_type_list):len(entity_type_list)+len(event_type_list)]
trigger_args_logits = span_logits[i][:,:,-1]
entity_logits = np.tile(entity_logits[:, :, np.newaxis, :],[1,1,len(event_type_list),1])
event_logits = np.tile(event_logits[:, :, :, np.newaxis],[1,1,1,len(entity_type_list)])
event_entity_logits = (event_logits+entity_logits)/2
seq_len,seq_len,etl,atl=event_entity_logits.shape
for ei,et in enumerate(event_type_list):
for ai,at in enumerate(entity_type_list):
if (et,at) not in event_args_type_list:
event_entity_logits[:,:,ei,ai]=np.zeros((seq_len,seq_len))
pred_event_type_list = []
args_list=[]
trigger_list=[]
for sidx in range(event_entity_logits.shape[0]):
for eidx in range(sidx, event_entity_logits.shape[1]):
event_entity_type_idx = np.unravel_index(np.argmax(event_entity_logits[sidx, eidx], axis=None), event_entity_logits[sidx, eidx].shape)
entity_type_score = entity_logits[sidx, eidx,event_entity_type_idx[0],event_entity_type_idx[1]]
event_type_score = event_logits[sidx, eidx,event_entity_type_idx[0],event_entity_type_idx[1]]
entity_type = entity_type_list[event_entity_type_idx[1]]
event_type = event_type_list[event_entity_type_idx[0]]
if entity_type_score+event_type_score>self.args.threshold_entity+self.args.threshold_event:
entity,start_idx,end_idx = self.extract_entity(item['text'],[sidx, eidx],query_len,offset_mapping)
if entity !='':
entity = {
'entity_text': entity,
'entity_type': entity_type,
'entity_score':float(entity_type_score),
'entity_index': [[start_idx, end_idx]]
}
event={}
event['event_type']=event_type
event['args']= [entity]
if event_type not in pred_event_type_list:
pred_event_type_list.append(event_type)
if entity_type == '触发词':
trigger_list.append((event ,sidx, eidx))
else:
args_list.append((event, sidx, eidx))
if len(trigger_list)+len(args_list)>sample_length/4:
batch_data[i]['event_list'] = []
continue
event_list=[]
for event_type in pred_event_type_list:
tmp_e_list=[]
trigger_idx_list=[]
for e in trigger_list:
if e[0]['event_type']==event_type:
tmp={}
tmp['event_type']=event_type
tmp['args']=e[0]['args']
tmp_e_list.append(tmp)
trigger_idx_list.append((e[1],e[2]))
if tmp_e_list==[]:
tmp={}
tmp['event_type']=event_type
tmp['args']=[]
tmp_e_list.append(tmp)
for e in args_list:
if e[0]['event_type']==event_type:
if trigger_idx_list==[]:
tmp_e_list[0]['args'].extend(e[0]['args'])
else:
scores=[]
for t in trigger_idx_list:
score=trigger_args_logits[e[1],t[0]]+trigger_args_logits[e[2],t[1]]
scores.append(score)
et=scores.index(max(scores))
tmp_e_list[et]['args'].extend(e[0]['args'])
event_list.extend(tmp_e_list)
batch_data[i]['event_list'] = event_list
return batch_data
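# A minimal sketch (illustrative only) of ExtractModel.extract_index: for every upper-triangular
# (start, end) pair it keeps the best-scoring label whenever that score clears the threshold.
# The method carries no state, so it can be exercised unbound on a toy score grid.
def _demo_extract_index():
    span_logits = np.zeros((3, 3, 2))
    span_logits[0, 1, 1] = 0.9  # span (0, 1) scored 0.9 for label 1
    spans = ExtractModel.extract_index(None, span_logits, sample_length=3)
    # spans == [(0, 1, 1, 0.9)]
    return spans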
class ComputAcc:
def __init__(self, args):
self.args=args
added_token = ['[unused'+str(i+1)+']' for i in range(99)]
self.tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_path, is_split_into_words=True, add_prefix_space=True, additional_special_tokens=added_token)
if args.fast_ex_mode:
self.em = FastExtractModel(self.tokenizer, args)
else:
self.em = ExtractModel(self.tokenizer, args)
def predict(self, test_data, model):
test_data_ori=copy.deepcopy(test_data)
result = []
start = 0
while start < len(test_data_ori):
batch_data = test_data_ori[start:start+self.args.batchsize]
start += self.args.batchsize
batch_result = self.em.extract(
batch_data, model)
result.extend(batch_result)
if isinstance(test_data[0]['choice'][0],list):
f1, recall, precise = get_rel_f1(test_data, result)
elif isinstance(test_data[0]['choice'][0],dict):
f1, recall, precise = get_event_f1(test_data, result)
else:
f1, recall, precise = get_entity_f1(test_data, result)
del test_data_ori, result
gc.collect()
return f1, recall, precise
class UniEXPipelines:
@staticmethod
def pipelines_args(parent_args):
total_parser = parent_args.add_argument_group("piplines args")
total_parser.add_argument(
'--pretrained_model_path', default='', type=str)
total_parser.add_argument('--output_path',
default='./predict.json', type=str)
total_parser.add_argument('--load_checkpoints_path',
default='', type=str)
total_parser.add_argument('--max_extract_entity_number',
default=1, type=float)
total_parser.add_argument('--train', action='store_true')
total_parser.add_argument('--fast_ex_mode', action='store_true')
total_parser.add_argument('--threshold_index',
default=0.5, type=float)
total_parser.add_argument('--threshold_entity',
default=0.5, type=float)
total_parser.add_argument('--threshold_event',
default=0.5, type=float)
total_parser.add_argument('--threshold_relation',
default=0.5, type=float)
total_parser = UniEXDataModel.add_data_specific_args(total_parser)
total_parser = TaskModelCheckpoint.add_argparse_args(total_parser)
total_parser = UniEXLitModel.add_model_specific_args(total_parser)
total_parser = pl.Trainer.add_argparse_args(parent_args)
return parent_args
def __init__(self, args):
if args.load_checkpoints_path != '':
self.model = UniEXLitModel.load_from_checkpoint(
args.load_checkpoints_path, args=args)
print('Checkpoint loaded:', args.load_checkpoints_path)
else:
self.model = UniEXLitModel(args)
self.args = args
self.checkpoint_callback = TaskModelCheckpoint(args).callbacks
self.logger = loggers.TensorBoardLogger(save_dir=args.default_root_dir)
self.trainer = pl.Trainer.from_argparse_args(args,
logger=self.logger,
callbacks=[self.checkpoint_callback])
added_token = ['[unused'+str(i+1)+']' for i in range(10)]
self.tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_path, is_split_into_words=True, add_prefix_space=True, additional_special_tokens=added_token)
if args.fast_ex_mode:
self.em = FastExtractModel(self.tokenizer, args)
else:
self.em = ExtractModel(self.tokenizer, args)
def fit(self, train_data, dev_data,test_data=[]):
data_model = UniEXDataModel(
train_data, dev_data, self.tokenizer, self.args)
self.model.num_data = len(train_data)
self.model.dev_data = dev_data
self.model.test_data = test_data
self.trainer.fit(self.model, data_model)
def predict(self, test_data, cuda=True):
result = []
start = 0
if cuda:
self.model = self.model.cuda()
self.model.eval()
while start < len(test_data):
batch_data = test_data[start:start+self.args.batchsize]
start += self.args.batchsize
batch_result = self.em.extract(
batch_data, self.model.model)
result.extend(batch_result)
return result
def load_data(data_path):
with open(data_path, 'r', encoding='utf8') as f:
lines = f.readlines()
samples = [json.loads(line) for line in tqdm(lines)]
return samples
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--data_dir', default='./data', type=str)
total_parser.add_argument('--train_data', default='train.json', type=str)
total_parser.add_argument('--valid_data', default='dev.json', type=str)
total_parser.add_argument('--test_data', default='test.json', type=str)
total_parser = UniEXPipelines.pipelines_args(total_parser)
args = total_parser.parse_args()
train_data = load_data(os.path.join(args.data_dir, args.train_data))
dev_data = load_data(os.path.join(args.data_dir, args.valid_data))
test_data = load_data(os.path.join(args.data_dir, args.test_data))
# train_data=train_data[:10]
test_data=test_data[:100]
dev_data=dev_data[:10]
test_data_ori = copy.deepcopy(test_data)
model = UniEXPipelines(args)
if args.train:
model.fit(train_data, dev_data,test_data)
start_time=time.time()
pred_data = model.predict(test_data)
consum=time.time()-start_time
print('total inference time (s):', consum)
print('sent/s:',len(test_data)/consum)
for line in pred_data[:10]:
print(line)
if isinstance(test_data_ori[0]['choice'][0],list):
f1, recall, precise = get_rel_f1(test_data_ori, pred_data)
print('rel_f1:',f1)
print('rel_recall:',recall)
print('rel_precise:',precise)
elif isinstance(test_data_ori[0]['choice'][0],dict):
f1, recall, precise = get_event_f1(test_data_ori, pred_data)
print('event_f1:',f1)
print('event_recall:',recall)
print('event_precise:',precise)
f1, recall, precise = get_entity_f1(test_data_ori, pred_data)
print('f1:',f1)
print('recall:',recall)
print('precise:',precise)
if __name__ == "__main__":
main()
| 96,133 | 46.995007 | 191 | py |
| Fengshenbang-LM | Fengshenbang-LM-main/fengshen/models/GAVAE/gans_model.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import numpy as np
class MyDataset(Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
self.len = self.x.size(0)
def __getitem__(self, index):
return self.x[index], self.y[index]
def __len__(self):
return self.len
class MyDataset_new(Dataset):
def __init__(self, x, y, s):
self.x = x
self.y = y
self.s = s
self.len = self.x.size(0)
def __getitem__(self, index):
return self.x[index], self.y[index], self.s[index]
def __len__(self):
return self.len
class CLS_Net(torch.nn.Module):
def __init__(self, cls_num, z_dim, cls_batch_size):
super(CLS_Net, self).__init__()
mini_dim = 256 #256
out_input_num = mini_dim
base_dim = 64 #256 #64
self.cls_batch_size = cls_batch_size
self.jie = 1
self.fc1 = nn.Linear(z_dim, mini_dim)
self.fc1.weight.data.normal_(0, 0.1)
self.fc2 = nn.Linear(out_input_num, base_dim)
self.fc2.weight.data.normal_(0, 0.1)
self.out = nn.Linear(base_dim, cls_num)
self.out.weight.data.normal_(0, 0.1)
def self_dis(self, a):
max_dim = self.cls_batch_size
jie = self.jie
all_tag = False
for j in range(a.shape[0]):
col_tag = False
for i in range(a.shape[0]):
tmp = F.pairwise_distance(a[j,:], a[i,:] , p = jie).view(-1,1)
if col_tag == False:
col_dis = tmp
col_tag = True
else:
col_dis = torch.cat((col_dis, tmp), dim = 0)
if all_tag == False:
all_dis = col_dis
all_tag = True
else:
all_dis = torch.cat((all_dis, col_dis), dim = 1)
'''
print(all_dis.shape)
if all_dis.shape[1] < max_dim:
all_dis = torch.cat((all_dis, all_dis[:,:(max_dim - all_dis.shape[1])]), dim = 1)
print(all_dis.shape)
'''
return all_dis
def forward(self, x):
x = self.fc1(x)
x1 = F.relu(x)
x2 = self.fc2(x1)
x2 = F.dropout(x2, p=0.1, training=self.training)  # apply dropout functionally so it is disabled in eval mode
x2 = F.relu(x2)
y = self.out(x2)
return y, x1
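# A minimal sketch (illustrative only) of CLS_Net.self_dis: it builds the full pairwise
# L1-distance matrix of a batch, i.e. roughly torch.cdist(a, a, p=1) up to the small eps that
# F.pairwise_distance adds to the difference.
def _demo_self_dis():
    net = CLS_Net(cls_num=2, z_dim=16, cls_batch_size=4)
    a = torch.randn(4, 16)
    return net.self_dis(a), torch.cdist(a, a, p=1)  # both (4, 4) and nearly identical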
class Gen_Net(torch.nn.Module):
def __init__(self,input_x2_dim, output_dim):
super(Gen_Net, self).__init__()
self.x2_input = nn.Linear(input_x2_dim , 60)
self.x2_input.weight.data.normal_(0, 0.1)
self.fc1 = nn.Linear(60, 128)
self.fc1.weight.data.normal_(0, 0.1)
self.fc2 = nn.Linear(128, 256)
self.fc2.weight.data.normal_(0, 0.1)
self.fc3 = nn.Linear(256, 128)
self.fc3.weight.data.normal_(0, 0.1)
self.out = nn.Linear(128, output_dim)
self.out.weight.data.normal_(0, 0.1)
def forward(self,x2):
x2 = self.x2_input(x2)
x = x2
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
x = F.relu(x)
y = self.out(x)
return y
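# Shape sketch (illustrative only): Gen_Net maps a batch of noise vectors of size input_x2_dim
# to fake latent codes of size output_dim, which gans_process later mixes with real encoder
# outputs as the "fake" class for CLS_Net.
def _demo_gen_net():
    gen = Gen_Net(input_x2_dim=10, output_dim=32)
    noise = torch.rand(4, 10)
    return gen(noise).shape  # torch.Size([4, 32])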
class gans_process():
def __init__(self, config):
#base pare
self.device = config.device
self.cls_num = config.cls_num
self.x2_dim = config.noise_dim
self.z_dim = config.z_dim
self.cls_lr = config.cls_lr
self.gen_lr = config.gen_lr
self.cls_epoches = config.cls_epoches
self.gen_epoches = config.gen_epoches
self.mse_weight = 1.0
self.cls_batch_size = config.cls_batch_size
self.gen_batch_size = config.gen_batch_size
self.eval_batch_size = config.cls_batch_size
self.gen_batch_size = self.cls_batch_size
#optimer and net
self.cls_net = CLS_Net(self.cls_num, self.z_dim, self.cls_batch_size).to(self.device)
self.cls_optimizer = torch.optim.SGD(self.cls_net.parameters(),
lr = self.cls_lr , weight_decay= 1e-5)
# gen net
self.gen_net = Gen_Net(self.x2_dim, self.z_dim).to(self.device)
self.gen_optimizer = torch.optim.SGD(self.gen_net.parameters(),
lr = self.gen_lr , weight_decay= 0.01)
#base loss
self.loss_func = torch.nn.CrossEntropyLoss()
self.loss_mse = torch.nn.MSELoss()
def freeze_cls(self):
for param in self.cls_net.parameters():
param.requires_grad = False
def unfreeze_cls(self):
for param in self.cls_net.parameters():
param.requires_grad = True
def freeze_gen(self):
for param in self.gen_net.parameters():
param.requires_grad = False
def unfreeze_gen(self):
for param in self.gen_net.parameters():
param.requires_grad = True
def labels2genx(self, sample_num):
x = torch.rand(sample_num, self.x2_dim)
return x.to(self.device)
def pad_batch(self, x):
if int(x.shape[0] % self.cls_batch_size) == 0:
return x
pad_len = self.cls_batch_size - ( x.shape[0] % self.cls_batch_size)
x = torch.cat((x, x[:pad_len]), dim = 0)
return x
def ready_cls(self, sent_output,perm=None):
sample_num = len(sent_output)
#---------------make fake z---------------
sent_output = sent_output.to(self.device)
sent_noise = torch.tensor(self.gen_test(sample_num)).to(self.device)
#--------------handle datas---------------
x = torch.cat((sent_output, sent_noise), dim = 0 )
if perm is None:
perm = torch.randperm(len(x))
x = x[perm]
#add y - only one label per time
multi_label_num = 1
multi_output_y = torch.tensor([0]*sample_num).unsqueeze(1)
multi_noise_y = torch.zeros([sent_noise.size(0),1], dtype = torch.int)
multi_noise_y = multi_noise_y + multi_label_num
y = torch.cat((multi_output_y, multi_noise_y), dim = 0).to(self.device)
y = y[perm]
# x_train = x [:self.train_len]
# y_train = y [:self.train_len]
# x_test = x [self.train_len:]
# y_test = y [self.train_len:]
return x,y,None,None,perm
def ready_fake(self, sent_output, inputs_labels, inputs_indexs, label2id, perm = None):
#---------------make fake z---------------
sent_output = sent_output.to(self.device)
sent_noise = torch.tensor(self.gen_test(inputs_labels, inputs_indexs)).to(self.device)
#--------------handle datas---------------
x = sent_noise
y = torch.tensor(inputs_labels).unsqueeze(1)
if perm is None:
perm = torch.randperm(len(x))
x = x[perm]
y = y[perm]
return x,y,perm
def ready_gen(self, sent_output):
#, inputs_labels, inputs_indexs
sent_num = len(sent_output)
sent_output = sent_output.to(self.device)
x2 = self.labels2genx(sent_num)
y = torch.tensor([0]*sent_num).unsqueeze(1).to(self.device)
return x2, y, sent_output
def cls_train(self, x, y, if_oneHot = True):
#init
self.cls_net.train()
self.gen_net.eval()
self.unfreeze_cls()
self.freeze_gen()
x = x.to(self.device)
y = y.to(self.device)
#if oneHot
if if_oneHot:
y = torch.zeros(y.size(0), self.cls_num).to(self.device).scatter_(1, y.long(), 1)
#make dataset
mydataset = MyDataset(x, y)
train_loader = DataLoader(dataset=mydataset,
batch_size=self.cls_batch_size, shuffle=True)
#training
for epoch in range(self.cls_epoches):
losses = []
accuracy = []
for step, (batch_x, batch_y) in enumerate(train_loader):
self.cls_optimizer.zero_grad()
out, _ = self.cls_net(batch_x)
loss = self.loss_func(out, batch_y)
                #One-sided label smoothing (index 0 = real, index 1 = fake);
                #note: applied after the loss has already been computed, so it currently has no effect
                batch_y = batch_y * torch.tensor([0.9, 1.0]).to(self.device)
loss.backward()
self.cls_optimizer.step()
#tqdm
_, predictions = out.max(1)
predictions = predictions.cpu().numpy().tolist()
_,real_y = batch_y.max(1)
real_y = real_y.cpu().numpy().tolist()
num_correct = np.sum([int(x==y) for x,y in zip(predictions, real_y)])
running_train_acc = float(num_correct) / float(batch_x.shape[0])
losses.append(loss)
accuracy.append(running_train_acc)
return self.cls_net
def cls_eval(self, x, y, if_oneHot = True):
#init
self.cls_net.eval()
x = x.to(self.device)
y = y.to(self.device)
#if oneHot
if if_oneHot:
y = torch.zeros(y.size(0), self.cls_num).to(self.device).scatter_(1, y.long(), 1)
#make dataset
mydataset = MyDataset(x, y)
train_loader = DataLoader(dataset=mydataset,
batch_size=self.eval_batch_size, shuffle=False)
losses = []
accuracy = []
#evaling
for step, (batch_x, batch_y) in enumerate(train_loader):
out,_ = self.cls_net(batch_x)
loss = self.loss_func(out, batch_y)
#tqdm
_, predictions = out.max(1)
predictions = predictions.cpu().numpy().tolist()
_,real_y = batch_y.max(1)
real_y = real_y.cpu().numpy().tolist()
num_correct = np.sum([int(x==y) for x,y in zip(predictions, real_y)])
running_train_acc = float(num_correct) / float(batch_x.shape[0])
accuracy.append(running_train_acc)
mean_acc = np.mean(accuracy)
return mean_acc
def cls_real_eval(self, x, y, if_oneHot = True):
#init
self.cls_net.eval()
x = x.to(self.device)
y = y.to(self.device)
#if oneHot
if if_oneHot:
y = torch.zeros(y.size(0), self.cls_num).to(self.device).scatter_(1, y.long(), 1)
#make dataset
mydataset = MyDataset(x, y)
train_loader = DataLoader(dataset=mydataset,
batch_size=self.eval_batch_size, shuffle=False)
rs = 0
alls = 0
#evaling
for step, (batch_x, batch_y) in enumerate(train_loader):
out, _ = self.cls_net(batch_x)
loss = self.loss_func(out, batch_y)
#tqdm
_, predictions = out.max(1)
predictions = predictions.cpu().numpy().tolist()
_,real_y = batch_y.max(1)
real_y = real_y.cpu().numpy().tolist()
right_num = np.sum([int( x==y and int(y) != int(self.cls_num-1) ) for x,y in zip(predictions, real_y)])
all_num = np.sum([int(int(y) != int(self.cls_num-1) ) for x,y in zip(predictions, real_y)])
rs = rs + right_num
alls = alls + all_num
return rs/alls
def cls_test(self, x, if_oneHot = True):
#init
self.cls_net.eval()
x = x.to(self.device)
y = torch.zeros([x.size(0),1], dtype = torch.float).to(self.device)
#if oneHot
if if_oneHot:
y = torch.zeros(y.size(0), self.cls_num).to(self.device).scatter_(1, y.long(), 1)
#make dataset
mydataset = MyDataset(x, y)
train_loader = DataLoader(dataset=mydataset,
batch_size=self.eval_batch_size, shuffle=False)
preds = []
#testing
for step, (batch_x, batch_y) in enumerate(train_loader):
out, _ = self.cls_net(batch_x)
loss = self.loss_func(out, batch_y)
#tqdm
_, predictions = out.max(1)
predictions = predictions.cpu().numpy().tolist()
preds.extend(predictions)
return preds
def gen_train(self, x2, y, s, times):
#init
self.cls_net.eval()
self.gen_net.train()
self.freeze_cls()
self.unfreeze_gen()
#y is gen + cls
y = torch.zeros(y.size(0), self.cls_num).to(self.device).scatter_(1, y.long(), 1)
#make dataset
mydataset = MyDataset_new(x2, y, s)
train_loader = DataLoader(dataset=mydataset,
batch_size=self.gen_batch_size, shuffle=True)
#training
for epoch in range(self.gen_epoches):
losses = []
accuracy = []
for step, (batch_x2, batch_y, batch_s) in enumerate(train_loader):
                # gradients are only zeroed every 6 steps, accumulating them to emulate a larger batch
                if step % 6 == 5:
self.gen_optimizer.zero_grad()
out = self.gen_net(batch_x2)
                #feature matching
out, hds = self.cls_net(out)
out2, hds2 = self.cls_net(batch_s.float())
loss = self.loss_mse(hds, hds2)
loss = loss * pow(0.9, times)
loss.backward()
self.gen_optimizer.step()
#tqdm
_, predictions = out.max(1)
predictions = predictions.cpu().numpy().tolist()
_, real_y = batch_y.max(1)
real_y = real_y.cpu().numpy().tolist()
num_correct = np.sum([int(x==y) for x,y in zip(predictions, real_y)])
running_train_acc = float(num_correct) / float(batch_x2.shape[0])
losses.append(loss)
accuracy.append(running_train_acc)
return self.gen_net
def gen_test(self, sample_num):
#init
self.gen_net.eval()
x2 = self.labels2genx(sample_num)
        #x2: sample_num x noise_dim
y = torch.zeros([sample_num,1], dtype = torch.float)
y = torch.zeros(sample_num, self.z_dim).scatter_(1, y.long(), 1)
y = y.to(self.device)
s = torch.ones((sample_num, self.z_dim)).to(self.device)
#make dataset
mydataset = MyDataset_new(x2, y, s)
train_loader = DataLoader(dataset=mydataset,
batch_size=self.eval_batch_size, shuffle=False)
preds = []
#testing
for step, (batch_x2, batch_y, batch_s) in enumerate(train_loader):
out = self.gen_net(batch_x2)
loss = self.loss_mse(out.double(), batch_s.double())
predictions = out.cpu().detach().numpy().tolist()
preds.extend(predictions)
return preds
if __name__ == '__main__':
pass
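# Illustrative sketch, not part of the original file: one adversarial round with
# gans_process, assuming `config` is a namespace carrying the fields read in
# __init__ (device, cls_num, noise_dim, z_dim, cls_lr, gen_lr, cls_epoches,
# gen_epoches, cls_batch_size, gen_batch_size) and `inputs_latents` is an
# [n, z_dim] tensor of latent codes. Mirrors the loop used by GAVAEModel.gan_training.
def _example_adversarial_rounds(config, inputs_latents, rounds=3):
    gan = gans_process(config)
    for gt in range(rounds):
        x_train, y_train, _, _, _ = gan.ready_cls(inputs_latents)  # real + fake latents, shuffled
        gan.cls_train(x_train, y_train)                            # update the discriminator
        x2_gen, y_gen, s_gen = gan.ready_gen(inputs_latents)       # noise, labels, real latents
        gan.gen_train(x2_gen, y_gen, s_gen, gt)                    # update the generator (feature matching)
    return gan.gen_test(len(inputs_latents))                       # sample fake latent codes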
| 14,922 | 29.769072 | 115 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/GAVAE/GAVAEModel.py
|
# -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : GAVAEModel.py
@Time : 2022/11/04 11:35
@Author : Liang Yuxin
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
import torch
from transformers.modeling_utils import PreTrainedModel
from transformers.configuration_utils import PretrainedConfig
from fengshen.models.DAVAE.DAVAEModel import DAVAEModel
from fengshen.models.GAVAE.gans_model import gans_process
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class GAVAEPretrainedModel(PreTrainedModel):
def _init_weights(self, module):
""" Initialize the weights """
pass # to bypass the not implement error
class GAVAEModel(GAVAEPretrainedModel):
config_class = PretrainedConfig
def __init__(self, config:PretrainedConfig) -> None:
super().__init__(config)
self.config =config
config.device = device
self.gan = gans_process(self.config)
self.vae_model = DAVAEModel(self.config)
def train_gan(self,encoder_tokenizer,decoder_tokenizer,input_texts):
self.vae_model.set_tokenizers(encoder_tokenizer,decoder_tokenizer)
n = len(input_texts)
inputs_latents = self.vae_model.latent_code_from_text_batch(input_texts)
well_trained_gan = False
while not well_trained_gan:
self.gan_training(inputs_latents)
latent = torch.tensor(self.gan.gen_test(n))
if not latent.isnan().any():
well_trained_gan = True
def generate(self,n):
latent_z = torch.tensor(self.gan.gen_test(n)).to(device)
text = self.vae_model.text_from_latent_code_batch(latent_z,prompt=None)
return text
def gan_training(self,inputs_latents):
for gt in range(self.config.gan_epoch):
x_train,y_train,x_test,y_test,perm = self.gan.ready_cls(inputs_latents)
# sent_output:latent_z inputs_labels:id of class label
self.gan.cls_train(x_train, y_train)
x2_gen, y_gen, s_gen = self.gan.ready_gen(inputs_latents)
# s_gen:sent_output
self.gan.gen_train(x2_gen, y_gen, s_gen, gt)
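# Illustrative sketch, not part of the original file: end-to-end use of GAVAEModel,
# assuming `config` also carries the GAN fields consumed by gans_process
# (gan_epoch, cls_num, noise_dim, z_dim, ...) and that the two tokenizers match
# the encoder/decoder expected by DAVAEModel.
def _example_gavae_usage(config, encoder_tokenizer, decoder_tokenizer, corpus):
    model = GAVAEModel(config)
    model.train_gan(encoder_tokenizer, decoder_tokenizer, corpus)  # fit the GAN on the VAE latent codes
    return model.generate(n=8)                                     # decode 8 sampled latents back to text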
| 2,706 | 38.808824 | 96 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/zen2/tokenization.py
|
# coding=utf-8
# This file is derived from the code at
# https://github.com/huggingface/transformers/blob/master/transformers/tokenization_bert.py
#
# Original copyright notice:
#
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import six
import unicodedata
from io import open
from transformers import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt",
'IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese/resolve/main/vocab.txt',
'IDEA-CCNL/Erlangshen-ZEN2-668M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN2-668M-Chinese/resolve/main/vocab.txt',
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
}
VOCAB_NAME = 'vocab.txt'
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
# elif isinstance(text, unicode):
# return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BertTokenizer.
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input
Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to;
Effective maximum length is always the minimum of this
value (if specified) and the underlying BERT model's
sequence length.
never_split: List of tokens which will never be split during tokenization.
Only has an effect when do_wordpiece_only=False
"""
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab.get(token, self.vocab.get("[UNK]")))
if len(ids) > self.max_len:
logger.warning(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
"you may want to check this behavior.")
kwargs['do_lower_case'] = False
elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is an uncased model but you have set "
"`do_lower_case` to False. We are setting `do_lower_case=True` for you "
"but you may want to check this behavior.")
kwargs['do_lower_case'] = True
else:
vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
logger.error(
"Couldn't reach server at '{}' to download vocabulary.".format(
vocab_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
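# Illustrative sketch, not part of the original file: a typical round trip with
# BertTokenizer, assuming `vocab_path` points to a vocab.txt in the usual BERT
# one-token-per-line format (e.g. one of the ZEN2 vocabularies listed above).
def _example_bert_tokenizer(vocab_path, text="ZEN模型"):
    tokenizer = BertTokenizer(vocab_path, do_lower_case=True)
    tokens = tokenizer.tokenize(text)              # basic split + wordpiece
    ids = tokenizer.convert_tokens_to_ids(tokens)  # vocab lookup, falls back to [UNK]
    return tokens, ids, tokenizer.convert_ids_to_tokens(ids)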
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in self.never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
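# Illustrative check, not part of the original file: the greedy
# longest-match-first behaviour documented in WordpieceTokenizer.tokenize,
# run against a toy vocabulary (an assumption, not the released vocab).
def _example_wordpiece():
    toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
    wp = WordpieceTokenizer(vocab=toy_vocab)
    return wp.tokenize("unaffable")  # -> ['un', '##aff', '##able']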
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
    # as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| 19,799 | 41.950108 | 179 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/zen2/modeling.py
|
# coding: utf-8
# Copyright 2019 Sinovation Ventures AI Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is partially derived from the code at
# https://github.com/huggingface/transformers/tree/master/transformers
#
# Original copyright notice:
#
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ZEN2 model classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import logging
import math
import os
import sys
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel
from transformers.utils import ModelOutput
from fengshen.models.zen2.configuration_zen2 import ZenConfig
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
'IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese/resolve/main/pytorch_model.bin',
'IDEA-CCNL/Erlangshen-ZEN2-668M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN2-668M-Chinese/resolve/main/pytorch_model.bin',
}
PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
'IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese/resolve/main/config.json',
'IDEA-CCNL/Erlangshen-ZEN2-668M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN2-668M-Chinese/resolve/main/config.json',
}
BERT_CONFIG_NAME = 'bert_config.json'
TF_WEIGHTS_NAME = 'model.ckpt'
@dataclass
class BertForPreTrainingOutput(ModelOutput):
"""
Output type of [`BertForPreTraining`].
Args:
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
seq_relationship_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class MaskedLMOutput(ModelOutput):
"""
Base class for masked language models outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Masked language modeling (MLM) loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
def prune_linear_layer(layer, index, dim=0):
""" Prune a linear layer (a model parameters) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
name_lists = re.split(r'_(\d+)', m_name)
else:
name_lists = [m_name]
if name_lists[0] == 'kernel' or name_lists[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif name_lists[0] == 'output_bias' or name_lists[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif name_lists[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif name_lists[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, name_lists[0])
except AttributeError:
print("Skipping {}".format("/".join(name)))
continue
if len(name_lists) >= 2:
num = int(name_lists[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
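# Illustrative check, not part of the original file: the erf-based gelu above is
# smooth, passes through zero, and approaches the identity for large positive x.
def _example_gelu_values():
    x = torch.tensor([-3.0, 0.0, 3.0])
    return gelu(x)  # ≈ tensor([-0.0040, 0.0000, 2.9960])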
try:
# from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
from torch.nn import LayerNorm as BertLayerNorm
except ImportError:
logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertWordEmbeddings(nn.Module):
"""Construct the embeddings from ngram, position and token_type embeddings.
"""
def __init__(self, config):
super(BertWordEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.word_size, config.hidden_size, padding_idx=0)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class RelativeSinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1568):
"""
        :param embedding_dim: dimension of the embedding at each position
        :param padding_idx: index reserved for padding
        :param init_size: initial number of positions to pre-compute (must be even)
"""
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
assert init_size % 2 == 0
weights = self.get_embedding(
init_size+1,
embedding_dim,
padding_idx,
)
self.register_buffer('weights', weights)
self.register_buffer('_float_tensor', torch.FloatTensor(1))
def get_embedding(self, num_embeddings, embedding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(-num_embeddings//2, num_embeddings//2, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
self.origin_shift = num_embeddings//2 + 1
return emb
def forward(self, input):
"""Input is expected to be of size [bsz x seqlen].
"""
bsz, _, _, seq_len = input.size()
max_pos = self.padding_idx + seq_len
if max_pos > self.origin_shift:
# recompute/expand embeddings if needed
weights = self.get_embedding(
max_pos*2,
self.embedding_dim,
self.padding_idx,
)
weights = weights.to(self._float_tensor)
del self.weights
self.origin_shift = weights.size(0)//2
self.register_buffer('weights', weights)
positions = torch.arange(-seq_len, seq_len).to(input.device).long() + self.origin_shift # 2*seq_len
embed = self.weights.index_select(0, positions.long()).detach()
return embed
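# Illustrative sketch, not part of the original file: the module takes the
# extended attention mask of shape [bsz, 1, 1, seq_len] and returns relative
# position embeddings for every shift in [-seq_len, seq_len), i.e. a
# [2*seq_len, embedding_dim] table shared across the batch.
def _example_relative_positions():
    pe = RelativeSinusoidalPositionalEmbedding(embedding_dim=64, padding_idx=0, init_size=1200)
    mask = torch.zeros(2, 1, 1, 10)  # [bsz, 1, 1, seq_len]
    return pe(mask).shape            # torch.Size([20, 64])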
class BertSelfAttention(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = output_attentions
self.keep_multihead_output = keep_multihead_output
self.multihead_output = None
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.softmax = nn.Softmax(dim=-1)
self.position_embedding = RelativeSinusoidalPositionalEmbedding(self.attention_head_size, 0, 1200)
self.r_r_bias = nn.Parameter(
nn.init.xavier_normal_(torch.zeros(self.num_attention_heads, self.attention_head_size)))
self.r_w_bias = nn.Parameter(
nn.init.xavier_normal_(torch.zeros(self.num_attention_heads, self.attention_head_size)))
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, head_mask=None):
position_embedding = self.position_embedding(attention_mask)
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
rw_head_q = query_layer + self.r_r_bias[:, None]
        AC = torch.einsum('bnqd,bnkd->bnqk', [rw_head_q.float(), key_layer.float()])  # b x n x l x d, where n is the head dim
        D_ = torch.einsum('nd,ld->nl', self.r_w_bias.float(), position_embedding.float())[None, :, None]  # head x 2max_len, each head's bias for every position
        B_ = torch.einsum('bnqd,ld->bnql', query_layer.float(), position_embedding.float())  # bsz x head x max_len x 2max_len, each query's offset against every shift
        BD = B_ + D_  # bsz x head x max_len x 2max_len, to be converted to bsz x head x max_len x max_len
BD = self._shift(BD)
attention_scores = AC + BD
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = self.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs.type_as(value_layer), value_layer)
if self.keep_multihead_output:
self.multihead_output = context_layer
self.multihead_output.retain_grad()
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
if self.output_attentions:
return attention_probs, context_layer
return context_layer
def _shift(self, BD):
"""
类似
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
转换为
0 1 2
-1 0 1
-2 -1 0
:param BD: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = BD.size()
zero_pad = BD.new_zeros(bsz, n_head, max_len, 1)
BD = torch.cat([BD, zero_pad], dim=-1).view(bsz, n_head, -1, max_len) # bsz x n_head x (2max_len+1) x max_len
BD = BD[:, :, :-1].view(bsz, n_head, max_len, -1) # bsz x n_head x 2max_len x max_len
BD = BD[:, :, :, max_len:]
return BD
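# Illustrative check, not part of the original file: _shift does not touch any
# attention state, so it can be exercised directly on a small tensor to see the
# relative-score realignment described in its docstring.
def _example_shift():
    BD = torch.arange(-3, 3, dtype=torch.float).repeat(1, 1, 3, 1)  # 1 x 1 x 3 x 6, rows of [-3..2]
    return BertSelfAttention._shift(None, BD)  # rows become [0,1,2], [-1,0,1], [-2,-1,0]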
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertAttention, self).__init__()
self.output_attentions = output_attentions
self.self = BertSelfAttention(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.output = BertSelfOutput(config)
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
for head in heads:
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
def forward(self, input_tensor, attention_mask, head_mask=None):
self_output = self.self(input_tensor, attention_mask, head_mask)
if self.output_attentions:
attentions, self_output = self_output
attention_output = self.output(self_output, input_tensor)
if self.output_attentions:
return attentions, attention_output
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
# if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(BertLayer, self).__init__()
self.output_attentions = output_attentions
self.attention = BertAttention(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None):
attention_output = self.attention(hidden_states, attention_mask, head_mask)
if self.output_attentions:
attentions, attention_output = attention_output
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
if self.output_attentions:
return attentions, layer_output
return layer_output
class ZenEncoder(nn.Module):
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(ZenEncoder, self).__init__()
self.output_attentions = output_attentions
layer = BertLayer(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
self.word_layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_word_layers)])
self.num_hidden_word_layers = config.num_hidden_word_layers
def forward(self, hidden_states, ngram_hidden_states, ngram_position_matrix, attention_mask,
ngram_attention_mask,
output_all_encoded_layers=True, head_mask=None):
        # Need to check what the attention masking is doing here
all_encoder_layers = []
all_attentions = []
num_hidden_ngram_layers = self.num_hidden_word_layers
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states, attention_mask, head_mask[i])
if i < num_hidden_ngram_layers:
ngram_hidden_states = self.word_layers[i](ngram_hidden_states, ngram_attention_mask, head_mask[i])
if self.output_attentions:
ngram_attentions, ngram_hidden_states = ngram_hidden_states
all_attentions.append(ngram_attentions)
if self.output_attentions:
attentions, hidden_states = hidden_states
all_attentions.append(attentions)
hidden_states += torch.bmm(ngram_position_matrix.float(), ngram_hidden_states.float())
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if self.output_attentions:
return all_attentions, all_encoder_layers
return all_encoder_layers
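# Illustrative sketch, not part of the original file: the n-gram fusion inside
# ZenEncoder is a batched matmul in which ngram_position_matrix
# [bsz, seq_len, ngram_len] scatters each n-gram hidden state back onto the
# character positions it covers before being added to the character stream.
def _example_ngram_fusion():
    bsz, seq_len, ngram_len, hidden = 2, 8, 3, 16
    pos = torch.zeros(bsz, seq_len, ngram_len)
    pos[:, 1, 0] = 1.0  # n-gram 0 covers character position 1
    ngram_hidden = torch.randn(bsz, ngram_len, hidden)
    return torch.bmm(pos, ngram_hidden).shape  # torch.Size([2, 8, 16])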
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
# if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class ZenOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(ZenOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class ZenOnlyNSPHead(nn.Module):
def __init__(self, config):
super(ZenOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class ZenPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(ZenPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class ZenPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
config_class = ZenConfig
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(
mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(
mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
class ZenModel(ZenPreTrainedModel):
"""ZEN model ("BERT-based Chinese (Z) text encoder Enhanced by N-gram representations").
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.
`input_ngram_ids`: input_ids of ngrams.
`ngram_token_type_ids`: token_type_ids of ngrams.
`ngram_attention_mask`: attention_mask of ngrams.
`ngram_position_matrix`: position matrix of ngrams.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
classifier pretrained on top of the hidden state associated to the first character of the
input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(ZenModel, self).__init__(config)
self.output_attentions = output_attentions
self.embeddings = BertEmbeddings(config)
self.word_embeddings = BertWordEmbeddings(config)
self.encoder = ZenEncoder(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.pooler = BertPooler(config)
self.init_weights()
def prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def get_multihead_outputs(self):
""" Gather all multi-head outputs.
Return: list (layers) of multihead module outputs with gradients
"""
return [layer.attention.self.multihead_output for layer in self.encoder.layer]
def forward(self, input_ids,
input_ngram_ids,
ngram_position_matrix,
token_type_ids=None,
ngram_token_type_ids=None,
attention_mask=None,
ngram_attention_mask=None,
output_all_encoded_layers=True,
head_mask=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
if ngram_attention_mask is None:
ngram_attention_mask = torch.ones_like(input_ngram_ids)
if ngram_token_type_ids is None:
ngram_token_type_ids = torch.zeros_like(input_ngram_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_ngram_attention_mask = ngram_attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
extended_ngram_attention_mask = extended_ngram_attention_mask.to(dtype=next(self.parameters()).dtype)
extended_ngram_attention_mask = (1.0 - extended_ngram_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(
-1) # We can specify head_mask for each layer
head_mask = head_mask.to(
                    dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids, token_type_ids)
ngram_embedding_output = self.word_embeddings(input_ngram_ids, ngram_token_type_ids)
encoded_layers = self.encoder(embedding_output,
ngram_embedding_output,
ngram_position_matrix,
extended_attention_mask,
extended_ngram_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
head_mask=head_mask)
if self.output_attentions:
all_attentions, encoded_layers = encoded_layers
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
if self.output_attentions:
return all_attentions, encoded_layers, pooled_output
return encoded_layers, pooled_output
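# Illustrative usage sketch (not part of the original ZEN release). It builds a tiny
# ZenModel from a ZenConfig and runs one forward pass with random inputs. The
# `vocab_size`/`word_size` attributes and all shapes below are assumptions made for
# demonstration only; real n-gram inputs come from ZenNgramDict (see ngram_utils.py).
def _demo_zen_model_forward():
    config = ZenConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2,
                       intermediate_size=256, num_hidden_word_layers=1,
                       vocab_size=21128, word_size=104089)
    model = ZenModel(config)
    model.eval()
    batch_size, seq_len, max_ngrams = 2, 16, 8
    input_ids = torch.randint(0, config.vocab_size, (batch_size, seq_len))
    input_ngram_ids = torch.randint(0, config.word_size, (batch_size, max_ngrams))
    # [batch, seq_len, max_ngrams] matrix that distributes n-gram states over token positions
    ngram_position_matrix = torch.zeros(batch_size, seq_len, max_ngrams)
    with torch.no_grad():
        sequence_output, pooled_output = model(input_ids, input_ngram_ids,
                                               ngram_position_matrix,
                                               output_all_encoded_layers=False)
    # sequence_output: [batch, seq_len, hidden], pooled_output: [batch, hidden]
    return sequence_output.shape, pooled_output.shape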
class ZenForPreTraining(ZenPreTrainedModel):
"""ZEN model with pre-training heads.
This module comprises the ZEN model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.
`input_ngram_ids`: input_ids of ngrams.
`ngram_token_type_ids`: token_type_ids of ngrams.
`ngram_attention_mask`: attention_mask of ngrams.
`ngram_position_matrix`: position matrix of ngrams.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(ZenForPreTraining, self).__init__(config)
self.output_attentions = output_attentions
self.bert = ZenModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.cls = ZenPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.init_weights()
def forward(self, input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids=None,
ngram_token_type_ids=None,
attention_mask=None,
ngram_attention_mask=None,
masked_lm_labels=None,
next_sentence_label=None, head_mask=None):
outputs = self.bert(input_ids,
input_ngram_ids,
ngram_position_matrix,
token_type_ids,
ngram_token_type_ids,
attention_mask,
ngram_attention_mask,
output_all_encoded_layers=False, head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, pooled_output = outputs
else:
sequence_output, pooled_output = outputs
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)  # labels set to -1 are ignored, as documented above
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return BertForPreTrainingOutput(loss=total_loss,prediction_logits=prediction_scores)
elif self.output_attentions:
return all_attentions, prediction_scores, seq_relationship_score
return prediction_scores, seq_relationship_score
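# Illustrative pre-training sketch (assumptions: the same tiny toy config and random
# inputs as the sketch above; a real data pipeline supplies masked tokens, NSP labels and
# n-gram features). It shows how the combined MLM + NSP loss is obtained.
def _demo_zen_pretraining_loss():
    config = ZenConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2,
                       intermediate_size=256, num_hidden_word_layers=1,
                       vocab_size=21128, word_size=104089)
    model = ZenForPreTraining(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    input_ngram_ids = torch.zeros(2, 8, dtype=torch.long)
    ngram_position_matrix = torch.zeros(2, 16, 8)
    masked_lm_labels = torch.full((2, 16), -1, dtype=torch.long)  # -1 == ignored position
    masked_lm_labels[:, 3] = 42                                   # pretend token 3 was masked
    next_sentence_label = torch.tensor([0, 1])
    output = model(input_ids, input_ngram_ids, ngram_position_matrix,
                   masked_lm_labels=masked_lm_labels,
                   next_sentence_label=next_sentence_label)
    # output.loss is the sum of the masked-LM loss and the next-sentence loss
    return output.loss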
class ZenForMaskedLM(ZenPreTrainedModel):
"""ZEN model with the masked language modeling head.
This module comprises the ZEN model followed by the masked language modeling head.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.
`input_ngram_ids`: input_ids of ngrams.
`ngram_token_type_ids`: token_type_ids of ngrams.
`ngram_attention_mask`: attention_mask of ngrams.
`ngram_position_matrix`: position matrix of ngrams.
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(ZenForMaskedLM, self).__init__(config)
self.output_attentions = output_attentions
self.bert = ZenModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.cls = ZenOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.init_weights()
def forward(self, input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids=None, attention_mask=None, ngram_attention_mask=None, masked_lm_labels=None, head_mask=None):
outputs = self.bert(input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids, None, attention_mask, ngram_attention_mask,
output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, _ = outputs
else:
sequence_output, _ = outputs
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return MaskedLMOutput(loss=masked_lm_loss,logits=prediction_scores)
elif self.output_attentions:
return all_attentions, prediction_scores
        return MaskedLMOutput(loss=None, logits=prediction_scores)
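# Illustrative masked-LM sketch (same assumptions as the sketches above: tiny toy config,
# random inputs, zeroed n-gram features). Positions labelled -1 are ignored by the loss.
def _demo_zen_masked_lm():
    config = ZenConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2,
                       intermediate_size=256, num_hidden_word_layers=1,
                       vocab_size=21128, word_size=104089)
    model = ZenForMaskedLM(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    input_ngram_ids = torch.zeros(2, 8, dtype=torch.long)
    ngram_position_matrix = torch.zeros(2, 16, 8)
    labels = torch.full((2, 16), -1, dtype=torch.long)
    labels[:, 5] = input_ids[:, 5]  # predict the (pretend) masked position
    out = model(input_ids, input_ngram_ids, ngram_position_matrix, masked_lm_labels=labels)
    # out.logits: [batch, seq_len, vocab_size]; out.loss is None when no labels are given
    return out.loss, out.logits.shape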
class ZenForNextSentencePrediction(ZenPreTrainedModel):
"""ZEN model with next sentence prediction head.
This module comprises the ZEN model followed by the next sentence classification head.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.
`input_ngram_ids`: input_ids of ngrams.
`ngram_token_type_ids`: token_type_ids of ngrams.
`ngram_attention_mask`: attention_mask of ngrams.
`ngram_position_matrix`: position matrix of ngrams.
Outputs:
if `next_sentence_label` is not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `next_sentence_label` is `None`:
Outputs the next sentence classification logits of shape [batch_size, 2].
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(ZenForNextSentencePrediction, self).__init__(config)
self.output_attentions = output_attentions
self.bert = ZenModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.cls = ZenOnlyNSPHead(config)
self.init_weights()
def forward(self, input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids=None, attention_mask=None, next_sentence_label=None, head_mask=None):
        outputs = self.bert(input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids,
                            attention_mask=attention_mask,
                            output_all_encoded_layers=False,
                            head_mask=head_mask)
if self.output_attentions:
all_attentions, _, pooled_output = outputs
else:
_, pooled_output = outputs
seq_relationship_score = self.cls(pooled_output)
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
return next_sentence_loss
elif self.output_attentions:
return all_attentions, seq_relationship_score
return seq_relationship_score
class ZenForSequenceClassification(ZenPreTrainedModel):
"""ZEN model for classification.
This module is composed of the ZEN model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary. Items in the batch should begin with the special "CLS" token. (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.
`input_ngram_ids`: input_ids of ngrams.
`ngram_token_type_ids`: token_type_ids of ngrams.
`ngram_attention_mask`: attention_mask of ngrams.
`ngram_position_matrix`: position matrix of ngrams.
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
"""
def __init__(self, config, num_labels=2, output_attentions=False, keep_multihead_output=False):
super(ZenForSequenceClassification, self).__init__(config)
self.output_attentions = output_attentions
self.num_labels = config.num_labels
self.bert = ZenModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.num_labels)
self.init_weights()
def forward(self, input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
outputs = self.bert(input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids,
attention_mask=attention_mask,
output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, _, pooled_output = outputs
else:
_, pooled_output = outputs
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss, logits
elif self.output_attentions:
return all_attentions, logits
return loss, logits
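# Illustrative sequence-classification sketch. `num_labels` is read from the config (see
# __init__ above), so it is passed as a config attribute here; everything else is a
# made-up toy setup with zeroed placeholder n-gram features.
def _demo_zen_sequence_classification():
    config = ZenConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2,
                       intermediate_size=256, num_hidden_word_layers=1,
                       vocab_size=21128, word_size=104089, num_labels=2)
    model = ZenForSequenceClassification(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    input_ngram_ids = torch.zeros(2, 8, dtype=torch.long)
    ngram_position_matrix = torch.zeros(2, 16, 8)
    labels = torch.tensor([0, 1])
    loss, logits = model(input_ids, input_ngram_ids, ngram_position_matrix, labels=labels)
    # logits: [batch, num_labels]
    return loss, logits.shape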
@dataclass
class TokenClassifierOutput:
"""
Base class for outputs of token classification models.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
class ZenForTokenClassification(ZenPreTrainedModel):
"""ZEN model for token-level classification.
This module is composed of the ZEN model with a linear layer on top of
the full hidden state of the last layer.
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, ..., num_labels].
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.
`input_ngram_ids`: input_ids of ngrams.
`ngram_token_type_ids`: token_type_ids of ngrams.
`ngram_attention_mask`: attention_mask of ngrams.
`ngram_position_matrix`: position matrix of ngrams.
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
"""
def __init__(self, config, num_labels=2, output_attentions=False, keep_multihead_output=False):
super(ZenForTokenClassification, self).__init__(config)
self.output_attentions = output_attentions
self.num_labels = config.num_labels
self.bert = ZenModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.num_labels)
self.init_weights()
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, valid_ids=None,
input_ngram_ids=None, ngram_position_matrix=None, head_mask=None, b_use_valid_filter=False):
outputs = self.bert(input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids,
attention_mask=attention_mask, output_all_encoded_layers=False, head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, _ = outputs
else:
sequence_output, _ = outputs
# if b_use_valid_filter:
# batch_size, max_len, feat_dim = sequence_output.shape
# valid_output = torch.zeros(batch_size, max_len, feat_dim, dtype=sequence_output.dtype,
# device=input_ids.device)
# for i in range(batch_size):
# temp = sequence_output[i][valid_ids[i] == 1]
# valid_output[i][:temp.size(0)] = temp
# else:
# valid_output = sequence_output
valid_output = sequence_output
sequence_output = self.dropout(valid_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=0)
# Only keep active parts of the loss
# attention_mask_label = None
# if attention_mask_label is not None:
if attention_mask is not None:
# active_loss = attention_mask_label.view(-1) == 1
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        return TokenClassifierOutput(loss, logits)
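# Illustrative token-classification (e.g. NER) sketch with the same toy assumptions as
# the sketches above. Label id 0 is avoided because the loss uses ignore_index=0.
def _demo_zen_token_classification():
    config = ZenConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2,
                       intermediate_size=256, num_hidden_word_layers=1,
                       vocab_size=21128, word_size=104089, num_labels=5)
    model = ZenForTokenClassification(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    attention_mask = torch.ones(2, 16, dtype=torch.long)
    input_ngram_ids = torch.zeros(2, 8, dtype=torch.long)
    ngram_position_matrix = torch.zeros(2, 16, 8)
    labels = torch.randint(1, config.num_labels, (2, 16))
    out = model(input_ids, token_type_ids=None, attention_mask=attention_mask, labels=labels,
                input_ngram_ids=input_ngram_ids, ngram_position_matrix=ngram_position_matrix)
    # out.logits: [batch, seq_len, num_labels]
    return out.loss, out.logits.shape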
class ZenForQuestionAnswering(ZenPreTrainedModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model
`output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
`keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
This can be used to compute head importance metrics. Default: False
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and position outside of the sequence are not taken
into account for computing the loss.
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and position outside of the sequence are not taken
into account for computing the loss.
`head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked.
Outputs:
if `start_positions` and `end_positions` are not `None`:
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
if `start_positions` or `end_positions` is `None`:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = ZenConfig(vocab_size=32000, hidden_size=768,
                       num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = ZenForQuestionAnswering(config)
    # input_ngram_ids and ngram_position_matrix are the n-gram features produced by
    # ZenNgramDict (see ngram_utils.py); they are required in addition to the BERT inputs
    start_logits, end_logits = model(input_ids, input_ngram_ids, ngram_position_matrix,
                                     token_type_ids, input_mask)
    ```
"""
def __init__(self, config, output_attentions=False, keep_multihead_output=False):
super(ZenForQuestionAnswering, self).__init__(config)
self.output_attentions = output_attentions
self.bert = ZenModel(config, output_attentions=output_attentions,
keep_multihead_output=keep_multihead_output)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.init_weights()
def forward(self, input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids=None, attention_mask=None, start_positions=None,
end_positions=None, head_mask=None):
outputs = self.bert(input_ids, input_ngram_ids, ngram_position_matrix, token_type_ids,
attention_mask=attention_mask,
output_all_encoded_layers=False,
head_mask=head_mask)
if self.output_attentions:
all_attentions, sequence_output, _ = outputs
else:
sequence_output, _ = outputs
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss
elif self.output_attentions:
return all_attentions, start_logits, end_logits
return start_logits, end_logits
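# Illustrative span-extraction sketch. When no start/end positions are supplied, the model
# returns start and end logits; taking an argmax over them gives a greedy predicted answer
# span. All inputs below are random placeholders with assumed toy shapes.
def _demo_zen_question_answering():
    config = ZenConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2,
                       intermediate_size=256, num_hidden_word_layers=1,
                       vocab_size=21128, word_size=104089)
    model = ZenForQuestionAnswering(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    input_ngram_ids = torch.zeros(2, 8, dtype=torch.long)
    ngram_position_matrix = torch.zeros(2, 16, 8)
    with torch.no_grad():
        start_logits, end_logits = model(input_ids, input_ngram_ids, ngram_position_matrix)
    start_pred = start_logits.argmax(dim=-1)   # [batch]
    end_pred = end_logits.argmax(dim=-1)       # [batch]
    return start_pred, end_pred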
| 72,687 | 51.444444 | 187 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/zen2/ngram_utils.py
|
# coding: utf-8
# Copyright 2019 Sinovation Ventures AI Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""utils for ngram for ZEN2 model."""
import os
import logging
import math
import numpy as np
import torch
from transformers import cached_path
NGRAM_DICT_NAME = 'ngram.txt'
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese/resolve/main/ngram.txt',
'IDEA-CCNL/Erlangshen-ZEN2-668M-Chinese': 'https://huggingface.co/IDEA-CCNL/Erlangshen-ZEN2-668M-Chinese/resolve/main/ngram.txt',
}
class ZenNgramDict(object):
"""
Dict class to store the ngram
"""
def __init__(self, ngram_freq_path, tokenizer=None, max_ngram_in_seq=128):
"""Constructs ZenNgramDict
:param ngram_freq_path: ngrams with frequency
"""
if os.path.isdir(ngram_freq_path):
ngram_freq_path = os.path.join(ngram_freq_path, NGRAM_DICT_NAME)
self.ngram_freq_path = ngram_freq_path
self.max_ngram_in_seq = max_ngram_in_seq
self.max_ngram_len = 8
self.id_to_ngram_list = ["[pad]"]
self.ngram_to_id_dict = {"[pad]": 0}
self.ngram_to_freq_dict = {}
logger.info("loading ngram frequency file {}".format(ngram_freq_path))
with open(ngram_freq_path, "r", encoding="utf-8") as fin:
for i, line in enumerate(fin):
items = line.strip().split(",")
if len(items) != 2:
continue
ngram, freq = items
# self.ngram_to_freq_dict[ngram] = int(freq)
if tokenizer:
tokens = tuple(tokenizer.tokenize(ngram))
if len([token for token in tokens if "[UNK]" in token]) > 0:
tokens = ngram
else:
tokens = tuple(ngram.split(" "))
self.id_to_ngram_list.append(tokens)
self.ngram_to_id_dict[tokens] = i + 1
self.ngram_to_freq_dict[tokens] = int(freq)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
ngram_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
"you may want to check this behavior.")
kwargs['do_lower_case'] = False
elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is an uncased model but you have set "
"`do_lower_case` to False. We are setting `do_lower_case=True` for you "
"but you may want to check this behavior.")
kwargs['do_lower_case'] = True
else:
ngram_file = pretrained_model_name_or_path
if os.path.isdir(ngram_file):
ngram_file = os.path.join(ngram_file, NGRAM_DICT_NAME)
# redirect to the cache, if necessary
try:
resolved_ngram_file = cached_path(ngram_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
logger.error(
"Couldn't reach server at '{}' to download vocabulary.".format(
ngram_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
ngram_file))
return None
if resolved_ngram_file == ngram_file:
logger.info("loading vocabulary file {}".format(ngram_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
ngram_file, resolved_ngram_file))
# Instantiate ngram.
ngram_dict = cls(resolved_ngram_file, **kwargs)
return ngram_dict
def save(self, ngram_freq_path):
ngram_freq_path = os.path.join(ngram_freq_path, NGRAM_DICT_NAME)
with open(ngram_freq_path, "w+", encoding="utf-8") as fout:
for ngram, freq in self.ngram_to_freq_dict.items():
fout.write("{},{}\n".format(" ".join(ngram), freq))
def extract_ngram_feature(tokens, ngram_dict, max_seq_len, seg_id_limit):
# ----------- code for ngram BEGIN-----------
ngram_matches = []
# Filter the word segment from 2 to max_ngram_len to check whether there is a word
max_gram_n = ngram_dict.max_ngram_len
for p in range(2, max_gram_n):
for q in range(0, len(tokens) - p + 1):
character_segment = tokens[q:q + p]
# j is the starting position of the word
# i is the length of the current word
character_segment = tuple(character_segment)
if character_segment in ngram_dict.ngram_to_id_dict:
ngram_index = ngram_dict.ngram_to_id_dict[character_segment]
ngram_freq = ngram_dict.ngram_to_freq_dict[character_segment]
ngram_matches.append([ngram_index, q, p, character_segment, ngram_freq])
# shuffle(ngram_matches)
ngram_matches = sorted(ngram_matches, key=lambda s: s[0])
# max_word_in_seq_proportion = max_word_in_seq
max_word_in_seq_proportion = math.ceil((len(tokens) / max_seq_len) * ngram_dict.max_ngram_in_seq)
if len(ngram_matches) > max_word_in_seq_proportion:
ngram_matches = ngram_matches[:max_word_in_seq_proportion]
ngram_ids = [ngram[0] for ngram in ngram_matches]
ngram_positions = [ngram[1] for ngram in ngram_matches]
ngram_lengths = [ngram[2] for ngram in ngram_matches]
ngram_tuples = [ngram[3] for ngram in ngram_matches]
ngram_freqs = [ngram[4] for ngram in ngram_matches]
ngram_seg_ids = [0 if position < seg_id_limit else 1 for position in
ngram_positions]
    ngram_mask_array = np.zeros(ngram_dict.max_ngram_in_seq, dtype=bool)
ngram_mask_array[:len(ngram_ids)] = 1
# Zero-pad up to the max word in seq length.
padding = [0] * (ngram_dict.max_ngram_in_seq - len(ngram_ids))
ngram_ids += padding
ngram_positions += padding
ngram_lengths += padding
ngram_seg_ids += padding
ngram_freqs += padding
# ----------- code for ngram END-----------
return {
"ngram_ids": ngram_ids,
"ngram_positions": ngram_positions,
"ngram_lengths": ngram_lengths,
"ngram_tuples": ngram_tuples,
"ngram_seg_ids": ngram_seg_ids,
"ngram_masks": ngram_mask_array,
"ngram_freqs": ngram_freqs,
}
def construct_ngram_matrix(ngram_data, max_seq_length):
max_ngram_in_sequence = len(ngram_data["ngram_ids"])
ngram_ids_num = len([x for x in ngram_data["ngram_masks"] if x == 1])
    ngram_positions_matrix = np.zeros(shape=(max_seq_length, max_ngram_in_sequence), dtype=float)
for i in range(ngram_ids_num):
ngram_positions_matrix[ngram_data["ngram_positions"][i]:
ngram_data["ngram_positions"][i] + ngram_data["ngram_lengths"][i], i] = \
ngram_data["ngram_freqs"][i]
    ngram_positions_matrix_t = torch.from_numpy(ngram_positions_matrix.astype(float))
ngram_positions_matrix_t = torch.div(ngram_positions_matrix_t,
torch.stack([torch.sum(ngram_positions_matrix_t, 1)] * ngram_positions_matrix_t.size(1)).t() + 1e-10)
return ngram_positions_matrix_t.numpy()
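# Illustrative end-to-end sketch for the n-gram pipeline (assumption: `ngram.txt` is a
# local frequency file with "ngram,frequency" lines, e.g. written by ZenNgramDict.save).
# It extracts n-gram features for a short token sequence and builds the frequency-weighted
# position matrix that ZenModel consumes as `ngram_position_matrix`.
def _demo_ngram_pipeline(ngram_freq_path="ngram.txt"):
    max_seq_len = 16
    ngram_dict = ZenNgramDict(ngram_freq_path, tokenizer=None, max_ngram_in_seq=32)
    tokens = ["[CLS]", "自", "然", "语", "言", "处", "理", "[SEP]"]
    features = extract_ngram_feature(tokens, ngram_dict, max_seq_len,
                                     seg_id_limit=len(tokens))
    # features["ngram_ids"] is zero-padded up to max_ngram_in_seq entries
    position_matrix = construct_ngram_matrix(features, max_seq_len)
    # position_matrix: [max_seq_len, max_ngram_in_seq], each row normalised over matched n-grams
    return features["ngram_ids"], position_matrix.shape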
| 8,874 | 44.984456 | 142 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/zen2/configuration_zen2.py
|
# coding=utf-8
# Copyright 2022 IDEA-CCNL and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TransfoXLDenoise model configuration """
from transformers.configuration_utils import PretrainedConfig
class ZenConfig(PretrainedConfig):
"""Configuration class to store the configuration of a `ZenModel`.
"""
def __init__(self,
# vocab_size_or_config_json_file,
# word_vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
num_hidden_word_layers=6,
**kwargs):
"""Constructs ZenConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The standard deviation of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
# self.vocab_size = vocab_size_or_config_json_file
# self.word_size = word_vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.num_hidden_word_layers = num_hidden_word_layers
super().__init__(**kwargs)
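# Minimal illustrative sketch: the vocabulary sizes are no longer positional arguments of
# this config (see the commented-out lines above), so in this sketch they are supplied via
# **kwargs, which PretrainedConfig stores as plain attributes. The concrete numbers are
# assumptions for demonstration only.
def _demo_zen_config():
    config = ZenConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                       intermediate_size=3072, num_hidden_word_layers=6,
                       vocab_size=21128, word_size=104089)
    assert config.hidden_size == 768 and config.num_hidden_word_layers == 6
    return config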
| 3,783 | 45.716049 | 91 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/zen2/__init__.py
|
from .configuration_zen2 import ZenConfig
from .modeling import ZenForPreTraining, ZenForTokenClassification, ZenForSequenceClassification, ZenForQuestionAnswering, ZenModel, ZenForMaskedLM
from .tokenization import BertTokenizer, BasicTokenizer, WordpieceTokenizer, _is_whitespace, whitespace_tokenize, convert_to_unicode, _is_punctuation, _is_control, VOCAB_NAME
from .ngram_utils import ZenNgramDict, NGRAM_DICT_NAME, extract_ngram_feature, construct_ngram_matrix
__all__ = [
'ZenConfig', 'ZenForPreTraining', 'ZenForTokenClassification', 'ZenForSequenceClassification',
'ZenForQuestionAnswering', 'ZenModel', 'ZenForMaskedLM', 'BertTokenizer', 'BasicTokenizer',
'WordpieceTokenizer', '_is_whitespace', 'whitespace_tokenize', 'convert_to_unicode',
'_is_punctuation', '_is_control', 'VOCAB_NAME', 'ZenNgramDict', 'NGRAM_DICT_NAME',
'extract_ngram_feature', 'construct_ngram_matrix',
]
version = "0.1.0"
| 925 | 70.230769 | 174 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/llama/configuration_llama.py
|
# coding=utf-8
# Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" llama model configuration"""
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
class LlamaConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate a
    LLaMA model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a configuration similar in scale to the 7B LLaMA model.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`LlamaModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        rotary_pct (`float`, *optional*, defaults to 1):
            Percentage of hidden dimensions to allocate to rotary embeddings.
        rotary_emb_base (`int`, *optional*, defaults to 10000):
            Base for computing rotary embeddings frequency.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_epsilon (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the RMS normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        use_parallel_residual (`bool`, *optional*, defaults to `True`):
            Whether to use a "parallel" formulation in each Transformer layer, which can provide a slight training
            speedup at large scales (e.g. 20B).
"""
model_type = "llama"
def __init__(
self,
vocab_size=32000,
hidden_size=4096,
num_hidden_layers=32,
num_attention_heads=32,
intermediate_size=11008,
hidden_act="silu",
rotary_pct=1,
rotary_emb_base=10000,
max_position_embeddings=2048,
initializer_range=0.02,
rms_norm_epsilon=1.0e-6,
use_cache=True,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
tie_word_embeddings=False,
use_parallel_residual=True,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.rotary_pct = rotary_pct
self.rotary_emb_base = rotary_emb_base
self.initializer_range = initializer_range
self.rms_norm_epsilon = rms_norm_epsilon
self.use_cache = use_cache
self.tie_word_embeddings = tie_word_embeddings
self.use_parallel_residual = use_parallel_residual
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
| 5,106 | 45.427273 | 118 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/llama/modeling_llama.py
|
# coding=utf-8
# Copyright 2022 EleutherAI The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch GPTNeoX model."""
from typing import Optional, Tuple, Union
import torch
from torch import nn
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from .configuration_llama import LlamaConfig
from ..megatron.layers.word_embeddings import Embedding
from ..megatron.layers.init_functions import get_init_methods
from ..megatron.layers.transformer import (
ParallelTransformerLayer,
ParallelLinear
)
from ..megatron.layers.norms import get_norm
from torch.nn import CrossEntropyLoss
def expand_attention_types(attention_config, num_layers):
"""
Expands an `attention_config` list in the following format:
[
[['attention_type_1', ..., `attention_type_n`], 12]
]
to a flattened list of length `num_layers`.
    :param attention_config: list of [[attention_types], repeat_count_or_"all"] blocks
    :param num_layers: total number of transformer layers to cover
    :return: a flat list of attention-type strings of length `num_layers`
"""
# if only strings are found in the config, we assume it's already expanded
if all([isinstance(i, str) for i in attention_config]):
return attention_config
newlist = []
for item in attention_config:
# instead of specifying a number - we can specify 'all' to extend this pattern across all layers
if item[1] == "all":
assert num_layers % len(item[0]) == 0, (
f"Number of layers ({num_layers}) is not divisible by the length "
f"of pattern: {item[0]}"
)
return item[0] * (num_layers // len(item[0]))
for _ in range(item[1]):
newlist.extend(item[0])
return newlist
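# Illustrative sketch of how `expand_attention_types` flattens a patterned attention
# configuration. The "global"/"local" names are made-up attention-type labels used purely
# to show the two supported formats ("all" vs. an explicit repeat count).
def _demo_expand_attention_types():
    # Repeat the two-element pattern across all layers.
    assert expand_attention_types([[["global", "local"], "all"]], num_layers=4) == \
        ["global", "local", "global", "local"]
    # Explicit per-block repeat counts.
    assert expand_attention_types([[["global"], 2], [["local"], 2]], num_layers=4) == \
        ["global", "global", "local", "local"]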
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "LlamaConfig"
def gpt2_attention_mask_func(attention_scores, ltor_mask):
attention_scores.masked_fill_(ltor_mask, torch.finfo(attention_scores.dtype).min)
return attention_scores
class LlamaPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LlamaConfig
base_model_prefix = "llama"
supports_gradient_checkpointing = True
_no_split_modules = ["LLamaLayer"]
def _init_weights(self, module):
"""Initialize the weights"""
pass
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, LlamaPreTrainedModel):
module.gradient_checkpointing = value
class LlamaModel(LlamaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.attention_config = expand_attention_types(
config.attention_config, config.num_hidden_layers)
self.config = config
self.init_method, self.output_layer_init_method = get_init_methods(config)
self.embed_in = Embedding(config,
config.hidden_size,
config.vocab_size,
config.max_position_embeddings,
config.hidden_dropout,
self.init_method,
num_tokentypes=0)
self.layers = nn.ModuleList([
ParallelTransformerLayer(
config,
attention_mask_func=gpt2_attention_mask_func,
init_method=self.init_method,
output_layer_init_method=self.output_layer_init_method,
layer_number=i,
rpe=None,
rotary=True) for i in range(config.num_hidden_layers)])
norm, eps = get_norm(config)
self.final_layer_norm = norm(config.hidden_size, eps=eps)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_in
def set_input_embeddings(self, value):
self.embed_in = value
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
r"""
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
presents = () if use_cache else None
batch_size, seq_length = input_ids.size()
if past_key_values is None:
past_length = 0
past_key_values = tuple([None] * self.config.num_hidden_layers)
else:
past_length = past_key_values[0][0].size(0)
if position_ids is None:
position_ids = torch.arange(
past_length, seq_length + past_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
else:
position_ids = position_ids.view(-1, seq_length).long()
# Attention mask.
if attention_mask is not None:
assert batch_size > 0, "batch_size has to be defined and > 0"
attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, None, None, :]
tril_mask = torch.tril(torch.ones((1, seq_length, seq_length),
device=attention_mask.device)).view(1, 1, seq_length, seq_length)
            # attention_mask is 1.0 for positions we want to attend to and 0.0 for masked
            # positions. Combining it with the lower-triangular causal mask and thresholding
            # turns it into a boolean mask where True marks positions that must NOT be
            # attended to, which is the convention expected by gpt2_attention_mask_func
            # (those positions are filled with the dtype's minimum value before the softmax).
            attention_mask = attention_mask * tril_mask
            attention_mask = attention_mask < 0.5
hidden_states = self.embed_in(input_ids, position_ids=position_ids)
hidden_states = hidden_states.transpose(0, 1).contiguous()
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, (layer, layer_past) in enumerate(zip(self.layers, past_key_values)):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = layer(
hidden_states,
attention_mask=attention_mask,
layer_past=layer_past,
use_cache=use_cache,
position_ids=position_ids,
)
if use_cache is True:
hidden_states = outputs[0]
presents = presents + (outputs[1],)
else:
hidden_states = outputs
if output_attentions:
all_attentions = all_attentions + (outputs[1],)
hidden_states = hidden_states.transpose(0, 1).contiguous()
hidden_states = self.final_layer_norm(hidden_states)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
past_key_values=presents,
attentions=all_attentions,
)
class LlamaForCausalLM(LlamaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.llama = LlamaModel(config)
self.init_method, self.output_layer_init_method = get_init_methods(config)
        # parallel_output controls whether the logits need to be gathered across
        # model-parallel ranks. During training it is usually True, because each rank can
        # compute its loss on its own shard. For inference it is set to False so that all
        # ranks see the same full logits; the default here is False.
self.embed_out = ParallelLinear(
config=self.config,
init_method=self.init_method,
parallel_output=False,)
# Initialize weights and apply final processing
self.post_init()
def train(self, mode):
        # Toggling parallel output here triggers a CUDA out-of-bounds bug that has not been
        # fixed yet, so the call below stays commented out.
# self.embed_out.final_linear.set_parallel_output(mode)
super().train(mode)
def eval(self):
self.embed_out.final_linear.set_parallel_output(False)
super().eval()
def get_output_embeddings(self):
return self.embed_out
def set_output_embeddings(self, new_embeddings):
self.embed_out = new_embeddings
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
labels: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are
only required when the model is used as a decoder in a Sequence to Sequence model.
            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if position_ids is None:
# Position ids.
batch_size, seq_length = input_ids.size()
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
outputs = self.llama(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
lm_logits = self.embed_out(hidden_states)[0]
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shift_logits = lm_logits[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithPast(
loss=lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **kwargs):
if past_key_values and past_key_values[0] is not None:
input_ids = input_ids[:, -1:]
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values:
position_ids = position_ids[:, -1].unsqueeze(-1)
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"position_ids": position_ids,
"past_key_values": past_key_values,
}
def _reorder_cache(self, past_key_values, beam_idx):
reordered_past = ()
for layer_past in past_key_values:
reordered_past += (
tuple(past_state.index_select(0, beam_idx)
for past_state in layer_past[:2]) + layer_past[2:],
)
return reordered_past
def resize_token_embeddings(self, new_num_tokens: Optional[int] = None):
old_vocab_size = self.config.vocab_size
self.config.vocab_size = new_num_tokens
new_embed_in = Embedding(self.config,
self.config.hidden_size,
self.config.vocab_size,
self.config.max_position_embeddings,
self.config.hidden_dropout,
self.init_method,
num_tokentypes=0)
new_embed_in.word_embeddings.weight.data[:old_vocab_size, :] = self.llama.embed_in.word_embeddings.weight.data[:old_vocab_size, :]
self.llama.embed_in = new_embed_in
new_embed_out = ParallelLinear(
config=self.config,
init_method=self.init_method,
parallel_output=False)
new_embed_out.final_linear.weight.data[:old_vocab_size, :] = self.embed_out.final_linear.weight.data[:old_vocab_size, :]
self.embed_out = new_embed_out
return
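def _demo_next_token_shift_loss():
    # Hedged sketch (added, not part of the original file): the next-token shift
    # used in LlamaForCausalLM.forward above, replayed on a tiny random batch.
    # The function name and sizes are illustrative assumptions.
    import torch
    from torch.nn import CrossEntropyLoss
    vocab_size = 11
    lm_logits = torch.randn(2, 5, vocab_size)
    labels = torch.randint(0, vocab_size, (2, 5))
    shift_logits = lm_logits[:, :-1, :].contiguous()
    shift_labels = labels[:, 1:].contiguous()
    loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
    return loss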
| 17,756 | 42.736453 | 198 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/deepVAE/latent_connector.py
|
# coding=utf-8
# Copyright 2022 IDEA-CCNL The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Della model. """
import torch
import logging
import torch.nn as nn
from dataclasses import dataclass
from torch.nn import CrossEntropyLoss
from typing import Optional, Tuple, Dict, Any
# from transformers.utils.generic import ModelOutput
from transformers.file_utils import ModelOutput
from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
from transformers.models.gpt2.modeling_gpt2 import GPT2PreTrainedModel, GPT2Block, GPT2Model
@dataclass
class DeepVAEDecoderOutput(ModelOutput):
logits: torch.FloatTensor = None
loss: Optional[torch.FloatTensor] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
logger = logging.getLogger(__name__)
class GPT2LatentDecoderModel(GPT2Model):
_keys_to_ignore_on_load_missing = ["attn.masked_bias"]
def __init__(self, config, latent_dim=32):
super().__init__(config)
self.embed_dim = config.hidden_size
self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
# Model parallel
self.model_parallel = False
self.device_map = None
self.gradient_checkpointing = False
# DeepVAE addition
self.linear_emb_layers = nn.ModuleList([nn.Linear(latent_dim, config.hidden_size, bias=False) for i in range(config.num_hidden_layers)])
# self.linear_emb = nn.Linear(latent_dim, config.hidden_size, bias=False) # share the same latent vector as the embeddings
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids=None,
layer_latent_vecs=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past_key_values is None:
past_length = 0
past_key_values = tuple([None] * len(self.h))
else:
past_length = past_key_values[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# GPT2Attention mask.
if attention_mask is not None:
if batch_size <= 0:
raise ValueError("batch_size has to be defined and > 0")
attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.add_cross_attention and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
all_hidden_states = () if output_hidden_states else None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
# NOTE: deepVAE modification. update hidden_states before passing into gpt2block!
# hidden_states are with shape (batch_size, sequence_length, hidden_size)
# layer_latent_vecs are with shape (batch_size, hidden_size)
latent_repr = self.linear_emb_layers[i](layer_latent_vecs[i])
# latent_repr = self.linear_emb_layers[-1](layer_latent_vecs[-1])
# latent_repr = self.linear_emb(layer_latent_vecs[i])
hidden_states += latent_repr.unsqueeze(dim=1)
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure layer_past is on same device as hidden_states (might not be correct)
if layer_past is not None:
layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if isinstance(head_mask, torch.Tensor):
head_mask = head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, use_cache, output_attentions)
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
None,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
)
else:
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask[i],
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
# Model Parallel: If it's the last layer for that device, put things on the next device
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and "cuda:" + str(k) != self.last_device:
hidden_states = hidden_states.to("cuda:" + str(k + 1))
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
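def _demo_layer_latent_injection():
    # Hedged sketch (added, not part of the original file): the per-layer latent
    # injection done inside GPT2LatentDecoderModel.forward, i.e. projecting a
    # (batch, latent_dim) vector up to hidden_size and adding it at every position.
    # Sizes and the standalone nn.Linear below are illustrative assumptions.
    import torch
    import torch.nn as nn
    batch, seq_len, hidden_size, latent_dim = 2, 6, 16, 4
    hidden_states = torch.randn(batch, seq_len, hidden_size)
    layer_latent = torch.randn(batch, latent_dim)
    linear_emb = nn.Linear(latent_dim, hidden_size, bias=False)
    hidden_states = hidden_states + linear_emb(layer_latent).unsqueeze(dim=1)
    return hidden_states.shape  # (batch, seq_len, hidden_size)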
class GPT2ForDecoderLatentConnector(GPT2PreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
Indices are selected in ``[-1, 0, ..., config.vocab_size]``
All labels set to ``-1`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Language modeling loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**past**:
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
that contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import torch
        from transformers import GPT2Tokenizer, GPT2LMHeadModel
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=input_ids)
loss, logits = outputs[:2]
"""
def __init__(self, config, latent_dim=32):
super(GPT2ForDecoderLatentConnector, self).__init__(config)
self.transformer = GPT2LatentDecoderModel(config, latent_dim=latent_dim)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.init_weights()
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.lm_head,
self.transformer.wte)
def forward(self, input_ids, layer_latent_vecs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
labels=None, label_ignore=None, loss_mask=None, return_dict=False,
output_attentions=None, output_hidden_states=None, use_cache=None):
transformer_outputs = self.transformer(input_ids,
layer_latent_vecs,
past_key_values=past,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
outputs = (lm_logits,) + transformer_outputs[1:]
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=label_ignore, reduction='none')
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
if loss_mask is not None:
loss = loss.view(-1, shift_labels.shape[-1]) * loss_mask[:, :-1]
loss = torch.sum(loss, -1)
else:
loss = torch.sum(loss.view(-1, shift_labels.shape[-1]), -1)
else:
loss = None
outputs = DeepVAEDecoderOutput(loss=loss, logits=lm_logits, hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions)
return outputs
def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> Dict[str, Any]:
"""
Implement in subclasses of [`PreTrainedModel`] for custom behavior to prepare inputs in the generate method.
"""
return {"input_ids": input_ids, "layer_latent_vecs": kwargs['layer_latent_vecs']}
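def _demo_masked_reconstruction_loss():
    # Hedged sketch (added, not part of the original file): how a per-token loss
    # computed with reduction='none' is combined with a loss_mask, mirroring the
    # labels branch of GPT2ForDecoderLatentConnector.forward above. All sizes and
    # the function name are illustrative assumptions.
    import torch
    from torch.nn import CrossEntropyLoss
    vocab_size, pad_id = 13, 0
    lm_logits = torch.randn(2, 5, vocab_size)
    labels = torch.randint(1, vocab_size, (2, 5))
    loss_mask = torch.tensor([[0., 0., 1., 1., 1.], [0., 1., 1., 1., 1.]])
    shift_logits = lm_logits[..., :-1, :].contiguous()
    shift_labels = labels[..., 1:].contiguous()
    loss = CrossEntropyLoss(ignore_index=pad_id, reduction='none')(
        shift_logits.view(-1, vocab_size), shift_labels.view(-1))
    loss = loss.view(-1, shift_labels.shape[-1]) * loss_mask[:, :-1]
    return torch.sum(loss, -1)  # one reconstruction loss per sequence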
class GPT2ForEncoderLatentConnector(GPT2PreTrainedModel):
def __init__(self, config):
super(GPT2ForEncoderLatentConnector, self).__init__(config)
self.transformer = GPT2Model(config)
self.init_weights()
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=True,
return_dict=None,
):
# output hidden states must set to true to allow for layer-wise latent vars
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return transformer_outputs
| 18,993 | 45.214112 | 144 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/deepVAE/utils.py
|
# coding=utf-8
# Copyright 2022 IDEA-CCNL The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Della model. """
import torch
import torch.nn.functional as F
from torch.distributions import Bernoulli
def enforce_repetition_penalty(lprobs, prev_output_tokens, repetition_penalty=1.5):
"""repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858). """
for i in range(len(prev_output_tokens)):
for previous_token in set(prev_output_tokens[i]):
            # if score < 0 then the repetition penalty has to be multiplied to reduce the previous token probability
if lprobs[i, previous_token] < 0:
lprobs[i, previous_token] *= repetition_penalty
else:
lprobs[i, previous_token] /= repetition_penalty
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
# assert logits.dim() == 1# batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, dim=-1, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
for i in range(sorted_indices.size()[0]):
indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
logits[i][indices_to_remove] = filter_value
# indices_to_remove = sorted_indices[sorted_indices_to_remove]
# logits[indices_to_remove] = filter_value
return logits
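def _demo_top_p_sampling():
    # Hedged usage sketch (added, not part of the original file): nucleus sampling
    # with the top_k_top_p_filtering helper defined above; the batch size, vocab
    # size and function name are illustrative assumptions.
    logits = torch.randn(2, 50)                       # (batch, vocab)
    filtered = top_k_top_p_filtering(logits.clone(), top_k=10, top_p=0.9)
    probs = F.softmax(filtered, dim=-1)
    next_token = torch.multinomial(probs, num_samples=1)
    return next_token.shape                           # (batch, 1)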
def word_drop(x, p, unk_token):
x_ = x.detach().clone()
    mask = Bernoulli(1. - p).sample(x.shape).to(x.device)  # keep the mask on x's device so the boolean indexing below also works for GPU tensors
x_[mask == 0] = unk_token
return x_
def log_sum_exp(value, dim=None, keepdim=False):
"""Numerically stable implementation of the operation
value.exp().sum(dim, keepdim).log()
"""
if dim is not None:
m, _ = torch.max(value, dim=dim, keepdim=True)
value0 = value - m
if keepdim is False:
m = m.squeeze(dim)
return m + torch.log(torch.sum(torch.exp(value0), dim=dim, keepdim=keepdim))
else:
m = torch.max(value)
sum_exp = torch.sum(torch.exp(value - m))
return m + torch.log(sum_exp)
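def _demo_log_sum_exp():
    # Hedged check (added, not part of the original file): log_sum_exp above should
    # agree with torch.logsumexp on ordinary inputs; the function name is an
    # illustrative assumption.
    value = torch.randn(3, 4)
    return torch.allclose(log_sum_exp(value, dim=-1), torch.logsumexp(value, dim=-1), atol=1e-6)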
def connect(mean, logvar, nsamples=1, sample=True, clip=False, min_clip_val=-1., beta_logvar=1.):
"""
    Returns: Tensor
        the sampled latent z with shape [batch, nsamples, nz],
        squeezed to [batch, nz] when nsamples == 1
"""
# (batch, nsamples, nz)
if sample:
if clip:
# NOTE: clip the logvar here to see if we can force z to be more distant
logvar = torch.clip(logvar, min=min_clip_val)
z = reparameterize(mean, logvar, nsamples, beta_logvar)
else:
batch_size, nz = mean.size()
z = mean.unsqueeze(1).expand(batch_size, nsamples, nz)
if nsamples == 1:
z = z.squeeze(dim=1)
return z
def reparameterize(mu, logvar, nsamples=1, beta_logvar=1.):
"""sample from posterior Gaussian family
Args:
mu: Tensor
Mean of gaussian distribution with shape (batch, nz)
logvar: Tensor
            logvar of gaussian distribution with shape (batch, nz)
Returns: Tensor
Sampled z with shape (batch, nsamples, nz)
"""
batch_size, nz = mu.size()
std = logvar.mul(0.5).exp().mul(beta_logvar)
mu_expd = mu.unsqueeze(1).expand(batch_size, nsamples, nz)
std_expd = std.unsqueeze(1).expand(batch_size, nsamples, nz)
eps = torch.zeros_like(std_expd).normal_()
return mu_expd + torch.mul(eps, std_expd)
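def _demo_reparameterize_shapes():
    # Hedged check (added, not part of the original file): reparameterize above
    # draws nsamples latent vectors per batch element, shaped (batch, nsamples, nz).
    # Sizes and the function name are illustrative assumptions.
    mu, logvar = torch.zeros(3, 5), torch.zeros(3, 5)
    z = reparameterize(mu, logvar, nsamples=4)
    return z.shape  # (3, 4, 5)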
def compute_kl_loss(mean1, logvar1, mean2, logvar2):
'''adapted from adaVAE implementation https://github.com/ImKeTT/adavae/blob/main/src/adapters/vae.py#L1627'''
exponential = logvar1 - logvar2 - torch.pow(mean1 - mean2, 2) / logvar2.exp() - torch.exp(logvar1 - logvar2) + 1
result = -0.5 * torch.sum(exponential, tuple(range(1, len(exponential.shape))))
return result
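def _demo_kl_loss_sanity():
    # Hedged sanity check (added, not part of the original file): the KL divergence
    # between two identical diagonal Gaussians computed by compute_kl_loss is zero.
    # The function name and tensor sizes are illustrative assumptions.
    mean, logvar = torch.randn(4, 8), torch.randn(4, 8)
    kl = compute_kl_loss(mean, logvar, mean, logvar)
    return torch.allclose(kl, torch.zeros_like(kl), atol=1e-5)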
| 5,630 | 40.711111 | 116 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/deepVAE/deep_vae.py
|
# coding=utf-8
# Copyright 2022 IDEA-CCNL The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Della model. """
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass
from typing import Optional, Tuple
from transformers.modeling_outputs import ModelOutput
from transformers.modeling_utils import PreTrainedModel
from fengshen.models.deepVAE.configuration_della import DellaModelConfig
from fengshen.models.deepVAE.latent_connector import GPT2ForDecoderLatentConnector, GPT2ForEncoderLatentConnector
from fengshen.models.deepVAE.utils import connect, compute_kl_loss, top_k_top_p_filtering, enforce_repetition_penalty
_CHECKPOINT_FOR_DOC = "della-226M-base"
_CONFIG_FOR_DOC = "DellaModelConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
Della_model_PRETRAINED_MODEL_ARCHIVE_LIST = [
"della-226M-base"
]
@dataclass
class DellaModelOutput(ModelOutput):
logits: torch.FloatTensor = None
posterior_latents: Optional[Tuple[torch.FloatTensor]] = None
prior_latent: Optional[Tuple[torch.FloatTensor]] = None
class latent_layer(nn.Module):
def __init__(self, input_dim) -> None:
super().__init__()
self.W_hh = nn.Linear(input_dim, input_dim, bias=False)
self.W_ih = nn.Linear(input_dim, input_dim, bias=False)
self.tanh = nn.Tanh()
def forward(self, z_lt_lm1, z_lm1):
# inputs are z_<l-1 and z_l-1
return self.tanh(self.W_hh(z_lt_lm1) + self.W_ih(z_lm1))
class AverageSelfAttention(nn.Module):
def __init__(self, hidden_dim):
super(AverageSelfAttention, self).__init__()
w = torch.empty(hidden_dim)
nn.init.normal_(w, std=0.02)
self.attention_weights = nn.Parameter(w)
self.softmax = nn.Softmax(dim=-1)
self.non_linearity = torch.tanh
def forward(self, inputs, attention_mask=None):
scores = self.non_linearity(inputs.matmul(self.attention_weights))
if attention_mask is not None:
scores = scores + attention_mask
scores = self.softmax(scores)
weighted = torch.mul(inputs, scores.unsqueeze(-1).expand_as(inputs))
representations = weighted.sum(1).squeeze(1)
return representations, scores
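def _demo_average_self_attention():
    # Hedged sketch (added, not part of the original file): pooling a
    # (batch, seq_len, hidden) tensor into a (batch, hidden) representation with
    # the AverageSelfAttention module defined above; sizes and the function name
    # are illustrative assumptions.
    pooling = AverageSelfAttention(hidden_dim=16)
    inputs = torch.randn(2, 7, 16)
    representations, scores = pooling(inputs)
    return representations.shape, scores.shape  # (2, 16) and (2, 7)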
class DeepVAE(nn.Module):
"""DeepVAE with recursive latent z extracted from every layer of encoder and applied on every layer of decoder """
def __init__(self, encoder, decoder, latent_dim, hidden_dim, layer_num, pad_token_id, bos_token_id, eos_token_id, CVAE):
super(DeepVAE, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.latent_dim = latent_dim
self.layer_num = layer_num
self.CVAE = CVAE
# the first layer of latent net depends on zero vectors and therefore can be ignored
self.latent_nets = nn.ModuleList([latent_layer(latent_dim) for _ in range(layer_num-1)])
post_input_dim = hidden_dim+latent_dim if not CVAE else 2*hidden_dim+latent_dim
prior_input_dim = latent_dim if not CVAE else hidden_dim+latent_dim
self.posterior_nets = nn.ModuleList([nn.Linear(post_input_dim, 2*latent_dim, bias=False) for _ in range(layer_num)])
self.prior_nets = nn.ModuleList([nn.Linear(prior_input_dim, 2*latent_dim, bias=False) for _ in range(layer_num)])
# pooling because we are not using hidden states of BOS token
self.pooling = nn.ModuleList([AverageSelfAttention(hidden_dim) for _ in range(layer_num)])
def get_decoder_loss(self, inputs, layer_latent_vecs, cond_inputs):
loss_mask = None
dec_inputs = inputs
if self.CVAE:
loss_mask = torch.concat((torch.zeros_like(cond_inputs), torch.ones_like(inputs)), dim=1)
dec_inputs = torch.concat((cond_inputs, inputs), dim=1)
rec_loss = self.decoder(input_ids=dec_inputs, layer_latent_vecs=layer_latent_vecs,
labels=dec_inputs, label_ignore=self.pad_token_id, loss_mask=loss_mask).loss
rec_loss = rec_loss / torch.sum(inputs != self.pad_token_id, dim=1) # ignore both the pad token id and the cond inputs
return rec_loss.mean()
def get_latent_vecs(self, layer_hidden_states, sample=True, beta_logvar=1., cond_inputs=None):
prior_z_list, posterior_z_list = [], []
prior_output_list, posterior_output_list = [], []
batch_size = layer_hidden_states[0].shape[0]
z = torch.zeros((batch_size, self.latent_dim), dtype=layer_hidden_states[0].dtype, device=layer_hidden_states[0].device)
for layer_idx in range(self.layer_num):
            # TODO: be more specific about the pooling range; ignoring the pad_token_ids could improve the repr of sent or cond inputs
if self.CVAE:
cond_length = cond_inputs.shape[-1]
cond_repr, _ = self.pooling[layer_idx](layer_hidden_states[layer_idx][:, :cond_length, :])
sent_repr, _ = self.pooling[layer_idx](layer_hidden_states[layer_idx][:, cond_length:, :])
prior_input = torch.cat([cond_repr, z], dim=1)
posterior_input = torch.cat([cond_repr, sent_repr, z], dim=1)
else:
sent_repr, _ = self.pooling[layer_idx](layer_hidden_states[layer_idx])
prior_input = z
posterior_input = torch.cat([sent_repr, z], dim=1)
prior_net_output = self.prior_nets[layer_idx](prior_input)
posterior_net_output = self.posterior_nets[layer_idx](posterior_input).squeeze(dim=1)
prior_z = connect(mean=prior_net_output[:, :self.latent_dim], logvar=prior_net_output[:, self.latent_dim:], sample=sample)
posterior_z = connect(mean=posterior_net_output[:, :self.latent_dim], logvar=posterior_net_output[:, self.latent_dim:],
sample=sample, beta_logvar=beta_logvar)
if layer_idx != self.layer_num - 1:
                z = self.latent_nets[layer_idx](z, posterior_z)  # we skip the last iteration
# save the outputs for decoder and kl loss calculations
prior_z_list.append(prior_z)
posterior_z_list.append(posterior_z)
prior_output_list.append(prior_net_output)
posterior_output_list.append(posterior_net_output)
return prior_z_list, posterior_z_list, prior_output_list, posterior_output_list
def get_kl_loss(self, prior_output_list, posterior_output_list, beta_kl_constraints):
total_kl_loss = None
layer_kl_loss = []
for prior_output, posterior_output in zip(prior_output_list, posterior_output_list):
kl_loss = compute_kl_loss(posterior_output[:, :self.latent_dim], posterior_output[:, self.latent_dim:],
prior_output[:, :self.latent_dim], prior_output[:, self.latent_dim:])
            # in case of overflow or NaN values we could clip the loss here
# kl_loss = torch.clip(kl_loss, max=1e4)
total_kl_loss = kl_loss if total_kl_loss is None else total_kl_loss+kl_loss
layer_kl_loss.append(kl_loss)
return total_kl_loss.mean() * beta_kl_constraints, layer_kl_loss
def forward(self, inputs, beta_kl_constraints, cond_inputs=None):
# handle cond_inputs differently
enc_inputs = torch.concat((cond_inputs, inputs), dim=1) if self.CVAE else inputs
encoder_outputs = self.encoder(input_ids=enc_inputs)
# hidden_states are tuples with length layer_num+1 and each tensor has shape (batch_size, sequence_length, hidden_size), embedding layer is ignored
prior_z_list, posterior_z_list, prior_output_list, posterior_output_list = self.get_latent_vecs(
encoder_outputs.hidden_states[1:], cond_inputs=cond_inputs)
total_kl_loss, layer_kl_loss = self.get_kl_loss(prior_output_list, posterior_output_list, beta_kl_constraints)
# pass the posterior to decoder for layer-wise low rank tensor product
rec_loss = self.get_decoder_loss(inputs, posterior_z_list, cond_inputs)
return total_kl_loss+rec_loss, rec_loss, total_kl_loss, layer_kl_loss
def get_cond_prior_vecs(self, layer_hidden_states, cond_inputs, sample=True, beta_logvar=1.):
prior_z_list, prior_output_list = [], []
batch_size = layer_hidden_states[0].shape[0]
z = torch.zeros((batch_size, self.latent_dim), dtype=layer_hidden_states[0].dtype, device=layer_hidden_states[0].device)
for layer_idx in range(self.layer_num):
            # TODO: be more specific about the pooling range; ignoring the pad_token_ids could improve the repr of sent or cond inputs
cond_length = cond_inputs.shape[-1]
cond_repr, _ = self.pooling[layer_idx](layer_hidden_states[layer_idx][:, :cond_length, :])
prior_input = torch.cat([cond_repr, z], dim=1)
prior_net_output = self.prior_nets[layer_idx](prior_input)
prior_z = connect(mean=prior_net_output[:, :self.latent_dim], logvar=prior_net_output[:, self.latent_dim:],
sample=sample, beta_logvar=beta_logvar)
if layer_idx != self.layer_num - 1:
                z = self.latent_nets[layer_idx](z, prior_z)  # we skip the last iteration
# save the outputs for decoder and kl loss calculations
prior_z_list.append(prior_z)
prior_output_list.append(prior_net_output)
return prior_z_list, prior_output_list
def inference(self, inputs, top_p, max_length, top_k=0., temperature=1., repetition_penalty=1., sample=False, beta_logvar=1.):
# NOTE: if we want to use BOS hidden states for x repr then we need to change the causal mask in attention block.
encoder_outputs = self.encoder(input_ids=inputs)
# hidden_states are tuples with length layer_num+1 and each tensor has shape (batch_size, sequence_length, hidden_size), embedding layer is ignored
if self.CVAE:
prior_z_list, prior_output_list = self.get_cond_prior_vecs(encoder_outputs.hidden_states[1:], inputs, sample=sample, beta_logvar=beta_logvar)
latent_vecs = prior_z_list
generated = inputs
else:
prior_z_list, posterior_z_list, prior_output_list, posterior_output_list = self.get_latent_vecs(encoder_outputs.hidden_states[1:], sample=sample, beta_logvar=beta_logvar)
latent_vecs = posterior_z_list
generated = [[self.bos_token_id] for _ in range(inputs.shape[0])]
generated = torch.tensor(generated, dtype=torch.long, device=inputs.device)
# start generation
with torch.no_grad():
for _ in range(max_length):
outputs = self.decoder(input_ids=generated, layer_latent_vecs=latent_vecs, labels=None,
label_ignore=self.pad_token_id)
next_token_logits = outputs.logits[:, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_p=top_p, top_k=top_k)
log_probs = F.softmax(filtered_logits, dim=-1)
if repetition_penalty != 1.0:
enforce_repetition_penalty(log_probs, generated, repetition_penalty)
next_token = torch.multinomial(log_probs, num_samples=1)
generated = torch.cat((generated, next_token), dim=1)
if all(next_token[idx, 0].item() == self.eos_token_id for idx in range(next_token.shape[0])):
break # if all samples predict eos in the batch.
return generated
class DellaPretrainedModel(PreTrainedModel):
def _init_weights(self, module):
""" Initialize the weights """
        pass  # to bypass the NotImplementedError raised by the base class
class Della(DellaPretrainedModel):
    '''This class is only implemented to fit the huggingface interface; use vae_pl_module to initialize the VAE for training.'''
config_class = DellaModelConfig
base_model_prefix = "della"
supports_gradient_checkpointing = True
def __init__(self, config: DellaModelConfig):
super().__init__(config)
self.config = config
encoder_model = GPT2ForEncoderLatentConnector(config=self.config)
decoder_model = GPT2ForDecoderLatentConnector(config=self.config, latent_dim=self.config.latent_dim)
vae_model = DeepVAE(encoder_model, decoder_model, latent_dim=self.config.latent_dim,
hidden_dim=self.config.hidden_size, layer_num=self.config.num_hidden_layers,
pad_token_id=self.config.pad_token_id, bos_token_id=self.config.bos_token_id,
eos_token_id=self.config.eos_token_id, CVAE=self.config.CVAE)
self.model = vae_model
def forward(self, inputs, cond_inputs=None, sample_latent=True):
# handle cond_inputs differently
enc_inputs = torch.concat((cond_inputs, inputs), dim=1) if self.model.CVAE else inputs
encoder_outputs = self.model.encoder(input_ids=enc_inputs)
# hidden_states are tuples with length layer_num+1 and each tensor has shape (batch_size, sequence_length, hidden_size), embedding layer is ignored
prior_z_list, posterior_z_list, prior_output_list, posterior_output_list = self.model.get_latent_vecs(
encoder_outputs.hidden_states[1:], cond_inputs=cond_inputs, sample=sample_latent)
loss_mask, dec_inputs = None, inputs
if self.model.CVAE:
loss_mask = torch.concat((torch.zeros_like(cond_inputs), torch.ones_like(inputs)), dim=1)
dec_inputs = torch.concat((cond_inputs, inputs), dim=1)
logits = self.model.decoder(input_ids=dec_inputs, layer_latent_vecs=posterior_z_list,
labels=dec_inputs, label_ignore=self.model.pad_token_id, loss_mask=loss_mask).logits
return DellaModelOutput(
logits=logits,
posterior_latents=posterior_z_list,
prior_latent=prior_z_list
)
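def _demo_della_tiny_forward():
    # Hedged sketch (added, not part of the original file): builds a deliberately
    # tiny Della model from a reduced DellaModelConfig and runs a single forward
    # pass on random token ids. All sizes, token ids and the function name are
    # illustrative placeholders and do not correspond to any released checkpoint.
    config = DellaModelConfig(vocab_size=100, n_positions=64, n_embd=32, n_layer=2,
                              n_head=4, latent_dim=8, bos_token_id=1, eos_token_id=2,
                              pad_token_id=0, CVAE=False)
    model = Della(config).eval()
    inputs = torch.randint(3, config.vocab_size, (2, 10))
    with torch.no_grad():
        outputs = model(inputs, sample_latent=False)
    return outputs.logits.shape  # (batch, seq_len, vocab_size)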
| 14,648 | 55.559846 | 182 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/deepVAE/configuration_della.py
|
# coding=utf-8
# Copyright 2022 IDEA-CCNL and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Della model configuration """
from transformers.configuration_utils import PretrainedConfig
Della_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Della-226M-base": "https://huggingface.co/IDEA-CCNL/Randeng-DELLA-226M-Chinese/resolve/main/config.json"
}
class DellaModelConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`~DellaModel`].
    It is used to instantiate a DellaModel model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the DellaModel [Randeng-DELLA-226M-Chinese](https://huggingface.co/IDEA-CCNL/Randeng-DELLA-226M-Chinese) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used
to control the model outputs. Read the documentation from [`PretrainedConfig`]
for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the Della model. Defines the number of different
tokens that can be represented by the
`inputs_ids` passed when calling [`~DellaModel`] or
[`~TFDellaModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler.
If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with.
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`~DellaModel`] or
[`~TFDellaModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
"""
model_type = "DellaModel"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(
self,
vocab_size=50257,
n_positions=1024,
n_embd=768,
n_layer=12,
n_head=12,
n_inner=None,
activation_function="gelu_new",
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
scale_attn_weights=True,
use_cache=True,
scale_attn_by_inverse_layer_idx=False,
reorder_and_upcast_attn=False,
bos_token_id=21128,
eos_token_id=21129,
pad_token_id=0,
CVAE=False,
latent_dim=256,
**kwargs,
):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
self.reorder_and_upcast_attn = reorder_and_upcast_attn
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.CVAE = CVAE
self.latent_dim = latent_dim
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
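def _demo_config_attribute_map():
    # Hedged sketch (added, not part of the original file): the attribute_map above
    # exposes the GPT2-style fields under the generic names used elsewhere in the
    # code (hidden_size, num_hidden_layers, num_attention_heads). The function name
    # and the concrete sizes are illustrative assumptions.
    config = DellaModelConfig(n_embd=256, n_layer=6, n_head=8, latent_dim=64)
    assert config.hidden_size == 256 and config.num_hidden_layers == 6
    assert config.num_attention_heads == 8 and config.latent_dim == 64
    return config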
| 5,872 | 43.832061 | 122 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/deepVAE/__init__.py
|
# coding=utf-8
# Copyright 2022 IDEA-CCNL The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Della model. """
| 665 | 40.625 | 74 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/deberta_v2/modeling_deberta_v2.py
|
# coding=utf-8
# Copyright 2020 Microsoft and the Hugging Face Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch DeBERTa-v2 model."""
import math
from collections.abc import Sequence
from typing import Optional, Tuple, Union
import numpy as np
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutput,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import softmax_backward_data
from transformers.utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from transformers import DebertaV2Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "DebertaV2Config"
_TOKENIZER_FOR_DOC = "DebertaV2Tokenizer"
_CHECKPOINT_FOR_DOC = "microsoft/deberta-v2-xlarge"
DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/deberta-v2-xlarge",
"microsoft/deberta-v2-xxlarge",
"microsoft/deberta-v2-xlarge-mnli",
"microsoft/deberta-v2-xxlarge-mnli",
]
# Copied from transformers.models.deberta.modeling_deberta.ContextPooler
class ContextPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
self.dropout = StableDropout(config.pooler_dropout)
self.config = config
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
context_token = hidden_states[:, 0]
context_token = self.dropout(context_token)
pooled_output = self.dense(context_token)
pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
return pooled_output
@property
def output_dim(self):
return self.config.hidden_size
# Copied from transformers.models.deberta.modeling_deberta.XSoftmax with deberta->deberta_v2
class XSoftmax(torch.autograd.Function):
"""
Masked Softmax which is optimized for saving memory
Args:
input (`torch.tensor`): The input tensor that will apply softmax.
mask (`torch.IntTensor`):
The mask matrix where 0 indicate that element will be ignored in the softmax calculation.
dim (int): The dimension that will apply softmax
Example:
```python
>>> import torch
>>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax
>>> # Make a tensor
>>> x = torch.randn([4, 20, 100])
>>> # Create a mask
>>> mask = (x > 0).int()
>>> # Specify the dimension to apply softmax
>>> dim = -1
>>> y = XSoftmax.apply(x, mask, dim)
```"""
@staticmethod
def forward(self, input, mask, dim):
self.dim = dim
rmask = ~(mask.to(torch.bool))
output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))
output = torch.softmax(output, self.dim)
output.masked_fill_(rmask, 0)
self.save_for_backward(output)
return output
@staticmethod
def backward(self, grad_output):
(output,) = self.saved_tensors
inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output)
return inputGrad, None, None
@staticmethod
def symbolic(g, self, mask, dim):
import torch.onnx.symbolic_helper as sym_help
from torch.onnx.symbolic_opset9 import masked_fill, softmax
mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"])
r_mask = g.op(
"Cast",
g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value),
to_i=sym_help.cast_pytorch_to_onnx["Byte"],
)
output = masked_fill(g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.dtype).min)))
output = softmax(g, output, dim)
return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.uint8)))
# Copied from transformers.models.deberta.modeling_deberta.DropoutContext
class DropoutContext(object):
def __init__(self):
self.dropout = 0
self.mask = None
self.scale = 1
self.reuse_mask = True
# Copied from transformers.models.deberta.modeling_deberta.get_mask
def get_mask(input, local_context):
if not isinstance(local_context, DropoutContext):
dropout = local_context
mask = None
else:
dropout = local_context.dropout
dropout *= local_context.scale
mask = local_context.mask if local_context.reuse_mask else None
if dropout > 0 and mask is None:
mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool)
if isinstance(local_context, DropoutContext):
if local_context.mask is None:
local_context.mask = mask
return mask, dropout
# Copied from transformers.models.deberta.modeling_deberta.XDropout
class XDropout(torch.autograd.Function):
"""Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""
@staticmethod
def forward(ctx, input, local_ctx):
mask, dropout = get_mask(input, local_ctx)
ctx.scale = 1.0 / (1 - dropout)
if dropout > 0:
ctx.save_for_backward(mask)
return input.masked_fill(mask, 0) * ctx.scale
else:
return input
@staticmethod
def backward(ctx, grad_output):
if ctx.scale > 1:
(mask,) = ctx.saved_tensors
return grad_output.masked_fill(mask, 0) * ctx.scale, None
else:
return grad_output, None
# Copied from transformers.models.deberta.modeling_deberta.StableDropout
class StableDropout(nn.Module):
"""
Optimized dropout module for stabilizing the training
Args:
drop_prob (float): the dropout probabilities
"""
def __init__(self, drop_prob):
super().__init__()
self.drop_prob = drop_prob
self.count = 0
self.context_stack = None
def forward(self, x):
"""
Call the module
Args:
x (`torch.tensor`): The input tensor to apply dropout
"""
if self.training and self.drop_prob > 0:
return XDropout.apply(x, self.get_context())
return x
def clear_context(self):
self.count = 0
self.context_stack = None
def init_context(self, reuse_mask=True, scale=1):
if self.context_stack is None:
self.context_stack = []
self.count = 0
for c in self.context_stack:
c.reuse_mask = reuse_mask
c.scale = scale
def get_context(self):
if self.context_stack is not None:
if self.count >= len(self.context_stack):
self.context_stack.append(DropoutContext())
ctx = self.context_stack[self.count]
ctx.dropout = self.drop_prob
self.count += 1
return ctx
else:
return self.drop_prob
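def _demo_stable_dropout_eval_identity():
    # Hedged check (added, not part of the original file): StableDropout above only
    # drops activations in training mode; in eval mode it is the identity function.
    # The function name and tensor sizes are illustrative assumptions.
    drop = StableDropout(0.5)
    drop.eval()
    x = torch.randn(2, 3)
    return torch.equal(drop(x), x)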
# Copied from transformers.models.deberta.modeling_deberta.DebertaSelfOutput with DebertaLayerNorm->LayerNorm
class DebertaV2SelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.deberta.modeling_deberta.DebertaAttention with Deberta->DebertaV2
class DebertaV2Attention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = DisentangledSelfAttention(config)
self.output = DebertaV2SelfOutput(config)
self.config = config
def forward(
self,
hidden_states,
attention_mask,
output_attentions=False,
query_states=None,
relative_pos=None,
rel_embeddings=None,
):
self_output = self.self(
hidden_states,
attention_mask,
output_attentions,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
)
if output_attentions:
self_output, att_matrix = self_output
if query_states is None:
query_states = hidden_states
attention_output = self.output(self_output, query_states)
if output_attentions:
return (attention_output, att_matrix)
else:
return attention_output
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->DebertaV2
class DebertaV2Intermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm
class DebertaV2Output(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
self.config = config
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.deberta.modeling_deberta.DebertaLayer with Deberta->DebertaV2
class DebertaV2Layer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = DebertaV2Attention(config)
self.intermediate = DebertaV2Intermediate(config)
self.output = DebertaV2Output(config)
def forward(
self,
hidden_states,
attention_mask,
query_states=None,
relative_pos=None,
rel_embeddings=None,
output_attentions=False,
):
attention_output = self.attention(
hidden_states,
attention_mask,
output_attentions=output_attentions,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
)
if output_attentions:
attention_output, att_matrix = attention_output
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
if output_attentions:
return (layer_output, att_matrix)
else:
return layer_output
class ConvLayer(nn.Module):
def __init__(self, config):
super().__init__()
kernel_size = getattr(config, "conv_kernel_size", 3)
groups = getattr(config, "conv_groups", 1)
self.conv_act = getattr(config, "conv_act", "tanh")
self.conv = nn.Conv1d(
config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups
)
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
self.config = config
def forward(self, hidden_states, residual_states, input_mask):
out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()
rmask = (1 - input_mask).bool()
out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0)
out = ACT2FN[self.conv_act](self.dropout(out))
layer_norm_input = residual_states + out
output = self.LayerNorm(layer_norm_input).to(layer_norm_input)
if input_mask is None:
output_states = output
else:
if input_mask.dim() != layer_norm_input.dim():
if input_mask.dim() == 4:
input_mask = input_mask.squeeze(1).squeeze(1)
input_mask = input_mask.unsqueeze(2)
input_mask = input_mask.to(output.dtype)
output_states = output * input_mask
return output_states
class DebertaV2Encoder(nn.Module):
"""Modified BertEncoder with relative position bias support"""
def __init__(self, config):
super().__init__()
self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)])
self.relative_attention = getattr(config, "relative_attention", False)
if self.relative_attention:
self.max_relative_positions = getattr(config, "max_relative_positions", -1)
if self.max_relative_positions < 1:
self.max_relative_positions = config.max_position_embeddings
self.position_buckets = getattr(config, "position_buckets", -1)
pos_ebd_size = self.max_relative_positions * 2
if self.position_buckets > 0:
pos_ebd_size = self.position_buckets * 2
self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size)
self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]
if "layer_norm" in self.norm_rel_ebd:
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)
self.conv = ConvLayer(config) if getattr(config, "conv_kernel_size", 0) > 0 else None
self.gradient_checkpointing = False
def get_rel_embedding(self):
rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
rel_embeddings = self.LayerNorm(rel_embeddings)
return rel_embeddings
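    # Expands a 2D padding mask [batch, seq] into a pairwise mask [batch, 1, seq, seq]
    # where entry (i, j) is 1 only if both token i and token j are real (non-padding)
    # tokens; a 3D mask just gains a broadcastable head dimension.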
def get_attention_mask(self, attention_mask):
if attention_mask.dim() <= 2:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
attention_mask = attention_mask.byte()
elif attention_mask.dim() == 3:
attention_mask = attention_mask.unsqueeze(1)
return attention_mask
def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
if self.relative_attention and relative_pos is None:
q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
relative_pos = build_relative_position(
q, hidden_states.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions
)
return relative_pos
def forward(
self,
hidden_states,
attention_mask,
output_hidden_states=True,
output_attentions=False,
query_states=None,
relative_pos=None,
return_dict=True,
):
if attention_mask.dim() <= 2:
input_mask = attention_mask
else:
input_mask = (attention_mask.sum(-2) > 0).byte()
attention_mask = self.get_attention_mask(attention_mask)
relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
if isinstance(hidden_states, Sequence):
next_kv = hidden_states[0]
else:
next_kv = hidden_states
rel_embeddings = self.get_rel_embedding()
output_states = next_kv
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (output_states,)
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
output_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
next_kv,
attention_mask,
query_states,
relative_pos,
rel_embeddings,
)
else:
output_states = layer_module(
next_kv,
attention_mask,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
output_attentions=output_attentions,
)
if output_attentions:
output_states, att_m = output_states
if i == 0 and self.conv is not None:
output_states = self.conv(hidden_states, output_states, input_mask)
if query_states is not None:
query_states = output_states
if isinstance(hidden_states, Sequence):
next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
else:
next_kv = output_states
if output_attentions:
all_attentions = all_attentions + (att_m,)
if output_hidden_states:
all_hidden_states = all_hidden_states + (output_states,)
if not return_dict:
return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
)
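# make_log_bucket_position maps a signed relative distance to a bucket index: distances
# inside the central window (|distance| < bucket_size // 2) keep their exact value, while
# larger distances are compressed logarithmically up to max_position, so far-away offsets
# share buckets instead of each needing its own relative-position embedding.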
def make_log_bucket_position(relative_pos, bucket_size, max_position):
sign = np.sign(relative_pos)
mid = bucket_size // 2
abs_pos = np.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, np.abs(relative_pos))
log_pos = np.ceil(np.log(abs_pos / mid) / np.log((max_position - 1) / mid) * (mid - 1)) + mid
    bucket_pos = np.where(abs_pos <= mid, relative_pos, log_pos * sign).astype(int)
return bucket_pos
def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):
"""
Build relative position according to the query and key
    We assume the absolute position of the query \\(P_q\\) ranges from (0, query_size) and the absolute position of the
    key \\(P_k\\) ranges from (0, key_size). The relative position from query to key is \\(R_{q \\rightarrow k} = P_q -
    P_k\\)
Args:
query_size (int): the length of query
key_size (int): the length of key
bucket_size (int): the size of position bucket
max_position (int): the maximum allowed absolute position
Return:
`torch.LongTensor`: A tensor with shape [1, query_size, key_size]
"""
q_ids = np.arange(0, query_size)
k_ids = np.arange(0, key_size)
rel_pos_ids = q_ids[:, None] - np.tile(k_ids, (q_ids.shape[0], 1))
if bucket_size > 0 and max_position > 0:
rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
rel_pos_ids = torch.tensor(rel_pos_ids, dtype=torch.long)
rel_pos_ids = rel_pos_ids[:query_size, :]
rel_pos_ids = rel_pos_ids.unsqueeze(0)
return rel_pos_ids
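# Example: with bucketing disabled, build_relative_position(3, 3) returns
# tensor([[[0, -1, -2], [1, 0, -1], [2, 1, 0]]]), i.e. rel_pos[0, i, j] = i - j.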
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.c2p_dynamic_expand
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.p2c_dynamic_expand
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.pos_dynamic_expand
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))
class DisentangledSelfAttention(nn.Module):
"""
Disentangled self-attention module
Parameters:
config (`DebertaV2Config`):
A model config class instance with the configuration to build a new model. The schema is similar to
*BertConfig*, for more details, please refer [`DebertaV2Config`]
"""
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
_attention_head_size = config.hidden_size // config.num_attention_heads
self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
self.key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
self.value_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
self.share_att_key = getattr(config, "share_att_key", False)
self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
self.relative_attention = getattr(config, "relative_attention", False)
if self.relative_attention:
self.position_buckets = getattr(config, "position_buckets", -1)
self.max_relative_positions = getattr(config, "max_relative_positions", -1)
if self.max_relative_positions < 1:
self.max_relative_positions = config.max_position_embeddings
self.pos_ebd_size = self.max_relative_positions
if self.position_buckets > 0:
self.pos_ebd_size = self.position_buckets
self.pos_dropout = StableDropout(config.hidden_dropout_prob)
if not self.share_att_key:
if "c2p" in self.pos_att_type:
self.pos_key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
if "p2c" in self.pos_att_type:
self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = StableDropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x, attention_heads):
new_x_shape = x.size()[:-1] + (attention_heads, -1)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1))
def forward(
self,
hidden_states,
attention_mask,
output_attentions=False,
query_states=None,
relative_pos=None,
rel_embeddings=None,
):
"""
Call the module
Args:
hidden_states (`torch.FloatTensor`):
                Input states to the module, usually the output from the previous layer; they will be the Q, K and V in
                *Attention(Q,K,V)*
attention_mask (`torch.ByteTensor`):
An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
th token.
output_attentions (`bool`, optional):
Whether return the attention matrix.
query_states (`torch.FloatTensor`, optional):
The *Q* state in *Attention(Q,K,V)*.
relative_pos (`torch.LongTensor`):
The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
values ranging in [*-max_relative_positions*, *max_relative_positions*].
rel_embeddings (`torch.FloatTensor`):
The embedding of relative distances. It's a tensor of shape [\\(2 \\times
\\text{max_relative_positions}\\), *hidden_size*].
"""
if query_states is None:
query_states = hidden_states
query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)
rel_att = None
# Take the dot product between "query" and "key" to get the raw attention scores.
scale_factor = 1
if "c2p" in self.pos_att_type:
scale_factor += 1
if "p2c" in self.pos_att_type:
scale_factor += 1
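        # scale_factor counts the score components that are summed (content-to-content,
        # plus optional content-to-position and position-to-content), so the shared
        # 1/sqrt(d * scale_factor) factor keeps the combined logits on a scale comparable
        # to standard scaled dot-product attention.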
scale = math.sqrt(query_layer.size(-1) * scale_factor)
attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / scale
if self.relative_attention:
rel_embeddings = self.pos_dropout(rel_embeddings)
rel_att = self.disentangled_attention_bias(
query_layer, key_layer, relative_pos, rel_embeddings, scale_factor
)
if rel_att is not None:
attention_scores = attention_scores + rel_att
attention_scores = attention_scores
attention_scores = attention_scores.view(
-1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1)
)
# bsz x height x length x dimension
attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.bmm(
attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer
)
context_layer = (
context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1))
.permute(0, 2, 1, 3)
.contiguous()
)
new_context_layer_shape = context_layer.size()[:-2] + (-1,)
context_layer = context_layer.view(new_context_layer_shape)
if output_attentions:
return (context_layer, attention_probs)
else:
return context_layer
def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
if relative_pos is None:
q = query_layer.size(-2)
relative_pos = build_relative_position(
q, key_layer.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions
)
if relative_pos.dim() == 2:
relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
elif relative_pos.dim() == 3:
relative_pos = relative_pos.unsqueeze(1)
# bsz x height x query x key
elif relative_pos.dim() != 4:
raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}")
att_span = self.pos_ebd_size
relative_pos = relative_pos.long().to(query_layer.device)
rel_embeddings = rel_embeddings[0 : att_span * 2, :].unsqueeze(0)
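        # Rows [0, 2 * att_span) of rel_embeddings cover relative distances in
        # [-att_span, att_span); below, relative_pos + att_span (clamped) indexes into
        # them, so distance 0 maps to row att_span.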
if self.share_att_key:
pos_query_layer = self.transpose_for_scores(
self.query_proj(rel_embeddings), self.num_attention_heads
).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)
pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads).repeat(
query_layer.size(0) // self.num_attention_heads, 1, 1
)
else:
if "c2p" in self.pos_att_type:
pos_key_layer = self.transpose_for_scores(
self.pos_key_proj(rel_embeddings), self.num_attention_heads
).repeat(
query_layer.size(0) // self.num_attention_heads, 1, 1
) # .split(self.all_head_size, dim=-1)
if "p2c" in self.pos_att_type:
pos_query_layer = self.transpose_for_scores(
self.pos_query_proj(rel_embeddings), self.num_attention_heads
).repeat(
query_layer.size(0) // self.num_attention_heads, 1, 1
) # .split(self.all_head_size, dim=-1)
score = 0
# content->position
if "c2p" in self.pos_att_type:
scale = math.sqrt(pos_key_layer.size(-1) * scale_factor)
c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2))
c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
c2p_att = torch.gather(
c2p_att,
dim=-1,
index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]),
)
score += c2p_att / scale
# position->content
if "p2c" in self.pos_att_type:
scale = math.sqrt(pos_query_layer.size(-1) * scale_factor)
if key_layer.size(-2) != query_layer.size(-2):
r_pos = build_relative_position(
key_layer.size(-2),
key_layer.size(-2),
bucket_size=self.position_buckets,
max_position=self.max_relative_positions,
).to(query_layer.device)
r_pos = r_pos.unsqueeze(0)
else:
r_pos = relative_pos
p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
p2c_att = torch.bmm(key_layer, pos_query_layer.transpose(-1, -2))
p2c_att = torch.gather(
p2c_att,
dim=-1,
index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)]),
).transpose(-1, -2)
score += p2c_att / scale
return score
# Copied from transformers.models.deberta.modeling_deberta.DebertaEmbeddings with DebertaLayerNorm->LayerNorm
class DebertaV2Embeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
pad_token_id = getattr(config, "pad_token_id", 0)
self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)
self.position_biased_input = getattr(config, "position_biased_input", True)
if not self.position_biased_input:
self.position_embeddings = None
else:
self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)
if config.type_vocab_size > 0:
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)
if self.embedding_size != config.hidden_size:
self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
self.config = config
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if self.position_embeddings is not None:
position_embeddings = self.position_embeddings(position_ids.long())
else:
position_embeddings = torch.zeros_like(inputs_embeds)
embeddings = inputs_embeds
if self.position_biased_input:
embeddings += position_embeddings
if self.config.type_vocab_size > 0:
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings += token_type_embeddings
if self.embedding_size != self.config.hidden_size:
embeddings = self.embed_proj(embeddings)
embeddings = self.LayerNorm(embeddings)
# if mask is not None:
# if mask.dim() != embeddings.dim():
# if mask.dim() == 4:
# mask = mask.squeeze(1).squeeze(1)
# mask = mask.unsqueeze(2)
# mask = mask.to(embeddings.dtype)
# embeddings = embeddings * mask
embeddings = self.dropout(embeddings)
return embeddings
# Copied from transformers.models.deberta.modeling_deberta.DebertaPreTrainedModel with Deberta->DebertaV2
class DebertaV2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DebertaV2Config
base_model_prefix = "deberta"
_keys_to_ignore_on_load_missing = ["position_ids"]
_keys_to_ignore_on_load_unexpected = ["position_embeddings"]
supports_gradient_checkpointing = True
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, DebertaV2Encoder):
module.gradient_checkpointing = value
DEBERTA_START_DOCSTRING = r"""
The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built
    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those
    two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.
    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.
Parameters:
config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
DEBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`DebertaV2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaModel with Deberta->DebertaV2
class DebertaV2Model(DebertaV2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = DebertaV2Embeddings(config)
self.encoder = DebertaV2Encoder(config)
self.z_steps = 0
self.config = config
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings.word_embeddings = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
raise NotImplementedError("The prune function is not implemented in DeBERTa model.")
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
mask=attention_mask,
inputs_embeds=inputs_embeds,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask,
output_hidden_states=True,
output_attentions=output_attentions,
return_dict=return_dict,
)
encoded_layers = encoder_outputs[1]
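        # z_steps > 1 would re-run the last encoder layer with the previous output as the
        # query states (in the spirit of DeBERTa's enhanced mask decoder); since z_steps
        # is fixed to 0 in __init__, this block is skipped.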
if self.z_steps > 1:
hidden_states = encoded_layers[-2]
layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
query_states = encoded_layers[-1]
rel_embeddings = self.encoder.get_rel_embedding()
attention_mask = self.encoder.get_attention_mask(attention_mask)
rel_pos = self.encoder.get_rel_pos(embedding_output)
for layer in layers[1:]:
query_states = layer(
hidden_states,
attention_mask,
output_attentions=False,
query_states=query_states,
relative_pos=rel_pos,
rel_embeddings=rel_embeddings,
)
encoded_layers.append(query_states)
sequence_output = encoded_layers[-1]
if not return_dict:
return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]
return BaseModelOutput(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
attentions=encoder_outputs.attentions,
)
@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForMaskedLM with Deberta->DebertaV2
class DebertaV2ForMaskedLM(DebertaV2PreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.deberta = DebertaV2Model(config)
self.cls = DebertaV2OnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# copied from transformers.models.bert.BertPredictionHeadTransform with bert -> deberta
class DebertaV2PredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# copied from transformers.models.bert.BertLMPredictionHead with bert -> deberta
class DebertaV2LMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = DebertaV2PredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta
class DebertaV2OnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = DebertaV2LMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
@add_start_docstrings(
"""
DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForSequenceClassification with Deberta->DebertaV2
class DebertaV2ForSequenceClassification(DebertaV2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
num_labels = getattr(config, "num_labels", 2)
self.num_labels = num_labels
self.deberta = DebertaV2Model(config)
self.pooler = ContextPooler(config)
output_dim = self.pooler.output_dim
self.classifier = nn.Linear(output_dim, num_labels)
drop_out = getattr(config, "cls_dropout", None)
drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
self.dropout = StableDropout(drop_out)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.deberta.get_input_embeddings()
def set_input_embeddings(self, new_embeddings):
self.deberta.set_input_embeddings(new_embeddings)
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
encoder_layer = outputs[0]
pooled_output = self.pooler(encoder_layer)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
# regression task
loss_fn = nn.MSELoss()
logits = logits.view(-1).to(labels.dtype)
loss = loss_fn(logits, labels.view(-1))
elif labels.dim() == 1 or labels.size(-1) == 1:
label_index = (labels >= 0).nonzero()
labels = labels.long()
if label_index.size(0) > 0:
labeled_logits = torch.gather(
logits, 0, label_index.expand(label_index.size(0), logits.size(1))
)
labels = torch.gather(labels, 0, label_index.view(-1))
loss_fct = CrossEntropyLoss()
loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
else:
loss = torch.tensor(0).to(logits)
else:
log_softmax = nn.LogSoftmax(-1)
loss = -((log_softmax(logits) * labels).sum(-1)).mean()
elif self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
)
@add_start_docstrings(
"""
DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForTokenClassification with Deberta->DebertaV2
class DebertaV2ForTokenClassification(DebertaV2PreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.deberta = DebertaV2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
)
@add_start_docstrings(
"""
DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForQuestionAnswering with Deberta->DebertaV2
class DebertaV2ForQuestionAnswering(DebertaV2PreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.deberta = DebertaV2Model(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, QuestionAnsweringModelOutput]:
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
DeBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
DEBERTA_START_DOCSTRING,
)
class DebertaV2ForMultipleChoice(DebertaV2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
num_labels = getattr(config, "num_labels", 2)
self.num_labels = num_labels
self.deberta = DebertaV2Model(config)
self.pooler = ContextPooler(config)
output_dim = self.pooler.output_dim
self.classifier = nn.Linear(output_dim, 1)
drop_out = getattr(config, "cls_dropout", None)
drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
self.dropout = StableDropout(drop_out)
self.init_weights()
def get_input_embeddings(self):
return self.deberta.get_input_embeddings()
def set_input_embeddings(self, new_embeddings):
self.deberta.set_input_embeddings(new_embeddings)
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.deberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
encoder_layer = outputs[0]
pooled_output = self.pooler(encoder_layer)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| 66,327 | 39.99382 | 127 | py |
| Fengshenbang-LM | Fengshenbang-LM-main/fengshen/models/unimc/modeling_unimc.py |
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import basicConfig
import torch
from torch import nn
import json
from tqdm import tqdm
import os
import numpy as np
from transformers import BertTokenizer
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import trainer, loggers
from torch.utils.data import Dataset, DataLoader
from transformers.optimization import get_linear_schedule_with_warmup
from transformers import BertForMaskedLM, AlbertTokenizer
from transformers import AutoConfig
from transformers.pipelines.base import Pipeline
from transformers import MegatronBertForMaskedLM
from fengshen.models.deberta_v2.modeling_deberta_v2 import DebertaV2ForMaskedLM
from fengshen.models.albert.modeling_albert import AlbertForMaskedLM
import argparse
import copy
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
import warnings
from transformers import TextClassificationPipeline as HuggingfacePipe
class UniMCDataset(Dataset):
def __init__(self, data, yes_token, no_token, tokenizer, args, used_mask=True):
super().__init__()
self.tokenizer = tokenizer
self.max_length = args.max_length
self.num_labels = args.num_labels
self.used_mask = used_mask
self.data = data
self.args = args
self.yes_token = yes_token
self.no_token = no_token
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.encode(self.data[index], self.used_mask)
def get_token_type(self, sep_idx, max_length):
token_type_ids = np.zeros(shape=(max_length,))
for i in range(len(sep_idx)-1):
if i % 2 == 0:
ty = np.ones(shape=(sep_idx[i+1]-sep_idx[i],))
else:
ty = np.zeros(shape=(sep_idx[i+1]-sep_idx[i],))
token_type_ids[sep_idx[i]:sep_idx[i+1]] = ty
return token_type_ids
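    # Each answer option gets position ids restarting at question_len, making the options
    # interchangeable position-wise; the passage then continues from the longest option's
    # last position. Positions are capped at 511 so sequences longer than 512 still fit
    # BERT-style absolute position embeddings.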
def get_position_ids(self, label_idx, max_length, question_len):
question_position_ids = np.arange(question_len)
label_position_ids = np.arange(question_len, label_idx[-1])
for i in range(len(label_idx)-1):
label_position_ids[label_idx[i]-question_len:label_idx[i+1]-question_len] = np.arange(
question_len, question_len+label_idx[i+1]-label_idx[i])
max_len_label = max(label_position_ids)
text_position_ids = np.arange(
max_len_label+1, max_length+max_len_label+1-label_idx[-1])
position_ids = list(question_position_ids) + \
list(label_position_ids)+list(text_position_ids)
if max_length <= 512:
return position_ids[:max_length]
else:
for i in range(512, max_length):
if position_ids[i] > 511:
position_ids[i] = 511
return position_ids[:max_length]
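    # Builds a 2D attention mask from the padding mask, then blanks out attention between
    # different answer options so each option can only attend to itself, the question and
    # the passage, while full attention is restored inside each option span.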
def get_att_mask(self, attention_mask, label_idx, question_len):
max_length = len(attention_mask)
attention_mask = np.array(attention_mask)
attention_mask = np.tile(attention_mask[None, :], (max_length, 1))
zeros = np.zeros(
shape=(label_idx[-1]-question_len, label_idx[-1]-question_len))
attention_mask[question_len:label_idx[-1],
question_len:label_idx[-1]] = zeros
for i in range(len(label_idx)-1):
label_token_length = label_idx[i+1]-label_idx[i]
if label_token_length <= 0:
print('label_idx', label_idx)
print('question_len', question_len)
continue
ones = np.ones(shape=(label_token_length, label_token_length))
attention_mask[label_idx[i]:label_idx[i+1],
label_idx[i]:label_idx[i+1]] = ones
return attention_mask
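    # BERT-style dynamic masking, applied only to tokens at or after mask_start_idx (the
    # question/passage region, never the option/[MASK] template): of the positions selected
    # at mask_rate, 80% become [MASK], 10% keep the original token and 10% get a random
    # token, all labelled with the original id; everything else is labelled -100 and
    # ignored by the MLM loss.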
    def random_masking(self, token_ids, mask_rate, mask_start_idx, max_length, mask_id, tokenizer):
rands = np.random.random(len(token_ids))
source, target = [], []
for i, (r, t) in enumerate(zip(rands, token_ids)):
if i < mask_start_idx:
source.append(t)
target.append(-100)
continue
            if r < mask_rate * 0.8:
                source.append(mask_id)
                target.append(t)
            elif r < mask_rate * 0.9:
                source.append(t)
                target.append(t)
            elif r < mask_rate:
source.append(np.random.choice(tokenizer.vocab_size - 1) + 1)
target.append(t)
else:
source.append(t)
target.append(-100)
while len(source) < max_length:
source.append(0)
target.append(-100)
return source[:max_length], target[:max_length]
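    # The tokenized input is laid out as "[CLS] [MASK] choice_1 [MASK] choice_2 ... [SEP]
    # question [SEP] texta [SEP] textb" (question/textb only when present): every option is
    # preceded by a [MASK], and the model is trained to predict the yes-token at the [MASK]
    # in front of the correct option and the no-token at the other options' [MASK] positions.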
def encode(self, item, used_mask=False):
while len(self.tokenizer.encode('[MASK]'.join(item['choice']))) > self.max_length-32:
item['choice'] = [c[:int(len(c)/2)] for c in item['choice']]
if 'textb' in item.keys() and item['textb'] != '':
if 'question' in item.keys() and item['question'] != '':
texta = '[MASK]' + '[MASK]'.join(item['choice']) + '[SEP]' + \
item['question'] + '[SEP]' + \
item['texta']+'[SEP]'+item['textb']
else:
texta = '[MASK]' + '[MASK]'.join(item['choice']) + '[SEP]' + \
item['texta']+'[SEP]'+item['textb']
else:
if 'question' in item.keys() and item['question'] != '':
texta = '[MASK]' + '[MASK]'.join(item['choice']) + '[SEP]' + \
item['question'] + '[SEP]' + item['texta']
else:
texta = '[MASK]' + '[MASK]'.join(item['choice']) + \
'[SEP]' + item['texta']
encode_dict = self.tokenizer.encode_plus(texta,
max_length=self.max_length,
padding='max_length',
truncation='longest_first')
encode_sent = encode_dict['input_ids']
token_type_ids = encode_dict['token_type_ids']
attention_mask = encode_dict['attention_mask']
sample_max_length = sum(encode_dict['attention_mask'])
if 'label' not in item.keys():
item['label'] = 0
item['answer'] = ''
question_len = 1
label_idx = [question_len]
for choice in item['choice']:
cur_mask_idx = label_idx[-1] + \
len(self.tokenizer.encode(choice, add_special_tokens=False))+1
label_idx.append(cur_mask_idx)
token_type_ids = [0]*question_len+[1] * \
(label_idx[-1]-label_idx[0]+1)+[0]*self.max_length
token_type_ids = token_type_ids[:self.max_length]
attention_mask = self.get_att_mask(
attention_mask, label_idx, question_len)
position_ids = self.get_position_ids(
label_idx, self.max_length, question_len)
clslabels_mask = np.zeros(shape=(len(encode_sent),))
clslabels_mask[label_idx[:-1]] = 10000
clslabels_mask = clslabels_mask-10000
mlmlabels_mask = np.zeros(shape=(len(encode_sent),))
mlmlabels_mask[label_idx[0]] = 1
# used_mask=False
if used_mask:
mask_rate = 0.1*np.random.choice(4, p=[0.3, 0.3, 0.25, 0.15])
            source, target = self.random_masking(token_ids=encode_sent, mask_rate=mask_rate,
mask_start_idx=label_idx[-1], max_length=self.max_length,
mask_id=self.tokenizer.mask_token_id, tokenizer=self.tokenizer)
else:
source, target = encode_sent[:], encode_sent[:]
source = np.array(source)
target = np.array(target)
source[label_idx[:-1]] = self.tokenizer.mask_token_id
target[label_idx[:-1]] = self.no_token
target[label_idx[item['label']]] = self.yes_token
input_ids = source[:sample_max_length]
token_type_ids = token_type_ids[:sample_max_length]
attention_mask = attention_mask[:sample_max_length, :sample_max_length]
position_ids = position_ids[:sample_max_length]
mlmlabels = target[:sample_max_length]
clslabels = label_idx[item['label']]
clslabels_mask = clslabels_mask[:sample_max_length]
mlmlabels_mask = mlmlabels_mask[:sample_max_length]
return {
"input_ids": torch.tensor(input_ids).long(),
"token_type_ids": torch.tensor(token_type_ids).long(),
"attention_mask": torch.tensor(attention_mask).float(),
"position_ids": torch.tensor(position_ids).long(),
"mlmlabels": torch.tensor(mlmlabels).long(),
"clslabels": torch.tensor(clslabels).long(),
"clslabels_mask": torch.tensor(clslabels_mask).float(),
"mlmlabels_mask": torch.tensor(mlmlabels_mask).float(),
}
class UniMCDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TASK NAME DataModel')
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--batchsize', default=16, type=int)
parser.add_argument('--max_length', default=512, type=int)
return parent_args
def __init__(self, train_data, val_data, yes_token, no_token, tokenizer, args):
super().__init__()
self.batchsize = args.batchsize
self.train_data = UniMCDataset(
train_data, yes_token, no_token, tokenizer, args, True)
self.valid_data = UniMCDataset(
val_data, yes_token, no_token, tokenizer, args, False)
def train_dataloader(self):
return DataLoader(self.train_data, shuffle=True, collate_fn=self.collate_fn, batch_size=self.batchsize, pin_memory=False)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False, collate_fn=self.collate_fn, batch_size=self.batchsize, pin_memory=False)
def collate_fn(self, batch):
'''
        Aggregate a batch of data.
batch = [ins1_dict, ins2_dict, ..., insN_dict]
batch_data = {'sentence':[ins1_sentence, ins2_sentence...], 'input_ids':[ins1_input_ids, ins2_input_ids...], ...}
'''
batch_data = {}
for key in batch[0]:
batch_data[key] = [example[key] for example in batch]
batch_data['input_ids'] = nn.utils.rnn.pad_sequence(batch_data['input_ids'],
batch_first=True,
padding_value=0)
batch_data['clslabels_mask'] = nn.utils.rnn.pad_sequence(batch_data['clslabels_mask'],
batch_first=True,
padding_value=-10000)
batch_size, batch_max_length = batch_data['input_ids'].shape
for k, v in batch_data.items():
if k == 'input_ids' or k == 'clslabels_mask':
continue
if k == 'clslabels':
batch_data[k] = torch.tensor(v).long()
continue
if k != 'attention_mask':
batch_data[k] = nn.utils.rnn.pad_sequence(v,
batch_first=True,
padding_value=0)
else:
attention_mask = torch.zeros(
(batch_size, batch_max_length, batch_max_length))
for i, att in enumerate(v):
sample_length, _ = att.shape
attention_mask[i, :sample_length, :sample_length] = att
batch_data[k] = attention_mask
return batch_data
class UniMCModel(nn.Module):
def __init__(self, pre_train_dir, yes_token):
super().__init__()
self.config = AutoConfig.from_pretrained(pre_train_dir)
if self.config.model_type == 'megatron-bert':
self.bert = MegatronBertForMaskedLM.from_pretrained(pre_train_dir)
elif self.config.model_type == 'deberta-v2':
self.bert = DebertaV2ForMaskedLM.from_pretrained(pre_train_dir)
elif self.config.model_type == 'albert':
self.bert = AlbertForMaskedLM.from_pretrained(pre_train_dir)
else:
self.bert = BertForMaskedLM.from_pretrained(pre_train_dir)
self.loss_func = torch.nn.CrossEntropyLoss()
self.yes_token = yes_token
def forward(self, input_ids, attention_mask, token_type_ids, position_ids=None, mlmlabels=None, clslabels=None, clslabels_mask=None, mlmlabels_mask=None):
batch_size, seq_len = input_ids.shape
outputs = self.bert(input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
token_type_ids=token_type_ids,
labels=mlmlabels) # (bsz, seq, dim)
mask_loss = outputs.loss
mlm_logits = outputs.logits
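        # Classification logits are read off the MLM head: take each position's logit for
        # the yes-token and add clslabels_mask (0 at option [MASK] positions, -10000
        # elsewhere), so the cross-entropy over sequence positions effectively ranks only
        # the options.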
cls_logits = mlm_logits[:, :,
self.yes_token].view(-1, seq_len)+clslabels_mask
        if mlmlabels is None:
return 0, mlm_logits, cls_logits
else:
cls_loss = self.loss_func(cls_logits, clslabels)
all_loss = mask_loss+cls_loss
return all_loss, mlm_logits, cls_logits
class UniMCLitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--learning_rate', default=1e-5, type=float)
parser.add_argument('--weight_decay', default=0.1, type=float)
parser.add_argument('--warmup', default=0.01, type=float)
parser.add_argument('--num_labels', default=2, type=int)
return parent_args
def __init__(self, args, yes_token, model_path, num_data=100):
super().__init__()
self.args = args
self.num_data = num_data
self.model = UniMCModel(model_path, yes_token)
def setup(self, stage) -> None:
if stage == 'fit':
num_gpus = self.trainer.gpus if self.trainer.gpus is not None else 0
self.total_step = int(self.trainer.max_epochs * self.num_data /
(max(1, num_gpus) * self.trainer.accumulate_grad_batches))
print('Total training step:', self.total_step)
def training_step(self, batch, batch_idx):
loss, logits, cls_logits = self.model(**batch)
cls_acc = self.comput_metrix(
cls_logits, batch['clslabels'], batch['mlmlabels_mask'])
self.log('train_loss', loss)
self.log('train_acc', cls_acc)
return loss
def validation_step(self, batch, batch_idx):
loss, logits, cls_logits = self.model(**batch)
cls_acc = self.comput_metrix(
cls_logits, batch['clslabels'], batch['mlmlabels_mask'])
self.log('val_loss', loss)
self.log('val_acc', cls_acc)
def configure_optimizers(self):
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
paras = list(
filter(lambda p: p[1].requires_grad, self.named_parameters()))
paras = [{
'params':
[p for n, p in paras if not any(nd in n for nd in no_decay)],
'weight_decay': self.args.weight_decay
}, {
'params': [p for n, p in paras if any(nd in n for nd in no_decay)],
'weight_decay': 0.0
}]
optimizer = torch.optim.AdamW(paras, lr=self.args.learning_rate)
scheduler = get_linear_schedule_with_warmup(
optimizer, int(self.total_step * self.args.warmup),
self.total_step)
return [{
'optimizer': optimizer,
'lr_scheduler': {
'scheduler': scheduler,
'interval': 'step',
'frequency': 1
}
}]
def comput_metrix(self, logits, labels, mlmlabels_mask):
logits = torch.nn.functional.softmax(logits, dim=-1)
logits = torch.argmax(logits, dim=-1)
y_pred = logits.view(size=(-1,))
y_true = labels.view(size=(-1,))
corr = torch.eq(y_pred, y_true).float()
return torch.sum(corr.float())/labels.size(0)
class UniMCPredict:
def __init__(self, yes_token, no_token, model, tokenizer, args):
self.tokenizer = tokenizer
self.args = args
self.data_model = UniMCDataModel(
[], [], yes_token, no_token, tokenizer, args)
self.model = model
def predict(self, batch_data):
batch = [self.data_model.train_data.encode(
sample) for sample in batch_data]
batch = self.data_model.collate_fn(batch)
batch = {k: v.cuda() for k, v in batch.items()}
_, _, logits = self.model.model(**batch)
soft_logits = torch.nn.functional.softmax(logits, dim=-1)
logits = torch.argmax(soft_logits, dim=-1).detach().cpu().numpy()
soft_logits = soft_logits.detach().cpu().numpy()
clslabels_mask = batch['clslabels_mask'].detach(
).cpu().numpy().tolist()
clslabels = batch['clslabels'].detach().cpu().numpy().tolist()
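        # Option positions are the entries where clslabels_mask is 0; the argmax over
        # sequence positions is mapped back to an option index and then to the option text,
        # with per-option scores taken from the softmax at those positions.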
for i, v in enumerate(batch_data):
label_idx = [idx for idx, v in enumerate(
clslabels_mask[i]) if v == 0.]
label = label_idx.index(logits[i])
answer = batch_data[i]['choice'][label]
score = {}
for c in range(len(batch_data[i]['choice'])):
score[batch_data[i]['choice'][c]] = float(
soft_logits[i][label_idx[c]])
batch_data[i]['label_ori'] = copy.deepcopy(batch_data[i]['label'])
batch_data[i]['label'] = label
batch_data[i]['answer'] = answer
batch_data[i]['score'] = score
return batch_data
class UniMCPipelines(Pipeline):
@staticmethod
def pipelines_args(parent_args):
        total_parser = parent_args.add_argument_group("pipelines args")
total_parser.add_argument(
'--pretrained_model_path', default='', type=str)
total_parser.add_argument('--load_checkpoints_path',
default='', type=str)
total_parser.add_argument('--train', action='store_true')
total_parser.add_argument('--language',
default='chinese', type=str)
total_parser = UniMCDataModel.add_data_specific_args(total_parser)
total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
total_parser = UniMCLitModel.add_model_specific_args(total_parser)
total_parser = pl.Trainer.add_argparse_args(parent_args)
return parent_args
def __init__(self, args, model_path):
self.args = args
self.checkpoint_callback = UniversalCheckpoint(args).callbacks
self.logger = loggers.TensorBoardLogger(save_dir=args.default_root_dir)
self.trainer = pl.Trainer.from_argparse_args(args,
logger=self.logger,
callbacks=[self.checkpoint_callback])
self.config = AutoConfig.from_pretrained(model_path)
if self.config.model_type == 'albert':
self.tokenizer = AlbertTokenizer.from_pretrained(
model_path)
else:
self.tokenizer = BertTokenizer.from_pretrained(
model_path)
if args.language == 'chinese':
self.yes_token = self.tokenizer.encode('是')[1]
self.no_token = self.tokenizer.encode('非')[1]
else:
self.yes_token = self.tokenizer.encode('yes')[1]
self.no_token = self.tokenizer.encode('no')[1]
if args.load_checkpoints_path != '':
self.model = UniMCLitModel.load_from_checkpoint(
args.load_checkpoints_path, args=args, yes_token=self.yes_token, model_path=model_path)
print('load model from: ', args.load_checkpoints_path)
else:
self.model = UniMCLitModel(
args, yes_token=self.yes_token, model_path=model_path)
def train(self, train_data, dev_data, process=True):
if process:
train_data = self.preprocess(train_data)
dev_data = self.preprocess(dev_data)
data_model = UniMCDataModel(
train_data, dev_data, self.yes_token, self.no_token, self.tokenizer, self.args)
self.model.num_data = len(train_data)
self.trainer.fit(self.model, data_model)
def predict(self, test_data, cuda=True, process=True):
if process:
test_data = self.preprocess(test_data)
result = []
start = 0
if cuda:
self.model = self.model.cuda()
self.model.model.eval()
predict_model = UniMCPredict(
self.yes_token, self.no_token, self.model, self.tokenizer, self.args)
while start < len(test_data):
batch_data = test_data[start:start+self.args.batchsize]
start += self.args.batchsize
batch_result = predict_model.predict(batch_data)
result.extend(batch_result)
if process:
result = self.postprocess(result)
return result
def preprocess(self, data):
for i, line in enumerate(data):
if 'task_type' in line.keys() and line['task_type'] == '语义匹配':
                data[i]['choice'] = ['不能理解为:' + data[i]['textb'],
                                     '可以理解为:' + data[i]['textb']]
# data[i]['question']='怎么理解这段话?'
data[i]['textb'] = ''
if 'task_type' in line.keys() and line['task_type'] == '自然语言推理':
data[i]['choice'] = ['不能推断出:'+data[i]['textb'],
'很难推断出:'+data[i]['textb'], '可以推断出:'+data[i]['textb']]
# data[i]['question']='根据这段话'
data[i]['textb'] = ''
return data
def postprocess(self, data):
for i, line in enumerate(data):
if 'task_type' in line.keys() and line['task_type'] == '语义匹配':
data[i]['textb'] = data[i]['choice'][0].replace('不能理解为:', '')
data[i]['choice'] = ['不相似', '相似']
ns = {}
for k, v in data[i]['score'].items():
if '不能' in k:
k = '不相似'
if '可以' in k:
k = '相似'
ns[k] = v
data[i]['score'] = ns
data[i]['answer'] = data[i]['choice'][data[i]['label']]
if 'task_type' in line.keys() and line['task_type'] == '自然语言推理':
data[i]['textb'] = data[i]['choice'][0].replace('不能推断出:', '')
data[i]['choice'] = ['矛盾', '自然', '蕴含']
ns = {}
for k, v in data[i]['score'].items():
if '不能' in k:
k = '矛盾'
if '很难' in k:
k = '自然'
if '可以' in k:
k = '蕴含'
ns[k] = v
data[i]['score'] = ns
data[i]['answer'] = data[i]['choice'][data[i]['label']]
return data
def _forward(self, model_inputs):
return self.model(**model_inputs)
def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
preprocess_params = tokenizer_kwargs
postprocess_params = {}
if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
return_all_scores = self.model.config.return_all_scores
if isinstance(top_k, int) or top_k is None:
postprocess_params["top_k"] = top_k
postprocess_params["_legacy"] = False
elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if you want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
if return_all_scores:
postprocess_params["top_k"] = None
else:
postprocess_params["top_k"] = 1
if function_to_apply is not None:
postprocess_params["function_to_apply"] = function_to_apply
return preprocess_params, {}, postprocess_params
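# A minimal usage sketch for UniMCPipelines (added; illustrative only -- the sample field names
# beyond 'choice'/'label'/'textb'/'task_type' are assumptions about the data encoder defined
# earlier in this file, and the argument values come from the command line):
#
#   parser = argparse.ArgumentParser("UniMC demo")
#   parser = UniMCPipelines.pipelines_args(parser)
#   args = parser.parse_args()
#   pipeline = UniMCPipelines(args, args.pretrained_model_path)
#   samples = [{'texta': '今天天气真好', 'textb': '', 'question': '',
#               'choice': ['好评', '差评'], 'label': 0}]
#   print(pipeline.predict(samples)[0]['answer'])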
def load_data(data_path):
with open(data_path, 'r', encoding='utf8') as f:
lines = f.readlines()
samples = [json.loads(line) for line in tqdm(lines)]
return samples
def comp_acc(pred_data, test_data):
corr = 0
for i in range(len(pred_data)):
if pred_data[i]['label'] == test_data[i]['label']:
corr += 1
return corr/len(pred_data)
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--data_dir', default='./data', type=str)
total_parser.add_argument('--train_data', default='train.json', type=str)
total_parser.add_argument('--valid_data', default='dev.json', type=str)
total_parser.add_argument('--test_data', default='test.json', type=str)
total_parser.add_argument('--output_path', default='', type=str)
    total_parser = UniMCPipelines.pipelines_args(total_parser)
args = total_parser.parse_args()
train_data = load_data(os.path.join(args.data_dir, args.train_data))
dev_data = load_data(os.path.join(args.data_dir, args.valid_data))
test_data = load_data(os.path.join(args.data_dir, args.test_data))
dev_data_ori = copy.deepcopy(dev_data)
    model = UniMCPipelines(args, args.pretrained_model_path)
print(args.data_dir)
if args.train:
model.train(train_data, dev_data)
result = model.predict(dev_data)
for line in result[:20]:
print(line)
acc = comp_acc(result, dev_data_ori)
print('acc:', acc)
if args.output_path != '':
test_result = model.predict(test_data)
with open(args.output_path, 'w', encoding='utf8') as f:
for line in test_result:
json_data = json.dumps(line, ensure_ascii=False)
f.write(json_data+'\n')
if __name__ == "__main__":
main()
| 27,338 | 40.360061 | 158 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/unimc/__init__.py
|
from .modeling_unimc import UniMCPipelines
| 42 | 42 | 42 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/PPVAE/utils.py
|
from torch.utils.data import Dataset
class CustomDataset(Dataset):
def __init__(self, data) -> None:
super().__init__()
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, index):
# Get data
d = self.data[index]
return d
class EarlyStopping():
def __init__(self, tolerance=10, min_delta=0):
self.tolerance = tolerance
self.min_delta = min_delta
self.counter = 0
self.early_stop = False
def __call__(self, train_loss, min_loss):
        if (train_loss - min_loss) > self.min_delta:
            self.counter += 1
if self.counter >= self.tolerance:
self.early_stop = True
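# A minimal sketch (added; illustrative only) of how EarlyStopping is meant to be driven from a
# training loop, mirroring how PPVAEModel.train_plugin in pluginVAE.py uses it; `run_one_epoch`
# and `max_epochs` are placeholders:
#
#   early_stopper = EarlyStopping(tolerance=10, min_delta=0)
#   min_loss = float('inf')
#   for epoch in range(max_epochs):
#       train_loss = run_one_epoch()
#       if train_loss < min_loss:
#           min_loss = train_loss
#           early_stopper.counter = 0
#       early_stopper(train_loss, min_loss)
#       if early_stopper.early_stop:
#           break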
# def gen_text_from_center(args,plugin_vae, vae_model, decoder_tokenizer,label,epoch,pos):
# gen_text = []
# latent_z = gen_latent_center(plugin_vae,pos).to(args.device).repeat((1,1))
# print("latent_z",latent_z.shape)
# text_analogy = text_from_latent_code_batch(latent_z, vae_model, args, decoder_tokenizer)
# print("label",label)
# print(text_analogy)
# gen_text.extend([(label,y,epoch) for y in text_analogy])
# text2out(gen_text, '/cognitive_comp/liangyuxin/projects/cond_vae/outputs/test.json')
| 1,264 | 32.289474 | 94 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/PPVAE/pluginVAE.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from transformers.modeling_utils import PreTrainedModel
from transformers.configuration_utils import PretrainedConfig
from fengshen.models.DAVAE.DAVAEModel import DAVAEModel
from fengshen.models.PPVAE.utils import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Encoder(nn.Module):
def __init__(self, latent_dim=128, bottle_dim=20) -> None:
super().__init__()
self.fc1 = nn.Linear(latent_dim, latent_dim//2)
self.fc2 = nn.Linear(latent_dim//2, latent_dim//4)
self.mean = nn.Linear(latent_dim//4, bottle_dim)
self.log_var = nn.Linear(latent_dim//4, bottle_dim)
def kl_loss(self, mean, log_var):
return (-0.5 * (1 + log_var - mean**2 - log_var.exp()).sum(-1)).mean()
def sampling(self, mean, log_var):
epsilon = torch.randn(mean.shape[0], mean.shape[-1], device=mean.device)
return mean + (log_var / 2).exp() * epsilon.unsqueeze(1)
def forward(self, z):
'''
:param z: shape (b, latent_dim)
'''
z = self.fc1(z)
z = F.leaky_relu(z)
z = F.leaky_relu(self.fc2(z))
z_mean = self.mean(z)
z_log_var = self.log_var(z)
kl_loss = self.kl_loss(z_mean, z_log_var)
enc_z = self.sampling(z_mean, z_log_var)
if not self.training:
enc_z = z_mean
return enc_z, kl_loss
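# Note (added): `kl_loss` above is the analytic KL divergence between the approximate posterior
# N(mean, exp(log_var)) and a standard normal prior,
#     KL = -0.5 * sum(1 + log_var - mean**2 - exp(log_var)),
# averaged over the batch, and `sampling` is the usual reparameterization trick
# z = mean + exp(log_var / 2) * epsilon with epsilon ~ N(0, I), which keeps the mean and
# log-variance heads differentiable.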
class Decoder(nn.Module):
def __init__(self, latent_dim=128, bottle_dim=20) -> None:
super().__init__()
self.fc1 = nn.Linear(bottle_dim, latent_dim//4)
self.fc2 = nn.Linear(latent_dim//4, latent_dim//2)
self.fc3 = nn.Linear(latent_dim//2, latent_dim)
def forward(self, enc_z):
z = F.leaky_relu(self.fc1(enc_z))
z = F.leaky_relu(self.fc2(z))
z = self.fc3(z)
return z
class PluginVAE(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.kl_weight = config.kl_weight
self.beta = config.beta
self.encoder = Encoder(config.latent_dim, config.bottle_dim)
self.decoder = Decoder(config.latent_dim, config.bottle_dim)
def set_beta(self, beta):
self.beta = beta
def forward(self, z):
enc_z, kl_loss = self.encoder(z)
z_out = self.decoder(enc_z)
return z_out, kl_loss
def loss(self, z):
z_out, kl_loss = self.forward(z)
z_loss = ((z_out-z)**2).mean()
loss = z_loss + self.kl_weight * (kl_loss-self.beta).abs()
return loss, kl_loss
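# Note (added): the plug-in objective above combines a reconstruction term in the DAVAE latent
# space with a capacity-constrained KL penalty,
#     loss = mse(z_out, z) + kl_weight * |KL - beta|,
# i.e. instead of simply minimizing the KL term, it is pulled toward the target capacity `beta`.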
class PPVAEPretrainedModel(PreTrainedModel):
def _init_weights(self, module):
""" Initialize the weights """
pass # to bypass the not implement error
class PPVAEModel(PPVAEPretrainedModel):
config_class = PretrainedConfig
    def __init__(self, config: PretrainedConfig) -> None:
        super().__init__(config=config)
        self.config = config
self.pluginvae = PluginVAE(self.config)
self.vae_model = DAVAEModel(self.config)
    def train_plugin(self, encoder_tokenizer, decoder_tokenizer, input_texts, negative_samples=None):
        # Input: pluginVAE, label, train_data_dict
        # Output: pluginVAE
        self.vae_model.set_tokenizers(encoder_tokenizer, decoder_tokenizer)
        pos = self.get_latent(input_texts)
pos_batch_size = self.config.batch_size
total_epoch = self.config.total_epoch
pos_dataset = CustomDataset(pos)
pos_dataloader = DataLoader(
pos_dataset,
batch_size=pos_batch_size,
shuffle=True
)
        neg = None
        if negative_samples is not None:
            neg = self.get_latent(negative_samples)
            neg_batch_size = int(pos_batch_size * (neg.shape[0] / pos.shape[0]))
neg_dataset = CustomDataset(neg)
neg_dataloader = DataLoader(
neg_dataset,
batch_size=neg_batch_size,
shuffle=True
)
optimizer = torch.optim.Adam(
params=self.pluginvae.parameters(),
lr=self.config.ppvae_lr, betas=(self.config.mu, self.config.nu)
)
gamma = self.config.gamma
iter_num = 0
early_stopper = EarlyStopping()
min_loss = 10.0
for epoch in range(total_epoch):
self.pluginvae.train()
total_pos_loss = 0.0
total_neg_loss = 0.0
total_loss = 0.0
total_pos_kl = 0.0
for i, data in enumerate(pos_dataloader):
                if self.config.get_dymanic_beta:
                    self.pluginvae.set_beta(self.get_beta_weight(
                        iter_num, self.config.beta, self.config.beta_total_step))
                iter_num += 1
                pos_loss, pos_kl = self.pluginvae.loss(data)
neg_loss = 0.0
if neg is not None:
neg_data = next(iter(neg_dataloader))
                    neg_loss, loss_kl = self.pluginvae.loss(neg_data)
                    if neg_loss.item() > self.config.neg_loss_threshold * pos_loss.item():
# print("neg_loss exceed, detached")
neg_loss = neg_loss.detach()
total_neg_loss += neg_loss.item()
loss = pos_loss - gamma*neg_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_pos_loss += pos_loss.item()
total_loss += loss.item()
total_pos_kl += pos_kl.item()
avg_loss = total_loss/len(pos_dataloader)
avg_kl_loss = total_pos_kl/len(pos_dataloader)
            if avg_loss < min_loss:
min_loss = avg_loss
early_stopper.counter = 0
early_stopper(avg_loss, min_loss)
if early_stopper.early_stop:
# print(f"stop training at epoch {epoch}")
break
    def generate(self, n):
latent_z = self.gen_latent(n)
text_analogy = self.vae_model.text_from_latent_code_batch(latent_z)
return text_analogy
    def get_latent(self, texts):
latent = self.vae_model.latent_code_from_text_batch(texts)
return latent
    def gen_latent(self, gen_num=5):
random_vec = torch.randn((gen_num, self.config.bottle_dim)).to(device)
with torch.no_grad():
g_vec = self.pluginvae.decoder(random_vec)
return g_vec
    def get_beta_weight(self, iter_num, beta, total_step):
now_beta_weight = min((beta/total_step)*iter_num, beta)
return now_beta_weight
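# A minimal usage sketch for PPVAEModel (added; illustrative only -- the config path, tokenizer
# objects and text lists are placeholders, not values from this repository):
#
#   config = PretrainedConfig.from_pretrained('path/to/ppvae-config')
#   model = PPVAEModel(config).to(device)
#   model.train_plugin(encoder_tokenizer, decoder_tokenizer,
#                      input_texts=positive_sentences,
#                      negative_samples=negative_sentences)   # negatives are optional
#   generated = model.generate(n=5)                           # 5 samples decoded from the plug-in latent space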
| 6,649 | 35.740331 | 120 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/PPVAE/__init__.py
|
# coding=utf-8
# Copyright 2022 IDEA-CCNL The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch PPVAE model. """
| 665 | 40.625 | 74 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/deltalm/modeling_deltalm.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import math
import random
import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn import CrossEntropyLoss
from typing import List, Optional, Tuple, Union
from transformers.modeling_utils import PreTrainedModel
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
Seq2SeqModelOutput,
Seq2SeqLMOutput,
)
from transformers.file_utils import (
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
import logging
from .configuration_deltalm import DeltalmConfig
logger = logging.getLogger(__name__)
_CHECKPOINT_FOR_DOC = "IDEA-CCNL/Randeng-Deltalm-362M-En-Zn"
_CONFIG_FOR_DOC = "DeltalmConfig"
_TOKENIZER_FOR_DOC = "DeltalmTokenizer"
# Base model docstring
_EXPECTED_OUTPUT_SHAPE = [1, 8, 768]
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("self.model.config.pad_token_id has to be defined.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
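# Small worked example (added; values are illustrative) with pad_token_id=1 and
# decoder_start_token_id=2:
#
#   shift_tokens_right(torch.tensor([[5, 6, 7, 8]]), 1, 2)       -> tensor([[2, 5, 6, 7]])
#   shift_tokens_right(torch.tensor([[5, 6, -100, -100]]), 1, 2) -> tensor([[2, 5, 6, 1]])
#                                                   # a copied -100 ignore-index is replaced by pad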
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(float("-inf")))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
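# Illustrative example (added): for tgt_len=3, float32 and past_key_values_length=0 the mask is
#   [[  0., -inf, -inf],
#    [  0.,   0., -inf],
#    [  0.,   0.,   0.]]
# broadcast to shape (bsz, 1, 3, 3); with past_key_values_length=k, k columns of zeros are
# prepended so cached positions remain visible to every query.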
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
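# Illustrative example (added): a padding mask [[1, 1, 0]] (1 = keep, 0 = pad) becomes an additive
# mask of shape (bsz, 1, tgt_len, 3) whose last column is the most negative representable value
# for `dtype` and whose other columns are 0, so padded keys are suppressed in the attention softmax.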
class DeltalmLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
# Deltalm is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(positions + self.offset)
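# Illustrative note (added): because of `self.offset = 2`, position ids 0..seq_len-1 are looked up
# at embedding rows 2..seq_len+1; e.g. for seq_len=4 and past_key_values_length=0 the rows used
# are [2, 3, 4, 5], and the first two rows of the table stay reserved (the BART-style convention
# mentioned in the constructor comment above).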
class DeltalmAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
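# Shape summary for DeltalmAttention.forward (added for readability):
#   hidden_states:          (bsz, tgt_len, embed_dim)
#   q/k/v after _shape+view: (bsz * num_heads, seq_len, head_dim)
#                            (seq_len = tgt_len for queries, src_len for keys/values,
#                             including any cached past positions)
#   attn_weights:           (bsz * num_heads, tgt_len, src_len)
#   attn_output:            (bsz, tgt_len, embed_dim) after merging heads and applying out_proj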
class DeltalmEncoderLayer(nn.Module):
def __init__(self, config: DeltalmConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = DeltalmAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.FloatTensor,
attention_mask: torch.FloatTensor,
layer_head_mask: torch.FloatTensor,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class DeltalmDecoderLayer(nn.Module):
def __init__(self, config: DeltalmConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = DeltalmAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = DeltalmAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.fc3 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc4 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.ffn_layer_norm = nn.LayerNorm(self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Add another ffn after self-attention to keep the structure same to encoder-layer
residual = hidden_states
hidden_states = self.activation_fn(self.fc3(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc4(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.ffn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (present_key_value,)
return outputs
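# Note (added): unlike a vanilla BART decoder layer, DeltalmDecoderLayer interleaves an extra
# feed-forward block (fc3/fc4 + ffn_layer_norm) between self-attention and cross-attention, so each
# layer runs: self-attn -> FFN -> cross-attn -> FFN. This keeps every decoder layer structurally
# aligned with an encoder layer, consistent with DeltaLM's interleaved decoder design that lets
# pretrained encoder weights initialize both the attention and FFN sublayers.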
class DeltalmPretrainedModel(PreTrainedModel):
config_class = DeltalmConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (DeltalmDecoder, DeltalmEncoder)):
module.gradient_checkpointing = value
class DeltalmDecoder(DeltalmPretrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DeltalmDecoderLayer`]
Args:
config: DeltalmConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: DeltalmConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = DeltalmLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
)
self.layers = nn.ModuleList([DeltalmDecoderLayer(config) for _ in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
    # Note: fairseq additionally applies nn.init.normal_(self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5) to re-initialize the final output-projection weights -- should the same be done here?
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(inputs_embeds.device)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`DeltalmTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
cross-attention on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
next_decoder_cache = () if use_cache else None
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
if attn_mask is not None:
if attn_mask.size()[0] != (len(self.layers)):
raise ValueError(
f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
f" {head_mask.size()[0]}."
)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
cross_attn_layer_head_mask=(
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
DELTALM_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`DeltalmConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
DELTALM_GENERATION_EXAMPLE = r"""
Summarization example:
```python
>>> from transformers import DeltalmTokenizer, DeltalmForConditionalGeneration
>>> model = DeltalmForConditionalGeneration.from_pretrained("facebook/deltalm-large-cnn")
>>> tokenizer = DeltalmTokenizer.from_pretrained("facebook/deltalm-large-cnn")
>>> ARTICLE_TO_SUMMARIZE = (
... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt")
>>> # Generate Summary
>>> summary_ids = model.generate(inputs["input_ids"], num_beams=2, min_length=0, max_length=20)
>>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
'PG&E scheduled the blackouts in response to forecasts for high winds amid dry conditions'
```
Mask filling example:
```python
>>> from transformers import DeltalmTokenizer, DeltalmForConditionalGeneration
>>> tokenizer = DeltalmTokenizer.from_pretrained("facebook/deltalm-base")
>>> model = DeltalmForConditionalGeneration.from_pretrained("facebook/deltalm-base")
>>> TXT = "My friends are <mask> but they eat too many carbs."
>>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"]
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
['not', 'good', 'healthy', 'great', 'very']
```
"""
DELTALM_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`DeltalmTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`DeltalmTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Deltalm uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_deltalm._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
information on the default strategy.
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
of `inputs_embeds`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class DeltalmEncoder(DeltalmPretrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`DeltalmEncoderLayer`].
Args:
config: DeltalmConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: DeltalmConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = DeltalmLearnedPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
)
self.layers = nn.ModuleList([DeltalmEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = nn.LayerNorm(embed_dim)
self.gradient_checkpointing = False
if config.encoder_normalize_before:
self.layer_norm = nn.LayerNorm(embed_dim)
else:
self.layer_norm = None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`DeltalmTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
if head_mask.size()[0] != (len(self.layers)):
raise ValueError(
f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
f" {head_mask.size()[0]}."
)
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
(head_mask[idx] if head_mask is not None else None),
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if self.layer_norm is not None:
hidden_states = self.layer_norm(hidden_states)
# hidden_states = self.layernorm_embedding(hidden_states)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
class DeltalmModel(DeltalmPretrainedModel):
def __init__(self, config: DeltalmConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = DeltalmEncoder(config, self.shared)
self.decoder = DeltalmDecoder(config, self.shared)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(DELTALM_INPUTS_DOCSTRING)
# @add_code_sample_docstrings(
# processor_class=_TOKENIZER_FOR_DOC,
# checkpoint=_CHECKPOINT_FOR_DOC,
# output_type=Seq2SeqModelOutput,
# config_class=_CONFIG_FOR_DOC,
# expected_output=_EXPECTED_OUTPUT_SHAPE,
# )
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.Tensor] = None,
decoder_head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[List[torch.FloatTensor]] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, Seq2SeqModelOutput]:
# different to other models, Deltalm automatically creates decoder_input_ids from
# input_ids if no decoder_input_ids are provided
if decoder_input_ids is None and decoder_inputs_embeds is None:
if input_ids is None:
raise ValueError(
"If no `decoder_input_ids` or `decoder_inputs_embeds` are "
"passed, `input_ids` cannot be `None`. Please pass either "
"`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
)
decoder_input_ids = shift_tokens_right(
input_ids, self.config.pad_token_id, self.config.decoder_start_token_id
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
logger.debug("last_hidden_state.size: %s", decoder_outputs.last_hidden_state)
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"The DELTALM Model with a language modeling head. Can be used for translation.", DELTALM_START_DOCSTRING
)
class DeltalmForConditionalGeneration(DeltalmPretrainedModel):
base_model_prefix = "model"
_keys_to_ignore_on_load_missing = [r"final_logits_bias", r"lm_head.weight"]
def __init__(self, config: DeltalmConfig):
super().__init__(config)
self.model = DeltalmModel(config)
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self._resize_final_logits_bias(new_num_tokens)
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
logger.debug("Debug: coming to _resize_final_logits_bias")
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
@add_start_docstrings_to_model_forward(DELTALM_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(DELTALM_GENERATION_EXAMPLE)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.Tensor] = None,
decoder_head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[List[torch.FloatTensor]] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, Seq2SeqLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
logger.debug("Comming to Generation!")
if labels is not None:
logger.debug("Debug: *************** Before label ***************** ")
logger.debug("Debug: %s", labels.size())
if use_cache:
logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
logger.debug("Debug: ************ After labels ************")
logger.debug("Debug: %s", labels.size())
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
# print(self.lm_head)
logger.debug("Debug: logit_size: %s", lm_logits.size())
# logger.debug("Debug: change logit size: ", lm_logits.view(-1, self.config.vocab_size).size())
# logger.debug("Debug: change label size: ", labels.view(-1).size())
masked_lm_loss = None
if labels is not None:
# logger.debug("Debug: model label_size: %s", labels.size())
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
# label_smoothing = self.config.label_smoothing
# # logger.debug("Debug: label.size: ", )
# if label_smoothing == 0:
# # compute label smoothed loss
# loss_fct = CrossEntropyLoss()
# masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
# else:
# m = torch.nn.LogSoftmax(dim=-1)
# lprobs = m(lm_logits.float())
# # lprobs = m(lm_logits)
# # # torch.set_printoptions(linewidth=200)
# loss_fn = label_smoothed_nll_loss
# masked_lm_loss, _ = loss_fn(lprobs.view(-1, lprobs.size(-1)), labels.view(-1), label_smoothing, self.config.pad_token_id)
if not return_dict:
logger.debug("Debug: not return dict")
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past=None,
attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
# cached cross_attention states don't have to be reordered -> they are always the same
reordered_past += (
tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
)
return reordered_past
class DeltalmDecoderWrapper(DeltalmPretrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = DeltalmDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
class DeltalmForCausalLM(DeltalmPretrainedModel):
def __init__(self, config):
config = copy.deepcopy(config)
config.is_decoder = True
config.is_encoder_decoder = False
super().__init__(config)
self.model = DeltalmDecoderWrapper(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.Tensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`DeltalmTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
Returns:
Example:
```python
>>> from transformers import DeltalmTokenizer, DeltalmForCausalLM
>>> tokenizer = DeltalmTokenizer.from_pretrained("facebook/deltalm-base")
>>> model = DeltalmForCausalLM.from_pretrained("facebook/deltalm-base", add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
>>> list(logits.shape) == expected_shape
True
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.lm_head(outputs[0])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past:
input_ids = input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
"attention_mask": attention_mask,
"past_key_values": past,
"use_cache": use_cache,
}
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
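# ---------------------------------------------------------------------------
# Minimal usage sketch for DeltalmForConditionalGeneration. The helper below is
# illustrative only; the checkpoint id is an assumption borrowed from the
# tokenizer config in this repo and may not match the published hub name, so
# substitute your own path if needed.
# ---------------------------------------------------------------------------
def _example_translate(sentence: str = "Hello world",
                       checkpoint: str = "IDEA-CCNL/Randeng-Deltalm-362M-En-Zn") -> str:
    from fengshen.models.deltalm.tokenizer_deltalm import DeltalmTokenizer
    tokenizer = DeltalmTokenizer.from_pretrained(checkpoint)
    model = DeltalmForConditionalGeneration.from_pretrained(checkpoint)
    inputs = tokenizer(sentence, return_tensors="pt")
    # generation is driven by prepare_inputs_for_generation defined above
    generated = model.generate(**inputs, max_length=64)
    return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]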
| 77,342 | 48.834407 | 150 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/deltalm/configuration_deltalm.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" deltalm model configuration"""
import warnings
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"IDEA/Deltalm": "https://huggingface.co/Deltalm-362M-Zh-En/resolve/main/config.json",
}
class DeltalmConfig(PretrainedConfig):
model_type = "Deltalm"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=250001,
max_position_embeddings=1024,
encoder_layers=12,
encoder_ffn_dim=3072,
encoder_attention_heads=12,
decoder_layers=6,
decoder_ffn_dim=3072,
decoder_attention_heads=12,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
activation_function="gelu",
d_model=1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
use_cache=True,
num_labels=3,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
is_encoder_decoder=True,
decoder_start_token_id=0,
forced_eos_token_id=2,
label_smoothing=0.1,
length_penalty=1.0,
encoder_normalize_before=False,
**kwargs
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.label_smoothing = label_smoothing
self.encoder_normalize_before = encoder_normalize_before
super().__init__(
num_labels=num_labels,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
forced_eos_token_id=forced_eos_token_id,
length_penalty=length_penalty,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
"The config can simply be saved and uploaded again to be fixed."
)
@property
def num_attention_heads(self) -> int:
return self.encoder_attention_heads
@property
def hidden_size(self) -> int:
return self.d_model
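# Minimal sketch of building a config and reading the attributes mapped through
# `attribute_map`. The helper and the sizes below are illustrative only.
def _example_config() -> DeltalmConfig:
    config = DeltalmConfig(encoder_layers=12, decoder_layers=6, d_model=1024)
    assert config.hidden_size == config.d_model
    assert config.num_attention_heads == config.encoder_attention_heads
    return config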
| 3,663 | 33.895238 | 118 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/deltalm/tokenizer_deltalm.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import warnings
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils import logging
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spm.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"IDEA-CCNL/deltalm": "https://huggingface.co/IDEA-CCNL/Randeng-Deltalm-362M-En-Zn/resolve/main/spm.model"}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"IDEA-CCNL/deltalm": 512,
}
logger = logging.get_logger(__name__)
class DeltalmTokenizer(PreTrainedTokenizer):
"""
    Construct a Deltalm tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
extra_ids (`int`, *optional*, defaults to 100):
Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are
accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
indexed from the end of the vocabulary up to beginning ("<extra_id_0>" is the last token in the vocabulary
like in T5 preprocessing see
[here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)).
additional_special_tokens (`List[str]`, *optional*):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
bos_token="<s>",
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
extra_ids=0,
additional_special_tokens=None,
sp_model_kwargs: Optional[Dict[str, Any]] = None,
**kwargs
) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens"
)
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
pad_token=pad_token,
additional_special_tokens=additional_special_tokens,
extra_ids=extra_ids,
sp_model_kwargs=self.sp_model_kwargs,
**kwargs,
)
self.vocab_file = vocab_file
self.offset = 1
self._extra_ids = extra_ids
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)
self.encoder: Dict[int, str] = {
0: self.bos_token,
1: self.pad_token,
2: self.eos_token,
3: self.unk_token,
}
self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
@staticmethod
def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
if pretrained_model_name_or_path in DeltalmTokenizer.max_model_input_sizes:
deprecated_max_model_length = DeltalmTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value.",
FutureWarning,
)
return max_model_length
@property
def vocab_size(self):
return self.sp_model.get_piece_size() # + self._extra_ids
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
# normal case: some special tokens
if token_ids_1 is None:
return ([0] * len(token_ids_0)) + [1]
return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
"""Do not add eos again if user already added it."""
if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added."
)
return token_ids
else:
return token_ids + [self.eos_token_id]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. Deltalm does
        not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
"""
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: `X </s>`
- pair of sequences: `A </s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
token_ids_0 = self._add_eos_if_not_present(token_ids_0)
if token_ids_1 is None:
return token_ids_0
else:
token_ids_1 = self._add_eos_if_not_present(token_ids_1)
return token_ids_0 + token_ids_1
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _tokenize(self, text: str) -> List[str]:
"""Take as input a string and return a list of strings (tokens) for words/sub-words"""
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
if token.startswith("<extra_id_"):
match = re.match(r"<extra_id_(\d+)>", token)
num = int(match.group(1))
return self.vocab_size - num - 1
elif token in self.decoder:
return self.decoder[token]
sp_id = self.sp_model.piece_to_id(token)
return sp_id + self.offset
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
# if index < self.sp_model.get_piece_size():
# token = self.sp_model.IdToPiece(index)
# else:
# token = f"<extra_id_{self.vocab_size - 1 - index}>"
# return token
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
elif index < self.sp_model.get_piece_size() + 4:
token = self.sp_model.IdToPiece(index-self.offset)
else:
token = f"<extra_id_{self.vocab_size - 1 - index}>"
return token
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
current_sub_tokens = []
out_string = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode_pieces(current_sub_tokens) + token + " "
current_sub_tokens = []
else:
current_sub_tokens.append(token)
out_string += self.sp_model.decode_pieces(current_sub_tokens)
return out_string.strip()
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
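# Minimal sketch of the special-token layout documented in
# `build_inputs_with_special_tokens` and `get_special_tokens_mask`. The helper
# below is illustrative only and uses hypothetical token ids.
def _example_special_tokens(tokenizer: DeltalmTokenizer) -> None:
    # a single sequence gets a trailing </s>; a pair becomes "A </s> B </s>"
    pair = tokenizer.build_inputs_with_special_tokens([10, 11], [12, 13])
    assert pair == [10, 11, tokenizer.eos_token_id, 12, 13, tokenizer.eos_token_id]
    # the mask marks only the appended </s> as special
    assert tokenizer.get_special_tokens_mask([10, 11]) == [0, 0, 1]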
| 14,738 | 44.490741 | 167 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/transfo_xl_reasoning/generate.py
|
# encoding=utf-8
from typing import List, Union
import torch
from torch.nn.utils.rnn import pad_sequence
from transformers import T5Tokenizer
from fengshen.models.transfo_xl_reasoning import TransfoXLModel
from fengshen.utils import sample_sequence_batch
def en_to_zh(sentence:str):
en_pun = u",.!?[]()<>\"\"''"
zh_pun = u",。!?【】()《》“”‘’"
table = {
ord(f): ord(t) for f,t in zip(en_pun, zh_pun)
}
return sentence.translate(table)
def deduction_generate(
model:TransfoXLModel,
tokenizer:T5Tokenizer,
input_text:Union[str, List[str]],
device:int=0,
batch_size:int=2,
temperature:float=1.0,
repetition_penalty:float=2.0,
max_out_seq:int=512,
top_p:float=0.6) -> List[str]:
""" Generate with fixed prompt of deduction """
model = model.eval().cuda(device)
if isinstance(input_text, str):
input_text = [input_text]
input_text = [f"<bos>{text},因而" for text in input_text]
input_ids = [torch.tensor(ids[:-1]) for ids in tokenizer(input_text).input_ids]
input_length = [len(ids) for ids in input_ids]
output = []
for index in range(0, len(input_ids), batch_size):
input_ids_batch = pad_sequence(
input_ids[index: index + batch_size], batch_first=True, padding_value=50000,
)
input_ids_length = torch.tensor(input_length[index: index + batch_size])
res_ids_batch, _ = sample_sequence_batch(
model=model,
context_tokens_tensor=input_ids_batch.cuda(device=device),
context_length_tensor=input_ids_length.cuda(device=device),
end_token_id=50000,
top_k=0, top_p=top_p,
max_out_seq=max_out_seq,
repetition_penalty=repetition_penalty,
temperature=temperature
)
res_sentence = [
en_to_zh(tokenizer.decode(ids[length:])).replace(" ", "")
for ids, length in zip(res_ids_batch, input_length[index: index + batch_size])
]
output.extend(res_sentence)
return output
def abduction_generate(
model:TransfoXLModel,
tokenizer:T5Tokenizer,
input_text:Union[str, List[str]],
device:int=0,
batch_size:int=2,
temperature:float=1.0,
repetition_penalty:float=2.0,
top_p:float=0.6) -> List[str]:
""" Generate with fixed prompt of abduction """
model = model.eval().cuda(device)
if isinstance(input_text, str):
input_text = [input_text]
input_text = [f"<bos>之所以{text},是因为" for text in input_text]
input_ids = [torch.tensor(ids[:-1]) for ids in tokenizer(input_text).input_ids]
input_length = [len(ids) for ids in input_ids]
output = []
for index in range(0, len(input_ids), batch_size):
input_ids_batch = pad_sequence(
input_ids[index: index + batch_size], batch_first=True, padding_value=50000,
)
input_ids_length = torch.tensor(input_length[index: index + batch_size])
res_ids_batch, _ = sample_sequence_batch(
model=model,
context_tokens_tensor=input_ids_batch.cuda(device=device),
context_length_tensor=input_ids_length.cuda(device=device),
end_token_id=50000,
top_k=0, top_p=top_p,
max_out_seq=512,
repetition_penalty=repetition_penalty,
temperature=temperature
)
res_sentence = [
en_to_zh(tokenizer.decode(ids[length:])).replace(" ", "")
for ids, length in zip(res_ids_batch, input_length[index: index + batch_size])
]
output.extend(res_sentence)
return output
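# Minimal usage sketch for the two helpers above. The helper, the checkpoint
# path, and the premise below are placeholders; substitute a real reasoning
# checkpoint before running (a CUDA device is required by the helpers).
def _example_reasoning(checkpoint: str = "path/to/transfo-xl-reasoning-checkpoint") -> None:
    tokenizer = T5Tokenizer.from_pretrained(checkpoint)
    model = TransfoXLModel.from_pretrained(checkpoint)
    premise = "天下雨了"  # hypothetical premise ("it is raining")
    print(deduction_generate(model, tokenizer, premise, device=0, batch_size=1))
    print(abduction_generate(model, tokenizer, premise, device=0, batch_size=1))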
| 3,642 | 29.107438 | 90 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/models/transfo_xl_reasoning/__init__.py
|
# encoding=utf-8
from fengshen.models.transfo_xl_denoise.modeling_transfo_xl_denoise import TransfoXLDenoiseModel as TransfoXLModel
from .generate import deduction_generate, abduction_generate
| 192 | 63.333333 | 114 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/tokenizer/tokenizer.py
|
# coding=utf-8
| 15 | 7 | 14 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/tokenizer/sentencepiece/shuffle_corpus.py
|
import sys
import os
from tqdm import tqdm
sys.path.append('../../')
if __name__ == '__main__':
from data.fs_datasets import load_dataset
dataset = load_dataset('wudao_180g', num_proc=100)
print('dataset loaded', flush=True)
shuffle_ds = dataset['train'].shuffle(seed=42, writer_batch_size=1000)
print('dataset shuffled', flush=True)
need_size = len(shuffle_ds)
f = open('shuffle_corpus_{}.txt'.format(need_size), 'w', encoding='utf-8')
for i in tqdm(range(0, need_size)):
f.write(shuffle_ds[i]['text'] + os.linesep)
f.close()
| 574 | 29.263158 | 78 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/transfo_xl_utils.py
|
# encoding=utf-8
import torch, math
import torch.nn.functional as F
def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
# This function has been mostly taken from huggingface conversational ai code at
# https://medium.com/huggingface/how-to-build-a-state-of-the-art-conversational-ai-with-transfer-learning-2d818ac26313
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# convert to 1D
sorted_logits, sorted_indices = torch.sort(logits, dim=-1, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
for i in range(sorted_indices.size()[0]):
indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
logits[i][indices_to_remove] = filter_value
return logits
def enforce_repetition_penalty(lprobs, prev_output_tokens, repetition_penalty=1.5):
"""repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858). """
for previous_token in set(prev_output_tokens):
# if score < 0 then repetition penalty has to multiplied to reduce the previous token probability
if lprobs[previous_token] < 0:
lprobs[previous_token] *= repetition_penalty
else:
lprobs[previous_token] /= repetition_penalty
def switch(next_value, init, is_update):  # keep the ground-truth token until the context is consumed
is_update = is_update.type_as(next_value)
return (1-is_update)*init + is_update*next_value
def get_atten_mask(batch_size, seq_length, memory_length=0):
memory_attention_mask = torch.ones(
(batch_size, 1, seq_length, seq_length + memory_length), dtype=torch.int16)
memory_attention_mask = torch.tril(
torch.triu(memory_attention_mask, 1 - seq_length + memory_length), memory_length)
return memory_attention_mask # [bs, 1, seq_len, seq_len+M]
def get_masks_and_position_ids(data, mem_length=None):
# Extract batch size and sequence length.
batch_size, seq_length = data.size()
# Attention mask (lower triangular).
attention_mask = torch.ones((1, seq_length, seq_length + mem_length), device=data.device)
attention_mask = torch.tril(torch.triu(attention_mask, 1 - seq_length + mem_length), mem_length)
attention_mask = attention_mask.unsqueeze(1)
# Position ids.
position_ids = torch.arange(seq_length, dtype=torch.long,
device=data.device)
position_ids = position_ids.unsqueeze(0).expand_as(data)
return attention_mask, position_ids
def sample_sequence_batch(model, context_tokens_tensor, context_length_tensor, max_out_seq=None, mems=None,
end_token_id=None, repetition_penalty=1.0, temperature=1.0, top_k=0, top_p=0.0):
"""_summary_
Args:
model (_type_): _description_
context_tokens_tensor (Tensor): [bs, seq_len]
context_length_tensor (Tensor): [bs, ]
max_out_seq (_type_, optional): _description_. Defaults to None.
mems (_type_, optional): _description_. Defaults to None.
end_token_id (_type_, optional): _description_. Defaults to None.
repetition_penalty (float, optional): _description_. Defaults to 1.0.
temperature (float, optional): _description_. Defaults to 1.0.
top_k (int, optional): _description_. Defaults to 0.
top_p (float, optional): _description_. Defaults to 0.0.
Returns:
_type_: _description_
"""
model_dtype = next(model.parameters()).dtype
org_context_length = torch.min(context_length_tensor).item()
batch_size = context_tokens_tensor.shape[0]
tokens = context_tokens_tensor[:, :org_context_length]
attention_mask = get_atten_mask(batch_size, org_context_length).cuda(context_tokens_tensor.device).to(model_dtype)
position_ids = torch.arange(org_context_length, dtype=torch.long,
device=tokens.device)
position_ids = position_ids.unsqueeze(0).expand_as(tokens)
counter, mem_length = 0, 0
if mems is None:
mems = []
if end_token_id is None:
end_token_id = 50000
if max_out_seq is None:
max_out_seq = 512
output_tokens_lists = []
# record order
origin_order = torch.tensor(range(batch_size), device=tokens.device)
output_order = []
# record log_probs
log_probs_tensor = torch.tensor([0.0] * batch_size, device=tokens.device)
log_probs_list = []
with torch.no_grad():
# while counter < (max_out_seq - org_context_length):
while counter < max_out_seq:
index = org_context_length + counter
if counter == 0:
output = model.forward(input_ids=tokens, position_ids=position_ids,
attention_mask=attention_mask, hidden_states=mems)
logits, mems = output.logits, output.hidden_states
else:
output = model.forward(input_ids=tokens[:, index - 1: index], position_ids=tokens.new_ones((1, 1)) * (index - 1),
attention_mask=tokens.new_ones(batch_size, 1, 1, mem_length + 1).to(model_dtype), hidden_states=mems)
logits, mems = output.logits, output.hidden_states
logits = logits[:, -1]
logits /= temperature
logits = top_k_logits(logits, top_k=top_k, top_p=top_p)
# if repetition_penalty != 1.0:
# for bz in range(batch_size):
# enforce_repetition_penalty(logits[bz, :], tokens[bz, :], repetition_penalty)
            log_probs = F.softmax(logits, dim=-1)  # probabilities (despite the name), [bs, vocab_size]
# if repetition_penalty != 1.0:
# for bz in range(batch_size):
# enforce_repetition_penalty(
# log_probs[bz, :], tokens[bz, :], repetition_penalty)
prev = torch.multinomial(log_probs, num_samples=1).view(-1)
if index < torch.max(context_length_tensor).item():
prev = switch(
prev, context_tokens_tensor[:, index], context_length_tensor <= index)
for i in range(batch_size):
if index > context_length_tensor[i] and prev[i] != end_token_id:
                    log_probs_tensor[i] += math.log(log_probs[i][prev[i]] + 1e-6)
if prev[i] == end_token_id:
log_probs_tensor[i] /= (context_length_tensor[i].cpu() - index)
# with torch.autocast('cpu'):
stop_idx = prev == end_token_id
if torch.all(stop_idx).item():
output_order.extend(origin_order[stop_idx].tolist())
break
finished = tokens[stop_idx]
output_tokens_lists.extend(finished.detach().cpu().tolist())
log_probs_list.extend(log_probs_tensor[stop_idx].tolist())
output_order.extend(origin_order[stop_idx].tolist())
# continue with non-ending tokens
conti_idx = (prev != end_token_id)
origin_order = origin_order[conti_idx]
tokens, prev = tokens[conti_idx], prev[conti_idx]
context_tokens_tensor = context_tokens_tensor[conti_idx]
context_length_tensor = context_length_tensor[conti_idx]
log_probs_tensor = log_probs_tensor[conti_idx]
batch_size = tokens.shape[0]
for im in range(len(mems)):
mems[im] = mems[im][conti_idx, :, :]
tokens = torch.cat((tokens, prev.view(batch_size, 1)), dim=-1)
counter += 1
output_tokens_lists.extend(tokens.detach().cpu().tolist())
log_probs_list.extend(log_probs_tensor.tolist())
        output_order.extend(origin_order.tolist())
output_tokens_lists = [tokens[:tokens.index(
end_token_id)] if end_token_id in tokens else tokens for tokens in output_tokens_lists]
output_tokens_lists = [tokens for _, tokens in sorted(zip(output_order, output_tokens_lists))]
output_log_porbs = [prob for _, prob in sorted(zip(output_order, log_probs_list))]
return output_tokens_lists, output_log_porbs
def sample_sequence(model, tokens, attention_mask, do_sampling=True,
repetition_penalty=1.0, max_out_seq=None, mems=None, end_token_id=None,
mem_length=0, temperature=1.0, top_k=0, top_p=0.0):
"""_summary_
Args:
model (_type_): _description_
tokens (Tensor): [1, seq_len]
attention_mask (Tensor): [1, 1, seq_len, seq_len]
do_sampling (bool, optional): _description_. Defaults to True.
repetition_penalty (float, optional): _description_. Defaults to 1.0.
max_out_seq (_type_, optional): _description_. Defaults to None.
mems (_type_, optional): _description_. Defaults to None.
end_token (_type_, optional): _description_. Defaults to None.
mem_length (int, optional): _description_. Defaults to 0.
temperature (float, optional): _description_. Defaults to 1.0.
top_k (int, optional): _description_. Defaults to 0.
top_p (float, optional): _description_. Defaults to 0.0.
Returns:
_type_: _description_
"""
counter = 0
if mems is None:
mems = []
if end_token_id is None:
end_token_id = 50000
if max_out_seq is None:
max_out_seq = 512
org_context_length = tokens.size(1)
with torch.no_grad():
# while counter < (max_out_seq - org_context_length):
while counter < max_out_seq:
if counter == 0:
logits, *mems = model(input_ids=tokens, position_ids=None,
attention_mask=attention_mask, mems=mems)
else:
index = org_context_length + counter
logits, *mems = model(input_ids=tokens[:, index - 1: index], position_ids=None,
attention_mask=tokens.new_ones(1, 1, 1, mem_length + 1), mems=mems)
logits = logits[:, -1]
logits /= temperature
if do_sampling:
logits = top_k_logits(logits, top_k=top_k, top_p=top_p)
log_probs = F.softmax(logits, dim=-1)
if repetition_penalty != 1.0:
enforce_repetition_penalty(
log_probs[0, :], tokens[0, :], repetition_penalty)
prev = torch.multinomial(log_probs, num_samples=1)[0]
is_end = (prev == end_token_id)
if is_end:
break
tokens = torch.cat((tokens, prev.view(1, 1)), dim=1)
counter += 1
output_tokens_list = tokens.detach().cpu().tolist()
if end_token_id in output_tokens_list:
output_tokens_list = output_tokens_list[:output_tokens_list.index(
end_token_id)]
return output_tokens_list[0], mems
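# Tiny self-contained demo of `top_k_logits`. The helper below is illustrative
# only; the logit values are made up for illustration.
def _example_top_k_top_p():
    logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
    top2 = top_k_logits(logits.clone(), top_k=2)
    assert torch.isinf(top2[0, 2]) and torch.isinf(top2[0, 3])  # two smallest filtered out
    nucleus = top_k_logits(logits.clone(), top_p=0.9)  # removes the lowest-probability tail
    return top2, nucleus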
| 11,388 | 44.374502 | 147 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/convert_diffusers_to_original_stable_diffusion.py
|
# coding=utf8
# Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint.
# *Only* converts the UNet, VAE, and Text Encoder.
# Does not convert optimizer state or any other thing.
import argparse
import os.path as osp
import torch
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
hf_mid_res_prefix = f"mid_block.resnets.{j}."
sd_mid_res_prefix = f"middle_block.{2*j}."
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
mapping = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
mapping[hf_name] = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
v = v.replace(hf_part, sd_part)
mapping[k] = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
v = v.replace(hf_part, sd_part)
mapping[k] = v
new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
sd_down_prefix = f"encoder.down.{i}.block.{j}."
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
sd_downsample_prefix = f"down.{i}.downsample."
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
sd_upsample_prefix = f"up.{3-i}.upsample."
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
hf_mid_res_prefix = f"mid_block.resnets.{i}."
sd_mid_res_prefix = f"mid.block_{i+1}."
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
mapping = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
v = v.replace(hf_part, sd_part)
mapping[k] = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
v = v.replace(hf_part, sd_part)
mapping[k] = v
new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
weights_to_convert = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"mid.attn_1.{weight_name}.weight" in k:
print(f"Reshaping {k} for SD format")
new_state_dict[k] = reshape_weight_for_sd(v)
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
# pretty much a no-op
# here we need transform it to support
def convert_text_enc_state_dict(text_enc_dict):
return text_enc_dict
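# Typical invocation sketch, assuming `--model_path` points at a
# diffusers-format directory (e.g. one produced by
# `StableDiffusionPipeline.save_pretrained`); paths below are placeholders:
#
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model \
#       --checkpoint_path ./stable-diffusion.ckpt \
#       --half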
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default='', type=str, required=True, help="Path to the model to convert.")
parser.add_argument("--checkpoint_path", default='', type=str, required=True, help="Path to the output model.")
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
# Convert the UNet model
unet_state_dict = torch.load(unet_path, map_location="cpu")
unet_state_dict = convert_unet_state_dict(unet_state_dict)
unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
vae_state_dict = torch.load(vae_path, map_location="cpu")
vae_state_dict = convert_vae_state_dict(vae_state_dict)
vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
# Convert the text encoder model
text_enc_dict = torch.load(text_enc_path, map_location="cpu")
text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
state_dict = {k: v.half() for k, v in state_dict.items()}
state_dict = {"state_dict": state_dict}
torch.save(state_dict, args.checkpoint_path)
| 8,919 | 36.79661 | 115 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/convert_py_to_npy.py
|
import argparse
import torch
import glob
import os
import numpy as np
class MMapIndexDataset():
def __init__(self, datapath):
self.idxfp = np.load(datapath + '.npy', mmap_mode='r')
self.binfp = np.memmap(datapath + '.bin', dtype='long', mode='r')
def __len__(self):
return self.idxfp.shape[0]
def __getitem__(self, idx):
return self.binfp[self.idxfp[idx, 0]:self.idxfp[idx, 1]]
def convert_py_to_npy(input_tensor, bin_out, idx_out):
idx = torch.empty(len(input_tensor), 2, dtype=torch.long)
start = 0
for i, input in enumerate(input_tensor):
idx[i] = torch.tensor([start, start + len(input)])
start += len(input)
np.save(idx_out, idx)
binfp = np.memmap(bin_out, dtype='long', mode='w+', shape=(start))
start = 0
for i, input in enumerate(input_tensor):
for j, idx in enumerate(input):
binfp[start + j] = idx
start += len(input)
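# Minimal round-trip sketch (illustrative only; file names are placeholders):
#   tensors = [torch.tensor([1, 2, 3]), torch.tensor([4, 5])]
#   convert_py_to_npy(tensors, 'demo.bin', 'demo')   # writes demo.bin + demo.npy
#   ds = MMapIndexDataset('demo')
#   assert list(ds[1]) == [4, 5]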
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Text infilling.")
parser.add_argument('--data_path', type=str,
default='/cognitive_comp/gaoxinyu/data/wudao')
args = parser.parse_args()
process_key = [
'incorrect_input_ids_list',
'label_ids_list',
'target_ids_list',
]
if os.path.exists(args.data_path):
print(f'''Loading data from {args.data_path}''')
data_dict = torch.load(args.data_path)
for k in process_key:
bin_out = ('_' + k + '.bin').join(args.data_path.rsplit('.pt', 1))
idx_out = ('_' + k).join(args.data_path.rsplit('.pt', 1))
convert_py_to_npy(data_dict[k], bin_out, idx_out)
else:
print(
f'Please create the synthetic datafile {args.data_path} with create_synthetic_data.py.')
| 1,823 | 32.163636 | 100 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/utils.py
|
# coding=utf-8
import jieba
import torch
def jieba_tokenize(str):
return jieba.lcut(str)
_UCODE_RANGES = (
("\u3400", "\u4db5"), # CJK Unified Ideographs Extension A, release 3.0
("\u4e00", "\u9fa5"), # CJK Unified Ideographs, release 1.1
("\u9fa6", "\u9fbb"), # CJK Unified Ideographs, release 4.1
("\uf900", "\ufa2d"), # CJK Compatibility Ideographs, release 1.1
("\ufa30", "\ufa6a"), # CJK Compatibility Ideographs, release 3.2
("\ufa70", "\ufad9"), # CJK Compatibility Ideographs, release 4.1
("\u20000", "\u2a6d6"), # (UTF16) CJK Unified Ideographs Extension B, release 3.1
("\u2f800", "\u2fa1d"), # (UTF16) CJK Compatibility Supplement, release 3.1
("\uff00", "\uffef"), # Full width ASCII, full width of English punctuation,
# half width Katakana, half wide half width kana, Korean alphabet
("\u2e80", "\u2eff"), # CJK Radicals Supplement
("\u3000", "\u303f"), # CJK punctuation mark
("\u31c0", "\u31ef"), # CJK stroke
("\u2f00", "\u2fdf"), # Kangxi Radicals
("\u2ff0", "\u2fff"), # Chinese character structure
("\u3100", "\u312f"), # Phonetic symbols
("\u31a0", "\u31bf"), # Phonetic symbols (Taiwanese and Hakka expansion)
("\ufe10", "\ufe1f"),
("\ufe30", "\ufe4f"),
("\u2600", "\u26ff"),
("\u2700", "\u27bf"),
("\u3200", "\u32ff"),
("\u3300", "\u33ff"),
)
def is_chinese_char(uchar):
for start, end in _UCODE_RANGES:
if start <= uchar <= end:
return True
return False
def chinese_char_tokenize(line):
line = line.strip()
line_in_chars = ""
for char in line:
if is_chinese_char(char):
line_in_chars += " "
line_in_chars += char
line_in_chars += " "
else:
line_in_chars += char
return line_in_chars
# s = '中国的首都是哪里?1,2,3d回答'
# print(chinese_char_tokenize(s))
def report_memory(name):
"""Simple GPU memory report."""
mega_bytes = 1024.0 * 1024.0
string = name + ' memory (MB)'
string += ' | allocated: {}'.format(
torch.cuda.memory_allocated() / mega_bytes)
string += ' | max allocated: {}'.format(
torch.cuda.max_memory_allocated() / mega_bytes)
string += ' | reserved: {}'.format(
torch.cuda.memory_reserved() / mega_bytes)
string += ' | max reserved: {}'.format(
torch.cuda.max_memory_reserved() / mega_bytes)
print(string)
| 2,431 | 31.426667 | 86 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/make_delta.py
|
"""
Code is modified from https://github.com/lm-sys/FastChat/blob/main/fastchat/model/make_delta.py.
Make the delta weights by subtracting base weights.
Usage:
python3 -m make_delta --base ~/model_weights/llama-13b --target ~/model_weights/ziya-13b --delta ~/model_weights/ziya-13b-delta
"""
import argparse
import torch
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaForCausalLM
def make_delta(base_model_path, target_model_path, delta_path):
print(f"Loading the base model from {base_model_path}")
base = LlamaForCausalLM.from_pretrained(
base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
)
print(f"Loading the target model from {target_model_path}")
target = LlamaForCausalLM.from_pretrained(
target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
)
target_tokenizer = AutoTokenizer.from_pretrained(
target_model_path, use_fast=False
)
print("Calculating the delta")
for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"):
assert name in base.state_dict()
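        # embed_tokens / lm_head / rotary inv_freq are not subtracted below, so the delta
        # keeps the full target weights for them and apply_delta can copy them over verbatim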
if "embed_tokens" in name or "lm_head.weight" in name or "self_attn.rotary_emb.inv_freq" in name:
continue
try:
param.data -= base.state_dict()[name]
        except Exception as exc:
            raise ValueError(f"Failed to subtract base weights for parameter {name}") from exc
print(f"Saving the delta to {delta_path}")
if args.hub_repo_id:
kwargs = {"push_to_hub": True, "repo_id": args.hub_repo_id}
else:
kwargs = {}
target.save_pretrained(delta_path, max_shard_size="1GB", **kwargs)
target_tokenizer.save_pretrained(delta_path, **kwargs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--base-model-path", type=str, required=True)
parser.add_argument("--target-model-path", type=str, required=True)
parser.add_argument("--delta-path", type=str, required=True)
parser.add_argument("--hub-repo-id", type=str)
args = parser.parse_args()
make_delta(args.base_model_path, args.target_model_path, args.delta_path)
| 2,132 | 35.775862 | 127 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/universal_checkpoint.py
|
from pytorch_lightning.callbacks import ModelCheckpoint
import os
class UniversalCheckpoint(ModelCheckpoint):
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('universal checkpoint callback')
parser.add_argument('--monitor', default='step', type=str)
parser.add_argument('--mode', default='max', type=str)
parser.add_argument('--save_ckpt_path', default='./ckpt/', type=str)
parser.add_argument('--load_ckpt_path', default='./ckpt/', type=str)
parser.add_argument(
'--filename', default='model-ep{epoch:02d}-st{step:d}', type=str)
parser.add_argument('--save_last', action='store_true', default=False)
parser.add_argument('--save_top_k', default=10, type=float)
parser.add_argument('--every_n_train_steps', default=None, type=float)
parser.add_argument('--save_weights_only', action='store_true', default=False)
parser.add_argument('--every_n_epochs', default=None, type=int)
parser.add_argument('--save_on_train_epoch_end', action='store_true', default=None)
return parent_args
def __init__(self, args):
super().__init__(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.save_ckpt_path,
filename=args.filename,
save_last=args.save_last,
every_n_epochs=args.every_n_epochs,
save_on_train_epoch_end=args.save_on_train_epoch_end)
        # For compatibility: if the checkpoint dir does not exist, drop the argument, otherwise loading would fail
if args.load_ckpt_path is not None and \
not os.path.exists(args.load_ckpt_path):
print('--------warning no checkpoint found--------, remove args')
args.load_ckpt_path = None
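# Illustrative wiring (a sketch only, assuming the usual argparse + pytorch_lightning Trainer setup):
#   parser = argparse.ArgumentParser()
#   parser = UniversalCheckpoint.add_argparse_args(parser)
#   args = parser.parse_args()
#   trainer = Trainer(callbacks=[UniversalCheckpoint(args)])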
| 2,013 | 46.952381 | 91 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/huggingface_spider.py
|
from huggingface_hub import HfApi, login, ModelFilter
login()
api = HfApi()
fs_filter = ModelFilter(author='IDEA-CCNL')
models = api.list_models(filter=fs_filter, sort='downloads', direction=-1)
downloads = 0
likes = 0
for model in models:
downloads += model.downloads
likes += model.likes
    created_at = api.list_repo_commits(model.modelId)[-1].created_at
    print(f"{model.modelId}:{model.downloads}:{model.likes}:{created_at}")
print(downloads, likes)
| 452 | 33.846154 | 74 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/convert_tf_checkpoint_to_pytorch.py
|
"""Convert ALBERT checkpoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
# from models.transformers.modeling_albert_bright import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
import logging
logging.basicConfig(level=logging.INFO)
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
# Initialise PyTorch model
config = BertConfig.from_pretrained(bert_config_file)
# print("Building PyTorch model from configuration: {}".format(str(config)))
model = BertForPreTraining(config)
# Load weights from tf checkpoint
load_tf_weights_in_bert(model, config, tf_checkpoint_path)
# Save pytorch-model
print("Save PyTorch model to {}".format(pytorch_dump_path))
torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--tf_checkpoint_path",
default = None,
type = str,
required = True,
help = "Path to the TensorFlow checkpoint path.")
parser.add_argument("--bert_config_file",
default = None,
type = str,
required = True,
help = "The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture.")
parser.add_argument("--pytorch_dump_path",
default = None,
type = str,
required = True,
help = "Path to the output PyTorch model.")
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,args.bert_config_file,
args.pytorch_dump_path)
'''
# google
python convert_albert_tf_checkpoint_to_pytorch.py \
--tf_checkpoint_path=./prev_trained_model/albert_large_zh \
--bert_config_file=./prev_trained_model/albert_large_zh/config.json \
--pytorch_dump_path=./prev_trained_model/albert_large_zh/pytorch_model.bin
# bright
from model.modeling_albert_bright import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
python convert_albert_tf_checkpoint_to_pytorch.py \
--tf_checkpoint_path=./prev_trained_model/albert_base_bright \
--bert_config_file=./prev_trained_model/albert_base_bright/config.json \
--pytorch_dump_path=./prev_trained_model/albert_base_bright/pytorch_model.bin
'''
| 2,736 | 42.444444 | 118 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/__init__.py
|
from .universal_checkpoint import UniversalCheckpoint
from .utils import chinese_char_tokenize
from .transfo_xl_utils import top_k_logits, sample_sequence_batch, sample_sequence, get_masks_and_position_ids
__all__ = ['UniversalCheckpoint', 'chinese_char_tokenize', 'top_k_logits', 'sample_sequence_batch', 'sample_sequence', 'get_masks_and_position_ids']
| 355 | 70.2 | 148 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/apply_delta.py
|
"""
Code is modified from https://github.com/lm-sys/FastChat/blob/main/fastchat/model/apply_delta.py
Apply the delta weights on top of a base model.
Usage:
python3 -m fastchat.model.apply_delta --base ~/model_weights/llama-7b --target ~/model_weights/vicuna-7b --delta lmsys/vicuna-7b-delta-v1.1
"""
import argparse
import gc
import glob
import json
import os
import shutil
import tempfile
from huggingface_hub import snapshot_download
import torch
from torch import nn
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, LlamaForCausalLM
GB = 1 << 30
def split_files(model_path, tmp_path, split_size):
if not os.path.exists(model_path):
model_path = snapshot_download(repo_id=model_path)
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
file_pattern = os.path.join(model_path, "pytorch_model-*.bin")
files = glob.glob(file_pattern)
part = 0
try:
for file_path in tqdm(files):
state_dict = torch.load(file_path)
new_state_dict = {}
current_size = 0
for name, param in state_dict.items():
param_size = param.numel() * param.element_size()
if current_size + param_size > split_size:
new_file_name = f"pytorch_model-{part}.bin"
new_file_path = os.path.join(tmp_path, new_file_name)
torch.save(new_state_dict, new_file_path)
current_size = 0
new_state_dict = None
gc.collect()
new_state_dict = {}
part += 1
new_state_dict[name] = param
current_size += param_size
new_file_name = f"pytorch_model-{part}.bin"
new_file_path = os.path.join(tmp_path, new_file_name)
torch.save(new_state_dict, new_file_path)
new_state_dict = None
gc.collect()
new_state_dict = {}
part += 1
except Exception as e:
print(f"An error occurred during split_files: {e}")
shutil.rmtree(tmp_path)
raise
def apply_delta_low_cpu_mem(base_model_path, target_model_path, delta_path):
delta_tokenizer = AutoTokenizer.from_pretrained(delta_path, use_fast=False)
delta_config = AutoConfig.from_pretrained(delta_path)
if os.path.exists(target_model_path):
shutil.rmtree(target_model_path)
os.makedirs(target_model_path)
split_size = 4 * GB
with tempfile.TemporaryDirectory() as tmp_base_path, tempfile.TemporaryDirectory() as tmp_delta_path:
print(f"Split files for the base model to {tmp_base_path}")
split_files(base_model_path, tmp_base_path, split_size)
print(f"Split files for the delta weights to {tmp_delta_path}")
split_files(delta_path, tmp_delta_path, split_size)
base_pattern = os.path.join(tmp_base_path, "pytorch_model-*.bin")
base_files = glob.glob(base_pattern)
delta_pattern = os.path.join(tmp_delta_path, "pytorch_model-*.bin")
delta_files = glob.glob(delta_pattern)
delta_state_dict = torch.load(delta_files[0])
print("Applying the delta")
weight_map = {}
total_size = 0
for i, base_file in tqdm(enumerate(base_files)):
state_dict = torch.load(base_file)
file_name = f"pytorch_model-{i}.bin"
for name, param in state_dict.items():
if name not in delta_state_dict:
for delta_file in delta_files:
delta_state_dict = torch.load(delta_file)
gc.collect()
if name in delta_state_dict:
break
if "embed_tokens" in name or "lm_head.weight" in name or "self_attn.rotary_emb.inv_freq" in name:
state_dict[name] = delta_state_dict[name]
else:
state_dict[name] += delta_state_dict[name]
weight_map[name] = file_name
total_size += param.numel() * param.element_size()
gc.collect()
torch.save(state_dict, os.path.join(target_model_path, file_name))
with open(
os.path.join(target_model_path, "pytorch_model.bin.index.json"), "w"
) as f:
json.dump(
{"weight_map": weight_map, "metadata": {"total_size": total_size}}, f
)
print(f"Saving the target model to {target_model_path}")
delta_tokenizer.save_pretrained(target_model_path)
delta_config.save_pretrained(target_model_path)
def apply_delta(base_model_path, target_model_path, delta_path):
print(f"Loading the delta weights from {delta_path}")
delta_tokenizer = AutoTokenizer.from_pretrained(delta_path, use_fast=False)
delta = LlamaForCausalLM.from_pretrained(
delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
)
print(f"Loading the base model from {base_model_path}")
base = LlamaForCausalLM.from_pretrained(
base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
)
print("Applying the delta")
for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"):
assert name in base.state_dict()
# param.data += delta.state_dict()[name]
if "embed_tokens" in name or "lm_head.weight" in name or "self_attn.rotary_emb.inv_freq" in name:
continue
else:
param.data += base.state_dict()[name]
# base.model.embed_tokens = delta.model.embed_tokens
# base.lm_head.weight = delta.lm_head.weight
print(f"Saving the target model to {target_model_path}")
delta.save_pretrained(target_model_path)
delta_tokenizer.save_pretrained(target_model_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--base-model-path", type=str, required=True)
parser.add_argument("--target-model-path", type=str, required=True)
parser.add_argument("--delta-path", type=str, required=True)
parser.add_argument(
"--low-cpu-mem",
action="store_true",
help="Lower the cpu memory usage. This will split large files and use "
"disk as swap to reduce the memory usage below 10GB.",
)
args = parser.parse_args()
if args.low_cpu_mem:
apply_delta_low_cpu_mem(
args.base_model_path, args.target_model_path, args.delta_path
)
else:
apply_delta(args.base_model_path, args.target_model_path, args.delta_path)
| 6,611 | 36.782857 | 139 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/llama_convert/fs_to_hf.py
|
from transformers.models.llama import LlamaForCausalLM, LlamaTokenizer, LlamaConfig
from fengshen.models.megatron import mpu
from fengshen.models.llama.modeling_llama import LlamaForCausalLM as FengshenLlama
from fengshen.models.llama.configuration_llama import LlamaConfig as FengshenConfig
import argparse
import torch
from tqdm import tqdm
def convert_config(fs_config: FengshenConfig):
hf_config = LlamaConfig(
vocab_size=fs_config.vocab_size,
hidden_size=fs_config.hidden_size,
intermediate_size=fs_config.intermediate_size,
num_hidden_layers=fs_config.num_hidden_layers,
num_attention_heads=fs_config.num_attention_heads,
hidden_act="silu",
max_position_embeddings=2048,
initializer_range=0.02,
rms_norm_eps=fs_config.rms_norm_epsilon,
use_cache=True,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
tie_word_embeddings=False,
)
return hf_config
def merge_data(module):
if hasattr(module, "merge"):
module.merge()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert fengshen llama to hugginface format.")
parser.add_argument(
"--input_path",
help="Location of LLaMA weights, which contains tokenizer.model and model folders",
)
parser.add_argument(
"--output_path",
help="Location to write fengshen mode",
)
args = parser.parse_args()
mpu.set_model_parallel_world_size(1)
mpu.set_model_parallel_rank(0)
fs_model = FengshenLlama.from_pretrained(args.input_path)
fs_model.apply(merge_data)
tokenizer = LlamaTokenizer.from_pretrained(args.input_path)
fs_config = fs_model.config
hf_config = convert_config(fs_config)
hf_model = LlamaForCausalLM(hf_config)
# embed_in
hf_model.model.embed_tokens.load_state_dict(
{"weight": fs_model.llama.embed_in.word_embeddings.weight}
)
# embed_out
hf_model.lm_head.load_state_dict({"weight": fs_model.embed_out.final_linear.weight})
# final_norm
hf_model.model.norm.load_state_dict({"weight": fs_model.llama.final_layer_norm.scale})
num_heads = hf_config.num_attention_heads
hidden_size = hf_config.hidden_size
dims_per_head = hidden_size // num_heads
# layer
for layer_i in tqdm(range(fs_config.num_hidden_layers)):
hf_layer = hf_model.model.layers[layer_i]
fs_layer = fs_model.llama.layers[layer_i]
state_dict = {}
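        # fengshen fuses the attention weights per head; the fused matrix can be viewed as
        # [num_heads, 3 (q/k/v), dims_per_head, hidden_size] and is split back apart below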
sharded_qkv = fs_layer.attention.query_key_value.weight.view(num_heads, 3, dims_per_head, hidden_size)
q, k, v = sharded_qkv.chunk(3, dim=1)
state_dict["self_attn.q_proj.weight"] = q.reshape(num_heads * dims_per_head, hidden_size)
state_dict["self_attn.k_proj.weight"] = k.reshape(num_heads * dims_per_head, hidden_size)
state_dict["self_attn.v_proj.weight"] = v.reshape(num_heads * dims_per_head, hidden_size)
state_dict["self_attn.o_proj.weight"] = fs_layer.attention.dense.weight
# Just take one
state_dict["self_attn.rotary_emb.inv_freq"] = fs_layer.attention.rotary_emb.inv_freq
## average layernorm stats over mp ranks
state_dict["input_layernorm.weight"] = fs_layer.input_layernorm.scale
state_dict["post_attention_layernorm.weight"] = fs_layer.post_attention_layernorm.scale
## mlp params
state_dict["mlp.gate_proj.weight"] = fs_layer.mlp.w1.weight
state_dict["mlp.up_proj.weight"] = fs_layer.mlp.w3.weight
state_dict["mlp.down_proj.weight"] = fs_layer.mlp.w2.weight
## load state_dict into layer
hf_layer.load_state_dict(state_dict)
hf_model.save_pretrained(args.output_path)
tokenizer.save_pretrained(args.output_path)
| 3,811 | 36.009709 | 110 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/llama_convert/convert_fs_llama_tp.py
|
import argparse
import os
import json
import torch
from fengshen.models.llama.configuration_llama import LlamaConfig
__HF_NORM_PREFIX__ = "llama.final_layer_norm"
__HF_EMBED_IN_KEY__ = "llama.embed_in.word_embeddings.weight"
__HF_EMBED_OUT_KEY__ = "embed_out.final_linear.weight"
__HF_LAYER_PREFIX__ = "llama.layers"
__WEIGHT_MAP_FILE__ = "pytorch_model.bin.index.json"
def make_output_dir(path, parallel_size):
"""
root_dir
|--- part_0
|___ part_1
"""
    os.makedirs(path, exist_ok=True)
    for i in range(parallel_size):
        os.makedirs(os.path.join(path, f"part_{i}"), exist_ok=True)
def save_splits(input_dir, output_dir, helper, config):
weight_map_file = os.path.join(input_dir, __WEIGHT_MAP_FILE__)
with open(weight_map_file, 'r') as fp:
weight_map = json.load(fp)
for rank, sd in enumerate(helper.sequential_cache):
output_part_dir = os.path.join(output_dir, f"part_{rank}")
with open(os.path.join(output_part_dir, __WEIGHT_MAP_FILE__), 'w') as f:
json.dump(weight_map, f)
config.save_pretrained(output_part_dir)
for file_name, keys in helper.revert_weight_map.items():
output_sd = {}
for k in keys:
if k in sd:
output_sd[k] = sd[k]
torch.save(output_sd, os.path.join(output_part_dir, file_name))
def get_loaders(root_dir, weight_map):
loaders_map = {}
weight_map_with_loader = {}
revert_weight_map = {}
for k, v in weight_map['weight_map'].items():
if v in revert_weight_map:
revert_weight_map[v].append(k)
else:
revert_weight_map[v] = [k]
        # load the corresponding state_dict file
ld = torch.load(os.path.join(root_dir, v), map_location='cpu')
loaders_map[v] = ld
weight_map_with_loader[k] = loaders_map[v]
return weight_map_with_loader, revert_weight_map, loaders_map.values()
class Helper:
def __init__(
self, args):
self.num_output_shards = args.model_parallel_size
self.sequential_cache = [{} for _ in range(args.model_parallel_size)]
self.init_weight_map(args)
def init_weight_map(self, args):
weight_map_file = os.path.join(args.input_dir, __WEIGHT_MAP_FILE__)
with open(weight_map_file, 'r') as fp:
weight_map = json.load(fp)
self.weight_map, self.revert_weight_map, self.loaders = get_loaders(
args.input_dir, weight_map)
def del_loaded(self, key: str):
# Remove from memory as we go along
if key in self.weight_map:
del self.weight_map[key][key]
def shard(self, x, dim):
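        # Split x into num_output_shards equal slices along `dim`; for the dims used in this
        # script (0 and 1) the result has a leading model-parallel axis, so the sharded
        # tensor indexed with [rank] is that rank's slice.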
x_shape = list(x.shape)
assert x_shape[dim] % self.num_output_shards == 0
new_x_shape = (
x_shape[:dim]
+ [self.num_output_shards, x_shape[dim] // self.num_output_shards]
+ x_shape[dim + 1:]
)
x = x.view(*new_x_shape)
return torch.movedim(x, 0, dim)
def add_sequential_shard(self, dictionary):
for k, v in dictionary.items():
for rank in range(self.num_output_shards):
# self.sequential_cache[rank][f"sequential.{layer_i}.{k}"] = v[rank].clone()
self.sequential_cache[rank][k] = v[rank].clone()
def add_sequential_duplicates(self, dictionary):
for k, v in dictionary.items():
for rank in range(self.num_output_shards):
# self.sequential_cache[rank][f"sequential.{layer_i}.{k}"] = v.clone()
self.sequential_cache[rank][k] = v.clone()
def add_sequential(self, dictionary, rank):
for k, v in dictionary.items():
# self.sequential_cache[rank][f"sequential.{layer_i}.{k}"] = v.clone()
self.sequential_cache[rank][k] = v.clone()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="covert hf model to hf model with mp"
)
parser.add_argument(
"--input_dir",
type=str,
help="Path to hf model dir",
)
parser.add_argument(
"--output_dir",
type=str,
help="Path to hf model dir",
)
parser.add_argument(
"--model_parallel_size",
type=int,
default=1,
help="Path to hf model dir",
)
args = parser.parse_args()
make_output_dir(args.output_dir, args.model_parallel_size)
helper = Helper(args)
config = LlamaConfig.from_pretrained(args.input_dir)
num_output_shards = args.model_parallel_size
num_heads_per_output_shard = config.num_attention_heads // num_output_shards
dims_per_head = config.hidden_size // config.num_attention_heads
for k, v in helper.weight_map.items():
# embed in and out
if k in [__HF_EMBED_IN_KEY__, __HF_EMBED_OUT_KEY__]:
helper.add_sequential_shard({k: helper.shard(v[k], dim=0)})
elif k.startswith(__HF_NORM_PREFIX__):
helper.add_sequential_duplicates({k: v[k]})
elif k.startswith(__HF_LAYER_PREFIX__):
# QKV weight and bias
if k.find("query_key_value") != -1:
output_shape = [num_output_shards, num_heads_per_output_shard *
3 * dims_per_head] + list(v[k].shape[1:])
sharded = v[k].view(output_shape)
for out_rank in range(num_output_shards):
helper.add_sequential({k: sharded[out_rank]}, out_rank)
# rotary emb
elif k.find("rotary_emb.inv_freq") != -1:
helper.add_sequential_duplicates({k: v[k]})
# layer_norm
elif k.find("layernorm") != -1:
helper.add_sequential_duplicates({k: v[k]})
# linear
elif k.find("dense") != -1 or k.find("mlp") != -1:
                # split along dim 1 (the input dimension)
if k.find("w2") != -1 or k.find("attention") != -1:
if k.find('weight') != -1:
shard = helper.shard(v[k], dim=1)
helper.add_sequential_shard({k: shard})
                    # biases are not split; duplicate them on every rank
else:
helper.add_sequential_duplicates({k: v[k]})
                # split along dim 0 (the output dimension)
else:
shard = helper.shard(v[k], dim=0)
helper.add_sequential_shard({k: shard})
else:
print(f"WARNING: unexcept key {k}")
else:
print(f"WARNING: unexcept key {k}")
helper.del_loaded(k)
save_splits(args.input_dir, args.output_dir, helper, config)
| 6,643 | 34.72043 | 92 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/llama_convert/hf_to_fs.py
|
from transformers.models.llama import LlamaForCausalLM, LlamaTokenizer, LlamaConfig
from fengshen.models.megatron import mpu
from fengshen.models.llama.modeling_llama import LlamaForCausalLM as FengshenLlama
from fengshen.models.llama.configuration_llama import LlamaConfig as FengshenConfig
import argparse
import torch
from tqdm import tqdm
def convert_config(hf_config: LlamaConfig):
fs_config = FengshenConfig(
vocab_size=hf_config.vocab_size,
hidden_size=hf_config.hidden_size,
num_hidden_layers=hf_config.num_hidden_layers,
num_attention_heads=hf_config.num_attention_heads,
intermediate_size=hf_config.intermediate_size,
hidden_act=hf_config.hidden_act,
rotary_pct=1,
rotary_emb_base=10000,
max_position_embeddings=hf_config.max_position_embeddings,
initializer_range=hf_config.initializer_range,
rms_norm_epsilon=hf_config.rms_norm_eps,
torch_dtype=hf_config.torch_dtype,
use_cache=True,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
tie_word_embeddings=False,
use_parallel_residual=False,
)
fs_config.llama_mlp_multiple_of = 256
assert fs_config.intermediate_size % fs_config.llama_mlp_multiple_of == 0, \
f"{fs_config.intermediate_size} % {fs_config.llama_mlp_multiple_of}"
fs_config.init_method = "small_init"
fs_config.hidden_dropout = 0
fs_config.output_layer_init_method = "wang_init"
fs_config.pos_emb = "rotary"
fs_config.norm = "rmsnorm"
fs_config.gpt_j_residual = False
fs_config.gpt_j_tied = False
fs_config.apply_query_key_layer_scaling = False
fs_config.attention_softmax_in_fp32 = False
fs_config.scaled_masked_softmax_fusion = True
fs_config.scaled_upper_triang_masked_softmax_fusion = False
fs_config.bias_gelu_fusion = False
fs_config.attention_dropout = 0
fs_config.output_layer_parallelism = "column"
fs_config.eod_mask_loss = False
fs_config.bias_dropout_fusion = False
fs_config.attention_config = [[["flash"], "all"]]
fs_config.mlp_type = "llama"
fs_config.use_bias_in_attn_linear = False
fs_config.lora = False
return fs_config
def find_closest_multiple(current_num, n):
if current_num % n == 0:
return current_num
closest_multiple = ((current_num // n) + 1) * n
return closest_multiple
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert raw LLaMA checkpoints to fengshen format.")
parser.add_argument(
"--input_path",
help="Location of LLaMA weights, which contains tokenizer.model and model folders",
)
parser.add_argument(
"--output_path",
help="Location to write fengshen mode",
)
parser.add_argument(
"--multiplier",
        default=1,
help="Make embedding_size an integer multiple of multiplier",
)
args = parser.parse_args()
hf_model = LlamaForCausalLM.from_pretrained(args.input_path)
tokenizer = LlamaTokenizer.from_pretrained(args.input_path, use_fast=False)
hf_config = hf_model.config
fs_config = convert_config(hf_config)
# used for FengshenLlama initialized
mpu.set_model_parallel_world_size(1)
mpu.set_model_parallel_rank(0)
mpu.set_init_params_in_cuda(False)
fs_model = FengshenLlama(fs_config)
# embed_in
fs_model.llama.embed_in.load_state_dict(
{"word_embeddings.weight": hf_model.model.embed_tokens.weight}
)
# embed_out
fs_model.embed_out.load_state_dict(
{"final_linear.weight": hf_model.lm_head.weight}
)
fs_model.resize_token_embeddings(find_closest_multiple(fs_model.config.vocab_size, int(args.multiplier)))
# final_norm
fs_model.llama.final_layer_norm.load_state_dict(
{"scale": hf_model.model.norm.weight}
)
num_heads = hf_config.num_attention_heads
hidden_size = hf_config.hidden_size
dims_per_head = hidden_size // num_heads
def permute_rotary(w):
assert w.shape == (num_heads, dims_per_head, hidden_size)
return w.view(num_heads, dims_per_head // 2, 2, hidden_size) \
.transpose(1, 2) \
.reshape(num_heads, dims_per_head, hidden_size)
# layer
for layer_i in tqdm(range(fs_config.num_hidden_layers)):
fs_layer = fs_model.llama.layers[layer_i]
hf_layer = hf_model.model.layers[layer_i]
# Linear
attn_wo = hf_layer.self_attn.o_proj.weight
mlp_w1 = hf_layer.mlp.gate_proj.weight
mlp_w2 = hf_layer.mlp.down_proj.weight
mlp_w3 = hf_layer.mlp.up_proj.weight
# Attention
w_q = hf_layer.self_attn.q_proj.weight.view(num_heads, dims_per_head, hidden_size)
w_k = hf_layer.self_attn.k_proj.weight.view(num_heads, dims_per_head, hidden_size)
w_v = hf_layer.self_attn.v_proj.weight.view(num_heads, dims_per_head, hidden_size)
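        # Stack per head on dim=1 -> [num_heads, 3, dims_per_head, hidden_size], then flatten
        # so each head's q/k/v rows sit contiguously, matching the fused query_key_value layout.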
sharded_qkv = torch.stack([w_q, w_k, w_v], dim=1)
sharded_qkv = sharded_qkv.view(num_heads*dims_per_head*3, hidden_size)
# Duplicated
input_layernorm = hf_layer.input_layernorm.weight
post_attention_layernorm = hf_layer.post_attention_layernorm.weight
rotary_inv = hf_layer.self_attn.rotary_emb.inv_freq
fs_layer.load_state_dict({
"attention.query_key_value.weight": sharded_qkv,
# Sharded layers
"attention.dense.weight": attn_wo.clone(),
"mlp.w1.weight": mlp_w1.clone(),
"mlp.w2.weight": mlp_w2.clone(),
"mlp.w3.weight": mlp_w3.clone(),
# Duplicated layers
"input_layernorm.scale": input_layernorm,
"post_attention_layernorm.scale": post_attention_layernorm,
"attention.rotary_emb.inv_freq": rotary_inv,
})
fs_model.save_pretrained(args.output_path)
tokenizer.save_pretrained(args.output_path)
| 5,921 | 38.218543 | 109 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/llama_convert/merge_lt_mp_to_hf.py
|
import argparse
import os
import json
import torch
from fengshen_inner.models.llama.configuration_llama import LlamaConfig as FengshenConfig
from fengshen_inner.models.llama.modeling_llama import LlamaForCausalLM as FengshenLlama
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
from fengshen_inner.models.megatron import mpu
from glob import glob
import copy
from tqdm import tqdm
__FS_FINAL_NORM_KEY__ = "llama.final_layer_norm.scale"
__FS_EMBED_IN_KEY__ = "llama.embed_in.word_embeddings.weight"
__FS_EMBED_OUT_KEY__ = "embed_out.final_linear.weight"
__FS_LAYER_PREFIX__ = "llama.layers"
def convert_config(fs_config: FengshenConfig):
hf_config = LlamaConfig(
vocab_size=fs_config.vocab_size,
hidden_size=fs_config.hidden_size,
intermediate_size=fs_config.intermediate_size,
num_hidden_layers=fs_config.num_hidden_layers,
num_attention_heads=fs_config.num_attention_heads,
hidden_act="silu",
max_position_embeddings=2048,
initializer_range=0.02,
rms_norm_eps=fs_config.rms_norm_epsilon,
use_cache=True,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
tie_word_embeddings=False,
torch_dtype=fs_config.torch_dtype,
)
return hf_config
def merge_data(module):
if hasattr(module, "merge"):
module.merge()
def get_loaders(root_path, mp_size, fs_config):
fs_model = FengshenLlama(fs_config)
loaders = []
for mp in range(mp_size):
file = os.path.join(root_path, f"mp_rank_{mp:02}_model_states.pt")
print(f"loading {file}")
sd = torch.load(file, map_location='cpu')
new_sd = {}
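        # strip whatever wrapper prefix the lightning checkpoint added (e.g. "module.model.")
        # so that keys line up with FengshenLlama's own state_dict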
for k, v in sd["module"].items():
try:
anchor = k.index('llama')
            except ValueError:
if 'embed_out' in k:
anchor = k.index('embed_out')
else:
anchor = 0
rep = k[:anchor]
new_sd[k.replace(rep, "")] = v
# new_sd[k.replace("module.model.", "")] = v
fs_model.load_state_dict(new_sd)
fs_model.apply(merge_data)
loaders.append(copy.deepcopy(fs_model.state_dict()))
return loaders
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="covert hf model to gxy hf model with mp"
)
    # pretrained config in fengshen (fs) format
parser.add_argument(
"--pretrained_model_path",
type=str,
help="Path to hf pretrained model dir",
)
    # model parallel size
parser.add_argument(
"--model_parallel_size",
type=int,
default=1,
help="Path to hf model dir",
)
    # lightning checkpoint dir (not the same as --pretrained_model_path)
parser.add_argument(
"--ckpt_path",
type=str,
help="Path to lightning checkpoint dir",
)
parser.add_argument(
"--output_path",
type=str,
help="Path to hf model dir",
)
args = parser.parse_args()
mpu.set_model_parallel_world_size(args.model_parallel_size)
#mpu.set_init_params_in_cuda(False)
mpu.set_model_parallel_rank(0)
fs_config = FengshenConfig.from_pretrained(args.pretrained_model_path)
loaded_tp_ranks = get_loaders(args.ckpt_path, args.model_parallel_size, fs_config)
config = convert_config(fs_config)
tokenizer = LlamaTokenizer.from_pretrained(args.pretrained_model_path)
num_output_shards = 1
num_heads_per_output_shard = config.num_attention_heads
dims_per_head = config.hidden_size // config.num_attention_heads
hf_model = LlamaForCausalLM(config)
num_heads = config.num_attention_heads
hidden_size = config.hidden_size
dims_per_head = hidden_size // num_heads
mp_partitions = args.model_parallel_size
# EMBED_IN
hf_model.model.embed_tokens.load_state_dict(
{"weight": torch.cat([t[__FS_EMBED_IN_KEY__] for t in loaded_tp_ranks], dim=0)})
# EMBED_OUT
hf_model.lm_head.load_state_dict(
{"weight": torch.cat([t[__FS_EMBED_OUT_KEY__] for t in loaded_tp_ranks], dim=0)})
# FINAL_LAYER_NORM
hf_model.model.norm.load_state_dict(
{"weight": (sum([t[__FS_FINAL_NORM_KEY__] for t in loaded_tp_ranks])) / mp_partitions})
# layer
for layer_i in tqdm(range(config.num_hidden_layers)):
hf_layer = hf_model.model.layers[layer_i]
state_dict = {}
sharded_qkv = torch.cat(
[t[f"{__FS_LAYER_PREFIX__}.{layer_i}.attention.query_key_value.weight"] for t in loaded_tp_ranks], dim=0)
sharded_qkv = sharded_qkv.view(num_heads, 3, dims_per_head, hidden_size)
q, k, v = sharded_qkv.chunk(3, dim=1)
state_dict["self_attn.q_proj.weight"] = q.reshape(num_heads * dims_per_head, hidden_size)
state_dict["self_attn.k_proj.weight"] = k.reshape(num_heads * dims_per_head, hidden_size)
state_dict["self_attn.v_proj.weight"] = v.reshape(num_heads * dims_per_head, hidden_size)
state_dict["self_attn.o_proj.weight"] = torch.cat(
[t[f"{__FS_LAYER_PREFIX__}.{layer_i}.attention.dense.weight"] for t in loaded_tp_ranks], dim=1)
state_dict["self_attn.rotary_emb.inv_freq"] = \
loaded_tp_ranks[0][f"{__FS_LAYER_PREFIX__}.{layer_i}.attention.rotary_emb.inv_freq"]
# average layernorm stats over mp ranks
state_dict["input_layernorm.weight"] = (sum(
[t[f"{__FS_LAYER_PREFIX__}.{layer_i}.input_layernorm.scale"] for t in loaded_tp_ranks])) / mp_partitions
state_dict["post_attention_layernorm.weight"] = (sum(
[t[f"{__FS_LAYER_PREFIX__}.{layer_i}.post_attention_layernorm.scale"] for t in loaded_tp_ranks])) / mp_partitions
# mlp params
state_dict["mlp.gate_proj.weight"] = torch.cat(
[t[f"{__FS_LAYER_PREFIX__}.{layer_i}.mlp.w1.weight"] for t in loaded_tp_ranks], dim=0)
state_dict["mlp.up_proj.weight"] = torch.cat(
[t[f"{__FS_LAYER_PREFIX__}.{layer_i}.mlp.w3.weight"] for t in loaded_tp_ranks], dim=0)
state_dict["mlp.down_proj.weight"] = torch.cat(
[t[f"{__FS_LAYER_PREFIX__}.{layer_i}.mlp.w2.weight"] for t in loaded_tp_ranks], dim=1)
# load state_dict into layer
hf_layer.load_state_dict(state_dict)
hf_model.save_pretrained(args.output_path)
tokenizer.save_pretrained(args.output_path)
| 6,335 | 37.4 | 125 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/utils/llama_convert/fs_merge_weight.py
|
from fengshen_inner.models.llama.modeling_llama import LlamaForCausalLM as FengshenLlama
from fengshen_inner.models.llama.configuration_llama import LlamaConfig as FengshenConfig
from fengshen_inner.models.megatron import mpu
import argparse
def merge_data(module):
if hasattr(module, "merge"):
module.merge()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="merge lora weight")
parser.add_argument(
"--input_path",
help="lora model",
)
parser.add_argument(
"--output_path",
help="Location to write fengshen mode",
)
args = parser.parse_args()
mpu.set_model_parallel_world_size(1)
mpu.set_model_parallel_rank(0)
model = FengshenLlama.from_pretrained(args.input_path)
model.apply(merge_data)
model.config.lora = False
model.save_pretrained(args.output_path)
| 911 | 28.419355 | 89 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/data/__init__.py
|
# coding=utf-8
| 15 | 7 | 14 |
py
|