"""
Head tuning for sequence classification: vanilla fine-tuning plus prefix-, prompt- (p-tuning),
and adapter-based tuning heads for BERT, RoBERTa, DeBERTa, GPT-2, and BART.
"""
from typing import Optional, List, Union, Tuple

import torch
import torch.nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss

from transformers import BertModel, BertPreTrainedModel
from transformers import RobertaModel, RobertaPreTrainedModel
from transformers.models.deberta.modeling_deberta import DebertaModel, DebertaPreTrainedModel, ContextPooler, StableDropout
from transformers.models.gpt2.modeling_gpt2 import GPT2Model, GPT2PreTrainedModel
from transformers.models.bart.modeling_bart import BartPretrainedModel, BartClassificationHead, BartModel
from transformers.models.roberta.modeling_roberta import RobertaClassificationHead
from transformers.models.bart.configuration_bart import BartConfig
from transformers.modeling_outputs import SequenceClassifierOutput, Seq2SeqSequenceClassifierOutput, SequenceClassifierOutputWithPast

from models.basic_modules.prefix_encoder import PrefixEncoder
from models.basic_modules.adapter import BertAdaModel, RobertaAdaModel, init_adapter
from tools.model_utils.parameter_freeze import ParameterFreeze
from tools.runner_utils.log_util import logging

logger = logging.getLogger(__name__)

freezer = ParameterFreeze()
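
# A minimal usage sketch for the prefix-tuning head (illustrative only): it assumes the
# config carries the extra fields read by these classes and by PrefixEncoder, e.g.
# `pre_seq_len`, `use_freezing`, and (depending on the PrefixEncoder implementation)
# `prefix_projection` / `prefix_hidden_size`.
#
#   from transformers import AutoConfig, AutoTokenizer
#   config = AutoConfig.from_pretrained("bert-base-uncased", num_labels=2)
#   config.pre_seq_len = 16
#   config.use_freezing = True
#   model = BertPrefixForSequenceClassification.from_pretrained("bert-base-uncased", config=config)
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   batch = tokenizer("a short example", return_tensors="pt")
#   outputs = model(**batch, labels=torch.tensor([0]))
#   outputs.loss.backward()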
# ========= BERT =========
# Vanilla Fine-tuning For BERT | |
class BertForSequenceClassification(BertPreTrainedModel): | |
def __init__(self, config): | |
super().__init__(config) | |
self.num_labels = config.num_labels | |
self.config = config | |
self.bert = BertModel(config) | |
if self.config.use_freezing: | |
self.bert = freezer.freeze_lm(self.bert) | |
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) | |
self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels) | |
self.init_weights() | |
def freeze_backbone(self, use_freezing: bool=True): | |
if use_freezing: | |
self.bert = freezer.freeze_lm(self.bert) | |
else: | |
self.bert = freezer.unfreeze_lm(self.bert) | |
def forward( | |
self, | |
input_ids=None, | |
attention_mask=None, | |
token_type_ids=None, | |
position_ids=None, | |
head_mask=None, | |
inputs_embeds=None, | |
labels=None, | |
output_attentions=None, | |
output_hidden_states=None, | |
return_dict=None, | |
): | |
r""" | |
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): | |
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., | |
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), | |
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). | |
""" | |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | |
# print("input_ids.shape=", input_ids.shape) # e.g., [8, 128] | |
# print("attention_mask.shape=", attention_mask.shape) # e.g., [8, 128] | |
# print("token_type_ids.shape=", token_type_ids.shape) # e.g., [8, 128] | |
outputs = self.bert( | |
input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
) | |
pooled_output = outputs[1] | |
pooled_output = self.dropout(pooled_output) | |
logits = self.classifier(pooled_output) | |
loss = None | |
if labels is not None: | |
if self.config.problem_type is None: | |
if self.num_labels == 1: | |
self.config.problem_type = "regression" | |
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): | |
self.config.problem_type = "single_label_classification" | |
else: | |
self.config.problem_type = "multi_label_classification" | |
if self.config.problem_type == "regression": | |
loss_fct = MSELoss() | |
if self.num_labels == 1: | |
loss = loss_fct(logits.squeeze(), labels.squeeze()) | |
else: | |
loss = loss_fct(logits, labels) | |
elif self.config.problem_type == "single_label_classification": | |
loss_fct = CrossEntropyLoss() | |
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) | |
elif self.config.problem_type == "multi_label_classification": | |
loss_fct = BCEWithLogitsLoss() | |
loss = loss_fct(logits, labels) | |
if not return_dict: | |
output = (logits,) + outputs[2:] | |
return ((loss,) + output) if loss is not None else output | |
return SequenceClassifierOutput( | |
loss=loss, | |
logits=logits, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
# Prefix-tuning For BERT | |
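# Prefix-tuning prepends trainable key/value vectors (produced by PrefixEncoder) to every
# attention layer via `past_key_values`; the backbone can stay frozen so that only the
# prefix encoder and the classification head are updated.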
class BertPrefixForSequenceClassification(BertPreTrainedModel): | |
def __init__(self, config): | |
super().__init__(config) | |
self.num_labels = config.num_labels | |
self.config = config | |
self.bert = BertModel(config) | |
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) | |
self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels) | |
# for param in self.bert.parameters(): | |
# param.requires_grad = False | |
if self.config.use_freezing: | |
self.bert = freezer.freeze_lm(self.bert) | |
self.pre_seq_len = config.pre_seq_len | |
self.n_layer = config.num_hidden_layers | |
self.n_head = config.num_attention_heads | |
self.n_embd = config.hidden_size // config.num_attention_heads | |
self.prefix_tokens = torch.arange(self.pre_seq_len).long() | |
self.prefix_encoder = PrefixEncoder(config) | |
bert_param = 0 | |
for name, param in self.bert.named_parameters(): | |
bert_param += param.numel() | |
all_param = 0 | |
for name, param in self.named_parameters(): | |
all_param += param.numel() | |
total_param = all_param - bert_param | |
        logger.info("Number of new (non-backbone) parameters: {}".format(total_param))
def freeze_backbone(self, use_freezing: bool=True): | |
if use_freezing: | |
self.bert = freezer.freeze_lm(self.bert) | |
else: | |
self.bert = freezer.unfreeze_lm(self.bert) | |
def get_prompt(self, batch_size): | |
prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(self.bert.device) | |
past_key_values = self.prefix_encoder(prefix_tokens) | |
# bsz, seqlen, _ = past_key_values.shape | |
past_key_values = past_key_values.view( | |
batch_size, | |
self.pre_seq_len, | |
self.n_layer * 2, | |
self.n_head, | |
self.n_embd | |
) | |
past_key_values = self.dropout(past_key_values) | |
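        # Rearrange into the past_key_values layout the backbone expects: a tuple of
        # n_layer tensors, each of shape (2, batch_size, n_head, pre_seq_len, n_embd).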
past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(2) | |
return past_key_values | |
def forward( | |
self, | |
input_ids=None, | |
attention_mask=None, | |
token_type_ids=None, | |
position_ids=None, | |
head_mask=None, | |
inputs_embeds=None, | |
labels=None, | |
output_attentions=None, | |
output_hidden_states=None, | |
return_dict=None, | |
): | |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | |
# print("input_ids.shape=", input_ids.shape) # e.g., [8, 128] | |
# print("attention_mask.shape=", attention_mask.shape) # e.g., [8, 128] | |
# print("token_type_ids.shape=", token_type_ids.shape) # e.g., [8, 128] | |
batch_size = input_ids.shape[0] | |
past_key_values = self.get_prompt(batch_size=batch_size) | |
prefix_attention_mask = torch.ones(batch_size, self.pre_seq_len).to(self.bert.device) | |
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) | |
        if position_ids is None:
            position_ids = torch.arange(input_ids.shape[-1], device=self.bert.device).unsqueeze(0).expand(batch_size, -1)
outputs = self.bert( | |
input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
past_key_values=past_key_values, | |
) | |
pooled_output = outputs[1] | |
pooled_output = self.dropout(pooled_output) | |
logits = self.classifier(pooled_output) | |
loss = None | |
if labels is not None: | |
if self.config.problem_type is None: | |
if self.num_labels == 1: | |
self.config.problem_type = "regression" | |
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): | |
self.config.problem_type = "single_label_classification" | |
else: | |
self.config.problem_type = "multi_label_classification" | |
if self.config.problem_type == "regression": | |
loss_fct = MSELoss() | |
if self.num_labels == 1: | |
loss = loss_fct(logits.squeeze(), labels.squeeze()) | |
else: | |
loss = loss_fct(logits, labels) | |
elif self.config.problem_type == "single_label_classification": | |
loss_fct = CrossEntropyLoss() | |
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) | |
elif self.config.problem_type == "multi_label_classification": | |
loss_fct = BCEWithLogitsLoss() | |
loss = loss_fct(logits, labels) | |
if not return_dict: | |
output = (logits,) + outputs[2:] | |
return ((loss,) + output) if loss is not None else output | |
return SequenceClassifierOutput( | |
loss=loss, | |
logits=logits, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
# Prompt-tuning For BERT | |
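# Prompt-tuning (p-tuning) learns a small embedding table of `pre_seq_len` soft prompt
# vectors that are concatenated with the input embeddings; the prompt positions are
# stripped from the output again before pooling and classification.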
class BertPtuningForSequenceClassification(BertPreTrainedModel): | |
def __init__(self, config): | |
super().__init__(config) | |
self.num_labels = config.num_labels | |
self.bert = BertModel(config) | |
self.embeddings = self.bert.embeddings | |
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) | |
self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels) | |
# for param in self.bert.parameters(): | |
# param.requires_grad = False | |
if self.config.use_freezing: | |
self.bert = freezer.freeze_lm(self.bert) | |
self.pre_seq_len = config.pre_seq_len | |
self.n_layer = config.num_hidden_layers | |
self.n_head = config.num_attention_heads | |
self.n_embd = config.hidden_size // config.num_attention_heads | |
self.prefix_tokens = torch.arange(self.pre_seq_len).long() | |
self.prefix_encoder = torch.nn.Embedding(self.pre_seq_len, config.hidden_size) | |
def freeze_backbone(self, use_freezing: bool=True): | |
if use_freezing: | |
self.bert = freezer.freeze_lm(self.bert) | |
else: | |
self.bert = freezer.unfreeze_lm(self.bert) | |
def get_prompt(self, batch_size): | |
prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(self.bert.device) | |
prompts = self.prefix_encoder(prefix_tokens) | |
return prompts | |
def forward( | |
self, | |
input_ids=None, | |
attention_mask=None, | |
token_type_ids=None, | |
position_ids=None, | |
head_mask=None, | |
inputs_embeds=None, | |
labels=None, | |
output_attentions=None, | |
output_hidden_states=None, | |
return_dict=None, | |
): | |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | |
batch_size = input_ids.shape[0] | |
raw_embedding = self.embeddings( | |
input_ids=input_ids, | |
position_ids=position_ids, | |
token_type_ids=token_type_ids, | |
) | |
prompts = self.get_prompt(batch_size=batch_size) | |
inputs_embeds = torch.cat((prompts, raw_embedding), dim=1) | |
prefix_attention_mask = torch.ones(batch_size, self.pre_seq_len).to(self.bert.device) | |
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) | |
outputs = self.bert( | |
# input_ids, | |
attention_mask=attention_mask, | |
# token_type_ids=token_type_ids, | |
# position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
# past_key_values=past_key_values, | |
) | |
# pooled_output = outputs[1] | |
sequence_output = outputs[0] | |
sequence_output = sequence_output[:, self.pre_seq_len:, :].contiguous() | |
first_token_tensor = sequence_output[:, 0] | |
pooled_output = self.bert.pooler.dense(first_token_tensor) | |
pooled_output = self.bert.pooler.activation(pooled_output) | |
pooled_output = self.dropout(pooled_output) | |
logits = self.classifier(pooled_output) | |
loss = None | |
if labels is not None: | |
if self.config.problem_type is None: | |
if self.num_labels == 1: | |
self.config.problem_type = "regression" | |
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): | |
self.config.problem_type = "single_label_classification" | |
else: | |
self.config.problem_type = "multi_label_classification" | |
if self.config.problem_type == "regression": | |
loss_fct = MSELoss() | |
if self.num_labels == 1: | |
loss = loss_fct(logits.squeeze(), labels.squeeze()) | |
else: | |
loss = loss_fct(logits, labels) | |
elif self.config.problem_type == "single_label_classification": | |
loss_fct = CrossEntropyLoss() | |
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) | |
elif self.config.problem_type == "multi_label_classification": | |
loss_fct = BCEWithLogitsLoss() | |
loss = loss_fct(logits, labels) | |
if not return_dict: | |
output = (logits,) + outputs[2:] | |
return ((loss,) + output) if loss is not None else output | |
return SequenceClassifierOutput( | |
loss=loss, | |
logits=logits, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
# Adapter-tuning For BERT | |
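# Adapter-tuning keeps the backbone frozen and trains only the adapter modules supplied
# by `BertAdaModel` (plus the classification head); see `freezer.freeze_lm_component`.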
class BertAdapterForSequenceClassification(BertPreTrainedModel): | |
def __init__(self, config): | |
super().__init__(config) | |
self.num_labels = config.num_labels | |
self.bert = BertAdaModel(config) | |
self.embeddings = self.bert.embeddings | |
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) | |
self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels) | |
# for param in self.bert.parameters(): | |
# param.requires_grad = False | |
if self.config.use_freezing: | |
self.bert = freezer.freeze_lm_component(self.bert, "adapter") | |
def freeze_backbone(self, use_freezing: bool=True): | |
if use_freezing: | |
self.bert = freezer.freeze_lm_component(self.bert, "adapter") | |
else: | |
self.bert = freezer.unfreeze_lm(self.bert) | |
def forward( | |
self, | |
input_ids=None, | |
attention_mask=None, | |
token_type_ids=None, | |
position_ids=None, | |
head_mask=None, | |
inputs_embeds=None, | |
labels=None, | |
output_attentions=None, | |
output_hidden_states=None, | |
return_dict=None, | |
): | |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | |
batch_size = input_ids.shape[0] | |
inputs_embeds = self.embeddings( | |
input_ids=input_ids, | |
position_ids=position_ids, | |
token_type_ids=token_type_ids, | |
) | |
outputs = self.bert( | |
# input_ids, | |
attention_mask=attention_mask, | |
# token_type_ids=token_type_ids, | |
# position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
# past_key_values=past_key_values, | |
) | |
# pooled_output = outputs[1] | |
sequence_output = outputs[0] | |
# sequence_output = sequence_output[:, self.pre_seq_len:, :].contiguous() | |
first_token_tensor = sequence_output[:, 0] | |
pooled_output = self.bert.pooler.dense(first_token_tensor) | |
pooled_output = self.bert.pooler.activation(pooled_output) | |
pooled_output = self.dropout(pooled_output) | |
logits = self.classifier(pooled_output) | |
loss = None | |
if labels is not None: | |
if self.config.problem_type is None: | |
if self.num_labels == 1: | |
self.config.problem_type = "regression" | |
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): | |
self.config.problem_type = "single_label_classification" | |
else: | |
self.config.problem_type = "multi_label_classification" | |
if self.config.problem_type == "regression": | |
loss_fct = MSELoss() | |
if self.num_labels == 1: | |
loss = loss_fct(logits.squeeze(), labels.squeeze()) | |
else: | |
loss = loss_fct(logits, labels) | |
elif self.config.problem_type == "single_label_classification": | |
loss_fct = CrossEntropyLoss() | |
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) | |
elif self.config.problem_type == "multi_label_classification": | |
loss_fct = BCEWithLogitsLoss() | |
loss = loss_fct(logits, labels) | |
if not return_dict: | |
output = (logits,) + outputs[2:] | |
return ((loss,) + output) if loss is not None else output | |
return SequenceClassifierOutput( | |
loss=loss, | |
logits=logits, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
# ========= RoBERTa ========= | |
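# The RoBERTa variants mirror the BERT classes above but classify with
# `RobertaClassificationHead`, which takes the full sequence output and pools the
# <s> token (RoBERTa's [CLS] equivalent) internally.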
# Vanilla Fine-tuning For RoBERTa | |
class RobertaForSequenceClassification(RobertaPreTrainedModel): | |
def __init__(self, config): | |
super().__init__(config) | |
self.num_labels = config.num_labels | |
self.config = config | |
self.roberta = RobertaModel(config) | |
if self.config.use_freezing: | |
self.roberta = freezer.freeze_lm(self.roberta) | |
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) | |
# self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels) | |
self.classifier = RobertaClassificationHead(config) | |
self.init_weights() | |
def freeze_backbone(self, use_freezing: bool=True): | |
if use_freezing: | |
self.roberta = freezer.freeze_lm(self.roberta) | |
else: | |
self.roberta = freezer.unfreeze_lm(self.roberta) | |
def forward( | |
self, | |
input_ids=None, | |
attention_mask=None, | |
token_type_ids=None, | |
position_ids=None, | |
head_mask=None, | |
inputs_embeds=None, | |
labels=None, | |
output_attentions=None, | |
output_hidden_states=None, | |
return_dict=None, | |
): | |
r""" | |
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): | |
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., | |
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), | |
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). | |
""" | |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | |
outputs = self.roberta( | |
input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
) | |
        sequence_output = outputs[0]
        # RobertaClassificationHead pools the <s> token (and applies its own dropout)
        # internally, so it is fed the full sequence output rather than the pooled output.
        logits = self.classifier(sequence_output)
loss = None | |
if labels is not None: | |
if self.config.problem_type is None: | |
if self.num_labels == 1: | |
self.config.problem_type = "regression" | |
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): | |
self.config.problem_type = "single_label_classification" | |
else: | |
self.config.problem_type = "multi_label_classification" | |
if self.config.problem_type == "regression": | |
loss_fct = MSELoss() | |
if self.num_labels == 1: | |
loss = loss_fct(logits.squeeze(), labels.squeeze()) | |
else: | |
loss = loss_fct(logits, labels) | |
elif self.config.problem_type == "single_label_classification": | |
loss_fct = CrossEntropyLoss() | |
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) | |
elif self.config.problem_type == "multi_label_classification": | |
loss_fct = BCEWithLogitsLoss() | |
loss = loss_fct(logits, labels) | |
if not return_dict: | |
output = (logits,) + outputs[2:] | |
return ((loss,) + output) if loss is not None else output | |
return SequenceClassifierOutput( | |
loss=loss, | |
logits=logits, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
# Prefix-tuning For RoBERTa | |
class RobertaPrefixForSequenceClassification(RobertaPreTrainedModel): | |
def __init__(self, config): | |
super().__init__(config) | |
self.num_labels = config.num_labels | |
self.config = config | |
self.roberta = RobertaModel(config) | |
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) | |
# self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels) | |
self.classifier = RobertaClassificationHead(config) | |
self.init_weights() | |
for param in self.roberta.parameters(): | |
param.requires_grad = False | |
self.pre_seq_len = config.pre_seq_len | |
self.n_layer = config.num_hidden_layers | |
self.n_head = config.num_attention_heads | |
self.n_embd = config.hidden_size // config.num_attention_heads | |
self.prefix_tokens = torch.arange(self.pre_seq_len).long() | |
self.prefix_encoder = PrefixEncoder(config) | |
bert_param = 0 | |
for name, param in self.roberta.named_parameters(): | |
bert_param += param.numel() | |
all_param = 0 | |
for name, param in self.named_parameters(): | |
all_param += param.numel() | |
total_param = all_param - bert_param | |
        logger.info("Number of new (non-backbone) parameters: {}".format(total_param))
def freeze_backbone(self, use_freezing: bool=True): | |
if use_freezing: | |
self.roberta = freezer.freeze_lm(self.roberta) | |
else: | |
self.roberta = freezer.unfreeze_lm(self.roberta) | |
def get_prompt(self, batch_size): | |
prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(self.roberta.device) | |
# print("prefix_tokens.shape=", prefix_tokens.shape) | |
past_key_values = self.prefix_encoder(prefix_tokens) | |
# print("past_key_values[0].shape=", past_key_values[0].shape) | |
past_key_values = past_key_values.view( | |
batch_size, | |
self.pre_seq_len, | |
self.n_layer * 2, | |
self.n_head, | |
self.n_embd | |
) | |
# print("past_key_values[0].shape=", past_key_values[0].shape) | |
past_key_values = self.dropout(past_key_values) | |
past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(2) | |
# print("past_key_values[0].shape=", past_key_values[0].shape) | |
return past_key_values | |
def forward( | |
self, | |
input_ids=None, | |
attention_mask=None, | |
token_type_ids=None, | |
position_ids=None, | |
head_mask=None, | |
inputs_embeds=None, | |
labels=None, | |
output_attentions=None, | |
output_hidden_states=None, | |
return_dict=None, | |
): | |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | |
batch_size = input_ids.shape[0] | |
past_key_values = self.get_prompt(batch_size=batch_size) | |
prefix_attention_mask = torch.ones(batch_size, self.pre_seq_len).to(self.roberta.device) | |
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) | |
        if position_ids is None:
            position_ids = torch.arange(input_ids.shape[-1], device=self.roberta.device).unsqueeze(0).expand(batch_size, -1)
outputs = self.roberta( | |
input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
past_key_values=past_key_values, | |
) | |
        sequence_output = outputs[0]
        # As above, RobertaClassificationHead expects the full sequence output.
        logits = self.classifier(sequence_output)
loss = None | |
if labels is not None: | |
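            # Map negative label ids (e.g., -1 padding) to 0 so they index a valid class in the loss below.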
labels = (labels < 0).long().to(labels.device) + labels | |
if self.config.problem_type is None: | |
if self.num_labels == 1: | |
self.config.problem_type = "regression" | |
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): | |
self.config.problem_type = "single_label_classification" | |
else: | |
self.config.problem_type = "multi_label_classification" | |
if self.config.problem_type == "regression": | |
loss_fct = MSELoss() | |
if self.num_labels == 1: | |
loss = loss_fct(logits.squeeze(), labels.squeeze()) | |
else: | |
loss = loss_fct(logits, labels) | |
elif self.config.problem_type == "single_label_classification": | |
loss_fct = CrossEntropyLoss() | |
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) | |
elif self.config.problem_type == "multi_label_classification": | |
loss_fct = BCEWithLogitsLoss() | |
loss = loss_fct(logits, labels) | |
if not return_dict: | |
output = (logits,) + outputs[2:] | |
return ((loss,) + output) if loss is not None else output | |
return SequenceClassifierOutput( | |
loss=loss, | |
logits=logits, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
# Prompt-tuning For RoBERTa | |
class RobertaPtuningForSequenceClassification(RobertaPreTrainedModel): | |
def __init__(self, config): | |
super().__init__(config) | |
self.num_labels = config.num_labels | |
self.roberta = RobertaModel(config) | |
self.embeddings = self.roberta.embeddings | |
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) | |
# self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels) | |
self.classifier = RobertaClassificationHead(config) | |
# for param in self.roberta.parameters(): | |
# param.requires_grad = False | |
if self.config.use_freezing: | |
self.roberta = freezer.freeze_lm(self.roberta) | |
self.pre_seq_len = config.pre_seq_len | |
self.n_layer = config.num_hidden_layers | |
self.n_head = config.num_attention_heads | |
self.n_embd = config.hidden_size // config.num_attention_heads | |
self.prefix_tokens = torch.arange(self.pre_seq_len).long() | |
self.prefix_encoder = torch.nn.Embedding(self.pre_seq_len, config.hidden_size) | |
def freeze_backbone(self, use_freezing: bool=True): | |
if use_freezing: | |
self.roberta = freezer.freeze_lm(self.roberta) | |
else: | |
self.roberta = freezer.unfreeze_lm(self.roberta) | |
def get_prompt(self, batch_size): | |
prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(self.roberta.device) | |
prompts = self.prefix_encoder(prefix_tokens) | |
return prompts | |
def forward( | |
self, | |
input_ids=None, | |
attention_mask=None, | |
token_type_ids=None, | |
position_ids=None, | |
head_mask=None, | |
inputs_embeds=None, | |
labels=None, | |
output_attentions=None, | |
output_hidden_states=None, | |
return_dict=None, | |
): | |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | |
batch_size = input_ids.shape[0] | |
raw_embedding = self.embeddings( | |
input_ids=input_ids, | |
position_ids=position_ids, | |
token_type_ids=token_type_ids, | |
) | |
prompts = self.get_prompt(batch_size=batch_size) | |
inputs_embeds = torch.cat((prompts, raw_embedding), dim=1) | |
# print(input_embeddings.shape) | |
# exit() | |
prefix_attention_mask = torch.ones(batch_size, self.pre_seq_len).to(self.roberta.device) | |
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) | |
outputs = self.roberta( | |
# input_ids, | |
attention_mask=attention_mask, | |
# token_type_ids=token_type_ids, | |
# position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
# past_key_values=past_key_values, | |
) | |
        sequence_output = outputs[0]
        # Drop the prompt positions so the head only sees the original token sequence.
        sequence_output = sequence_output[:, self.pre_seq_len:, :].contiguous()
        # RobertaClassificationHead pools the <s> token internally, so it is fed the sequence output.
        logits = self.classifier(sequence_output)
loss = None | |
if labels is not None: | |
if self.config.problem_type is None: | |
if self.num_labels == 1: | |
self.config.problem_type = "regression" | |
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): | |
self.config.problem_type = "single_label_classification" | |
else: | |
self.config.problem_type = "multi_label_classification" | |
if self.config.problem_type == "regression": | |
loss_fct = MSELoss() | |
if self.num_labels == 1: | |
loss = loss_fct(logits.squeeze(), labels.squeeze()) | |
else: | |
loss = loss_fct(logits, labels) | |
elif self.config.problem_type == "single_label_classification": | |
loss_fct = CrossEntropyLoss() | |
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) | |
elif self.config.problem_type == "multi_label_classification": | |
loss_fct = BCEWithLogitsLoss() | |
loss = loss_fct(logits, labels) | |
if not return_dict: | |
output = (logits,) + outputs[2:] | |
return ((loss,) + output) if loss is not None else output | |
return SequenceClassifierOutput( | |
loss=loss, | |
logits=logits, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
# Adapter-tuning For RoBERTa | |
class RobertaAdapterForSequenceClassification(RobertaPreTrainedModel): | |
def __init__(self, config): | |
super().__init__(config) | |
self.num_labels = config.num_labels | |
self.roberta = RobertaAdaModel(config) | |
self.embeddings = self.roberta.embeddings | |
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) | |
# self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels) | |
self.classifier = RobertaClassificationHead(config) | |
self.init_weights() | |
# for param in self.roberta.parameters(): | |
# param.requires_grad = False | |
self.roberta = init_adapter(self.roberta) | |
if self.config.use_freezing: | |
self.roberta = freezer.freeze_lm_component(self.roberta, "adapter") | |
def freeze_backbone(self, use_freezing: bool=True): | |
if use_freezing: | |
self.roberta = freezer.freeze_lm_component(self.roberta, "adapter") | |
else: | |
self.roberta = freezer.unfreeze_lm(self.roberta) | |
def forward( | |
self, | |
input_ids=None, | |
attention_mask=None, | |
token_type_ids=None, | |
position_ids=None, | |
head_mask=None, | |
inputs_embeds=None, | |
labels=None, | |
output_attentions=None, | |
output_hidden_states=None, | |
return_dict=None, | |
): | |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | |
batch_size = input_ids.shape[0] | |
inputs_embeds = self.embeddings( | |
input_ids=input_ids, | |
position_ids=position_ids, | |
token_type_ids=token_type_ids, | |
) | |
outputs = self.roberta( | |
# input_ids, | |
attention_mask=attention_mask, | |
# token_type_ids=token_type_ids, | |
# position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
# past_key_values=past_key_values, | |
) | |
        sequence_output = outputs[0]
        # RobertaClassificationHead pools the <s> token internally, so it is fed the sequence output.
        logits = self.classifier(sequence_output)
loss = None | |
if labels is not None: | |
if self.config.problem_type is None: | |
if self.num_labels == 1: | |
self.config.problem_type = "regression" | |
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): | |
self.config.problem_type = "single_label_classification" | |
else: | |
self.config.problem_type = "multi_label_classification" | |
if self.config.problem_type == "regression": | |
loss_fct = MSELoss() | |
if self.num_labels == 1: | |
loss = loss_fct(logits.squeeze(), labels.squeeze()) | |
else: | |
loss = loss_fct(logits, labels) | |
elif self.config.problem_type == "single_label_classification": | |
loss_fct = CrossEntropyLoss() | |
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) | |
elif self.config.problem_type == "multi_label_classification": | |
loss_fct = BCEWithLogitsLoss() | |
loss = loss_fct(logits, labels) | |
if not return_dict: | |
output = (logits,) + outputs[2:] | |
return ((loss,) + output) if loss is not None else output | |
return SequenceClassifierOutput( | |
loss=loss, | |
logits=logits, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
# ========= DeBERTa ========= | |
# Prefix-tuning For DeBERTa | |
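# Same prefix mechanism as the BERT/RoBERTa classes above; DeBERTa pools the encoder
# output with its ContextPooler (and StableDropout) instead of a BERT-style pooler.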
class DebertaPrefixForSequenceClassification(DebertaPreTrainedModel): | |
def __init__(self, config): | |
super().__init__(config) | |
self.num_labels = config.num_labels | |
self.config = config | |
self.deberta = DebertaModel(config) | |
self.pooler = ContextPooler(config) | |
output_dim = self.pooler.output_dim | |
self.classifier = torch.nn.Linear(output_dim, self.num_labels) | |
self.dropout = StableDropout(config.hidden_dropout_prob) | |
self.init_weights() | |
# for param in self.deberta.parameters(): | |
# param.requires_grad = False | |
if self.config.use_freezing: | |
self.deberta = freezer.freeze_lm(self.deberta) | |
self.pre_seq_len = config.pre_seq_len | |
self.n_layer = config.num_hidden_layers | |
self.n_head = config.num_attention_heads | |
self.n_embd = config.hidden_size // config.num_attention_heads | |
self.prefix_tokens = torch.arange(self.pre_seq_len).long() | |
self.prefix_encoder = PrefixEncoder(config) | |
deberta_param = 0 | |
for name, param in self.deberta.named_parameters(): | |
deberta_param += param.numel() | |
all_param = 0 | |
for name, param in self.named_parameters(): | |
all_param += param.numel() | |
total_param = all_param - deberta_param | |
        logger.info("Number of new (non-backbone) parameters: {}".format(total_param))
def freeze_backbone(self, use_freezing: bool=True): | |
if use_freezing: | |
self.deberta = freezer.freeze_lm(self.deberta) | |
else: | |
self.deberta = freezer.unfreeze_lm(self.deberta) | |
def get_prompt(self, batch_size): | |
prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(self.deberta.device) | |
past_key_values = self.prefix_encoder(prefix_tokens) | |
# bsz, seqlen, _ = past_key_values.shape | |
past_key_values = past_key_values.view( | |
batch_size, | |
self.pre_seq_len, | |
self.n_layer * 2, | |
self.n_head, | |
self.n_embd | |
) | |
past_key_values = self.dropout(past_key_values) | |
past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(2) | |
return past_key_values | |
def forward( | |
self, | |
input_ids=None, | |
attention_mask=None, | |
token_type_ids=None, | |
position_ids=None, | |
head_mask=None, | |
inputs_embeds=None, | |
labels=None, | |
output_attentions=None, | |
output_hidden_states=None, | |
return_dict=None, | |
): | |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | |
batch_size = input_ids.shape[0] | |
past_key_values = self.get_prompt(batch_size=batch_size) | |
prefix_attention_mask = torch.ones(batch_size, self.pre_seq_len).to(self.deberta.device) | |
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) | |
outputs = self.deberta( | |
input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
past_key_values=past_key_values, | |
) | |
encoder_layer = outputs[0] | |
pooled_output = self.pooler(encoder_layer) | |
pooled_output = self.dropout(pooled_output) | |
logits = self.classifier(pooled_output) | |
loss = None | |
if labels is not None: | |
if self.num_labels == 1: | |
# regression task | |
loss_fn = torch.nn.MSELoss() | |
logits = logits.view(-1).to(labels.dtype) | |
loss = loss_fn(logits, labels.view(-1)) | |
elif labels.dim() == 1 or labels.size(-1) == 1: | |
label_index = (labels >= 0).nonzero() | |
labels = labels.long() | |
if label_index.size(0) > 0: | |
labeled_logits = torch.gather(logits, 0, label_index.expand(label_index.size(0), logits.size(1))) | |
labels = torch.gather(labels, 0, label_index.view(-1)) | |
loss_fct = CrossEntropyLoss() | |
loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1)) | |
else: | |
loss = torch.tensor(0).to(logits) | |
else: | |
log_softmax = torch.nn.LogSoftmax(-1) | |
loss = -((log_softmax(logits) * labels).sum(-1)).mean() | |
if not return_dict: | |
output = (logits,) + outputs[1:] | |
return ((loss,) + output) if loss is not None else output | |
else: | |
return SequenceClassifierOutput( | |
loss=loss, | |
logits=logits, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
# GPT2 for classification | |
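# GPT-2 is decoder-only and has no [CLS] token, so classification pools the logits at the
# last non-padding position of each sequence (hence the `pad_token_id` requirement below).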
class GPT2ForSequenceClassification(GPT2PreTrainedModel): | |
def __init__(self, config): | |
super().__init__(config) | |
self.num_labels = config.num_labels | |
self.transformer = GPT2Model(config) | |
self.score = torch.nn.Linear(config.n_embd, self.num_labels, bias=False) | |
# Model parallel | |
self.model_parallel = False | |
self.device_map = None | |
# Initialize weights and apply final processing | |
self.post_init() | |
def forward( | |
self, | |
input_ids: Optional[torch.LongTensor] = None, | |
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, | |
attention_mask: Optional[torch.FloatTensor] = None, | |
token_type_ids: Optional[torch.LongTensor] = None, | |
position_ids: Optional[torch.LongTensor] = None, | |
head_mask: Optional[torch.FloatTensor] = None, | |
inputs_embeds: Optional[torch.FloatTensor] = None, | |
labels: Optional[torch.LongTensor] = None, | |
use_cache: Optional[bool] = None, | |
output_attentions: Optional[bool] = None, | |
output_hidden_states: Optional[bool] = None, | |
return_dict: Optional[bool] = None, | |
) -> Union[Tuple, SequenceClassifierOutputWithPast]: | |
r""" | |
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): | |
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., | |
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If | |
`config.num_labels > 1` a classification loss is computed (Cross-Entropy). | |
""" | |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | |
transformer_outputs = self.transformer( | |
input_ids, | |
past_key_values=past_key_values, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
use_cache=use_cache, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
) | |
hidden_states = transformer_outputs[0] | |
logits = self.score(hidden_states) | |
if input_ids is not None: | |
batch_size, sequence_length = input_ids.shape[:2] | |
else: | |
batch_size, sequence_length = inputs_embeds.shape[:2] | |
assert ( | |
self.config.pad_token_id is not None or batch_size == 1 | |
), "Cannot handle batch sizes > 1 if no padding token is defined." | |
if self.config.pad_token_id is None: | |
sequence_lengths = -1 | |
else: | |
if input_ids is not None: | |
sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1 | |
else: | |
sequence_lengths = -1 | |
logger.warning( | |
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " | |
"unexpected if using padding tokens in conjunction with `inputs_embeds.`" | |
) | |
pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] | |
loss = None | |
if labels is not None: | |
if self.config.problem_type is None: | |
if self.num_labels == 1: | |
self.config.problem_type = "regression" | |
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): | |
self.config.problem_type = "single_label_classification" | |
else: | |
self.config.problem_type = "multi_label_classification" | |
if self.config.problem_type == "regression": | |
loss_fct = MSELoss() | |
if self.num_labels == 1: | |
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) | |
else: | |
loss = loss_fct(pooled_logits, labels) | |
elif self.config.problem_type == "single_label_classification": | |
loss_fct = CrossEntropyLoss() | |
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) | |
elif self.config.problem_type == "multi_label_classification": | |
loss_fct = BCEWithLogitsLoss() | |
loss = loss_fct(pooled_logits, labels) | |
if not return_dict: | |
output = (pooled_logits,) + transformer_outputs[1:] | |
return ((loss,) + output) if loss is not None else output | |
return SequenceClassifierOutputWithPast( | |
loss=loss, | |
logits=pooled_logits, | |
past_key_values=transformer_outputs.past_key_values, | |
hidden_states=transformer_outputs.hidden_states, | |
attentions=transformer_outputs.attentions, | |
) | |
# Bart for classification | |
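# BART pools the decoder hidden state at the final <eos> token of each sequence as the
# sentence representation before the classification head.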
class BartForSequenceClassification(BartPretrainedModel): | |
def __init__(self, config: BartConfig, **kwargs): | |
super().__init__(config, **kwargs) | |
self.model = BartModel(config) | |
self.classification_head = BartClassificationHead( | |
config.d_model, | |
config.d_model, | |
config.num_labels, | |
config.classifier_dropout, | |
) | |
self.model._init_weights(self.classification_head.dense) | |
self.model._init_weights(self.classification_head.out_proj) | |
def forward( | |
self, | |
input_ids: torch.LongTensor = None, | |
attention_mask: Optional[torch.Tensor] = None, | |
decoder_input_ids: Optional[torch.LongTensor] = None, | |
decoder_attention_mask: Optional[torch.LongTensor] = None, | |
head_mask: Optional[torch.Tensor] = None, | |
decoder_head_mask: Optional[torch.Tensor] = None, | |
cross_attn_head_mask: Optional[torch.Tensor] = None, | |
encoder_outputs: Optional[List[torch.FloatTensor]] = None, | |
inputs_embeds: Optional[torch.FloatTensor] = None, | |
decoder_inputs_embeds: Optional[torch.FloatTensor] = None, | |
labels: Optional[torch.LongTensor] = None, | |
use_cache: Optional[bool] = None, | |
output_attentions: Optional[bool] = None, | |
output_hidden_states: Optional[bool] = None, | |
return_dict: Optional[bool] = None, | |
) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]: | |
r""" | |
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): | |
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., | |
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). | |
""" | |
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | |
if labels is not None: | |
use_cache = False | |
if input_ids is None and inputs_embeds is not None: | |
raise NotImplementedError( | |
f"Passing input embeddings is currently not supported for {self.__class__.__name__}" | |
) | |
outputs = self.model( | |
input_ids, | |
attention_mask=attention_mask, | |
decoder_input_ids=decoder_input_ids, | |
decoder_attention_mask=decoder_attention_mask, | |
head_mask=head_mask, | |
decoder_head_mask=decoder_head_mask, | |
cross_attn_head_mask=cross_attn_head_mask, | |
encoder_outputs=encoder_outputs, | |
inputs_embeds=inputs_embeds, | |
decoder_inputs_embeds=decoder_inputs_embeds, | |
use_cache=use_cache, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
) | |
hidden_states = outputs[0] # last hidden state | |
# print("hidden_states.shape=", hidden_states.shape) # [bz, seq_len, dim] | |
eos_mask = input_ids.eq(self.config.eos_token_id) | |
if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: | |
raise ValueError("All examples must have the same number of <eos> tokens.") | |
sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[ | |
:, -1, : | |
] | |
logits = self.classification_head(sentence_representation) | |
loss = None | |
if labels is not None: | |
if self.config.problem_type is None: | |
if self.config.num_labels == 1: | |
self.config.problem_type = "regression" | |
elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): | |
self.config.problem_type = "single_label_classification" | |
else: | |
self.config.problem_type = "multi_label_classification" | |
if self.config.problem_type == "regression": | |
loss_fct = MSELoss() | |
if self.config.num_labels == 1: | |
loss = loss_fct(logits.squeeze(), labels.squeeze()) | |
else: | |
loss = loss_fct(logits, labels) | |
elif self.config.problem_type == "single_label_classification": | |
loss_fct = CrossEntropyLoss() | |
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) | |
elif self.config.problem_type == "multi_label_classification": | |
loss_fct = BCEWithLogitsLoss() | |
loss = loss_fct(logits, labels) | |
if not return_dict: | |
output = (logits,) + outputs[1:] | |
return ((loss,) + output) if loss is not None else output | |
return Seq2SeqSequenceClassifierOutput( | |
loss=loss, | |
logits=logits, | |
past_key_values=outputs.past_key_values, | |
decoder_hidden_states=outputs.decoder_hidden_states, | |
decoder_attentions=outputs.decoder_attentions, | |
cross_attentions=outputs.cross_attentions, | |
encoder_last_hidden_state=outputs.encoder_last_hidden_state, | |
encoder_hidden_states=outputs.encoder_hidden_states, | |
encoder_attentions=outputs.encoder_attentions, | |
) | |