# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 BERT model. """
import math | |
import warnings | |
from dataclasses import dataclass | |
from typing import Dict, Optional, Tuple, Union | |
import numpy as np | |
import tensorflow as tf | |
from ...activations_tf import get_tf_activation | |
from ...file_utils import ( | |
MULTIPLE_CHOICE_DUMMY_INPUTS, | |
ModelOutput, | |
add_code_sample_docstrings, | |
add_start_docstrings, | |
add_start_docstrings_to_model_forward, | |
replace_return_docstrings, | |
) | |
from ...modeling_tf_outputs import ( | |
TFBaseModelOutput, | |
TFBaseModelOutputWithPooling, | |
TFCausalLMOutput, | |
TFMaskedLMOutput, | |
TFMultipleChoiceModelOutput, | |
TFNextSentencePredictorOutput, | |
TFQuestionAnsweringModelOutput, | |
TFSequenceClassifierOutput, | |
TFTokenClassifierOutput, | |
) | |
from ...modeling_tf_utils import ( | |
TFCausalLanguageModelingLoss, | |
TFMaskedLanguageModelingLoss, | |
TFModelInputType, | |
TFMultipleChoiceLoss, | |
TFNextSentencePredictionLoss, | |
TFPreTrainedModel, | |
TFQuestionAnsweringLoss, | |
TFSequenceClassificationLoss, | |
TFTokenClassificationLoss, | |
get_initializer, | |
input_processing, | |
keras_serializable, | |
shape_list, | |
) | |
from ...utils import logging | |
from .configuration_bert import BertConfig | |
logger = logging.get_logger(__name__) | |
_CHECKPOINT_FOR_DOC = "bert-base-cased" | |
_CONFIG_FOR_DOC = "BertConfig" | |
_TOKENIZER_FOR_DOC = "BertTokenizer" | |
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ | |
"bert-base-uncased", | |
"bert-large-uncased", | |
"bert-base-cased", | |
"bert-large-cased", | |
"bert-base-multilingual-uncased", | |
"bert-base-multilingual-cased", | |
"bert-base-chinese", | |
"bert-base-german-cased", | |
"bert-large-uncased-whole-word-masking", | |
"bert-large-cased-whole-word-masking", | |
"bert-large-uncased-whole-word-masking-finetuned-squad", | |
"bert-large-cased-whole-word-masking-finetuned-squad", | |
"bert-base-cased-finetuned-mrpc", | |
"cl-tohoku/bert-base-japanese", | |
"cl-tohoku/bert-base-japanese-whole-word-masking", | |
"cl-tohoku/bert-base-japanese-char", | |
"cl-tohoku/bert-base-japanese-char-whole-word-masking", | |
"TurkuNLP/bert-base-finnish-cased-v1", | |
"TurkuNLP/bert-base-finnish-uncased-v1", | |
"wietsedv/bert-base-dutch-cased", | |
# See all BERT models at https://huggingface.co/models?filter=bert | |
] | |
class TFBertPreTrainingLoss:
    """
    Loss function suitable for BERT-like pretraining, that is, the task of pretraining a language model by combining
    NSP + MLM.

    .. note::
        Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
    """
def compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor: | |
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( | |
from_logits=True, reduction=tf.keras.losses.Reduction.NONE | |
) | |
        # make sure that only labels not equal to -100
        # are taken into account for the loss computation
masked_lm_active_loss = tf.not_equal(tf.reshape(tensor=labels["labels"], shape=(-1,)), -100) | |
masked_lm_reduced_logits = tf.boolean_mask( | |
tensor=tf.reshape(tensor=logits[0], shape=(-1, shape_list(logits[0])[2])), | |
mask=masked_lm_active_loss, | |
) | |
masked_lm_labels = tf.boolean_mask( | |
tensor=tf.reshape(tensor=labels["labels"], shape=(-1,)), mask=masked_lm_active_loss | |
) | |
next_sentence_active_loss = tf.not_equal(tf.reshape(tensor=labels["next_sentence_label"], shape=(-1,)), -100) | |
next_sentence_reduced_logits = tf.boolean_mask( | |
tensor=tf.reshape(tensor=logits[1], shape=(-1, 2)), mask=next_sentence_active_loss | |
) | |
next_sentence_label = tf.boolean_mask( | |
tensor=tf.reshape(tensor=labels["next_sentence_label"], shape=(-1,)), mask=next_sentence_active_loss | |
) | |
masked_lm_loss = loss_fn(y_true=masked_lm_labels, y_pred=masked_lm_reduced_logits) | |
next_sentence_loss = loss_fn(y_true=next_sentence_label, y_pred=next_sentence_reduced_logits) | |
masked_lm_loss = tf.reshape(tensor=masked_lm_loss, shape=(-1, shape_list(next_sentence_loss)[0])) | |
masked_lm_loss = tf.reduce_mean(input_tensor=masked_lm_loss, axis=0) | |
return masked_lm_loss + next_sentence_loss | |
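    # A minimal usage sketch (illustrative comment, not executed; the surrounding model and label tensors are
    # assumed): a model that mixes in TFBertPreTrainingLoss passes a dict of labels plus an (MLM logits, NSP
    # logits) tuple, exactly as TFBertForPreTraining does below:
    #
    #     >>> d_labels = {"labels": mlm_labels, "next_sentence_label": nsp_labels}
    #     >>> total_loss = model.compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score))
    #
    # `mlm_labels` has shape (batch_size, seq_length) with -100 at positions to ignore, and `nsp_labels`
    # has shape (batch_size,) with values in {0, 1}.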
class TFBertEmbeddings(tf.keras.layers.Layer): | |
"""Construct the embeddings from word, position and token_type embeddings.""" | |
def __init__(self, config: BertConfig, **kwargs): | |
super().__init__(**kwargs) | |
self.vocab_size = config.vocab_size | |
self.type_vocab_size = config.type_vocab_size | |
self.hidden_size = config.hidden_size | |
self.max_position_embeddings = config.max_position_embeddings | |
self.initializer_range = config.initializer_range | |
self.embeddings_sum = tf.keras.layers.Add() | |
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") | |
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) | |
def build(self, input_shape: tf.TensorShape): | |
with tf.name_scope("word_embeddings"): | |
self.weight = self.add_weight( | |
name="weight", | |
shape=[self.vocab_size, self.hidden_size], | |
initializer=get_initializer(self.initializer_range), | |
) | |
with tf.name_scope("token_type_embeddings"): | |
self.token_type_embeddings = self.add_weight( | |
name="embeddings", | |
shape=[self.type_vocab_size, self.hidden_size], | |
initializer=get_initializer(self.initializer_range), | |
) | |
with tf.name_scope("position_embeddings"): | |
self.position_embeddings = self.add_weight( | |
name="embeddings", | |
shape=[self.max_position_embeddings, self.hidden_size], | |
initializer=get_initializer(self.initializer_range), | |
) | |
super().build(input_shape) | |
def call( | |
self, | |
input_ids: tf.Tensor = None, | |
position_ids: tf.Tensor = None, | |
token_type_ids: tf.Tensor = None, | |
inputs_embeds: tf.Tensor = None, | |
training: bool = False, | |
) -> tf.Tensor: | |
""" | |
Applies embedding based on inputs tensor. | |
Returns: | |
final_embeddings (:obj:`tf.Tensor`): output embedding tensor. | |
""" | |
assert not (input_ids is None and inputs_embeds is None) | |
if input_ids is not None: | |
inputs_embeds = tf.gather(params=self.weight, indices=input_ids) | |
input_shape = shape_list(inputs_embeds)[:-1] | |
if token_type_ids is None: | |
token_type_ids = tf.fill(dims=input_shape, value=0) | |
if position_ids is None: | |
position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) | |
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) | |
position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1)) | |
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) | |
final_embeddings = self.embeddings_sum(inputs=[inputs_embeds, position_embeds, token_type_embeds]) | |
final_embeddings = self.LayerNorm(inputs=final_embeddings) | |
final_embeddings = self.dropout(inputs=final_embeddings, training=training) | |
return final_embeddings | |
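    # Shape sketch (illustrative comment; `config` is assumed to be a BertConfig):
    #
    #     >>> embeddings = TFBertEmbeddings(config, name="embeddings")
    #     >>> out = embeddings(input_ids=tf.constant([[101, 2054, 102]]))  # -> (1, 3, config.hidden_size)
    #
    # Word, position and token-type embeddings of shape (batch_size, seq_length, hidden_size) are summed,
    # then LayerNorm and dropout are applied; position_ids default to tf.range(seq_length)[None, :] and
    # token_type_ids default to zeros when not provided.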
class TFBertSelfAttention(tf.keras.layers.Layer): | |
def __init__(self, config: BertConfig, **kwargs): | |
super().__init__(**kwargs) | |
if config.hidden_size % config.num_attention_heads != 0: | |
raise ValueError( | |
f"The hidden size ({config.hidden_size}) is not a multiple of the number " | |
f"of attention heads ({config.num_attention_heads})" | |
) | |
self.num_attention_heads = config.num_attention_heads | |
self.attention_head_size = int(config.hidden_size / config.num_attention_heads) | |
self.all_head_size = self.num_attention_heads * self.attention_head_size | |
self.sqrt_att_head_size = math.sqrt(self.attention_head_size) | |
self.query = tf.keras.layers.Dense( | |
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" | |
) | |
self.key = tf.keras.layers.Dense( | |
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" | |
) | |
self.value = tf.keras.layers.Dense( | |
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" | |
) | |
self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob) | |
def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: | |
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] | |
tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) | |
# Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] | |
return tf.transpose(tensor, perm=[0, 2, 1, 3]) | |
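    # Worked shape example for transpose_for_scores (comment only): with batch_size=2, seq_length=7,
    # num_attention_heads=12 and attention_head_size=64 (all_head_size=768), an input of shape (2, 7, 768)
    # is reshaped to (2, 7, 12, 64) and transposed to (2, 12, 7, 64), i.e. one (seq_length, head_size)
    # block per attention head.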
def call( | |
self, | |
hidden_states: tf.Tensor, | |
attention_mask: tf.Tensor, | |
head_mask: tf.Tensor, | |
output_attentions: bool, | |
training: bool = False, | |
) -> Tuple[tf.Tensor]: | |
batch_size = shape_list(hidden_states)[0] | |
mixed_query_layer = self.query(inputs=hidden_states) | |
mixed_key_layer = self.key(inputs=hidden_states) | |
mixed_value_layer = self.value(inputs=hidden_states) | |
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) | |
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) | |
value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) | |
# Take the dot product between "query" and "key" to get the raw attention scores. | |
# (batch size, num_heads, seq_len_q, seq_len_k) | |
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) | |
dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) | |
attention_scores = tf.divide(attention_scores, dk) | |
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the TFBertModel call() function)
            attention_scores = tf.add(attention_scores, attention_mask)
# Normalize the attention scores to probabilities. | |
attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1) | |
# This is actually dropping out entire tokens to attend to, which might | |
# seem a bit unusual, but is taken from the original Transformer paper. | |
attention_probs = self.dropout(inputs=attention_probs, training=training) | |
# Mask heads if we want to | |
if head_mask is not None: | |
attention_probs = tf.multiply(attention_probs, head_mask) | |
attention_output = tf.matmul(attention_probs, value_layer) | |
attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) | |
# (batch_size, seq_len_q, all_head_size) | |
attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) | |
outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) | |
return outputs | |
class TFBertSelfOutput(tf.keras.layers.Layer): | |
def __init__(self, config: BertConfig, **kwargs): | |
super().__init__(**kwargs) | |
self.dense = tf.keras.layers.Dense( | |
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" | |
) | |
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") | |
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) | |
def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: | |
hidden_states = self.dense(inputs=hidden_states) | |
hidden_states = self.dropout(inputs=hidden_states, training=training) | |
hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) | |
return hidden_states | |
class TFBertAttention(tf.keras.layers.Layer): | |
def __init__(self, config: BertConfig, **kwargs): | |
super().__init__(**kwargs) | |
self.self_attention = TFBertSelfAttention(config, name="self") | |
self.dense_output = TFBertSelfOutput(config, name="output") | |
def prune_heads(self, heads): | |
raise NotImplementedError | |
def call( | |
self, | |
input_tensor: tf.Tensor, | |
attention_mask: tf.Tensor, | |
head_mask: tf.Tensor, | |
output_attentions: bool, | |
training: bool = False, | |
) -> Tuple[tf.Tensor]: | |
self_outputs = self.self_attention( | |
hidden_states=input_tensor, | |
attention_mask=attention_mask, | |
head_mask=head_mask, | |
output_attentions=output_attentions, | |
training=training, | |
) | |
attention_output = self.dense_output( | |
hidden_states=self_outputs[0], input_tensor=input_tensor, training=training | |
) | |
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them | |
return outputs | |
class TFBertIntermediate(tf.keras.layers.Layer): | |
def __init__(self, config: BertConfig, **kwargs): | |
super().__init__(**kwargs) | |
self.dense = tf.keras.layers.Dense( | |
units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" | |
) | |
if isinstance(config.hidden_act, str): | |
self.intermediate_act_fn = get_tf_activation(config.hidden_act) | |
else: | |
self.intermediate_act_fn = config.hidden_act | |
def call(self, hidden_states: tf.Tensor) -> tf.Tensor: | |
hidden_states = self.dense(inputs=hidden_states) | |
hidden_states = self.intermediate_act_fn(hidden_states) | |
return hidden_states | |
class TFBertOutput(tf.keras.layers.Layer): | |
def __init__(self, config: BertConfig, **kwargs): | |
super().__init__(**kwargs) | |
self.dense = tf.keras.layers.Dense( | |
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" | |
) | |
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") | |
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) | |
def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: | |
hidden_states = self.dense(inputs=hidden_states) | |
hidden_states = self.dropout(inputs=hidden_states, training=training) | |
hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) | |
return hidden_states | |
class TFBertLayer(tf.keras.layers.Layer): | |
def __init__(self, config: BertConfig, **kwargs): | |
super().__init__(**kwargs) | |
self.attention = TFBertAttention(config, name="attention") | |
self.intermediate = TFBertIntermediate(config, name="intermediate") | |
self.bert_output = TFBertOutput(config, name="output") | |
def call( | |
self, | |
hidden_states: tf.Tensor, | |
attention_mask: tf.Tensor, | |
head_mask: tf.Tensor, | |
output_attentions: bool, | |
training: bool = False, | |
) -> Tuple[tf.Tensor]: | |
attention_outputs = self.attention( | |
input_tensor=hidden_states, | |
attention_mask=attention_mask, | |
head_mask=head_mask, | |
output_attentions=output_attentions, | |
training=training, | |
) | |
attention_output = attention_outputs[0] | |
intermediate_output = self.intermediate(hidden_states=attention_output) | |
layer_output = self.bert_output( | |
hidden_states=intermediate_output, input_tensor=attention_output, training=training | |
) | |
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them | |
return outputs | |
class TFBertEncoder(tf.keras.layers.Layer): | |
def __init__(self, config: BertConfig, **kwargs): | |
super().__init__(**kwargs) | |
self.layer = [TFBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] | |
def call( | |
self, | |
hidden_states: tf.Tensor, | |
attention_mask: tf.Tensor, | |
head_mask: tf.Tensor, | |
output_attentions: bool, | |
output_hidden_states: bool, | |
return_dict: bool, | |
training: bool = False, | |
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: | |
all_hidden_states = () if output_hidden_states else None | |
all_attentions = () if output_attentions else None | |
for i, layer_module in enumerate(self.layer): | |
if output_hidden_states: | |
all_hidden_states = all_hidden_states + (hidden_states,) | |
layer_outputs = layer_module( | |
hidden_states=hidden_states, | |
attention_mask=attention_mask, | |
head_mask=head_mask[i], | |
output_attentions=output_attentions, | |
training=training, | |
) | |
hidden_states = layer_outputs[0] | |
if output_attentions: | |
all_attentions = all_attentions + (layer_outputs[1],) | |
# Add last layer | |
if output_hidden_states: | |
all_hidden_states = all_hidden_states + (hidden_states,) | |
if not return_dict: | |
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) | |
return TFBaseModelOutput( | |
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions | |
) | |
class TFBertPooler(tf.keras.layers.Layer): | |
def __init__(self, config: BertConfig, **kwargs): | |
super().__init__(**kwargs) | |
self.dense = tf.keras.layers.Dense( | |
units=config.hidden_size, | |
kernel_initializer=get_initializer(config.initializer_range), | |
activation="tanh", | |
name="dense", | |
) | |
def call(self, hidden_states: tf.Tensor) -> tf.Tensor: | |
# We "pool" the model by simply taking the hidden state corresponding | |
# to the first token. | |
first_token_tensor = hidden_states[:, 0] | |
pooled_output = self.dense(inputs=first_token_tensor) | |
return pooled_output | |
class TFBertPredictionHeadTransform(tf.keras.layers.Layer): | |
def __init__(self, config: BertConfig, **kwargs): | |
super().__init__(**kwargs) | |
self.dense = tf.keras.layers.Dense( | |
units=config.hidden_size, | |
kernel_initializer=get_initializer(config.initializer_range), | |
name="dense", | |
) | |
if isinstance(config.hidden_act, str): | |
self.transform_act_fn = get_tf_activation(config.hidden_act) | |
else: | |
self.transform_act_fn = config.hidden_act | |
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") | |
def call(self, hidden_states: tf.Tensor) -> tf.Tensor: | |
hidden_states = self.dense(inputs=hidden_states) | |
hidden_states = self.transform_act_fn(hidden_states) | |
hidden_states = self.LayerNorm(inputs=hidden_states) | |
return hidden_states | |
class TFBertLMPredictionHead(tf.keras.layers.Layer): | |
def __init__(self, config: BertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs): | |
super().__init__(**kwargs) | |
self.vocab_size = config.vocab_size | |
self.hidden_size = config.hidden_size | |
self.transform = TFBertPredictionHeadTransform(config, name="transform") | |
# The output weights are the same as the input embeddings, but there is | |
# an output-only bias for each token. | |
self.input_embeddings = input_embeddings | |
def build(self, input_shape: tf.TensorShape): | |
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") | |
super().build(input_shape) | |
def get_output_embeddings(self) -> tf.keras.layers.Layer: | |
return self.input_embeddings | |
def set_output_embeddings(self, value: tf.Variable): | |
self.input_embeddings.weight = value | |
self.input_embeddings.vocab_size = shape_list(value)[0] | |
def get_bias(self) -> Dict[str, tf.Variable]: | |
return {"bias": self.bias} | |
def set_bias(self, value: tf.Variable): | |
self.bias = value["bias"] | |
self.vocab_size = shape_list(value["bias"])[0] | |
def call(self, hidden_states: tf.Tensor) -> tf.Tensor: | |
hidden_states = self.transform(hidden_states=hidden_states) | |
seq_length = shape_list(hidden_states)[1] | |
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) | |
hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) | |
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size]) | |
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) | |
return hidden_states | |
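    # Weight-tying note (comment only): the projection above reuses the word embedding matrix
    # `self.input_embeddings.weight` of shape (vocab_size, hidden_size) with transpose_b=True, so
    # (batch_size * seq_length, hidden_size) x (hidden_size, vocab_size) -> (batch_size * seq_length, vocab_size),
    # which is reshaped back to (batch_size, seq_length, vocab_size) and offset by the output-only bias.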
class TFBertMLMHead(tf.keras.layers.Layer): | |
def __init__(self, config: BertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs): | |
super().__init__(**kwargs) | |
self.predictions = TFBertLMPredictionHead(config, input_embeddings, name="predictions") | |
def call(self, sequence_output: tf.Tensor) -> tf.Tensor: | |
prediction_scores = self.predictions(hidden_states=sequence_output) | |
return prediction_scores | |
class TFBertNSPHead(tf.keras.layers.Layer): | |
def __init__(self, config: BertConfig, **kwargs): | |
super().__init__(**kwargs) | |
self.seq_relationship = tf.keras.layers.Dense( | |
units=2, | |
kernel_initializer=get_initializer(config.initializer_range), | |
name="seq_relationship", | |
) | |
def call(self, pooled_output: tf.Tensor) -> tf.Tensor: | |
seq_relationship_score = self.seq_relationship(inputs=pooled_output) | |
return seq_relationship_score | |
@keras_serializable
class TFBertMainLayer(tf.keras.layers.Layer):
config_class = BertConfig | |
def __init__(self, config: BertConfig, add_pooling_layer: bool = True, **kwargs): | |
super().__init__(**kwargs) | |
self.config = config | |
self.embeddings = TFBertEmbeddings(config, name="embeddings") | |
self.encoder = TFBertEncoder(config, name="encoder") | |
self.pooler = TFBertPooler(config, name="pooler") if add_pooling_layer else None | |
def get_input_embeddings(self) -> tf.keras.layers.Layer: | |
return self.embeddings | |
def set_input_embeddings(self, value: tf.Variable): | |
self.embeddings.weight = value | |
self.embeddings.vocab_size = shape_list(value)[0] | |
def _prune_heads(self, heads_to_prune): | |
""" | |
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base | |
class PreTrainedModel | |
""" | |
raise NotImplementedError | |
def call( | |
self, | |
input_ids: Optional[TFModelInputType] = None, | |
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
output_attentions: Optional[bool] = None, | |
output_hidden_states: Optional[bool] = None, | |
return_dict: Optional[bool] = None, | |
training: bool = False, | |
**kwargs, | |
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: | |
inputs = input_processing( | |
func=self.call, | |
config=self.config, | |
input_ids=input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
training=training, | |
kwargs_call=kwargs, | |
) | |
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None: | |
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") | |
elif inputs["input_ids"] is not None: | |
input_shape = shape_list(inputs["input_ids"]) | |
elif inputs["inputs_embeds"] is not None: | |
input_shape = shape_list(inputs["inputs_embeds"])[:-1] | |
else: | |
raise ValueError("You have to specify either input_ids or inputs_embeds") | |
if inputs["attention_mask"] is None: | |
inputs["attention_mask"] = tf.fill(dims=input_shape, value=1) | |
if inputs["token_type_ids"] is None: | |
inputs["token_type_ids"] = tf.fill(dims=input_shape, value=0) | |
embedding_output = self.embeddings( | |
input_ids=inputs["input_ids"], | |
position_ids=inputs["position_ids"], | |
token_type_ids=inputs["token_type_ids"], | |
inputs_embeds=inputs["inputs_embeds"], | |
training=inputs["training"], | |
) | |
# We create a 3D attention mask from a 2D tensor mask. | |
# Sizes are [batch_size, 1, 1, to_seq_length] | |
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] | |
        # this attention mask is simpler than the triangular masking of causal attention
        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
extended_attention_mask = tf.reshape(inputs["attention_mask"], (input_shape[0], 1, 1, input_shape[1])) | |
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for | |
# masked positions, this operation will create a tensor which is 0.0 for | |
# positions we want to attend and -10000.0 for masked positions. | |
# Since we are adding it to the raw scores before the softmax, this is | |
# effectively the same as removing these entirely. | |
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) | |
one_cst = tf.constant(1.0, dtype=embedding_output.dtype) | |
ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) | |
extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) | |
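        # Worked example (comment only): an attention_mask row [1, 1, 0] becomes, after the reshape and the
        # (1.0 - mask) * -10000.0 transform above, the additive bias [[[0.0, 0.0, -10000.0]]], which leaves
        # attended positions unchanged and pushes masked positions towards -inf before the softmax.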
# Prepare head mask if needed | |
        # 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N | |
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] | |
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] | |
if inputs["head_mask"] is not None: | |
raise NotImplementedError | |
else: | |
inputs["head_mask"] = [None] * self.config.num_hidden_layers | |
encoder_outputs = self.encoder( | |
hidden_states=embedding_output, | |
attention_mask=extended_attention_mask, | |
head_mask=inputs["head_mask"], | |
output_attentions=inputs["output_attentions"], | |
output_hidden_states=inputs["output_hidden_states"], | |
return_dict=inputs["return_dict"], | |
training=inputs["training"], | |
) | |
sequence_output = encoder_outputs[0] | |
pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None | |
if not inputs["return_dict"]: | |
return ( | |
sequence_output, | |
pooled_output, | |
) + encoder_outputs[1:] | |
return TFBaseModelOutputWithPooling( | |
last_hidden_state=sequence_output, | |
pooler_output=pooled_output, | |
hidden_states=encoder_outputs.hidden_states, | |
attentions=encoder_outputs.attentions, | |
) | |
class TFBertPreTrainedModel(TFPreTrainedModel): | |
""" | |
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained | |
models. | |
""" | |
config_class = BertConfig | |
base_model_prefix = "bert" | |
@dataclass
class TFBertForPreTrainingOutput(ModelOutput):
""" | |
Output type of :class:`~transformers.TFBertForPreTraining`. | |
Args: | |
prediction_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): | |
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). | |
seq_relationship_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, 2)`): | |
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation | |
before SoftMax). | |
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): | |
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of | |
shape :obj:`(batch_size, sequence_length, hidden_size)`. | |
Hidden-states of the model at the output of each layer plus the initial embedding outputs. | |
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): | |
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, | |
sequence_length)`. | |
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention | |
heads. | |
""" | |
loss: Optional[tf.Tensor] = None | |
prediction_logits: tf.Tensor = None | |
seq_relationship_logits: tf.Tensor = None | |
hidden_states: Optional[Union[Tuple[tf.Tensor], tf.Tensor]] = None | |
attentions: Optional[Union[Tuple[tf.Tensor], tf.Tensor]] = None | |
BERT_START_DOCSTRING = r""" | |
    This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
    generic methods the library implements for all its models (such as downloading or saving, resizing the input
    embeddings, pruning heads etc.)

    This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
    it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage
    and behavior.

    .. note::

        TF 2.0 models accept two formats as inputs:

        - having all inputs as keyword arguments (like PyTorch models), or
        - having all inputs as a list, tuple or dict in the first positional argument.

        This second option is useful when using the :meth:`tf.keras.Model.fit` method, which currently requires having
        all the tensors in the first argument of the model call function: :obj:`model(inputs)`.

        If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
        the first positional argument:

        - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: | |
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])` | |
- a dictionary with one or several input Tensors associated to the input names given in the docstring: | |
:obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})` | |
Args: | |
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. | |
Initializing with a config file does not load the weights associated with the model, only the | |
configuration. Check out the :meth:`~transformers.TFPreTrainedModel.from_pretrained` method to load the | |
model weights. | |
""" | |
BERT_INPUTS_DOCSTRING = r""" | |
Args: | |
input_ids (:obj:`np.ndarray`, :obj:`tf.Tensor`, :obj:`List[tf.Tensor]` :obj:`Dict[str, tf.Tensor]` or :obj:`Dict[str, np.ndarray]` and each example must have the shape :obj:`({0})`): | |
Indices of input sequence tokens in the vocabulary. | |
Indices can be obtained using :class:`~transformers.BertTokenizer`. See | |
:func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for | |
details. | |
`What are input IDs? <../glossary.html#input-ids>`__ | |
attention_mask (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): | |
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: | |
- 1 for tokens that are **not masked**, | |
- 0 for tokens that are **masked**. | |
`What are attention masks? <../glossary.html#attention-mask>`__ | |
token_type_ids (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): | |
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, | |
1]``: | |
- 0 corresponds to a `sentence A` token, | |
- 1 corresponds to a `sentence B` token. | |
`What are token type IDs? <../glossary.html#token-type-ids>`__ | |
position_ids (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): | |
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, | |
config.max_position_embeddings - 1]``. | |
`What are position IDs? <../glossary.html#position-ids>`__ | |
head_mask (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): | |
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: | |
- 1 indicates the head is **not masked**, | |
- 0 indicates the head is **masked**. | |
inputs_embeds (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`): | |
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. | |
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated | |
vectors than the model's internal embedding lookup matrix. | |
output_attentions (:obj:`bool`, `optional`): | |
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned | |
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the | |
config will be used instead. | |
output_hidden_states (:obj:`bool`, `optional`): | |
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for | |
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be | |
used instead. | |
return_dict (:obj:`bool`, `optional`): | |
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This | |
argument can be used in eager mode, in graph mode the value will always be set to True. | |
training (:obj:`bool`, `optional`, defaults to :obj:`False`): | |
Whether or not to use the model in training mode (some modules like dropout modules have different | |
behaviors between training and evaluation). | |
""" | |
class TFBertModel(TFBertPreTrainedModel): | |
def __init__(self, config: BertConfig, *inputs, **kwargs): | |
super().__init__(config, *inputs, **kwargs) | |
self.bert = TFBertMainLayer(config, name="bert") | |
def call( | |
self, | |
input_ids: Optional[TFModelInputType] = None, | |
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
output_attentions: Optional[bool] = None, | |
output_hidden_states: Optional[bool] = None, | |
return_dict: Optional[bool] = None, | |
training: Optional[bool] = False, | |
**kwargs, | |
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: | |
inputs = input_processing( | |
func=self.call, | |
config=self.config, | |
input_ids=input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
training=training, | |
kwargs_call=kwargs, | |
) | |
outputs = self.bert( | |
input_ids=inputs["input_ids"], | |
attention_mask=inputs["attention_mask"], | |
token_type_ids=inputs["token_type_ids"], | |
position_ids=inputs["position_ids"], | |
head_mask=inputs["head_mask"], | |
inputs_embeds=inputs["inputs_embeds"], | |
output_attentions=inputs["output_attentions"], | |
output_hidden_states=inputs["output_hidden_states"], | |
return_dict=inputs["return_dict"], | |
training=inputs["training"], | |
) | |
return outputs | |
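    # Usage sketch (comment only; the checkpoint name is just an example):
    #
    #     >>> from transformers import BertTokenizer, TFBertModel
    #     >>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
    #     >>> model = TFBertModel.from_pretrained("bert-base-cased")
    #     >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
    #     >>> outputs = model(inputs)
    #     >>> last_hidden_states = outputs.last_hidden_state  # (batch_size, seq_length, hidden_size)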
def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling: | |
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None | |
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None | |
return TFBaseModelOutputWithPooling( | |
last_hidden_state=output.last_hidden_state, | |
pooler_output=output.pooler_output, | |
hidden_states=hs, | |
attentions=attns, | |
) | |
class TFBertForPreTraining(TFBertPreTrainedModel, TFBertPreTrainingLoss): | |
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [ | |
r"position_ids", | |
r"cls.predictions.decoder.weight", | |
r"cls.predictions.decoder.bias", | |
] | |
def __init__(self, config: BertConfig, *inputs, **kwargs): | |
super().__init__(config, *inputs, **kwargs) | |
self.bert = TFBertMainLayer(config, name="bert") | |
self.nsp = TFBertNSPHead(config, name="nsp___cls") | |
self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name="mlm___cls") | |
def get_lm_head(self) -> tf.keras.layers.Layer: | |
return self.mlm.predictions | |
def get_prefix_bias_name(self) -> str: | |
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) | |
return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name | |
def call( | |
self, | |
input_ids: Optional[TFModelInputType] = None, | |
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
output_attentions: Optional[bool] = None, | |
output_hidden_states: Optional[bool] = None, | |
return_dict: Optional[bool] = None, | |
labels: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
next_sentence_label: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
training: Optional[bool] = False, | |
**kwargs, | |
) -> Union[TFBertForPreTrainingOutput, Tuple[tf.Tensor]]: | |
r""" | |
        labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
            (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
        next_sentence_label (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see :obj:`input_ids` docstring). Indices should be in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (:obj:`Dict[str, any]`, `optional`, defaults to :obj:`{}`):
            Used to hide legacy arguments that have been deprecated.
Return: | |
Examples:: | |
>>> import tensorflow as tf | |
>>> from transformers import BertTokenizer, TFBertForPreTraining | |
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') | |
>>> model = TFBertForPreTraining.from_pretrained('bert-base-uncased') | |
>>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1 | |
>>> outputs = model(input_ids) | |
>>> prediction_scores, seq_relationship_scores = outputs[:2] | |
""" | |
inputs = input_processing( | |
func=self.call, | |
config=self.config, | |
input_ids=input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
labels=labels, | |
next_sentence_label=next_sentence_label, | |
training=training, | |
kwargs_call=kwargs, | |
) | |
outputs = self.bert( | |
input_ids=inputs["input_ids"], | |
attention_mask=inputs["attention_mask"], | |
token_type_ids=inputs["token_type_ids"], | |
position_ids=inputs["position_ids"], | |
head_mask=inputs["head_mask"], | |
inputs_embeds=inputs["inputs_embeds"], | |
output_attentions=inputs["output_attentions"], | |
output_hidden_states=inputs["output_hidden_states"], | |
return_dict=inputs["return_dict"], | |
training=inputs["training"], | |
) | |
sequence_output, pooled_output = outputs[:2] | |
prediction_scores = self.mlm(sequence_output=sequence_output, training=inputs["training"]) | |
seq_relationship_score = self.nsp(pooled_output=pooled_output) | |
total_loss = None | |
if inputs["labels"] is not None and inputs["next_sentence_label"] is not None: | |
d_labels = {"labels": inputs["labels"]} | |
d_labels["next_sentence_label"] = inputs["next_sentence_label"] | |
total_loss = self.compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score)) | |
if not inputs["return_dict"]: | |
output = (prediction_scores, seq_relationship_score) + outputs[2:] | |
return ((total_loss,) + output) if total_loss is not None else output | |
return TFBertForPreTrainingOutput( | |
loss=total_loss, | |
prediction_logits=prediction_scores, | |
seq_relationship_logits=seq_relationship_score, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
def serving_output(self, output: TFBertForPreTrainingOutput) -> TFBertForPreTrainingOutput: | |
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None | |
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None | |
return TFBertForPreTrainingOutput( | |
prediction_logits=output.prediction_logits, | |
seq_relationship_logits=output.seq_relationship_logits, | |
hidden_states=hs, | |
attentions=attns, | |
) | |
class TFBertForMaskedLM(TFBertPreTrainedModel, TFMaskedLanguageModelingLoss): | |
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [ | |
r"pooler", | |
r"cls.seq_relationship", | |
r"cls.predictions.decoder.weight", | |
r"nsp___cls", | |
] | |
def __init__(self, config: BertConfig, *inputs, **kwargs): | |
super().__init__(config, *inputs, **kwargs) | |
if config.is_decoder: | |
logger.warning( | |
"If you want to use `TFBertForMaskedLM` make sure `config.is_decoder=False` for " | |
"bi-directional self-attention." | |
) | |
self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert") | |
self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name="mlm___cls") | |
def get_lm_head(self) -> tf.keras.layers.Layer: | |
return self.mlm.predictions | |
def get_prefix_bias_name(self) -> str: | |
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) | |
return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name | |
def call( | |
self, | |
input_ids: Optional[TFModelInputType] = None, | |
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
output_attentions: Optional[bool] = None, | |
output_hidden_states: Optional[bool] = None, | |
return_dict: Optional[bool] = None, | |
labels: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
training: Optional[bool] = False, | |
**kwargs, | |
) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: | |
r""" | |
        labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
            (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
        """
inputs = input_processing( | |
func=self.call, | |
config=self.config, | |
input_ids=input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
labels=labels, | |
training=training, | |
kwargs_call=kwargs, | |
) | |
outputs = self.bert( | |
input_ids=inputs["input_ids"], | |
attention_mask=inputs["attention_mask"], | |
token_type_ids=inputs["token_type_ids"], | |
position_ids=inputs["position_ids"], | |
head_mask=inputs["head_mask"], | |
inputs_embeds=inputs["inputs_embeds"], | |
output_attentions=inputs["output_attentions"], | |
output_hidden_states=inputs["output_hidden_states"], | |
return_dict=inputs["return_dict"], | |
training=inputs["training"], | |
) | |
sequence_output = outputs[0] | |
prediction_scores = self.mlm(sequence_output=sequence_output, training=inputs["training"]) | |
loss = ( | |
None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=prediction_scores) | |
) | |
if not inputs["return_dict"]: | |
output = (prediction_scores,) + outputs[2:] | |
return ((loss,) + output) if loss is not None else output | |
return TFMaskedLMOutput( | |
loss=loss, | |
logits=prediction_scores, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput: | |
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None | |
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None | |
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns) | |
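    # Usage sketch for masked-LM inference (comment only; the checkpoint name is just an example):
    #
    #     >>> from transformers import BertTokenizer, TFBertForMaskedLM
    #     >>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
    #     >>> model = TFBertForMaskedLM.from_pretrained("bert-base-cased")
    #     >>> inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
    #     >>> logits = model(inputs).logits  # (batch_size, seq_length, vocab_size)
    #     >>> mask_index = tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0]
    #     >>> predicted_id = tf.argmax(logits[0, mask_index])
    #     >>> tokenizer.decode([int(predicted_id)])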
class TFBertLMHeadModel(TFBertPreTrainedModel, TFCausalLanguageModelingLoss): | |
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [ | |
r"pooler", | |
r"cls.seq_relationship", | |
r"cls.predictions.decoder.weight", | |
r"nsp___cls", | |
] | |
def __init__(self, config: BertConfig, *inputs, **kwargs): | |
super().__init__(config, *inputs, **kwargs) | |
if not config.is_decoder: | |
logger.warning("If you want to use `TFBertLMHeadModel` as a standalone, add `is_decoder=True.`") | |
self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert") | |
self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name="mlm___cls") | |
def get_lm_head(self) -> tf.keras.layers.Layer: | |
return self.mlm.predictions | |
def get_prefix_bias_name(self) -> str: | |
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) | |
return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name | |
def call( | |
self, | |
input_ids: Optional[TFModelInputType] = None, | |
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
output_attentions: Optional[bool] = None, | |
output_hidden_states: Optional[bool] = None, | |
return_dict: Optional[bool] = None, | |
labels: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
training: Optional[bool] = False, | |
**kwargs, | |
) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]: | |
r""" | |
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`): | |
Labels for computing the cross entropy classification loss. Indices should be in ``[0, ..., | |
config.vocab_size - 1]``. | |
""" | |
inputs = input_processing( | |
func=self.call, | |
config=self.config, | |
input_ids=input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
labels=labels, | |
training=training, | |
kwargs_call=kwargs, | |
) | |
outputs = self.bert( | |
input_ids=inputs["input_ids"], | |
attention_mask=inputs["attention_mask"], | |
token_type_ids=inputs["token_type_ids"], | |
position_ids=inputs["position_ids"], | |
head_mask=inputs["head_mask"], | |
inputs_embeds=inputs["inputs_embeds"], | |
output_attentions=inputs["output_attentions"], | |
output_hidden_states=inputs["output_hidden_states"], | |
return_dict=inputs["return_dict"], | |
training=inputs["training"], | |
) | |
sequence_output = outputs[0] | |
logits = self.mlm(sequence_output=sequence_output, training=inputs["training"]) | |
loss = None | |
if inputs["labels"] is not None: | |
# shift labels to the left and cut last logit token | |
logits = logits[:, :-1] | |
labels = inputs["labels"][:, 1:] | |
loss = self.compute_loss(labels=labels, logits=logits) | |
if not inputs["return_dict"]: | |
output = (logits,) + outputs[2:] | |
return ((loss,) + output) if loss is not None else output | |
return TFCausalLMOutput( | |
loss=loss, | |
logits=logits, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
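    # Causal-LM label shift, worked example (comment only): for labels [t0, t1, t2, t3] the loss compares
    # logits at positions [0, 1, 2] (logits[:, :-1]) against labels [t1, t2, t3] (labels[:, 1:]), i.e. each
    # position predicts the next token; the final logit has no target and is dropped.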
def serving_output(self, output: TFCausalLMOutput) -> TFCausalLMOutput: | |
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None | |
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None | |
return TFCausalLMOutput(logits=output.logits, hidden_states=hs, attentions=attns) | |
class TFBertForNextSentencePrediction(TFBertPreTrainedModel, TFNextSentencePredictionLoss): | |
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"cls.predictions"] | |
def __init__(self, config: BertConfig, *inputs, **kwargs): | |
super().__init__(config, *inputs, **kwargs) | |
self.bert = TFBertMainLayer(config, name="bert") | |
self.nsp = TFBertNSPHead(config, name="nsp___cls") | |
def call( | |
self, | |
input_ids: Optional[TFModelInputType] = None, | |
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
output_attentions: Optional[bool] = None, | |
output_hidden_states: Optional[bool] = None, | |
return_dict: Optional[bool] = None, | |
next_sentence_label: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
training: Optional[bool] = False, | |
**kwargs, | |
) -> Union[TFNextSentencePredictorOutput, Tuple[tf.Tensor]]: | |
r""" | |
Return: | |
Examples:: | |
>>> import tensorflow as tf | |
>>> from transformers import BertTokenizer, TFBertForNextSentencePrediction | |
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') | |
>>> model = TFBertForNextSentencePrediction.from_pretrained('bert-base-uncased') | |
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." | |
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." | |
>>> encoding = tokenizer(prompt, next_sentence, return_tensors='tf') | |
>>> logits = model(encoding['input_ids'], token_type_ids=encoding['token_type_ids'])[0] | |
>>> assert logits[0][0] < logits[0][1] # the next sentence was random | |
""" | |
inputs = input_processing( | |
func=self.call, | |
config=self.config, | |
input_ids=input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
next_sentence_label=next_sentence_label, | |
training=training, | |
kwargs_call=kwargs, | |
) | |
outputs = self.bert( | |
input_ids=inputs["input_ids"], | |
attention_mask=inputs["attention_mask"], | |
token_type_ids=inputs["token_type_ids"], | |
position_ids=inputs["position_ids"], | |
head_mask=inputs["head_mask"], | |
inputs_embeds=inputs["inputs_embeds"], | |
output_attentions=inputs["output_attentions"], | |
output_hidden_states=inputs["output_hidden_states"], | |
return_dict=inputs["return_dict"], | |
training=inputs["training"], | |
) | |
pooled_output = outputs[1] | |
seq_relationship_scores = self.nsp(pooled_output=pooled_output) | |
next_sentence_loss = ( | |
None | |
if inputs["next_sentence_label"] is None | |
else self.compute_loss(labels=inputs["next_sentence_label"], logits=seq_relationship_scores) | |
) | |
if not inputs["return_dict"]: | |
output = (seq_relationship_scores,) + outputs[2:] | |
return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output | |
return TFNextSentencePredictorOutput( | |
loss=next_sentence_loss, | |
logits=seq_relationship_scores, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
def serving_output(self, output: TFNextSentencePredictorOutput) -> TFNextSentencePredictorOutput: | |
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None | |
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None | |
return TFNextSentencePredictorOutput(logits=output.logits, hidden_states=hs, attentions=attns) | |
class TFBertForSequenceClassification(TFBertPreTrainedModel, TFSequenceClassificationLoss): | |
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"] | |
_keys_to_ignore_on_load_missing = [r"dropout"] | |
def __init__(self, config: BertConfig, *inputs, **kwargs): | |
super().__init__(config, *inputs, **kwargs) | |
self.num_labels = config.num_labels | |
self.bert = TFBertMainLayer(config, name="bert") | |
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) | |
self.classifier = tf.keras.layers.Dense( | |
units=config.num_labels, | |
kernel_initializer=get_initializer(config.initializer_range), | |
name="classifier", | |
) | |
def call( | |
self, | |
input_ids: Optional[TFModelInputType] = None, | |
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
output_attentions: Optional[bool] = None, | |
output_hidden_states: Optional[bool] = None, | |
return_dict: Optional[bool] = None, | |
labels: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
training: Optional[bool] = False, | |
**kwargs, | |
) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: | |
r""" | |
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`): | |
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., | |
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), | |
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). | |
""" | |
inputs = input_processing( | |
func=self.call, | |
config=self.config, | |
input_ids=input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
labels=labels, | |
training=training, | |
kwargs_call=kwargs, | |
) | |
outputs = self.bert( | |
input_ids=inputs["input_ids"], | |
attention_mask=inputs["attention_mask"], | |
token_type_ids=inputs["token_type_ids"], | |
position_ids=inputs["position_ids"], | |
head_mask=inputs["head_mask"], | |
inputs_embeds=inputs["inputs_embeds"], | |
output_attentions=inputs["output_attentions"], | |
output_hidden_states=inputs["output_hidden_states"], | |
return_dict=inputs["return_dict"], | |
training=inputs["training"], | |
) | |
pooled_output = outputs[1] | |
pooled_output = self.dropout(inputs=pooled_output, training=inputs["training"]) | |
logits = self.classifier(inputs=pooled_output) | |
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=logits) | |
if not inputs["return_dict"]: | |
output = (logits,) + outputs[2:] | |
return ((loss,) + output) if loss is not None else output | |
return TFSequenceClassifierOutput( | |
loss=loss, | |
logits=logits, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput: | |
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None | |
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None | |
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns) | |
class TFBertForMultipleChoice(TFBertPreTrainedModel, TFMultipleChoiceLoss): | |
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"] | |
_keys_to_ignore_on_load_missing = [r"dropout"] | |
def __init__(self, config: BertConfig, *inputs, **kwargs): | |
super().__init__(config, *inputs, **kwargs) | |
self.bert = TFBertMainLayer(config, name="bert") | |
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) | |
self.classifier = tf.keras.layers.Dense( | |
units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" | |
) | |
    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """
        Dummy inputs to build the network.

        Returns:
            :obj:`Dict[str, tf.Tensor]` with dummy inputs.
        """
        return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}
def call( | |
self, | |
input_ids: Optional[TFModelInputType] = None, | |
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
output_attentions: Optional[bool] = None, | |
output_hidden_states: Optional[bool] = None, | |
return_dict: Optional[bool] = None, | |
labels: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
training: Optional[bool] = False, | |
**kwargs, | |
) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: | |
r""" | |
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`): | |
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices - 1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
            :obj:`input_ids` above)
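
        Example (a minimal usage sketch; the checkpoint name and the prompt/choice strings are illustrative
        assumptions)::

            >>> import tensorflow as tf
            >>> from transformers import BertTokenizer, TFBertForMultipleChoice

            >>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
            >>> model = TFBertForMultipleChoice.from_pretrained("bert-base-cased")

            >>> prompt = "In Italy, pizza served in formal settings is presented unsliced."
            >>> choice0 = "It is eaten with a fork and a knife."
            >>> choice1 = "It is eaten while held in the hand."

            >>> # Encode (prompt, choice) pairs, then add a leading batch dimension of size 1.
            >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True)
            >>> inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}

            >>> outputs = model(inputs)
            >>> logits = outputs.logits  # shape (1, 2): one score per choice; the classifier is still untrained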
""" | |
inputs = input_processing( | |
func=self.call, | |
config=self.config, | |
input_ids=input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
labels=labels, | |
training=training, | |
kwargs_call=kwargs, | |
) | |
if inputs["input_ids"] is not None: | |
num_choices = shape_list(inputs["input_ids"])[1] | |
seq_length = shape_list(inputs["input_ids"])[2] | |
else: | |
num_choices = shape_list(inputs["inputs_embeds"])[1] | |
seq_length = shape_list(inputs["inputs_embeds"])[2] | |
flat_input_ids = ( | |
tf.reshape(tensor=inputs["input_ids"], shape=(-1, seq_length)) if inputs["input_ids"] is not None else None | |
) | |
flat_attention_mask = ( | |
tf.reshape(tensor=inputs["attention_mask"], shape=(-1, seq_length)) | |
if inputs["attention_mask"] is not None | |
else None | |
) | |
flat_token_type_ids = ( | |
tf.reshape(tensor=inputs["token_type_ids"], shape=(-1, seq_length)) | |
if inputs["token_type_ids"] is not None | |
else None | |
) | |
flat_position_ids = ( | |
tf.reshape(tensor=inputs["position_ids"], shape=(-1, seq_length)) | |
if inputs["position_ids"] is not None | |
else None | |
) | |
flat_inputs_embeds = ( | |
tf.reshape(tensor=inputs["inputs_embeds"], shape=(-1, seq_length, shape_list(inputs["inputs_embeds"])[3])) | |
if inputs["inputs_embeds"] is not None | |
else None | |
) | |
outputs = self.bert( | |
input_ids=flat_input_ids, | |
attention_mask=flat_attention_mask, | |
token_type_ids=flat_token_type_ids, | |
position_ids=flat_position_ids, | |
head_mask=inputs["head_mask"], | |
inputs_embeds=flat_inputs_embeds, | |
output_attentions=inputs["output_attentions"], | |
output_hidden_states=inputs["output_hidden_states"], | |
return_dict=inputs["return_dict"], | |
training=inputs["training"], | |
) | |
pooled_output = outputs[1] | |
pooled_output = self.dropout(inputs=pooled_output, training=inputs["training"]) | |
logits = self.classifier(inputs=pooled_output) | |
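        # (batch_size * num_choices, 1) -> (batch_size, num_choices), so the loss/softmax runs over the choices.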
reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices)) | |
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=reshaped_logits) | |
if not inputs["return_dict"]: | |
output = (reshaped_logits,) + outputs[2:] | |
return ((loss,) + output) if loss is not None else output | |
return TFMultipleChoiceModelOutput( | |
loss=loss, | |
logits=reshaped_logits, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
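    # Serving signature for SavedModel export: multiple-choice inputs are 3-D (batch_size, num_choices, seq_length).
    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
                "token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
            }
        ]
    )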
def serving(self, inputs: Dict[str, tf.Tensor]) -> TFMultipleChoiceModelOutput: | |
output = self.call(input_ids=inputs) | |
return self.serving_output(output) | |
def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput: | |
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None | |
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None | |
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns) | |
class TFBertForTokenClassification(TFBertPreTrainedModel, TFTokenClassificationLoss): | |
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [ | |
r"pooler", | |
r"mlm___cls", | |
r"nsp___cls", | |
r"cls.predictions", | |
r"cls.seq_relationship", | |
] | |
_keys_to_ignore_on_load_missing = [r"dropout"] | |
def __init__(self, config: BertConfig, *inputs, **kwargs): | |
super().__init__(config, *inputs, **kwargs) | |
self.num_labels = config.num_labels | |
self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert") | |
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) | |
self.classifier = tf.keras.layers.Dense( | |
units=config.num_labels, | |
kernel_initializer=get_initializer(config.initializer_range), | |
name="classifier", | |
) | |
def call( | |
self, | |
input_ids: Optional[TFModelInputType] = None, | |
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
output_attentions: Optional[bool] = None, | |
output_hidden_states: Optional[bool] = None, | |
return_dict: Optional[bool] = None, | |
labels: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
training: Optional[bool] = False, | |
**kwargs, | |
) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: | |
r""" | |
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`): | |
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - | |
1]``. | |
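
        Example (a minimal usage sketch; the checkpoint name and the all-zero labels are illustrative assumptions)::

            >>> import tensorflow as tf
            >>> from transformers import BertTokenizer, TFBertForTokenClassification

            >>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
            >>> model = TFBertForTokenClassification.from_pretrained("bert-base-cased")

            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
            >>> inputs["labels"] = tf.zeros_like(inputs["input_ids"])  # one label id per token

            >>> outputs = model(inputs)
            >>> loss = outputs.loss  # the token classification head is randomly initialized
            >>> logits = outputs.logits  # shape (batch_size, sequence_length, num_labels)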
""" | |
inputs = input_processing( | |
func=self.call, | |
config=self.config, | |
input_ids=input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
labels=labels, | |
training=training, | |
kwargs_call=kwargs, | |
) | |
outputs = self.bert( | |
input_ids=inputs["input_ids"], | |
attention_mask=inputs["attention_mask"], | |
token_type_ids=inputs["token_type_ids"], | |
position_ids=inputs["position_ids"], | |
head_mask=inputs["head_mask"], | |
inputs_embeds=inputs["inputs_embeds"], | |
output_attentions=inputs["output_attentions"], | |
output_hidden_states=inputs["output_hidden_states"], | |
return_dict=inputs["return_dict"], | |
training=inputs["training"], | |
) | |
sequence_output = outputs[0] | |
sequence_output = self.dropout(inputs=sequence_output, training=inputs["training"]) | |
logits = self.classifier(inputs=sequence_output) | |
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=logits) | |
if not inputs["return_dict"]: | |
output = (logits,) + outputs[2:] | |
return ((loss,) + output) if loss is not None else output | |
return TFTokenClassifierOutput( | |
loss=loss, | |
logits=logits, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput: | |
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None | |
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None | |
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns) | |
class TFBertForQuestionAnswering(TFBertPreTrainedModel, TFQuestionAnsweringLoss): | |
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [ | |
r"pooler", | |
r"mlm___cls", | |
r"nsp___cls", | |
r"cls.predictions", | |
r"cls.seq_relationship", | |
] | |
def __init__(self, config: BertConfig, *inputs, **kwargs): | |
super().__init__(config, *inputs, **kwargs) | |
self.num_labels = config.num_labels | |
self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert") | |
self.qa_outputs = tf.keras.layers.Dense( | |
units=config.num_labels, | |
kernel_initializer=get_initializer(config.initializer_range), | |
name="qa_outputs", | |
) | |
def call( | |
self, | |
input_ids: Optional[TFModelInputType] = None, | |
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
output_attentions: Optional[bool] = None, | |
output_hidden_states: Optional[bool] = None, | |
return_dict: Optional[bool] = None, | |
start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None, | |
training: Optional[bool] = False, | |
**kwargs, | |
) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: | |
r""" | |
start_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`): | |
Labels for position (index) of the start of the labelled span for computing the token classification loss. | |
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
end_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`): | |
Labels for position (index) of the end of the labelled span for computing the token classification loss. | |
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
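
        Example (a minimal usage sketch; the checkpoint name and the question/context strings are illustrative
        assumptions)::

            >>> import tensorflow as tf
            >>> from transformers import BertTokenizer, TFBertForQuestionAnswering

            >>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
            >>> model = TFBertForQuestionAnswering.from_pretrained("bert-base-cased")

            >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
            >>> inputs = tokenizer(question, text, return_tensors="tf")
            >>> outputs = model(inputs)

            >>> # with an untrained QA head the extracted span is arbitrary; this only shows the decoding pattern
            >>> start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
            >>> end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
            >>> tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"].numpy()[0])
            >>> answer = " ".join(tokens[start_index : end_index + 1])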
""" | |
inputs = input_processing( | |
func=self.call, | |
config=self.config, | |
input_ids=input_ids, | |
attention_mask=attention_mask, | |
token_type_ids=token_type_ids, | |
position_ids=position_ids, | |
head_mask=head_mask, | |
inputs_embeds=inputs_embeds, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
return_dict=return_dict, | |
start_positions=start_positions, | |
end_positions=end_positions, | |
training=training, | |
kwargs_call=kwargs, | |
) | |
outputs = self.bert( | |
input_ids=inputs["input_ids"], | |
attention_mask=inputs["attention_mask"], | |
token_type_ids=inputs["token_type_ids"], | |
position_ids=inputs["position_ids"], | |
head_mask=inputs["head_mask"], | |
inputs_embeds=inputs["inputs_embeds"], | |
output_attentions=inputs["output_attentions"], | |
output_hidden_states=inputs["output_hidden_states"], | |
return_dict=inputs["return_dict"], | |
training=inputs["training"], | |
) | |
sequence_output = outputs[0] | |
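        # qa_outputs produces two values per token (num_labels == 2); split them into start/end logits of shape
        # (batch_size, sequence_length) each.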
logits = self.qa_outputs(inputs=sequence_output) | |
start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1) | |
start_logits = tf.squeeze(input=start_logits, axis=-1) | |
end_logits = tf.squeeze(input=end_logits, axis=-1) | |
loss = None | |
if inputs["start_positions"] is not None and inputs["end_positions"] is not None: | |
labels = {"start_position": inputs["start_positions"]} | |
labels["end_position"] = inputs["end_positions"] | |
loss = self.compute_loss(labels=labels, logits=(start_logits, end_logits)) | |
if not inputs["return_dict"]: | |
output = (start_logits, end_logits) + outputs[2:] | |
return ((loss,) + output) if loss is not None else output | |
return TFQuestionAnsweringModelOutput( | |
loss=loss, | |
start_logits=start_logits, | |
end_logits=end_logits, | |
hidden_states=outputs.hidden_states, | |
attentions=outputs.attentions, | |
) | |
def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput: | |
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None | |
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None | |
return TFQuestionAnsweringModelOutput( | |
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns | |
) | |