from typing import Optional, Tuple

import torch


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def sdpa_attention_forward(
    module: torch.nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    dropout: float = 0.0,
    scaling: Optional[float] = None,
    is_causal: Optional[bool] = None,
    **kwargs,
) -> Tuple[torch.Tensor, None]:
    if hasattr(module, "num_key_value_groups"):
        key = repeat_kv(key, module.num_key_value_groups)
        value = repeat_kv(value, module.num_key_value_groups)

    causal_mask = attention_mask
    if attention_mask is not None:
        causal_mask = causal_mask[:, :, :, : key.shape[-2]]

    # SDPA with memory-efficient backend is bugged with non-contiguous inputs and custom attn_mask for some torch versions
    # Reference: https://github.com/pytorch/pytorch/issues/112577.
    query = query.contiguous()
    key = key.contiguous()
    value = value.contiguous()

    # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
    # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
    if is_causal is None:
        is_causal = causal_mask is None and query.shape[2] > 1

    # Shapes (e.g. query.shape[2]) are tensors during jit tracing, resulting in `is_causal` being a tensor.
    # We convert it to a bool for the SDPA kernel that only accepts bools.
    if torch.jit.is_tracing() and isinstance(is_causal, torch.Tensor):
        is_causal = is_causal.item()

    attn_output = torch.nn.functional.scaled_dot_product_attention(
        query,
        key,
        value,
        attn_mask=causal_mask,
        dropout_p=dropout,
        scale=scaling,
        is_causal=is_causal,
    )
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, None
transformers/src/transformers/integrations/sdpa_attention.py/0
{ "file_path": "transformers/src/transformers/integrations/sdpa_attention.py", "repo_id": "transformers", "token_count": 977 }
#define min(a, b) ((a)<(b)?(a):(b))
#define max(a, b) ((a)>(b)?(a):(b))
#define ceil_divide(a, b) ((a)/(b)+((a)%(b)!=0))
#define select(cond, a, b) ((cond)?(a):(b))

#define PI 3.141592
#define EPSILON 1e-8
#define MAX_VAL 1e12
#define MIN_VAL -1e12
#define EMPTY_VALUE -1
transformers/src/transformers/kernels/yoso/common.h/0
{ "file_path": "transformers/src/transformers/kernels/yoso/common.h", "repo_id": "transformers", "token_count": 140 }
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Dict, Optional, Tuple

import flax
import jax.numpy as jnp

from .utils import ModelOutput


@flax.struct.dataclass
class FlaxBaseModelOutput(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    last_hidden_state: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxBaseModelOutputWithNoAttention(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states.

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one
            for the output of each layer) of shape `(batch_size, num_channels, height, width)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    last_hidden_state: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxBaseModelOutputWithPoolingAndNoAttention(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state after a pooling operation on the spatial dimensions.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one
            for the output of each layer) of shape `(batch_size, num_channels, height, width)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    last_hidden_state: jnp.ndarray = None
    pooler_output: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxImageClassifierOutputWithNoAttention(ModelOutput):
    """
    Base class for outputs of image classification models.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one
            for the output of each stage) of shape `(batch_size, num_channels, height, width)`.

            Hidden-states (also called feature maps) of the model at the output of each stage.
    """

    logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxBaseModelOutputWithPast(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        past_key_values (`Dict[str, jnp.ndarray]`):
            Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for
            fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape
            *[batch_size, max_length]*.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    last_hidden_state: jnp.ndarray = None
    past_key_values: Optional[Dict[str, jnp.ndarray]] = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxBaseModelOutputWithPooling(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token) further processed by a
            Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
            prediction (classification) objective during pretraining.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    last_hidden_state: jnp.ndarray = None
    pooler_output: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxBaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token) after further processing
            through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns
            the classification token after processing through a linear layer and a tanh activation function. The
            linear layer weights are trained from the next sentence prediction (classification) objective during
            pretraining.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings, if the model has an embedding layer, + one
            for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally if
            `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
            encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
            `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
    """

    last_hidden_state: jnp.ndarray = None
    pooler_output: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxBaseModelOutputWithPastAndCrossAttentions(ModelOutput):
    """
    Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.

            If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
            hidden_size)` is output.
        past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally if
            `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
            encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
            `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
    """

    last_hidden_state: jnp.ndarray = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxSeq2SeqModelOutput(ModelOutput):
    """
    Base class for model encoder's outputs that also contains: pre-computed hidden states that can speed up sequential
    decoding.

    Args:
        last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the decoder of the model.

            If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
            hidden_size)` is output.
        past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
            cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
        decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
        encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
    """

    last_hidden_state: jnp.ndarray = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
    encoder_last_hidden_state: Optional[jnp.ndarray] = None
    encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    encoder_attentions: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxCausalLMOutputWithCrossAttentions(ModelOutput):
    """
    Base class for causal language model (or autoregressive) outputs.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Cross-attention weights after the attention softmax, used to compute the weighted average in the
            cross-attention heads.
        past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `jnp.ndarray` tuples of length `config.n_layers`, with each tuple containing the cached key,
            value states of the self-attention and the cross-attention layers if model is used in encoder-decoder
            setting. Only relevant if `config.is_decoder = True`.

            Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
    """

    logits: jnp.ndarray = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxMaskedLMOutput(ModelOutput):
    """
    Base class for masked language model outputs.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None


FlaxCausalLMOutput = FlaxMaskedLMOutput


@flax.struct.dataclass
class FlaxSeq2SeqLMOutput(ModelOutput):
    """
    Base class for sequence-to-sequence language model outputs.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
            cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
        decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
        encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
    """

    logits: jnp.ndarray = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
    encoder_last_hidden_state: Optional[jnp.ndarray] = None
    encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    encoder_attentions: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxNextSentencePredictorOutput(ModelOutput):
    """
    Base class for outputs of models predicting if two sentences are consecutive or not.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxSequenceClassifierOutput(ModelOutput):
    """
    Base class for outputs of sentence classification models.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxSeq2SeqSequenceClassifierOutput(ModelOutput):
    """
    Base class for outputs of sequence-to-sequence sentence classification models.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
            cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
        decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
        encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
    """

    logits: jnp.ndarray = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
    encoder_last_hidden_state: Optional[jnp.ndarray] = None
    encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    encoder_attentions: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxMultipleChoiceModelOutput(ModelOutput):
    """
    Base class for outputs of multiple choice models.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, num_choices)`):
            *num_choices* is the second dimension of the input tensors. (see *input_ids* above).

            Classification scores (before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxTokenClassifierOutput(ModelOutput):
    """
    Base class for outputs of token classification models.

    Args:
        logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.num_labels)`):
            Classification scores (before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxQuestionAnsweringModelOutput(ModelOutput):
    """
    Base class for outputs of question answering models.

    Args:
        start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Span-start scores (before SoftMax).
        end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Span-end scores (before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    start_logits: jnp.ndarray = None
    end_logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None


@flax.struct.dataclass
class FlaxSeq2SeqQuestionAnsweringModelOutput(ModelOutput):
    """
    Base class for outputs of sequence-to-sequence question answering models.

    Args:
        start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Span-start scores (before SoftMax).
        end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Span-end scores (before SoftMax).
        past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
            cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
        decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
        encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
    """

    start_logits: jnp.ndarray = None
    end_logits: jnp.ndarray = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
    encoder_last_hidden_state: Optional[jnp.ndarray] = None
    encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
transformers/src/transformers/modeling_flax_outputs.py/0
{ "file_path": "transformers/src/transformers/modeling_flax_outputs.py", "repo_id": "transformers", "token_count": 15429 }
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 ALBERT model.""" from __future__ import annotations import math from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPooling, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_albert import AlbertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "albert/albert-base-v2" _CONFIG_FOR_DOC = "AlbertConfig" class TFAlbertPreTrainingLoss: """ Loss function suitable for ALBERT pretraining, that is, the task of pretraining a language model by combining SOP + MLM. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. 
""" def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor: loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) if self.config.tf_legacy_loss: # make sure only labels that are not equal to -100 # are taken into account as loss masked_lm_active_loss = tf.not_equal(tf.reshape(tensor=labels["labels"], shape=(-1,)), -100) masked_lm_reduced_logits = tf.boolean_mask( tensor=tf.reshape(tensor=logits[0], shape=(-1, shape_list(logits[0])[2])), mask=masked_lm_active_loss, ) masked_lm_labels = tf.boolean_mask( tensor=tf.reshape(tensor=labels["labels"], shape=(-1,)), mask=masked_lm_active_loss ) sentence_order_active_loss = tf.not_equal( tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), -100 ) sentence_order_reduced_logits = tf.boolean_mask( tensor=tf.reshape(tensor=logits[1], shape=(-1, 2)), mask=sentence_order_active_loss ) sentence_order_label = tf.boolean_mask( tensor=tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), mask=sentence_order_active_loss ) masked_lm_loss = loss_fn(y_true=masked_lm_labels, y_pred=masked_lm_reduced_logits) sentence_order_loss = loss_fn(y_true=sentence_order_label, y_pred=sentence_order_reduced_logits) masked_lm_loss = tf.reshape(tensor=masked_lm_loss, shape=(-1, shape_list(sentence_order_loss)[0])) masked_lm_loss = tf.reduce_mean(input_tensor=masked_lm_loss, axis=0) return masked_lm_loss + sentence_order_loss # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0]) # make sure only labels that are not equal to -100 # are taken into account for the loss computation lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype) masked_lm_losses = unmasked_lm_losses * lm_loss_mask reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask) sop_logits = tf.reshape(logits[1], (-1, 2)) # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_sop_loss = loss_fn(y_true=tf.nn.relu(labels["sentence_order_label"]), y_pred=sop_logits) sop_loss_mask = tf.cast(labels["sentence_order_label"] != -100, dtype=unmasked_sop_loss.dtype) masked_sop_loss = unmasked_sop_loss * sop_loss_mask reduced_masked_sop_loss = tf.reduce_sum(masked_sop_loss) / tf.reduce_sum(sop_loss_mask) return tf.reshape(reduced_masked_lm_loss + reduced_masked_sop_loss, (1,)) class TFAlbertEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_size = config.embedding_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range), ) with 
tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.embedding_size], initializer=get_initializer(self.initializer_range), ) if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.embedding_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call def call( self, input_ids: tf.Tensor = None, position_ids: tf.Tensor = None, token_type_ids: tf.Tensor = None, inputs_embeds: tf.Tensor = None, past_key_values_length=0, training: bool = False, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ if input_ids is None and inputs_embeds is None: raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: position_ids = tf.expand_dims( tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0 ) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFAlbertAttention(keras.layers.Layer): """Contains the complete attention sublayer, including both dropouts and layer norm.""" def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.output_attentions = config.output_attentions self.query = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") # Two different dropout probabilities; see https://github.com/google-research/albert/blob/master/modeling.py#L971-L993 self.attention_dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.output_dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from 
[batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: batch_size = shape_list(input_tensor)[0] mixed_query_layer = self.query(inputs=input_tensor) mixed_key_layer = self.key(inputs=input_tensor) mixed_value_layer = self.value(inputs=input_tensor) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFAlbertModel call() function) attention_scores = tf.add(attention_scores, attention_mask) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.attention_dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) context_layer = tf.reshape(tensor=context_layer, shape=(batch_size, -1, self.all_head_size)) self_outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) hidden_states = self_outputs[0] hidden_states = self.dense(inputs=hidden_states) hidden_states = self.output_dropout(inputs=hidden_states, training=training) attention_output = self.LayerNorm(inputs=hidden_states + input_tensor) # add attentions if we output them outputs = (attention_output,) + self_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFAlbertLayer(keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.attention = 
TFAlbertAttention(config, name="attention") self.ffn = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn" ) if isinstance(config.hidden_act, str): self.activation = get_tf_activation(config.hidden_act) else: self.activation = config.hidden_act self.ffn_output = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn_output" ) self.full_layer_layer_norm = keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="full_layer_layer_norm" ) self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool = False, ) -> Tuple[tf.Tensor]: attention_outputs = self.attention( input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, training=training, ) ffn_output = self.ffn(inputs=attention_outputs[0]) ffn_output = self.activation(ffn_output) ffn_output = self.ffn_output(inputs=ffn_output) ffn_output = self.dropout(inputs=ffn_output, training=training) hidden_states = self.full_layer_layer_norm(inputs=ffn_output + attention_outputs[0]) # add attentions if we output them outputs = (hidden_states,) + attention_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "ffn", None) is not None: with tf.name_scope(self.ffn.name): self.ffn.build([None, None, self.config.hidden_size]) if getattr(self, "ffn_output", None) is not None: with tf.name_scope(self.ffn_output.name): self.ffn_output.build([None, None, self.config.intermediate_size]) if getattr(self, "full_layer_layer_norm", None) is not None: with tf.name_scope(self.full_layer_layer_norm.name): self.full_layer_layer_norm.build([None, None, self.config.hidden_size]) class TFAlbertLayerGroup(keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.albert_layers = [ TFAlbertLayer(config, name=f"albert_layers_._{i}") for i in range(config.inner_group_num) ] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, training: bool = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: layer_hidden_states = () if output_hidden_states else None layer_attentions = () if output_attentions else None for layer_index, albert_layer in enumerate(self.albert_layers): if output_hidden_states: layer_hidden_states = layer_hidden_states + (hidden_states,) layer_output = albert_layer( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[layer_index], output_attentions=output_attentions, training=training, ) hidden_states = layer_output[0] if output_attentions: layer_attentions = layer_attentions + (layer_output[1],) # Add last layer if output_hidden_states: layer_hidden_states = layer_hidden_states + (hidden_states,) return tuple(v for v in [hidden_states, layer_hidden_states, layer_attentions] if v is not None) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "albert_layers", None) is not None: for layer in self.albert_layers: with tf.name_scope(layer.name): layer.build(None) class TFAlbertTransformer(keras.layers.Layer): def 
__init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.num_hidden_layers = config.num_hidden_layers self.num_hidden_groups = config.num_hidden_groups # Number of layers in a hidden group self.layers_per_group = int(config.num_hidden_layers / config.num_hidden_groups) self.embedding_hidden_mapping_in = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="embedding_hidden_mapping_in", ) self.albert_layer_groups = [ TFAlbertLayerGroup(config, name=f"albert_layer_groups_._{i}") for i in range(config.num_hidden_groups) ] self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: hidden_states = self.embedding_hidden_mapping_in(inputs=hidden_states) all_attentions = () if output_attentions else None all_hidden_states = (hidden_states,) if output_hidden_states else None for i in range(self.num_hidden_layers): # Index of the hidden group group_idx = int(i / (self.num_hidden_layers / self.num_hidden_groups)) layer_group_output = self.albert_layer_groups[group_idx]( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[group_idx * self.layers_per_group : (group_idx + 1) * self.layers_per_group], output_attentions=output_attentions, output_hidden_states=output_hidden_states, training=training, ) hidden_states = layer_group_output[0] if output_attentions: all_attentions = all_attentions + layer_group_output[-1] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embedding_hidden_mapping_in", None) is not None: with tf.name_scope(self.embedding_hidden_mapping_in.name): self.embedding_hidden_mapping_in.build([None, None, self.config.embedding_size]) if getattr(self, "albert_layer_groups", None) is not None: for layer in self.albert_layer_groups: with tf.name_scope(layer.name): layer.build(None) class TFAlbertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = AlbertConfig base_model_prefix = "albert" class TFAlbertMLMHead(keras.layers.Layer): def __init__(self, config: AlbertConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_size = config.embedding_size self.dense = keras.layers.Dense( config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.activation = get_tf_activation(config.hidden_act) else: self.activation = config.hidden_act self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.decoder = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") self.decoder_bias = self.add_weight( shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="decoder/bias" ) if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.embedding_size]) def get_output_embeddings(self) -> keras.layers.Layer: return self.decoder def set_output_embeddings(self, value: tf.Variable): self.decoder.weight = value self.decoder.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {"bias": self.bias, "decoder_bias": self.decoder_bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.decoder_bias = value["decoder_bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size]) hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.decoder_bias) return hidden_states @keras_serializable class TFAlbertMainLayer(keras.layers.Layer): config_class = AlbertConfig def __init__(self, config: AlbertConfig, add_pooling_layer: bool = True, **kwargs): super().__init__(**kwargs) self.config = config self.embeddings = TFAlbertEmbeddings(config, name="embeddings") self.encoder = TFAlbertTransformer(config, name="encoder") self.pooler = ( keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="pooler", ) if add_pooling_layer else None ) def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, training=training, ) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
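        # For example, a padding pattern `attention_mask = [1, 1, 0]` turns into an additive
        # bias of `[0.0, 0.0, -10000.0]` after the cast and multiply below.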
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(inputs=sequence_output[:, 0]) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build([None, None, self.config.hidden_size]) @dataclass class TFAlbertForPreTrainingOutput(ModelOutput): """ Output type of [`TFAlbertForPreTraining`]. Args: prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). sop_logits (`tf.Tensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor = None prediction_logits: tf.Tensor = None sop_logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None ALBERT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) 
This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`AlbertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ ALBERT_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) class TFAlbertModel(TFAlbertPreTrainedModel): def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, name="albert") @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "albert", None) is not None: with tf.name_scope(self.albert.name): self.albert.build(None) @add_start_docstrings( """ Albert Model with two heads on top for pretraining: a `masked language modeling` head and a `sentence order prediction` (classification) head. """, ALBERT_START_DOCSTRING, ) class TFAlbertForPreTraining(TFAlbertPreTrainedModel, TFAlbertPreTrainingLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"predictions.decoder.weight"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, name="albert") self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions") self.sop_classifier = TFAlbertSOPHead(config, name="sop_classifier") def get_lm_head(self) -> keras.layers.Layer: return self.predictions @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFAlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, sentence_order_label: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFAlbertForPreTrainingOutput, Tuple[tf.Tensor]]: r""" Return: Example: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, TFAlbertForPreTraining >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2") >>> model = TFAlbertForPreTraining.from_pretrained("albert/albert-base-v2") >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] >>> # Batch size 1 >>> outputs = model(input_ids) >>> prediction_logits = outputs.prediction_logits >>> sop_logits = outputs.sop_logits ```""" outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output, pooled_output = outputs[:2] prediction_scores = self.predictions(hidden_states=sequence_output) sop_scores = self.sop_classifier(pooled_output=pooled_output, training=training) total_loss = None if labels is not None and sentence_order_label is not None: d_labels = {"labels": labels} d_labels["sentence_order_label"] = sentence_order_label total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, sop_scores)) if not return_dict: output = (prediction_scores, sop_scores) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return TFAlbertForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, sop_logits=sop_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "albert", None) is not None: with tf.name_scope(self.albert.name): self.albert.build(None) if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) if getattr(self, "sop_classifier", None) is not None: with tf.name_scope(self.sop_classifier.name): self.sop_classifier.build(None) class TFAlbertSOPHead(keras.layers.Layer): def 
__init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.dropout = keras.layers.Dropout(rate=config.classifier_dropout_prob) self.classifier = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier", ) self.config = config def call(self, pooled_output: tf.Tensor, training: bool) -> tf.Tensor: dropout_pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=dropout_pooled_output) return logits def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING) class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions.decoder.weight"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert") self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions") def get_lm_head(self) -> keras.layers.Layer: return self.predictions @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, TFAlbertForMaskedLM >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2") >>> model = TFAlbertForMaskedLM.from_pretrained("albert/albert-base-v2") >>> # add mask_token >>> inputs = tokenizer(f"The capital of [MASK] is Paris.", return_tensors="tf") >>> logits = model(**inputs).logits >>> # retrieve index of [MASK] >>> mask_token_index = tf.where(inputs.input_ids == tokenizer.mask_token_id)[0][1] >>> predicted_token_id = tf.math.argmax(logits[0, mask_token_index], axis=-1) >>> tokenizer.decode(predicted_token_id) 'france' ``` ```python >>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] >>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(float(outputs.loss), 2) 0.81 ``` """ outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.predictions(hidden_states=sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "albert", None) is not None: with tf.name_scope(self.albert.name): self.albert.build(None) if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) @add_start_docstrings( """ Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ALBERT_START_DOCSTRING, ) class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"predictions"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, name="albert") self.dropout = keras.layers.Dropout(rate=config.classifier_dropout_prob) self.classifier = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="vumichien/albert-base-v2-imdb", output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="'LABEL_1'", expected_loss=0.12, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "albert", None) is not None: with tf.name_scope(self.albert.name): self.albert.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, ALBERT_START_DOCSTRING, ) class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert") classifier_dropout_prob = ( config.classifier_dropout_prob if config.classifier_dropout_prob is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(rate=classifier_dropout_prob) self.classifier = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(inputs=sequence_output, training=training) logits = self.classifier(inputs=sequence_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "albert", None) is not None: with tf.name_scope(self.albert.name): self.albert.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, ALBERT_START_DOCSTRING, ) class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert") self.qa_outputs = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="vumichien/albert-base-v2-squad2", output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=12, qa_target_end_index=13, expected_output="'a nice puppet'", expected_loss=7.36, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, start_positions: np.ndarray | tf.Tensor | None = None, end_positions: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" outputs = self.albert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(inputs=sequence_output) start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1) start_logits = tf.squeeze(input=start_logits, axis=-1) end_logits = tf.squeeze(input=end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "albert", None) is not None: with tf.name_scope(self.albert.name): self.albert.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, ALBERT_START_DOCSTRING, ) class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, name="albert") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.classifier = keras.layers.Dense( units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = ( tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None ) flat_token_type_ids = ( tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None ) flat_position_ids = ( tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None ) flat_inputs_embeds = ( tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) outputs = self.albert( input_ids=flat_input_ids, attention_mask=flat_attention_mask, token_type_ids=flat_token_type_ids, position_ids=flat_position_ids, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "albert", None) is not None: with tf.name_scope(self.albert.name): self.albert.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) __all__ = [ "TFAlbertPreTrainedModel", "TFAlbertModel", "TFAlbertForPreTraining", "TFAlbertForMaskedLM", "TFAlbertForSequenceClassification", "TFAlbertForTokenClassification", "TFAlbertForQuestionAnswering", "TFAlbertForMultipleChoice", "TFAlbertMainLayer", ]
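
# A minimal usage sketch for the classes exported above. It assumes the public
# "albert/albert-base-v2" checkpoint (the same one used in this file's docstring
# examples) and is only an illustration of the common call pattern, not part of
# the model code itself.
if __name__ == "__main__":
    from transformers import AutoTokenizer, TFAlbertModel

    tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
    model = TFAlbertModel.from_pretrained("albert/albert-base-v2")

    inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
    outputs = model(**inputs)  # TFBaseModelOutputWithPooling

    # (batch_size, sequence_length, hidden_size) and (batch_size, hidden_size)
    print(outputs.last_hidden_state.shape, outputs.pooler_output.shape)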
transformers/src/transformers/models/albert/modeling_tf_albert.py/0
{ "file_path": "transformers/src/transformers/models/albert/modeling_tf_albert.py", "repo_id": "transformers", "token_count": 29548 }
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/aria/modular_aria.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_aria.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2024 The Rhymes-AI Teams Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Callable, List, Optional, Tuple, Union from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, ModelOutput from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import ( LossKwargs, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.deprecation import deprecate_kwarg from ...utils.import_utils import is_torch_available from ..auto import AutoModel, AutoModelForCausalLM from .configuration_aria import AriaConfig, AriaTextConfig if is_torch_available(): import torch from torch import nn logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "AriaTextConfig" class AriaTextRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ AriaTextRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" class AriaProjectorMLP(nn.Module): """ Feed-Forward Network module for the Aria Projector. Args: in_features (`int`): Input embedding dimension. hidden_features (`int`): Hidden dimension of the feed-forward network. output_dim (`int`): Output dimension. """ def __init__(self, in_features, hidden_features, output_dim): super().__init__() self.linear_in = nn.Linear(in_features, hidden_features, bias=False) self.linear_out = nn.Linear(hidden_features, output_dim, bias=False) self.act = ACT2FN["gelu_new"] def forward(self, hidden_states): hidden_states = self.act(self.linear_in(hidden_states)) hidden_states = self.linear_out(hidden_states) return hidden_states class AriaCrossAttention(nn.Module): """ Aria Cross-Attention module. 
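    Within [`AriaProjector`], the learned query embeddings are passed as `hidden_states` and the
    vision features as `key_value_states`.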
Args: config (`AriaConfig`): The configuration to use. """ def __init__(self, config: AriaConfig, dropout_rate: float = 0): super().__init__() hidden_size = config.vision_config.hidden_size num_heads = config.vision_config.num_attention_heads self.num_heads = num_heads self.q_proj = nn.Linear(hidden_size, hidden_size, bias=False) self.k_proj = nn.Linear(hidden_size, hidden_size, bias=False) self.v_proj = nn.Linear(hidden_size, hidden_size, bias=False) # Original code here: https://github.com/rhymes-ai/Aria/blob/719ff4e52b727443cba3793b0e27fe64e0244fe1/aria/model/projector.py#L48 self.multihead_attn = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True) self.linear = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(dropout_rate) self.layer_norm = nn.LayerNorm(hidden_size) self.layer_norm_kv = nn.LayerNorm(hidden_size) def forward(self, key_value_states, hidden_states, attn_mask=None): """ Forward pass of the AriaCrossAttention module. Args: key_value_states (`torch.Tensor`): Input tensor for key and value. hidden_states (`torch.Tensor`): Input tensor for query. attn_mask (`torch.Tensor`, *optional*, defaults to None): Attention mask. Returns: torch.Tensor: Output tensor after cross-attention. """ query = self.q_proj(self.layer_norm(hidden_states)) key_value_states = self.layer_norm_kv(key_value_states) key = self.k_proj(key_value_states) value = self.v_proj(key_value_states) attn_output, _ = self.multihead_attn(query, key, value, attn_mask=attn_mask) attn_output = self.dropout(self.linear(attn_output)) return attn_output class AriaProjector(nn.Module): """ Aria Projector module. This module projects vision features into the language model's embedding space, enabling interaction between vision and language components. Args: config (`AriaConfig`): Configuration object for the model. """ def __init__( self, config: AriaConfig, ): super().__init__() self.patch_to_query_dict = config.projector_patch_to_query_dict self.in_features = config.vision_config.hidden_size self.num_heads = config.vision_config.num_attention_heads self.kv_dim = config.vision_config.hidden_size self.hidden_features = config.text_config.hidden_size self.output_dim = config.text_config.hidden_size self.query = nn.Parameter(torch.zeros(config.max_value_projector_patch_to_query_dict, self.in_features)) self.cross_attn = AriaCrossAttention(config) self.layer_norm = nn.LayerNorm(self.in_features) self.feed_forward = AriaProjectorMLP(self.in_features, self.hidden_features, self.output_dim) def forward(self, key_value_states: torch.Tensor, attn_mask: Optional[torch.Tensor] = None): """ Forward pass of the Projector module. Args: key_value_states (`torch.Tensor`): Input tensor of shape (batch_size, num_patches, kv_dim). attn_mask (`torch.Tensor`, *optional*, default is None): Attention mask. Returns: `torch.Tensor`: Output tensor of shape (batch_size, query_number, output_dim). """ batch_size, num_patches = key_value_states.shape[0], key_value_states.shape[1] if num_patches not in self.patch_to_query_dict.keys(): raise KeyError( f"Number of patches {num_patches} not found in patch_to_query_dict amongst possible values {self.patch_to_query_dict.keys()}." 
)

        query_num = self.patch_to_query_dict[num_patches]

        queries = self.query[:query_num].unsqueeze(0).repeat(batch_size, 1, 1)
        if attn_mask is not None:
            attn_mask = attn_mask.repeat_interleave(self.num_heads, 0)
            attn_mask = attn_mask.unsqueeze(1).expand(-1, queries.size(1), -1)

        attention_out = self.cross_attn(key_value_states, queries, attn_mask=attn_mask)

        out = self.feed_forward(self.layer_norm(attention_out))

        return out


class AriaSharedExpertsMLP(nn.Module):
    """
    Shared Expert MLP for shared experts.

    Unlike routed experts, shared experts process all tokens without routing. This class reconfigures the
    intermediate size in comparison to the LlamaMLP.

    Args:
        config (`AriaTextConfig`): Configuration object for the Aria language model.
    """

    def __init__(self, config: AriaTextConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size * config.moe_num_shared_experts
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


def sequential_experts_gemm(token_states, expert_weights, tokens_per_expert):
    """
    Compute the matrix multiplication (GEMM) for each expert sequentially. This approach is computationally
    inefficient, especially when dealing with a large number of experts.

    Args:
        token_states (torch.Tensor): Input tensor of shape (num_tokens, in_features).
        expert_weights (torch.Tensor): Weight tensor of shape (num_experts, in_features, out_features).
        tokens_per_expert (torch.Tensor): Number of tokens assigned to each expert.

    Returns:
        torch.Tensor: Output tensor of shape (num_tokens, out_features).
    """
    num_tokens = token_states.shape[0]
    out_features = expert_weights.shape[-1]
    output = torch.zeros(num_tokens, out_features, dtype=token_states.dtype, device=token_states.device)

    cumsum_num_tokens = torch.cumsum(tokens_per_expert, dim=0)
    # Insert a zero at the beginning for offset indexing convenience
    zero_tensor = torch.zeros(1, dtype=torch.long, device=cumsum_num_tokens.device)
    cumsum_num_tokens = torch.cat((zero_tensor, cumsum_num_tokens))

    # Tokens are expected to arrive pre-sorted by expert, so each expert owns the contiguous
    # slice cumsum_num_tokens[i]:cumsum_num_tokens[i + 1] of `token_states`.
    for expert_num in range(expert_weights.shape[0]):
        start = cumsum_num_tokens[expert_num]
        end = cumsum_num_tokens[expert_num + 1]
        tokens = token_states[start:end]

        out = torch.matmul(tokens, expert_weights[expert_num])
        output[start:end] = out
    return output


class AriaGroupedExpertsGemm(nn.Module):
    """
    Grouped GEMM (General Matrix Multiplication) module for efficient expert computation. This module utilizes the
    grouped_gemm library (https://github.com/fanshiqing/grouped_gemm) for optimized performance. If the grouped_gemm
    library is not installed, it gracefully falls back to a sequential GEMM implementation, which may be slower but
    ensures functionality.

    Args:
        in_features (`int`): Number of input features.
        out_features (`int`): Number of output features.
        groups (`int`): Number of expert groups.
    """

    def __init__(self, in_features, out_features, groups):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.groups = groups
        self.weight = nn.Parameter(torch.empty(groups, in_features, out_features))

    def forward(self, input, tokens_per_expert):
        """
        Perform grouped matrix multiplication.
Args: input (`torch.Tensor`): Input tensor of shape (num_tokens, in_features). tokens_per_expert (`torch.Tensor`): Number of tokens assigned to each expert. Returns: torch.Tensor: Output tensor of shape (num_tokens, out_features). """ return sequential_experts_gemm( input, self.weight, tokens_per_expert.cpu(), ) class AriaGroupedExpertsMLP(nn.Module): """ Grouped MLP module for Mixture of Experts. Args: config (`AriaTextConfig`): Configuration object for the model. """ def __init__(self, config: AriaTextConfig) -> None: super().__init__() self.config = config self.fc1 = AriaGroupedExpertsGemm(config.hidden_size, config.intermediate_size * 2, config.moe_num_experts) self.fc2 = AriaGroupedExpertsGemm(config.intermediate_size, config.hidden_size, config.moe_num_experts) def forward(self, permuted_tokens, tokens_per_expert): """ Forward pass of the Grouped MLP. Args: permuted_tokens (torch.Tensor): Permuted input tokens. tokens_per_expert (torch.Tensor): Number of tokens assigned to each expert. Returns: torch.Tensor: Output tensor after passing through the MLP. """ fc1_output = self.fc1(permuted_tokens, tokens_per_expert) projection, gate = torch.chunk(fc1_output, 2, dim=-1) fc1_output = nn.functional.silu(projection) * gate fc2_output = self.fc2(fc1_output, tokens_per_expert) return fc2_output # Token permutation adapted from https://github.com/NVIDIA/Megatron-LM/blob/54f1f78529cbc2b9cddad313e7f9d96ac0420a27/megatron/core/transformer/moe/token_dispatcher.py#L291-L587 class AriaTextMoELayer(nn.Module): """ Aria Text Mixture of Experts (MoE) Layer. This layer applies a gating mechanism to route input tokens to different experts. Args: config (`AriaTextConfig`): Configuration object for the text component of the model. """ def __init__(self, config: AriaTextConfig): super().__init__() self.router = nn.Linear(config.hidden_size, config.moe_num_experts, bias=False) self.experts = AriaGroupedExpertsMLP(config) self.shared_experts = AriaSharedExpertsMLP(config) self.config = config def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: """ Forward pass of the MoE Layer. Args: hidden_states (`torch.Tensor`): Input tensor of shape (batch_size, sequence_length, hidden_size). Returns: torch.Tensor: Output tensor after passing through the MoE layer. Process: 1. Route tokens to experts using the router. 2. Permute tokens based on routing decisions. 3. Process tokens through experts. 4. Unpermute and combine expert outputs. 5. Add shared expert output to the final result. 
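
        Shape flow, for reference: `hidden_states` (batch, seq_len, hidden_size) is flattened to
        (batch * seq_len, hidden_size), each token is dispatched to its top-`moe_topk` experts,
        the expert outputs are recombined with the softmaxed router scores, and the result is
        reshaped back to the original shape before the shared-expert output is added.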
""" original_shape = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_states.size(-1)) # Top K Routing logits = self.router(hidden_states) top_logits, top_indices = torch.topk(logits, k=self.config.moe_topk, dim=1) scores = nn.functional.softmax(top_logits, dim=-1) original_dtype = top_indices.dtype tokens_per_expert = torch.histc( top_indices.flatten().to(torch.float32), bins=self.config.moe_num_experts, min=0, max=self.config.moe_num_experts - 1, ).to(original_dtype) indices = top_indices # Token permutation flatten_indices = indices.view(-1) sorted_indices = torch.argsort(flatten_indices) permuted_tokens = hidden_states.index_select(0, sorted_indices // self.config.moe_topk) # Process through experts expert_output = self.experts(permuted_tokens, tokens_per_expert) # Token unpermutation unpermuted_tokens = torch.zeros( (scores.shape[0] * self.config.moe_topk, expert_output.size(1)), dtype=expert_output.dtype, device=expert_output.device, ) unpermuted_tokens.index_copy_(0, sorted_indices, expert_output) unpermuted_tokens = unpermuted_tokens.view(-1, self.config.moe_topk, expert_output.size(1)) output = (unpermuted_tokens * scores.unsqueeze(-1)).sum(dim=1).view(original_shape) # Add shared expert output shared_expert_output = self.shared_experts(hidden_states.view(original_shape)) return output + shared_expert_output def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class AriaTextAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: AriaTextConfig, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear( config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias ) def forward( self, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_value: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False): 
logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class AriaTextDecoderLayer(nn.Module): """ Aria Text Decoder Layer. This class defines a single decoder layer in the language model, incorporating self-attention and Mixture of Experts (MoE) feed-forward network. Args: config (`AriaTextConfig`): Configuration object for the text component of the model. layer_idx (`int`): Index of the layer. """ def __init__(self, config: AriaTextConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = AriaTextAttention(config=config, layer_idx=layer_idx) self.mlp = AriaTextMoELayer(config) self.input_layernorm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs class AriaTextPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = AriaConfig base_model_prefix = "model" _no_split_modules = ["AriaTextDecoderLayer", "AriaGroupedExpertsGemm"] supports_gradient_checkpointing = True _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = False _supports_sdpa = True _supports_cache_class = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, AriaGroupedExpertsGemm): module.weight.data.normal_(mean=0.0, std=std) elif isinstance(module, nn.Conv2d): module.weight.data.normal_(mean=0.0, std=std) if hasattr(module, "bias") and module.bias is not None: module.bias.data.zero_() ARIA_TEXT_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`AriaTextConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare Aria Model outputting raw hidden-states without any specific head on top.", ARIA_TEXT_START_DOCSTRING, ) class AriaPreTrainedModel(PreTrainedModel): config_class = AriaTextConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["AriaDecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn_2 = True _supports_sdpa = True _supports_flex_attn = True _supports_cache_class = True _supports_quantized_cache = True _supports_static_cache = False # MoE models don't work with torch.compile (dynamic slicing) _supports_attention_backend = False def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, AriaProjector): nn.init.trunc_normal_(module.query, std=std) class AriaTextRotaryEmbedding(nn.Module): def __init__(self, config: AriaTextConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and config.rope_scaling is not None: self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq def 
_dynamic_frequency_update(self, position_ids, device): """ dynamic RoPE layers should recompute `inv_freq` in the following situations: 1 - growing beyond the cached sequence length (allow scaling) 2 - the current sequence length is in the original scale (avoid losing precision with small sequences) """ seq_len = torch.max(position_ids) + 1 if seq_len > self.max_seq_len_cached: # growth inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len) self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation self.max_seq_len_cached = seq_len if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset # This .to() is needed if the model has been moved to a device after being initialized (because # the buffer is automatically moved, but not the original copy) self.original_inv_freq = self.original_inv_freq.to(device) self.register_buffer("inv_freq", self.original_inv_freq, persistent=False) self.max_seq_len_cached = self.original_max_seq_len @torch.no_grad() def forward(self, x, position_ids): if "dynamic" in self.rope_type: self._dynamic_frequency_update(position_ids, device=x.device) # Core RoPE block inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() # Force float32 (see https://github.com/huggingface/transformers/pull/29285) device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention cos = cos * self.attention_scaling sin = sin * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) ARIA_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. 
            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:
            - a [`~cache_utils.Cache`] instance, see our
              [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
              shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
              cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. Contrary to `position_ids`,
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
            the complete sequence length.
"""


@add_start_docstrings(
    "The bare AriaText Model outputting raw hidden-states without any specific head on top.",
    ARIA_TEXT_START_DOCSTRING,
)
class AriaTextModel(AriaTextPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers.
Each layer is a [`AriaTextDecoderLayer`] Args: config: AriaTextConfig """ def __init__(self, config: AriaTextConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [AriaTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = AriaTextRotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(ARIA_TEXT_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **flash_attn_kwargs: Unpack[FlashAttentionKwargs], ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache() if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers[: self.config.num_hidden_layers]: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **flash_attn_kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) output = BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, hidden_states=all_hidden_states, attentions=all_self_attns, ) return output if return_dict else output.to_tuple() def _update_causal_mask( self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. 
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`int`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ... class AriaTextForCausalLM(AriaTextPreTrainedModel, GenerationMixin): """ Aria model for causal language modeling tasks. This class extends `LlamaForCausalLM` to incorporate the Mixture of Experts (MoE) approach, allowing for more efficient and scalable language modeling. Args: config (`AriaTextConfig`): Configuration object for the model. """ _tied_weights_keys = ["lm_head.weight"] _tp_plan = {"lm_head": "colwise_rep"} config_class = AriaTextConfig def __init__(self, config: AriaTextConfig): super().__init__(config) self.model = AriaTextModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep") @add_start_docstrings_to_model_forward(ARIA_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[KwargsForCausalLM], ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. logits_to_keep (`int` or `torch.Tensor`, *optional*): If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all `input_ids` (special case). 
Only last token logits are needed for generation, and calculating them only for that token can save memory, which becomes pretty significant for long sequences or large vocabulary size. If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension. This is useful when using packed tensor format (single dimension for batch and sequence length). Returns: Example: ```python >>> from transformers import AutoTokenizer, AriaTextForCausalLM >>> model = AriaTextForCausalLM.from_pretrained("meta-aria_text/AriaText-2-7b-hf") >>> tokenizer = AutoTokenizer.from_pretrained("meta-aria_text/AriaText-2-7b-hf") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @dataclass class AriaCausalLMOutputWithPast(ModelOutput): """ Base class for Aria causal language model (or autoregressive) outputs. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        image_hidden_states (`torch.FloatTensor`, *optional*):
            A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
            image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    past_key_values: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[torch.FloatTensor] = None


ARIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor`, *optional*):
            Input token IDs.
        pixel_values (`torch.FloatTensor`, *optional*):
            Pixel values of the images.
        pixel_mask (`torch.LongTensor`, *optional*):
            Mask for the pixel values.
        attention_mask (`torch.Tensor`, *optional*):
            Attention mask.
        position_ids (`torch.LongTensor`, *optional*):
            Position IDs.
        past_key_values (`List[torch.FloatTensor]`, *optional*):
            Past key values for efficient processing.
        inputs_embeds (`torch.FloatTensor`, *optional*):
            Input embeddings.
        labels (`torch.LongTensor`, *optional*):
            Labels for computing the language modeling loss.
        use_cache (`bool`, *optional*):
            Whether to use the model's cache mechanism.
        output_attentions (`bool`, *optional*):
            Whether to output attention weights.
        output_hidden_states (`bool`, *optional*):
            Whether to output hidden states.
        return_dict (`bool`, *optional*):
            Whether to return a `ModelOutput` object.
        logits_to_keep (`int` or `torch.Tensor`, *optional*, defaults to 0):
            If an `int`, calculate logits for the last `logits_to_keep` tokens, or all `input_ids` if `0`. Otherwise,
            slice according to the 1D tensor in the sequence length dimension.
        cache_position (`torch.LongTensor`, *optional*):
            Cache positions.
        **loss_kwargs:
            Additional keyword arguments for loss calculation.
"""

ARIA_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config (`AriaConfig`):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    """Aria model for conditional generation tasks.
This model combines a vision tower, a multi-modal projector, and a language model to perform tasks that involve both image and text inputs.""", ARIA_START_DOCSTRING, ) class AriaForConditionalGeneration(AriaPreTrainedModel, GenerationMixin): config_class = AriaConfig _supports_flash_attn_2 = False _supports_sdpa = False _tied_weights_keys = ["language_model.lm_head.weight"] def __init__(self, config: AriaConfig): super().__init__(config) self.vision_tower = AutoModel.from_config(config.vision_config) self.multi_modal_projector = AriaProjector(config) self.vocab_size = config.text_config.vocab_size self.language_model = AutoModelForCausalLM.from_config(config.text_config) self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1 self._use_flash_attention_2 = config.text_config._attn_implementation == "flash_attention_2" self.post_init() def _create_patch_attention_mask(self, pixel_mask): if pixel_mask is None: return None patches_subgrid = pixel_mask.unfold( dimension=1, size=self.vision_tower.config.patch_size, step=self.vision_tower.config.patch_size, ) patches_subgrid = patches_subgrid.unfold( dimension=2, size=self.vision_tower.config.patch_size, step=self.vision_tower.config.patch_size, ) return (patches_subgrid.sum(dim=(-1, -2)) > 0).bool() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def get_output_embeddings(self): return self.language_model.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def set_decoder(self, decoder): self.language_model.set_decoder(decoder) def get_decoder(self): return self.language_model.get_decoder() def get_image_features( self, pixel_values: torch.FloatTensor, pixel_mask: torch.FloatTensor = None, vision_feature_layer: int = -1, ): patch_attention_mask = self._create_patch_attention_mask(pixel_mask) image_outputs = self.vision_tower( pixel_values, patch_attention_mask=patch_attention_mask, output_hidden_states=True ) image_attn_mask = None if patch_attention_mask is not None: flattened_mask = patch_attention_mask.flatten(1) image_attn_mask = torch.logical_not(flattened_mask) selected_image_feature = image_outputs.hidden_states[vision_feature_layer] image_features = self.multi_modal_projector(selected_image_feature, attn_mask=image_attn_mask) return image_features @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep") @add_start_docstrings_to_model_forward(ARIA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=AriaCausalLMOutputWithPast, config_class=AriaConfig) def forward( self, input_ids: torch.LongTensor = None, pixel_values: torch.FloatTensor = None, pixel_mask: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, logits_to_keep: Union[int, torch.Tensor] = 0, cache_position: Optional[torch.LongTensor] = None, **loss_kwargs, ) -> Union[Tuple, AriaCausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling 
loss. Indices should either be in `[0, ..., config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `AriaForConditionalGeneration`).
            Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only computed for the
            tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> import requests
        >>> import torch
        >>> from PIL import Image
        >>> from io import BytesIO

        >>> from transformers import AutoProcessor, AutoModel
        >>> from transformers.image_utils import load_image

        >>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
        >>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
        >>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
        >>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")

        >>> processor = AutoProcessor.from_pretrained("Rhymes-AI/Aria")
        >>> model = AutoModel.from_pretrained("Rhymes-AI/Aria", torch_dtype=torch.bfloat16, device_map="auto")

        >>> # Create inputs
        >>> messages = [
        ...     {
        ...         "role": "user",
        ...         "content": [
        ...             {"type": "image"},
        ...             {"type": "text", "text": "In this image, we can see the city of New York, and more specifically the Statue of Liberty."},
        ...             {"type": "image"},
        ...             {"type": "text", "text": "What can we see in this image?"},
        ...         ]
        ...     },
        ...     {
        ...         "role": "user",
        ...         "content": [
        ...             {"type": "image"},
        ...             {"type": "text", "text": "In which city is that bridge located?"},
        ...         ]
        ...     }
        ... ]

        >>> prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages]
        >>> images = [[image1, image2], [image3]]
        >>> inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(model.device)

        >>> # Generate
        >>> generated_ids = model.generate(**inputs, max_new_tokens=256)
        >>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)

        >>> print(generated_texts[0])
        Assistant: There are buildings, trees, lights, and water visible in this image.

        >>> print(generated_texts[1])
        Assistant: The bridge is in San Francisco.
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        # 2.
Merge text and images if pixel_values is not None and inputs_embeds.shape[1] != 1: if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.image_token_index, dtype=torch.long, device=inputs_embeds.device) ) n_image_tokens = (special_image_mask).sum(dim=1).sum(dim=0)[0] else: image_embeds = input_ids == self.config.image_token_index special_image_mask = image_embeds.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) n_image_tokens = (image_embeds).sum(dim=1).sum(dim=0) image_features = self.get_image_features( pixel_values=pixel_values, pixel_mask=pixel_mask, vision_feature_layer=self.config.vision_feature_layer, ) n_images, n_features_per_image = image_features.shape[0], image_features.shape[1] n_image_features = n_images * n_features_per_image if n_image_tokens != n_image_features: raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}" ) image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) outputs = self.language_model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, logits_to_keep=logits_to_keep, cache_position=cache_position, ) logits = outputs[0] loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **loss_kwargs ) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return AriaCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, pixel_mask=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs, ): model_inputs = self.language_model.prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs, ) if cache_position[0] == 0: # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore # Otherwise we need pixel values to be passed to model model_inputs["pixel_values"] = pixel_values model_inputs["pixel_mask"] = pixel_mask return model_inputs __all__ = [ "AriaForConditionalGeneration", "AriaPreTrainedModel", "AriaTextPreTrainedModel", "AriaTextModel", "AriaTextForCausalLM", ]
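
# A minimal shape sanity check for the `repeat_kv` grouped-query attention helper
# defined above. This sketch is illustrative only (the shapes are hypothetical and
# not tied to any Aria checkpoint); it is kept in comments so importing the module
# stays side-effect free. Run manually if desired:
#
#     kv = torch.randn(2, 4, 10, 64)   # (batch, num_key_value_heads, seqlen, head_dim)
#     out = repeat_kv(kv, n_rep=2)     # -> (batch, 4 * 2 = 8 attention heads, seqlen, head_dim)
#     assert out.shape == (2, 8, 10, 64)
#     assert torch.equal(out[:, 0], out[:, 1])  # consecutive attention heads reuse one KV head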
transformers/src/transformers/models/aria/modeling_aria.py/0
{ "file_path": "transformers/src/transformers/models/aria/modeling_aria.py", "repo_id": "transformers", "token_count": 30298 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """AutoProcessor class.""" import importlib import inspect import json import os import warnings from collections import OrderedDict # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...image_processing_utils import ImageProcessingMixin from ...processing_utils import ProcessorMixin from ...tokenization_utils import TOKENIZER_CONFIG_FILE from ...utils import FEATURE_EXTRACTOR_NAME, PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) from .feature_extraction_auto import AutoFeatureExtractor from .image_processing_auto import AutoImageProcessor from .tokenization_auto import AutoTokenizer logger = logging.get_logger(__name__) PROCESSOR_MAPPING_NAMES = OrderedDict( [ ("align", "AlignProcessor"), ("altclip", "AltCLIPProcessor"), ("aria", "AriaProcessor"), ("bark", "BarkProcessor"), ("blip", "BlipProcessor"), ("blip-2", "Blip2Processor"), ("bridgetower", "BridgeTowerProcessor"), ("chameleon", "ChameleonProcessor"), ("chinese_clip", "ChineseCLIPProcessor"), ("clap", "ClapProcessor"), ("clip", "CLIPProcessor"), ("clipseg", "CLIPSegProcessor"), ("clvp", "ClvpProcessor"), ("colpali", "ColPaliProcessor"), ("emu3", "Emu3Processor"), ("flava", "FlavaProcessor"), ("fuyu", "FuyuProcessor"), ("git", "GitProcessor"), ("got_ocr2", "GotOcr2Processor"), ("grounding-dino", "GroundingDinoProcessor"), ("groupvit", "CLIPProcessor"), ("hubert", "Wav2Vec2Processor"), ("idefics", "IdeficsProcessor"), ("idefics2", "Idefics2Processor"), ("idefics3", "Idefics3Processor"), ("instructblip", "InstructBlipProcessor"), ("instructblipvideo", "InstructBlipVideoProcessor"), ("kosmos-2", "Kosmos2Processor"), ("layoutlmv2", "LayoutLMv2Processor"), ("layoutlmv3", "LayoutLMv3Processor"), ("llava", "LlavaProcessor"), ("llava_next", "LlavaNextProcessor"), ("llava_next_video", "LlavaNextVideoProcessor"), ("llava_onevision", "LlavaOnevisionProcessor"), ("markuplm", "MarkupLMProcessor"), ("mctct", "MCTCTProcessor"), ("mgp-str", "MgpstrProcessor"), ("mllama", "MllamaProcessor"), ("moonshine", "Wav2Vec2Processor"), ("oneformer", "OneFormerProcessor"), ("owlv2", "Owlv2Processor"), ("owlvit", "OwlViTProcessor"), ("paligemma", "PaliGemmaProcessor"), ("pix2struct", "Pix2StructProcessor"), ("pixtral", "PixtralProcessor"), ("pop2piano", "Pop2PianoProcessor"), ("qwen2_5_vl", "Qwen2_5_VLProcessor"), ("qwen2_audio", "Qwen2AudioProcessor"), ("qwen2_vl", "Qwen2VLProcessor"), ("sam", "SamProcessor"), ("seamless_m4t", "SeamlessM4TProcessor"), ("sew", "Wav2Vec2Processor"), ("sew-d", "Wav2Vec2Processor"), ("siglip", "SiglipProcessor"), ("speech_to_text", "Speech2TextProcessor"), 
("speech_to_text_2", "Speech2Text2Processor"), ("speecht5", "SpeechT5Processor"), ("trocr", "TrOCRProcessor"), ("tvlt", "TvltProcessor"), ("tvp", "TvpProcessor"), ("udop", "UdopProcessor"), ("unispeech", "Wav2Vec2Processor"), ("unispeech-sat", "Wav2Vec2Processor"), ("video_llava", "VideoLlavaProcessor"), ("vilt", "ViltProcessor"), ("vipllava", "LlavaProcessor"), ("vision-text-dual-encoder", "VisionTextDualEncoderProcessor"), ("wav2vec2", "Wav2Vec2Processor"), ("wav2vec2-bert", "Wav2Vec2Processor"), ("wav2vec2-conformer", "Wav2Vec2Processor"), ("wavlm", "Wav2Vec2Processor"), ("whisper", "WhisperProcessor"), ("xclip", "XCLIPProcessor"), ] ) PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, PROCESSOR_MAPPING_NAMES) def processor_class_from_name(class_name: str): for module_name, processors in PROCESSOR_MAPPING_NAMES.items(): if class_name in processors: module_name = model_type_to_module_name(module_name) module = importlib.import_module(f".{module_name}", "transformers.models") try: return getattr(module, class_name) except AttributeError: continue for processor in PROCESSOR_MAPPING._extra_content.values(): if getattr(processor, "__name__", None) == class_name: return processor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. main_module = importlib.import_module("transformers") if hasattr(main_module, class_name): return getattr(main_module, class_name) return None class AutoProcessor: r""" This is a generic processor class that will be instantiated as one of the processor classes of the library when created with the [`AutoProcessor.from_pretrained`] class method. This class cannot be instantiated directly using `__init__()` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoProcessor is designed to be instantiated " "using the `AutoProcessor.from_pretrained(pretrained_model_name_or_path)` method." ) @classmethod @replace_list_option_in_docstrings(PROCESSOR_MAPPING_NAMES) def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): r""" Instantiate one of the processor classes of the library from a pretrained model vocabulary. The processor class to instantiate is selected based on the `model_type` property of the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible): List options Params: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on huggingface.co. - a path to a *directory* containing a processor files saved using the `save_pretrained()` method, e.g., `./my_model_directory/`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model feature extractor should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the feature extractor files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. 
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
                when running `huggingface-cli login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
                If `False`, then this function returns just the final feature extractor object. If `True`, then this
                function returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
                consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
                `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
            trust_remote_code (`bool`, *optional*, defaults to `False`):
                Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
                should only be set to `True` for repositories you trust and in which you have read the code, as it will
                execute code present on the Hub on your local machine.
            kwargs (`Dict[str, Any]`, *optional*):
                The values in kwargs of any keys which are feature extractor attributes will be used to override the
                loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
                controlled by the `return_unused_kwargs` keyword parameter.

        <Tip>

        Passing `token=True` is required when you want to use a private model.

        </Tip>

        Examples:

        ```python
        >>> from transformers import AutoProcessor

        >>> # Download processor from huggingface.co and cache.
        >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

        >>> # If processor files are in a directory (e.g. processor was saved using *save_pretrained('./test/saved_model/')*)
        >>> # processor = AutoProcessor.from_pretrained("./test/saved_model/")
        ```"""
        use_auth_token = kwargs.pop("use_auth_token", None)
        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
                FutureWarning,
            )
            if kwargs.get("token", None) is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            kwargs["token"] = use_auth_token

        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        processor_class = None
        processor_auto_map = None

        # First, let's see if we have a processor or preprocessor config.
        # Filter the kwargs for `get_file_from_repo`.
get_file_from_repo_kwargs = { key: kwargs[key] for key in inspect.signature(get_file_from_repo).parameters.keys() if key in kwargs } # Let's start by checking whether the processor class is saved in a processor config processor_config_file = get_file_from_repo( pretrained_model_name_or_path, PROCESSOR_NAME, **get_file_from_repo_kwargs ) if processor_config_file is not None: config_dict, _ = ProcessorMixin.get_processor_dict(pretrained_model_name_or_path, **kwargs) processor_class = config_dict.get("processor_class", None) if "AutoProcessor" in config_dict.get("auto_map", {}): processor_auto_map = config_dict["auto_map"]["AutoProcessor"] if processor_class is None: # If not found, let's check whether the processor class is saved in an image processor config preprocessor_config_file = get_file_from_repo( pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, **get_file_from_repo_kwargs ) if preprocessor_config_file is not None: config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs) processor_class = config_dict.get("processor_class", None) if "AutoProcessor" in config_dict.get("auto_map", {}): processor_auto_map = config_dict["auto_map"]["AutoProcessor"] # If not found, let's check whether the processor class is saved in a feature extractor config if preprocessor_config_file is not None and processor_class is None: config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict( pretrained_model_name_or_path, **kwargs ) processor_class = config_dict.get("processor_class", None) if "AutoProcessor" in config_dict.get("auto_map", {}): processor_auto_map = config_dict["auto_map"]["AutoProcessor"] if processor_class is None: # Next, let's check whether the processor class is saved in a tokenizer tokenizer_config_file = get_file_from_repo( pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, **get_file_from_repo_kwargs ) if tokenizer_config_file is not None: with open(tokenizer_config_file, encoding="utf-8") as reader: config_dict = json.load(reader) processor_class = config_dict.get("processor_class", None) if "AutoProcessor" in config_dict.get("auto_map", {}): processor_auto_map = config_dict["auto_map"]["AutoProcessor"] if processor_class is None: # Otherwise, load config, if it can be loaded. if not isinstance(config, PretrainedConfig): config = AutoConfig.from_pretrained( pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs ) # And check if the config contains the processor class. 
processor_class = getattr(config, "processor_class", None) if hasattr(config, "auto_map") and "AutoProcessor" in config.auto_map: processor_auto_map = config.auto_map["AutoProcessor"] if processor_class is not None: processor_class = processor_class_from_name(processor_class) has_remote_code = processor_auto_map is not None has_local_code = processor_class is not None or type(config) in PROCESSOR_MAPPING trust_remote_code = resolve_trust_remote_code( trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code ) if has_remote_code and trust_remote_code: processor_class = get_class_from_dynamic_module( processor_auto_map, pretrained_model_name_or_path, **kwargs ) _ = kwargs.pop("code_revision", None) if os.path.isdir(pretrained_model_name_or_path): processor_class.register_for_auto_class() return processor_class.from_pretrained( pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs ) elif processor_class is not None: return processor_class.from_pretrained( pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs ) # Last try: we use the PROCESSOR_MAPPING. elif type(config) in PROCESSOR_MAPPING: return PROCESSOR_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, **kwargs) # At this stage, there doesn't seem to be a `Processor` class available for this model, so let's try a # tokenizer. try: return AutoTokenizer.from_pretrained( pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs ) except Exception: try: return AutoImageProcessor.from_pretrained( pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs ) except Exception: pass try: return AutoFeatureExtractor.from_pretrained( pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs ) except Exception: pass raise ValueError( f"Unrecognized processing class in {pretrained_model_name_or_path}. Can't instantiate a processor, a " "tokenizer, an image processor or a feature extractor for this model. Make sure the repository contains " "the files of at least one of those processing classes." ) @staticmethod def register(config_class, processor_class, exist_ok=False): """ Register a new processor for this class. Args: config_class ([`PretrainedConfig`]): The configuration corresponding to the model to register. processor_class ([`FeatureExtractorMixin`]): The processor to register. """ PROCESSOR_MAPPING.register(config_class, processor_class, exist_ok=exist_ok)
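
# A hedged usage sketch for `AutoProcessor.register` defined above. The config and
# processor classes below are hypothetical placeholders (not real library types),
# and the sketch is kept in comments so the module has no import-time side effects:
#
#     from transformers import AutoProcessor, PretrainedConfig
#     from transformers.processing_utils import ProcessorMixin
#
#     class MyConfig(PretrainedConfig):
#         model_type = "my-model"
#
#     class MyProcessor(ProcessorMixin):
#         ...  # wraps e.g. a tokenizer / image processor pair
#
#     AutoProcessor.register(MyConfig, MyProcessor)
#     # AutoProcessor.from_pretrained(...) can now resolve repos whose config is a MyConfig.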
transformers/src/transformers/models/auto/processing_auto.py/0
{ "file_path": "transformers/src/transformers/models/auto/processing_auto.py", "repo_id": "transformers", "token_count": 7522 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for BLIP.""" from ...image_processing_utils_fast import BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, BaseImageProcessorFast from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling from ...utils import add_start_docstrings @add_start_docstrings( "Constructs a fast BLIP image processor.", BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, ) class BlipImageProcessorFast(BaseImageProcessorFast): # To be checked against the slow image processor # None values left after checking can be removed resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {"height": 384, "width": 384} do_resize = True do_rescale = True do_normalize = True do_convert_rgb = True __all__ = ["BlipImageProcessorFast"]
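
# A brief, hedged usage sketch for the fast BLIP image processor above. The
# checkpoint name is illustrative; any BLIP repo with a 384x384 preprocessing
# config should behave the same way. Kept in comments to avoid import-time work:
#
#     from PIL import Image
#     from transformers import BlipImageProcessorFast
#
#     processor = BlipImageProcessorFast.from_pretrained("Salesforce/blip-image-captioning-base")
#     inputs = processor(images=Image.open("photo.jpg"), return_tensors="pt")
#     print(inputs["pixel_values"].shape)  # expected: torch.Size([1, 3, 384, 384])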
transformers/src/transformers/models/blip/image_processing_blip_fast.py/0
{ "file_path": "transformers/src/transformers/models/blip/image_processing_blip_fast.py", "repo_id": "transformers", "token_count": 461 }
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Bloom."""

import pickle
from typing import Optional, Tuple

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" Bloom tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
    Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
    be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import BloomTokenizerFast

    >>> tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom")

    >>> tokenizer("Hello world")["input_ids"]
    [59414, 8876]

    >>> tokenizer(" Hello world")["input_ids"]
    [86153, 8876]
    ```

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
    the model was not pretrained this way, it might yield a decrease in performance.

    <Tip>

    When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.

    </Tip>

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        unk_token (`str`, *optional*, defaults to `<|endoftext|>`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*, defaults to `<|endoftext|>`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `<|endoftext|>`):
            The end of sequence token.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows treating the leading word just as any
            other word. (The Bloom tokenizer detects the beginning of words by the preceding space).
        trim_offsets (`bool`, *optional*, defaults to `True`):
            Whether or not the post-processing step should trim offsets to avoid including whitespaces.
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = None # No `max_model_input_sizes` as BLOOM uses ALiBi positional embeddings def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs, ): super().__init__( vocab_file=vocab_file, merges_file=merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, ) # TODO @ArthurZucker this can only work one way for now, to update later-on. Tests should also properly # check this as they were green before. pre_tok_state = pickle.dumps(self.backend_tokenizer.pre_tokenizer) decoder_state = pickle.dumps(self.backend_tokenizer.decoder) if add_prefix_space: pre_tok_state = pre_tok_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true') decoder_state = decoder_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true') self.backend_tokenizer.pre_tokenizer = pickle.loads(pre_tok_state) self.backend_tokenizer.decoder = pickle.loads(decoder_state) self.add_prefix_space = add_prefix_space def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get("is_split_into_words", False) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with" " pretokenized inputs." ) return super()._batch_encode_plus(*args, **kwargs) def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get("is_split_into_words", False) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with" " pretokenized inputs." ) return super()._encode_plus(*args, **kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) __all__ = ["BloomTokenizerFast"]
transformers/src/transformers/models/bloom/tokenization_bloom_fast.py/0
{ "file_path": "transformers/src/transformers/models/bloom/tokenization_bloom_fast.py", "repo_id": "transformers", "token_count": 2423 }
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature extractor class for CLVP
"""

from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class ClvpFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a CLVP feature extractor.

    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which
    contains most of the main methods. Users should refer to this superclass for more information regarding those
    methods.

    This class extracts log-mel-spectrogram features from raw speech using a custom numpy implementation of the
    `Short Time Fourier Transform` which should match PyTorch's `torch.stft` equivalent.

    Args:
        feature_size (`int`, *optional*, defaults to 80):
            The feature dimension of the extracted features.
        sampling_rate (`int`, *optional*, defaults to 22050):
            The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
        default_audio_length (`int`, *optional*, defaults to 6):
            The default length of raw audio in seconds. If `max_length` is not set during `__call__` then it will
            automatically be set to default_audio_length * `self.sampling_rate`.
        hop_length (`int`, *optional*, defaults to 256):
            Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients.
        chunk_length (`int`, *optional*, defaults to 30):
            The maximum number of chunks of `sampling_rate` samples used to trim and pad longer or shorter audio
            sequences.
        n_fft (`int`, *optional*, defaults to 1024):
            Size of the Fourier transform.
        padding_value (`float`, *optional*, defaults to 0.0):
            Padding value used to pad the audio. Should correspond to silences.
        mel_norms (`list` of length `feature_size`, *optional*):
            If `mel_norms` is provided then it will be used to normalize the log-mel spectrograms along each
            mel-filter.
        return_attention_mask (`bool`, *optional*, defaults to `False`):
            Whether to return the attention mask. If left to the default (`False`), no attention mask is returned.
[What are attention masks?](../glossary#attention-mask)
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=22050,
        default_audio_length=6,
        hop_length=256,
        chunk_length=30,
        n_fft=1024,
        padding_value=0.0,
        mel_norms=None,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.default_audio_length = default_audio_length
        self.mel_norms = mel_norms
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + (n_fft // 2),
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="htk",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """
        This method first computes the log-mel spectrogram of the provided audio and then applies normalization along
        each mel-filterbank, if `mel_norms` is provided.
        """
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel=None,
        )

        log_spec = np.log(np.clip(log_spec, a_min=1e-5, a_max=None))

        if self.mel_norms is not None:
            log_spec = log_spec / np.array(self.mel_norms)[:, None]

        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        sampling_rate: Optional[int] = None,
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        `ClvpFeatureExtractor` is used to extract various voice-specific properties such as the pitch and tone of the
        voice, speaking speed, and even speaking defects like a lisp or stuttering from a sample voice or
        `raw_speech`.

        First the voice is padded or truncated so that it becomes a waveform `self.default_audio_length` seconds
        long, and then the log-mel spectrogram is extracted from it.

        Args:
            raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
                The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
                stereo, i.e. single float per timestep.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors and to allow automatic speech
                recognition pipelines to work correctly.
            truncation (`bool`, *optional*, defaults to `True`):
                Activates truncation to cut input sequences longer than *max_length* to *max_length*.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value.

                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute
                capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple
                of 128.
return_attention_mask (`bool`, *optional*, defaults to `True`): Whether to return the attention mask. If left to the default, it will return the attention mask. [What are attention masks?](../glossary#attention-mask) return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. padding_value (`float`, *optional*, defaults to 0.0): The value that is used to fill the padding values / vectors. max_length (`int`, *optional*): The maximum input length of the inputs. """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" f" was sampled with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") is_batched = is_batched_numpy or ( isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) ) if is_batched: raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech] elif not is_batched and not isinstance(raw_speech, np.ndarray): raw_speech = np.asarray(raw_speech, dtype=np.float32) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float32) # always return batch if not is_batched: raw_speech = [np.asarray([raw_speech]).T] batched_speech = BatchFeature({"input_features": raw_speech}) max_length = self.default_audio_length * self.sampling_rate if max_length is None else max_length padded_inputs = self.pad( batched_speech, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) # make sure list is in array format input_features = padded_inputs.get("input_features").transpose(2, 0, 1) input_features = [ self._np_extract_fbank_features(waveform).astype(np.float32) for waveform in input_features[0] ] if isinstance(input_features[0], List): padded_inputs["input_features"] = [np.asarray(feature) for feature in input_features] else: padded_inputs["input_features"] = input_features return padded_inputs.convert_to_tensors(return_tensors) __all__ = ["ClvpFeatureExtractor"]
transformers/src/transformers/models/clvp/feature_extraction_clvp.py/0
{ "file_path": "transformers/src/transformers/models/clvp/feature_extraction_clvp.py", "repo_id": "transformers", "token_count": 4470 }
# coding=utf-8 # Copyright 2024 Cohere team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is based on the LLama model definition file in transformers """PyTorch Cohere model.""" from typing import Callable, List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...cache_utils import Cache from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import CausalLMOutputWithPast from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...pytorch_utils import ALL_LAYERNORM_LAYERS from ...utils import LossKwargs, logging from ..llama.modeling_llama import ( LlamaAttention, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaRotaryEmbedding, eager_attention_forward, ) from .configuration_cohere import CohereConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "CohereConfig" class CohereLayerNorm(nn.Module): def __init__(self, hidden_size=None, eps=1e-5, bias=False): """The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim""" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) mean = hidden_states.mean(-1, keepdim=True) variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True) hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon) hidden_states = self.weight.to(torch.float32) * hidden_states return hidden_states.to(input_dtype) ALL_LAYERNORM_LAYERS.append(CohereLayerNorm) class CohereRotaryEmbedding(LlamaRotaryEmbedding): @torch.no_grad() def forward(self, x, position_ids): if "dynamic" in self.rope_type: self._dynamic_frequency_update(position_ids, device=x.device) # Core RoPE block inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() # Force float32 (see https://github.com/huggingface/transformers/pull/29285) device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.repeat_interleave(freqs, 2, dim=-1) # diff from Llama: we interleave() instead of cat() cos = emb.cos() sin = emb.sin() # Advanced RoPE types (e.g. 
yarn) apply a post-processing scaling factor, equivalent to scaling attention cos = cos * self.attention_scaling sin = sin * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) def rotate_half(x): # Split and rotate. Note that this function is different from e.g. Llama. x1 = x[..., ::2] x2 = x[..., 1::2] rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2) return rot_x def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ dtype = q.dtype q = q.float() k = k.float() cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype) class CohereMLP(LlamaMLP): def __init__(self, config): super().__init__(config) self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) class CohereAttention(LlamaAttention): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: CohereConfig, layer_idx: Optional[int] = None): super().__init__(config, layer_idx) self.use_qk_norm = config.use_qk_norm if self.use_qk_norm: # When sharding the model using Tensor Parallelism, need to be careful to use n_local_heads self.q_norm = CohereLayerNorm( hidden_size=(config.num_attention_heads, self.head_dim), eps=config.layer_norm_eps ) self.k_norm = CohereLayerNorm( hidden_size=(config.num_key_value_heads, self.head_dim), eps=config.layer_norm_eps ) def forward( self, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_value: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape) key_states = self.k_proj(hidden_states).view(hidden_shape) value_states = self.v_proj(hidden_states).view(hidden_shape) if self.use_qk_norm: # main diff from Llama query_states = self.q_norm(query_states) key_states = self.k_norm(key_states) query_states = query_states.transpose(1, 2) key_states = 
key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; position_ids needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False): logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class CohereDecoderLayer(nn.Module): def __init__(self, config: CohereConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = CohereAttention(config=config, layer_idx=layer_idx) self.mlp = CohereMLP(config) self.input_layernorm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. 
""" residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states_attention, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) # Fully Connected hidden_states_mlp = self.mlp(hidden_states) # Add everything together hidden_states = residual + hidden_states_attention + hidden_states_mlp outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs class CohereModel(LlamaModel): def __init__(self, config: CohereConfig): super().__init__(config) self.layers = nn.ModuleList( [CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.rotary_emb = CohereRotaryEmbedding(config=config) self.norm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps) class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ... class CohereForCausalLM(LlamaForCausalLM): def __init__(self, config): super().__init__(config) self.model = CohereModel(config) self.logit_scale = config.logit_scale self.tie_word_embeddings = config.tie_word_embeddings def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[KwargsForCausalLM], ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. logits_to_keep (`int` or `torch.Tensor`, *optional*): If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that token can save memory, which becomes pretty significant for long sequences or large vocabulary size. If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension. This is useful when using packed tensor format (single dimension for batch and sequence length). Returns: Example: ```python >> from transformers import AutoTokenizer, CohereForCausalLM >> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01") >> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01") >> prompt = "Hey, are you conscious? Can you talk to me?" >> inputs = tokenizer(prompt, return_tensors="pt") >> # Generate >> generate_ids = model.generate(inputs.input_ids, max_length=30) >> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? 
Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) logits = logits * self.logit_scale # main diff from Llama loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "CohereForCausalLM", "CohereModel", "CoherePreTrainedModel", # noqa: F822 ]
transformers/src/transformers/models/cohere/modular_cohere.py/0
{ "file_path": "transformers/src/transformers/models/cohere/modular_cohere.py", "repo_id": "transformers", "token_count": 7530 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Conditional DETR.""" import io import pathlib from collections import defaultdict from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union import numpy as np from ...feature_extraction_utils import BatchFeature from ...image_processing_utils import BaseImageProcessor, get_size_dict from ...image_transforms import ( PaddingMode, center_to_corners_format, corners_to_center_format, id_to_rgb, pad, rescale, resize, rgb_to_id, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, AnnotationFormat, AnnotationType, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_annotations, validate_kwargs, validate_preprocess_arguments, ) from ...utils import ( TensorType, is_flax_available, is_jax_tensor, is_scipy_available, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_vision_available, logging, ) if is_torch_available(): import torch from torch import nn if is_vision_available(): import PIL if is_scipy_available(): import scipy.special import scipy.stats logger = logging.get_logger(__name__) # pylint: disable=invalid-name SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC) # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. 
""" height, width = image_size raw_size = None if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: raw_size = max_size * min_original_size / max_original_size size = int(round(raw_size)) if (height <= width and height == size) or (width <= height and width == size): oh, ow = height, width elif width < height: ow = size if max_size is not None and raw_size is not None: oh = int(raw_size * height / width) else: oh = int(size * height / width) else: oh = size if max_size is not None and raw_size is not None: ow = int(raw_size * width / height) else: ow = int(size * width / height) return (oh, ow) # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. If the desired output size is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output image size is computed by keeping the aspect ratio of the input image size. Args: input_image (`np.ndarray`): The image to resize. size (`int` or `Tuple[int, int]` or `List[int]`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ image_size = get_image_size(input_image, input_data_format) if isinstance(size, (list, tuple)): return size return get_size_with_aspect_ratio(image_size, size, max_size) # Copied from transformers.models.detr.image_processing_detr.get_image_size_for_max_height_width def get_image_size_for_max_height_width( input_image: np.ndarray, max_height: int, max_width: int, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Tuple[int, int]: """ Computes the output image size given the input image and the maximum allowed height and width. Keep aspect ratio. Important, even if image_height < max_height and image_width < max_width, the image will be resized to at least one of the edges be equal to max_height or max_width. For example: - input_size: (100, 200), max_height: 50, max_width: 50 -> output_size: (25, 50) - input_size: (100, 200), max_height: 200, max_width: 500 -> output_size: (200, 400) Args: input_image (`np.ndarray`): The image to resize. max_height (`int`): The maximum allowed height. max_width (`int`): The maximum allowed width. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ image_size = get_image_size(input_image, input_data_format) height, width = image_size height_scale = max_height / height width_scale = max_width / width min_scale = min(height_scale, width_scale) new_height = int(height * min_scale) new_width = int(width * min_scale) return new_height, new_width # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn def get_numpy_to_framework_fn(arr) -> Callable: """ Returns a function that converts a numpy array to the framework of the input array. Args: arr (`np.ndarray`): The array to convert. 
""" if isinstance(arr, np.ndarray): return np.array if is_tf_available() and is_tf_tensor(arr): import tensorflow as tf return tf.convert_to_tensor if is_torch_available() and is_torch_tensor(arr): import torch return torch.tensor if is_flax_available() and is_jax_tensor(arr): import jax.numpy as jnp return jnp.array raise ValueError(f"Cannot convert arrays of type {type(arr)}") # Copied from transformers.models.detr.image_processing_detr.safe_squeeze def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray: """ Squeezes an array, but only if the axis specified has dim 1. """ if axis is None: return arr.squeeze() try: return arr.squeeze(axis=axis) except ValueError: return arr # Copied from transformers.models.detr.image_processing_detr.normalize_annotation def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: image_height, image_width = image_size norm_annotation = {} for key, value in annotation.items(): if key == "boxes": boxes = value boxes = corners_to_center_format(boxes) boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width( images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> List[int]: """ Get the maximum height and width across all images in a batch. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if input_data_format == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_data_format == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_data_format}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask( image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: """ Convert a COCO polygon annotation to a mask. Args: segmentations (`List[List[float]]`): List of polygons, each polygon represented by a list of x-y coordinates. height (`int`): Height of the mask. width (`int`): Width of the mask. 
""" try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->ConditionalDetr def prepare_coco_detection_annotation( image, target, return_segmentation_masks: bool = False, input_data_format: Optional[Union[ChannelDimension, str]] = None, ): """ Convert the target in COCO format into the format expected by ConditionalDetr. """ image_height, image_width = get_image_size(image, channel_dim=input_data_format) image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # Get all COCO annotations for the given image. annotations = target["annotations"] annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0] classes = [obj["category_id"] for obj in annotations] classes = np.asarray(classes, dtype=np.int64) # for conversion to coco api area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64) boxes = [obj["bbox"] for obj in annotations] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) new_target = {} new_target["image_id"] = image_id new_target["class_labels"] = classes[keep] new_target["boxes"] = boxes[keep] new_target["area"] = area[keep] new_target["iscrowd"] = iscrowd[keep] new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) if annotations and "keypoints" in annotations[0]: keypoints = [obj["keypoints"] for obj in annotations] # Converting the filtered keypoints list to a numpy array keypoints = np.asarray(keypoints, dtype=np.float32) # Apply the keep mask here to filter the relevant annotations keypoints = keypoints[keep] num_keypoints = keypoints.shape[0] keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints new_target["keypoints"] = keypoints if return_segmentation_masks: segmentation_masks = [obj["segmentation"] for obj in annotations] masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) new_target["masks"] = masks[keep] return new_target # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes def masks_to_boxes(masks: np.ndarray) -> np.ndarray: """ Compute the bounding boxes around the provided panoptic segmentation masks. 
Args: masks: masks in format `[number_masks, height, width]` where N is the number of masks Returns: boxes: bounding boxes in format `[number_masks, 4]` in xyxy format """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->ConditionalDetr def prepare_coco_panoptic_annotation( image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool = True, input_data_format: Union[ChannelDimension, str] = None, ) -> Dict: """ Prepare a coco panoptic annotation for ConditionalDetr. """ image_height, image_width = get_image_size(image, channel_dim=input_data_format) annotation_path = pathlib.Path(masks_path) / target["file_name"] new_target = {} new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64) new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64) new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64) if "segments_info" in target: masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([segment_info["id"] for segment_info in target["segments_info"]]) masks = masks == ids[:, None, None] masks = masks.astype(np.uint8) if return_masks: new_target["masks"] = masks new_target["boxes"] = masks_to_boxes(masks) new_target["class_labels"] = np.array( [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["iscrowd"] = np.asarray( [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["area"] = np.asarray( [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32 ) return new_target # Copied from transformers.models.detr.image_processing_detr.get_segmentation_image def get_segmentation_image( masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False ): h, w = input_size final_h, final_w = target_size m_id = scipy.special.softmax(masks.transpose(0, 1), -1) if m_id.shape[-1] == 0: # We didn't detect any mask :( m_id = np.zeros((h, w), dtype=np.int64) else: m_id = m_id.argmax(-1).reshape(h, w) if deduplicate: # Merge the masks corresponding to the same stuff class for equiv in stuff_equiv_classes.values(): for eq_id in equiv: m_id[m_id == eq_id] = equiv[0] seg_img = id_to_rgb(m_id) seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST) return seg_img # Copied from transformers.models.detr.image_processing_detr.get_mask_area def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray: final_h, final_w = target_size np_seg_img = seg_img.astype(np.uint8) np_seg_img = np_seg_img.reshape(final_h, final_w, 
3)
    m_id = rgb_to_id(np_seg_img)
    area = [(m_id == i).sum() for i in range(n_classes)]
    return area


# Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities
def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    probs = scipy.special.softmax(logits, axis=-1)
    labels = probs.argmax(-1, keepdims=True)
    scores = np.take_along_axis(probs, labels, axis=-1)
    scores, labels = scores.squeeze(-1), labels.squeeze(-1)
    return scores, labels


# Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample with DetrForSegmentation->ConditionalDetrForSegmentation
def post_process_panoptic_sample(
    out_logits: np.ndarray,
    masks: np.ndarray,
    boxes: np.ndarray,
    processed_size: Tuple[int, int],
    target_size: Tuple[int, int],
    is_thing_map: Dict,
    threshold=0.85,
) -> Dict:
    """
    Converts the output of [`ConditionalDetrForSegmentation`] into panoptic segmentation predictions for a single
    sample.

    Args:
        out_logits (`torch.Tensor`):
            The logits for this sample.
        masks (`torch.Tensor`):
            The predicted segmentation masks for this sample.
        boxes (`torch.Tensor`):
            The predicted bounding boxes for this sample. The boxes are in the normalized format `(center_x,
            center_y, width, height)` and values between `[0, 1]`, relative to the size of the image (disregarding
            padding).
        processed_size (`Tuple[int, int]`):
            The processed size of the image `(height, width)`, as returned by the preprocessing step, i.e. the size
            after data augmentation but before batching.
        target_size (`Tuple[int, int]`):
            The target size of the image, `(height, width)` corresponding to the requested final size of the
            prediction.
        is_thing_map (`Dict`):
            A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not.
        threshold (`float`, *optional*, defaults to 0.85):
            The threshold used to binarize the segmentation masks.
    """
    # we filter empty queries and detections below the threshold
    scores, labels = score_labels_from_class_probabilities(out_logits)
    keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold)

    cur_scores = scores[keep]
    cur_classes = labels[keep]
    cur_boxes = center_to_corners_format(boxes[keep])

    if len(cur_boxes) != len(cur_classes):
        raise ValueError("Not as many boxes as there are classes")

    cur_masks = masks[keep]
    cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR)
    cur_masks = safe_squeeze(cur_masks, 1)
    b, h, w = cur_masks.shape

    # It may be that we have several predicted masks for the same stuff class.
    # In the following, we track the list of mask ids for each stuff class (they are merged later on)
    cur_masks = cur_masks.reshape(b, -1)
    stuff_equiv_classes = defaultdict(list)
    for k, label in enumerate(cur_classes):
        if not is_thing_map[label]:
            stuff_equiv_classes[label].append(k)

    seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True)
    area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores))

    # We filter out any mask that is too small
    if cur_classes.size > 0:  # `size` is an attribute on numpy arrays, not a method
        # We now filter empty masks as long as we find some
        filtered_small = np.array([a <= 4 for a in area], dtype=bool)
        while filtered_small.any():
            cur_masks = cur_masks[~filtered_small]
            cur_scores = cur_scores[~filtered_small]
            cur_classes = cur_classes[~filtered_small]
            seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True)
            area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores))
            filtered_small = np.array([a <= 4 for a in area], dtype=bool)
    else:
        cur_classes = np.ones((1, 1), dtype=np.int64)

    segments_info = [
        {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a}
        for i, (cat, a) in enumerate(zip(cur_classes, area))
    ]
    del cur_classes

    with io.BytesIO() as out:
        PIL.Image.fromarray(seg_img).save(out, format="PNG")
        predictions = {"png_string": out.getvalue(), "segments_info": segments_info}

    return predictions


# Copied from transformers.models.detr.image_processing_detr.resize_annotation
def resize_annotation(
    annotation: Dict[str, Any],
    orig_size: Tuple[int, int],
    target_size: Tuple[int, int],
    threshold: float = 0.5,
    resample: PILImageResampling = PILImageResampling.NEAREST,
):
    """
    Resizes an annotation to a target size.

    Args:
        annotation (`Dict[str, Any]`):
            The annotation dictionary.
        orig_size (`Tuple[int, int]`):
            The original size of the input image.
        target_size (`Tuple[int, int]`):
            The target size of the image, as returned by the preprocessing `resize` step.
        threshold (`float`, *optional*, defaults to 0.5):
            The threshold used to binarize the segmentation masks.
        resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
            The resampling filter to use when resizing the masks.
    """
    ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
    ratio_height, ratio_width = ratios

    new_annotation = {}
    new_annotation["size"] = target_size

    for key, value in annotation.items():
        if key == "boxes":
            boxes = value
            scaled_boxes = boxes * np.asarray(
                [ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32
            )
            new_annotation["boxes"] = scaled_boxes
        elif key == "area":
            area = value
            scaled_area = area * (ratio_width * ratio_height)
            new_annotation["area"] = scaled_area
        elif key == "masks":
            masks = value[:, None]
            masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
            masks = masks.astype(np.float32)
            masks = masks[:, 0] > threshold
            new_annotation["masks"] = masks
        elif key == "size":
            new_annotation["size"] = target_size
        else:
            new_annotation[key] = value

    return new_annotation


# Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle
def binary_mask_to_rle(mask):
    """
    Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format.

    Args:
        mask (`torch.Tensor` or `numpy.array`):
            A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target
            segment_id or class_id.

    Returns:
        `List`: Run-length encoded list of the binary mask.
Refer to COCO API for more information about the RLE format.
    """
    if is_torch_tensor(mask):
        mask = mask.numpy()

    pixels = mask.flatten()
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return list(runs)


# Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle
def convert_segmentation_to_rle(segmentation):
    """
    Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format.

    Args:
        segmentation (`torch.Tensor` or `numpy.array`):
            A segmentation map of shape `(height, width)` where each value denotes a segment or class id.

    Returns:
        `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.
    """
    segment_ids = torch.unique(segmentation)

    run_length_encodings = []
    for idx in segment_ids:
        mask = torch.where(segmentation == idx, 1, 0)
        rle = binary_mask_to_rle(mask)
        run_length_encodings.append(rle)

    return run_length_encodings


# Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects
def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
    """
    Binarize the given masks using `object_mask_threshold` and return the associated values of `masks`, `scores` and
    `labels`.

    Args:
        masks (`torch.Tensor`):
            A tensor of shape `(num_queries, height, width)`.
        scores (`torch.Tensor`):
            A tensor of shape `(num_queries)`.
        labels (`torch.Tensor`):
            A tensor of shape `(num_queries)`.
        object_mask_threshold (`float`):
            A number between 0 and 1 used to binarize the masks.
        num_labels (`int`):
            The label id of the null / "no object" class; predictions with this label are discarded.

    Raises:
        `ValueError`: Raised when the first dimension doesn't match in all input tensors.

    Returns:
        `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the
        region < `object_mask_threshold`.
""" if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments class ConditionalDetrImageProcessor(BaseImageProcessor): r""" Constructs a Conditional Detr image processor. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): Size of the image's `(height, width)` dimensions after resizing. Can be overridden by the `size` parameter in the `preprocess` method. Available options are: - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`. Do NOT keep the aspect ratio. 
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge less or equal to `longest_edge`. - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to `max_width`. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize: Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_annotations (`bool`, *optional*, defaults to `True`): Controls whether to convert the annotations to the format expected by the DETR model. Converts the bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`. Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method. If `True`, padding will be applied to the bottom and right of the image with zeros. If `pad_size` is provided, the image will be padded to the specified dimensions. Otherwise, the image will be padded to the maximum height and width of the batch. pad_size (`Dict[str, int]`, *optional*): The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest height and width in the batch. 
""" model_input_names = ["pixel_values", "pixel_mask"] # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__ def __init__( self, format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, do_convert_annotations: Optional[bool] = None, do_pad: bool = True, pad_size: Optional[Dict[str, int]] = None, **kwargs, ) -> None: if "pad_and_return_pixel_mask" in kwargs: do_pad = kwargs.pop("pad_and_return_pixel_mask") if "max_size" in kwargs: logger.warning_once( "The `max_size` parameter is deprecated and will be removed in v4.26. " "Please specify in `size['longest_edge'] instead`.", ) max_size = kwargs.pop("max_size") else: max_size = None if size is None else 1333 size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333} size = get_size_dict(size, max_size=max_size, default_to_square=False) # Backwards compatibility if do_convert_annotations is None: do_convert_annotations = do_normalize super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.do_convert_annotations = do_convert_annotations self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad self.pad_size = pad_size self._valid_processor_keys = [ "images", "annotations", "return_segmentation_masks", "masks_path", "do_resize", "size", "resample", "do_rescale", "rescale_factor", "do_normalize", "do_convert_annotations", "image_mean", "image_std", "do_pad", "pad_size", "format", "return_tensors", "data_format", "input_data_format", ] @classmethod # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->ConditionalDetr def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): """ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is created using from_dict and kwargs e.g. `ConditionalDetrImageProcessor.from_pretrained(checkpoint, size=600, max_size=800)` """ image_processor_dict = image_processor_dict.copy() if "max_size" in kwargs: image_processor_dict["max_size"] = kwargs.pop("max_size") if "pad_and_return_pixel_mask" in kwargs: image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask") return super().from_dict(image_processor_dict, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->ConditionalDetr def prepare_annotation( self, image: np.ndarray, target: Dict, format: Optional[AnnotationFormat] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Dict: """ Prepare an annotation for feeding into ConditionalDetr model. 
""" format = format if format is not None else self.format if format == AnnotationFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation( image, target, return_segmentation_masks, input_data_format=input_data_format ) elif format == AnnotationFormat.COCO_PANOPTIC: return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_panoptic_annotation( image, target, masks_path=masks_path, return_masks=return_segmentation_masks, input_data_format=input_data_format, ) else: raise ValueError(f"Format {format} is not supported.") return target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the image's `(height, width)` dimensions after resizing. Available options are: - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`. Do NOT keep the aspect ratio. - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge less or equal to `longest_edge`. - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to `max_width`. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if "max_size" in kwargs: logger.warning_once( "The `max_size` parameter is deprecated and will be removed in v4.26. " "Please specify in `size['longest_edge'] instead`.", ) max_size = kwargs.pop("max_size") else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: new_size = get_resize_output_image_size( image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format ) elif "max_height" in size and "max_width" in size: new_size = get_image_size_for_max_height_width( image, size["max_height"], size["max_width"], input_data_format=input_data_format ) elif "height" in size and "width" in size: new_size = (size["height"], size["width"]) else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." 
) image = resize( image, size=new_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) return image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation def resize_annotation( self, annotation, orig_size, size, resample: PILImageResampling = PILImageResampling.NEAREST, ) -> Dict: """ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched to this number. """ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Rescale the image by the given factor. image = image * rescale_factor. Args: image (`np.ndarray`): Image to rescale. rescale_factor (`float`): The value to use for rescaling. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: """ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to `[center_x, center_y, width, height]` format and from absolute to relative pixel values. """ return normalize_annotation(annotation, image_size=image_size) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image def _update_annotation_for_padded_image( self, annotation: Dict, input_image_size: Tuple[int, int], output_image_size: Tuple[int, int], padding, update_bboxes, ) -> Dict: """ Update the annotation for a padded image. 
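
        Illustrative numbers (an assumption for demonstration, not taken from the code): padding a
        `(100, 100)` image on the right to `(100, 200)` multiplies the relative x-coordinates of the
        boxes by `100 / 200 = 0.5`, since the boxes are re-normalized by the padded size below.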
""" new_annotation = {} new_annotation["size"] = output_image_size for key, value in annotation.items(): if key == "masks": masks = value masks = pad( masks, padding, mode=PaddingMode.CONSTANT, constant_values=0, input_data_format=ChannelDimension.FIRST, ) masks = safe_squeeze(masks, 1) new_annotation["masks"] = masks elif key == "boxes" and update_bboxes: boxes = value boxes *= np.asarray( [ input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0], input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0], ] ) new_annotation["boxes"] = boxes elif key == "size": new_annotation["size"] = output_image_size else: new_annotation[key] = value return new_annotation # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], annotation: Optional[Dict[str, Any]] = None, constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, update_bboxes: bool = True, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) if annotation is not None: annotation = self._update_annotation_for_padded_image( annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes ) return padded_image, annotation # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None, constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, update_bboxes: bool = True, pad_size: Optional[Dict[str, int]] = None, ) -> BatchFeature: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: images (List[`np.ndarray`]): Images to pad. annotations (`AnnotationType` or `List[AnnotationType]`, *optional*): Annotations to transform according to the padding that is applied to the images. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. 
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
            update_bboxes (`bool`, *optional*, defaults to `True`):
                Whether to update the bounding boxes in the annotations to match the padded images. If the bounding
                boxes have not been converted to relative coordinates and `(center_x, center_y, width, height)`
                format, the bounding boxes will not be updated.
            pad_size (`Dict[str, int]`, *optional*):
                The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size
                provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
                height and width in the batch.
        """
        pad_size = pad_size if pad_size is not None else self.pad_size
        if pad_size is not None:
            padded_size = (pad_size["height"], pad_size["width"])
        else:
            padded_size = get_max_height_width(images, input_data_format=input_data_format)

        annotation_list = annotations if annotations is not None else [None] * len(images)
        padded_images = []
        padded_annotations = []
        for image, annotation in zip(images, annotation_list):
            padded_image, padded_annotation = self._pad_image(
                image,
                padded_size,
                annotation,
                constant_values=constant_values,
                data_format=data_format,
                input_data_format=input_data_format,
                update_bboxes=update_bboxes,
            )
            padded_images.append(padded_image)
            padded_annotations.append(padded_annotation)

        data = {"pixel_values": padded_images}

        if return_pixel_mask:
            masks = [
                make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format)
                for image in images
            ]
            data["pixel_mask"] = masks

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)

        if annotations is not None:
            encoded_inputs["labels"] = [
                BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations
            ]

        return encoded_inputs

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess
    def preprocess(
        self,
        images: ImageInput,
        annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
        return_segmentation_masks: bool = None,
        masks_path: Optional[Union[str, pathlib.Path]] = None,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample=None,  # PILImageResampling
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[Union[int, float]] = None,
        do_normalize: Optional[bool] = None,
        do_convert_annotations: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_pad: Optional[bool] = None,
        format: Optional[Union[str, AnnotationFormat]] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        pad_size: Optional[Dict[str, int]] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess an image or a batch of images so that it can be used by the model.

        Args:
            images (`ImageInput`):
                Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
                from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
                List of annotations associated with the image or batch of images. If annotation is for object
                detection, the annotations should be a dictionary with the following keys:
                - "image_id" (`int`): The image id.
- "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a dictionary. An image can have no annotations, in which case the list should be empty. If annotation is for segmentation, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary. An image can have no segments, in which case the list should be empty. - "file_name" (`str`): The file name of the image. return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks): Whether to return segmentation masks. masks_path (`str` or `pathlib.Path`, *optional*): Path to the directory containing the segmentation masks. do_resize (`bool`, *optional*, defaults to self.do_resize): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to self.size): Size of the image's `(height, width)` dimensions after resizing. Available options are: - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`. Do NOT keep the aspect ratio. - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge less or equal to `longest_edge`. - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to `max_width`. resample (`PILImageResampling`, *optional*, defaults to self.resample): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to self.do_rescale): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to self.rescale_factor): Rescale factor to use when rescaling the image. do_normalize (`bool`, *optional*, defaults to self.do_normalize): Whether to normalize the image. do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations): Whether to convert the annotations to the format expected by the model. Converts the bounding boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)` and in relative coordinates. image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean): Mean to use when normalizing the image. image_std (`float` or `List[float]`, *optional*, defaults to self.image_std): Standard deviation to use when normalizing the image. do_pad (`bool`, *optional*, defaults to self.do_pad): Whether to pad the image. If `True`, padding will be applied to the bottom and right of the image with zeros. If `pad_size` is provided, the image will be padded to the specified dimensions. Otherwise, the image will be padded to the maximum height and width of the batch. format (`str` or `AnnotationFormat`, *optional*, defaults to self.format): Format of the annotations. return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors): Type of tensors to return. If `None`, will return the list of images. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            pad_size (`Dict[str, int]`, *optional*):
                The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size
                provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
                height and width in the batch.
        """
        if "pad_and_return_pixel_mask" in kwargs:
            logger.warning_once(
                "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, "
                "use `do_pad` instead."
            )
            do_pad = kwargs.pop("pad_and_return_pixel_mask")

        max_size = None
        if "max_size" in kwargs:
            logger.warning_once(
                "The `max_size` argument is deprecated and will be removed in a future version, use"
                " `size['longest_edge']` instead."
            )
            size = kwargs.pop("max_size")

        do_resize = self.do_resize if do_resize is None else do_resize
        size = self.size if size is None else size
        size = get_size_dict(size=size, max_size=max_size, default_to_square=False)
        resample = self.resample if resample is None else resample
        do_rescale = self.do_rescale if do_rescale is None else do_rescale
        rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
        do_normalize = self.do_normalize if do_normalize is None else do_normalize
        image_mean = self.image_mean if image_mean is None else image_mean
        image_std = self.image_std if image_std is None else image_std
        do_convert_annotations = (
            self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
        )
        do_pad = self.do_pad if do_pad is None else do_pad
        pad_size = self.pad_size if pad_size is None else pad_size
        format = self.format if format is None else format

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)

        # Here, the pad() method pads to the maximum of (width, height). It does not need to be validated.
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        if annotations is not None and isinstance(annotations, dict):
            annotations = [annotations]

        if annotations is not None and len(images) != len(annotations):
            raise ValueError(
                f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
            )

        format = AnnotationFormat(format)
        if annotations is not None:
            validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)

        if (
            masks_path is not None
            and format == AnnotationFormat.COCO_PANOPTIC
            and not isinstance(masks_path, (pathlib.Path, str))
        ):
            raise ValueError(
                "The path to the directory containing the mask PNG files should be provided as a"
                f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
            )

        # All transformations expect numpy arrays
        images = [to_numpy_array(image) for image in images]

        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
        if annotations is not None:
            prepared_images = []
            prepared_annotations = []
            for image, target in zip(images, annotations):
                target = self.prepare_annotation(
                    image,
                    target,
                    format,
                    return_segmentation_masks=return_segmentation_masks,
                    masks_path=masks_path,
                    input_data_format=input_data_format,
                )
                prepared_images.append(image)
                prepared_annotations.append(target)
            images = prepared_images
            annotations = prepared_annotations
            del prepared_images, prepared_annotations

        # transformations
        if do_resize:
            if annotations is not None:
                resized_images, resized_annotations = [], []
                for image, target in zip(images, annotations):
                    orig_size = get_image_size(image, input_data_format)
                    resized_image = self.resize(
                        image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format
                    )
                    resized_annotation = self.resize_annotation(
                        target, orig_size, get_image_size(resized_image, input_data_format)
                    )
                    resized_images.append(resized_image)
                    resized_annotations.append(resized_annotation)
                images = resized_images
                annotations = resized_annotations
                del resized_images, resized_annotations
            else:
                images = [
                    self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
                    for image in images
                ]

        if do_rescale:
            images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]

        if do_normalize:
            images = [
                self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
            ]

        if do_convert_annotations and annotations is not None:
            annotations = [
                self.normalize_annotation(annotation, get_image_size(image, input_data_format))
                for annotation, image in zip(annotations, images)
            ]

        if do_pad:
            # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
            encoded_inputs = self.pad(
                images,
                annotations=annotations,
                return_pixel_mask=True,
                data_format=data_format,
                input_data_format=input_data_format,
                update_bboxes=do_convert_annotations,
                return_tensors=return_tensors,
                pad_size=pad_size,
            )
        else:
            images = [
                to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
                for image in images
            ]
            encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
            if annotations is not None:
                encoded_inputs["labels"] = [
                    BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
                ]

        return encoded_inputs

    # POSTPROCESSING METHODS - TODO: add support for other frameworks
    def post_process(self, outputs, target_sizes):
        """
        Converts the raw output of [`ConditionalDetrForObjectDetection`] into the Pascal VOC format
        (xmin, ymin, xmax, ymax). Only supports PyTorch.

        Args:
            outputs ([`ConditionalDetrObjectDetectionOutput`]):
                Raw outputs of the model.
            target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
                Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the
                original image size (before any data augmentation).
                For visualization, this should be the image size after data augmentation, but before padding.

        Returns:
            `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
            in the batch as predicted by the model.
        """
        logger.warning_once(
            "`post_process` is deprecated and will be removed in v5 of Transformers, please use"
            " `post_process_object_detection` instead, with `threshold=0.` for equivalent results.",
        )

        out_logits, out_bbox = outputs.logits, outputs.pred_boxes

        if len(out_logits) != len(target_sizes):
            raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
        if target_sizes.shape[1] != 2:
            raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")

        prob = out_logits.sigmoid()
        topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1)
        scores = topk_values
        topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
        labels = topk_indexes % out_logits.shape[2]
        boxes = center_to_corners_format(out_bbox)
        boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))

        # and from relative [0, 1] to absolute [0, height] coordinates
        img_h, img_w = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]

        results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]

        return results

    # Copied from transformers.models.deformable_detr.image_processing_deformable_detr.DeformableDetrImageProcessor.post_process_object_detection with DeformableDetr->ConditionalDetr
    def post_process_object_detection(
        self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None, top_k: int = 100
    ):
        """
        Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x,
        top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.

        Args:
            outputs ([`DetrObjectDetectionOutput`]):
                Raw outputs of the model.
            threshold (`float`, *optional*):
                Score threshold to keep object detection predictions.
            target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
                `(height, width)` of each image in the batch. If left to None, predictions will not be resized.
            top_k (`int`, *optional*, defaults to 100):
                Keep only the top k bounding boxes before filtering by thresholding.

        Returns:
            `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
            in the batch as predicted by the model.
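
        Example (a minimal sketch; assumes network access to the Hub for the
        `"microsoft/conditional-detr-resnet-50"` checkpoint):

        ```python
        >>> import torch
        >>> from transformers import AutoImageProcessor, ConditionalDetrForObjectDetection
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        >>> model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> target_sizes = torch.tensor([image.size[::-1]])  # (height, width) of each image
        >>> results = image_processor.post_process_object_detection(
        ...     outputs, threshold=0.5, target_sizes=target_sizes
        ... )
        >>> results[0].keys()
        dict_keys(['scores', 'labels', 'boxes'])
        ```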
""" out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() prob = prob.view(out_logits.shape[0], -1) k_value = min(top_k, prob.size(1)) topk_values, topk_indexes = torch.topk(prob, k_value, dim=1) scores = topk_values topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor") labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_semantic_segmentation with Detr->ConditionalDetr def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]] = None): """ Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): A list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If unset, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_instance_segmentation with Detr->ConditionalDetr def post_process_instance_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, ) -> List[Dict]: """ Converts the output of [`ConditionalDetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If unset, predictions will not be resized. return_coco_annotation (`bool`, *optional*): Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=[], target_size=target_size, ) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) results.append({"segmentation": segmentation, "segments_info": segments}) return results # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_panoptic_segmentation with Detr->ConditionalDetr def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`ConditionalDetrForSegmentation`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): The outputs from [`ConditionalDetrForSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If unset, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or `None` if no mask if found above `threshold`. 
If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: logger.warning_once("`label_ids_to_fuse` unset. No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results __all__ = ["ConditionalDetrImageProcessor"]
transformers/src/transformers/models/conditional_detr/image_processing_conditional_detr.py/0
{ "file_path": "transformers/src/transformers/models/conditional_detr/image_processing_conditional_detr.py", "repo_id": "transformers", "token_count": 36654 }
# coding=utf-8 # Copyright 2022 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 ConvNext model.""" from __future__ import annotations from typing import List, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput from ...modeling_tf_utils import ( TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_convnext import ConvNextConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "ConvNextConfig" _CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224" class TFConvNextDropPath(keras.layers.Layer): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). References: (1) github.com:rwightman/pytorch-image-models """ def __init__(self, drop_path: float, **kwargs): super().__init__(**kwargs) self.drop_path = drop_path def call(self, x: tf.Tensor, training=None): if training: keep_prob = 1 - self.drop_path shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1) random_tensor = keep_prob + tf.random.uniform(shape, 0, 1) random_tensor = tf.floor(random_tensor) return (x / keep_prob) * random_tensor return x class TFConvNextEmbeddings(keras.layers.Layer): """This class is comparable to (and inspired by) the SwinEmbeddings class found in src/transformers/models/swin/modeling_swin.py. """ def __init__(self, config: ConvNextConfig, **kwargs): super().__init__(**kwargs) self.patch_embeddings = keras.layers.Conv2D( filters=config.hidden_sizes[0], kernel_size=config.patch_size, strides=config.patch_size, name="patch_embeddings", kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), ) self.layernorm = keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm") self.num_channels = config.num_channels self.config = config def call(self, pixel_values): if isinstance(pixel_values, dict): pixel_values = pixel_values["pixel_values"] tf.debugging.assert_equal( shape_list(pixel_values)[1], self.num_channels, message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.", ) # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
        # shape = (batch_size, in_height, in_width, in_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.layernorm(embeddings)
        return embeddings

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "patch_embeddings", None) is not None:
            with tf.name_scope(self.patch_embeddings.name):
                self.patch_embeddings.build([None, None, None, self.config.num_channels])
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])


class TFConvNextLayer(keras.layers.Layer):
    """This corresponds to the `Block` class in the original implementation.

    There are two equivalent implementations:
    (1) [DwConv, LayerNorm (channels_first), Conv, GELU, 1x1 Conv]; all in (N, C, H, W)
    (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back

    The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow
    NHWC ordering, we can just apply the operations straight-away without the permutation.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`): Stochastic depth rate. Default: 0.0.
    """

    def __init__(self, config, dim, drop_path=0.0, **kwargs):
        super().__init__(**kwargs)
        self.dim = dim
        self.config = config
        self.dwconv = keras.layers.Conv2D(
            filters=dim,
            kernel_size=7,
            padding="same",
            groups=dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="dwconv",
        )  # depthwise conv
        self.layernorm = keras.layers.LayerNormalization(
            epsilon=1e-6,
            name="layernorm",
        )
        self.pwconv1 = keras.layers.Dense(
            units=4 * dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="pwconv1",
        )  # pointwise/1x1 convs, implemented with linear layers
        self.act = get_tf_activation(config.hidden_act)
        self.pwconv2 = keras.layers.Dense(
            units=dim,
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer="zeros",
            name="pwconv2",
        )
        # Using `layers.Activation` instead of `tf.identity` to better control `training`
        # behaviour.
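        # Stochastic depth ("drop path"): when `drop_path > 0.0`, the residual branch is randomly
        # zeroed per sample during training (and rescaled by `1 / keep_prob`); otherwise the layer
        # reduces to a plain identity pass-through.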
        self.drop_path = (
            TFConvNextDropPath(drop_path, name="drop_path")
            if drop_path > 0.0
            else keras.layers.Activation("linear", name="drop_path")
        )

    def build(self, input_shape: tf.TensorShape = None):
        # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)
        self.layer_scale_parameter = (
            self.add_weight(
                shape=(self.dim,),
                initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
                trainable=True,
                name="layer_scale_parameter",
            )
            if self.config.layer_scale_init_value > 0
            else None
        )

        if self.built:
            return
        self.built = True
        if getattr(self, "dwconv", None) is not None:
            with tf.name_scope(self.dwconv.name):
                self.dwconv.build([None, None, None, self.dim])
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, None, self.dim])
        if getattr(self, "pwconv1", None) is not None:
            with tf.name_scope(self.pwconv1.name):
                self.pwconv1.build([None, None, self.dim])
        if getattr(self, "pwconv2", None) is not None:
            with tf.name_scope(self.pwconv2.name):
                self.pwconv2.build([None, None, 4 * self.dim])
        if getattr(self, "drop_path", None) is not None:
            with tf.name_scope(self.drop_path.name):
                self.drop_path.build(None)

    def call(self, hidden_states, training=False):
        input = hidden_states
        x = self.dwconv(hidden_states)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)

        if self.layer_scale_parameter is not None:
            x = self.layer_scale_parameter * x

        x = input + self.drop_path(x, training=training)
        return x


class TFConvNextStage(keras.layers.Layer):
    """ConvNext stage, consisting of an optional downsampling layer + multiple residual blocks.

    Args:
        config (`ConvNextConfig`): Model configuration class.
        in_channels (`int`): Number of input channels.
        out_channels (`int`): Number of output channels.
        depth (`int`): Number of residual blocks.
        drop_path_rates (`List[float]`): Stochastic depth rates for each layer.
    """

    def __init__(
        self,
        config: ConvNextConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 2,
        stride: int = 2,
        depth: int = 2,
        drop_path_rates: Optional[List[float]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if in_channels != out_channels or stride > 1:
            self.downsampling_layer = [
                keras.layers.LayerNormalization(
                    epsilon=1e-6,
                    name="downsampling_layer.0",
                ),
                # Inputs to this layer will follow NHWC format since we
                # transposed the inputs from NCHW to NHWC in the `TFConvNextEmbeddings`
                # layer. All the outputs throughout the model will be in NHWC
                # from this point on until the output where we again change to
                # NCHW.
keras.layers.Conv2D( filters=out_channels, kernel_size=kernel_size, strides=stride, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name="downsampling_layer.1", ), ] else: self.downsampling_layer = [tf.identity] drop_path_rates = drop_path_rates or [0.0] * depth self.layers = [ TFConvNextLayer( config, dim=out_channels, drop_path=drop_path_rates[j], name=f"layers.{j}", ) for j in range(depth) ] self.in_channels = in_channels self.out_channels = out_channels self.stride = stride def call(self, hidden_states): for layer in self.downsampling_layer: hidden_states = layer(hidden_states) for layer in self.layers: hidden_states = layer(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) if self.in_channels != self.out_channels or self.stride > 1: with tf.name_scope(self.downsampling_layer[0].name): self.downsampling_layer[0].build([None, None, None, self.in_channels]) with tf.name_scope(self.downsampling_layer[1].name): self.downsampling_layer[1].build([None, None, None, self.in_channels]) class TFConvNextEncoder(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.stages = [] drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths)) drop_path_rates = tf.split(drop_path_rates, config.depths) drop_path_rates = [x.numpy().tolist() for x in drop_path_rates] prev_chs = config.hidden_sizes[0] for i in range(config.num_stages): out_chs = config.hidden_sizes[i] stage = TFConvNextStage( config, in_channels=prev_chs, out_channels=out_chs, stride=2 if i > 0 else 1, depth=config.depths[i], drop_path_rates=drop_path_rates[i], name=f"stages.{i}", ) self.stages.append(stage) prev_chs = out_chs def call(self, hidden_states, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.stages): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = layer_module(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states) def build(self, input_shape=None): for stage in self.stages: with tf.name_scope(stage.name): stage.build(None) @keras_serializable class TFConvNextMainLayer(keras.layers.Layer): config_class = ConvNextConfig def __init__(self, config: ConvNextConfig, add_pooling_layer: bool = True, **kwargs): super().__init__(**kwargs) self.config = config self.embeddings = TFConvNextEmbeddings(config, name="embeddings") self.encoder = TFConvNextEncoder(config, name="encoder") self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") # We are setting the `data_format` like so because from here on we will revert to the # NCHW output format self.pooler = keras.layers.GlobalAvgPool2D(data_format="channels_first") if add_pooling_layer else None @unpack_inputs def call( self, pixel_values: TFModelInputType | None = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: output_hidden_states = ( output_hidden_states if 
            output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        last_hidden_state = encoder_outputs[0]
        # Change to the NCHW output format to have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = self.layernorm(self.pooler(last_hidden_state))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            hidden_states = hidden_states if output_hidden_states else ()
            return (last_hidden_state, pooled_output) + hidden_states

        return TFBaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "embeddings", None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, self.config.hidden_sizes[-1]])


class TFConvNextPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ConvNextConfig
    base_model_prefix = "convnext"
    main_input_name = "pixel_values"


CONVNEXT_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you -
    just pass your inputs and labels in any format that `model.fit()` supports!
If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ CONVNEXT_INPUTS_DOCSTRING = r""" Args: pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. 
""" @add_start_docstrings( "The bare ConvNext model outputting raw features without any specific head on top.", CONVNEXT_START_DOCSTRING, ) class TFConvNextModel(TFConvNextPreTrainedModel): def __init__(self, config, *inputs, add_pooling_layer=True, **kwargs): super().__init__(config, *inputs, **kwargs) self.convnext = TFConvNextMainLayer(config, add_pooling_layer=add_pooling_layer, name="convnext") @unpack_inputs @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def call( self, pixel_values: TFModelInputType | None = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: r""" Returns: Examples: ```python >>> from transformers import AutoImageProcessor, TFConvNextModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") >>> model = TFConvNextModel.from_pretrained("facebook/convnext-tiny-224") >>> inputs = image_processor(images=image, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") outputs = self.convnext( pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPooling( last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convnext", None) is not None: with tf.name_scope(self.convnext.name): self.convnext.build(None) @add_start_docstrings( """ ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """, CONVNEXT_START_DOCSTRING, ) class TFConvNextForImageClassification(TFConvNextPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: ConvNextConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.convnext = TFConvNextMainLayer(config, name="convnext") # Classifier head self.classifier = keras.layers.Dense( units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), bias_initializer="zeros", name="classifier", ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def call( self, pixel_values: TFModelInputType | None = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. 
Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> from transformers import AutoImageProcessor, TFConvNextForImageClassification >>> import tensorflow as tf >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") >>> model = TFConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224") >>> inputs = image_processor(images=image, return_tensors="tf") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0] >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)]) ```""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") outputs = self.convnext( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convnext", None) is not None: with tf.name_scope(self.convnext.name): self.convnext.build(None) if getattr(self, "classifier", None) is not None: if hasattr(self.classifier, "name"): with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_sizes[-1]]) __all__ = ["TFConvNextForImageClassification", "TFConvNextModel", "TFConvNextPreTrainedModel"]
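
# Shape sketch for the classes above (illustrative, assuming the "facebook/convnext-tiny-224" configuration with
# hidden_sizes=[96, 192, 384, 768] and an overall downsampling stride of 32): a (1, 3, 224, 224) `pixel_values`
# input should yield `last_hidden_state` of shape (1, 768, 7, 7) in the NCHW layout produced by the main layer,
# and `pooler_output` of shape (1, 768).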
transformers/src/transformers/models/convnext/modeling_tf_convnext.py/0
{ "file_path": "transformers/src/transformers/models/convnext/modeling_tf_convnext.py", "repo_id": "transformers", "token_count": 11636 }
# coding=utf-8 # Copyright 2018 Salesforce and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 CTRL model.""" from __future__ import annotations from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast, TFSequenceClassifierOutput from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_ctrl import CTRLConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "Salesforce/ctrl" _CONFIG_FOR_DOC = "CTRLConfig" def angle_defn(pos, i, d_model_size): angle_rates = 1 / np.power(10000, (2 * (i // 2)) / d_model_size) return pos * angle_rates def positional_encoding(position, d_model_size): # create the sinusoidal pattern for the positional encoding angle_rads = angle_defn(np.arange(position)[:, np.newaxis], np.arange(d_model_size)[np.newaxis, :], d_model_size) sines = np.sin(angle_rads[:, 0::2]) cosines = np.cos(angle_rads[:, 1::2]) pos_encoding = tf.convert_to_tensor(np.concatenate([sines, cosines], axis=-1)) return pos_encoding def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None): # calculate attention matmul_qk = tf.matmul(q, k, transpose_b=True) dk = tf.cast(shape_list(k)[-1], dtype=matmul_qk.dtype) scaled_attention_logits = matmul_qk / tf.math.sqrt(dk) if mask is not None: scaled_attention_logits += tf.cast(mask * -1e4, dtype=scaled_attention_logits.dtype) if attention_mask is not None: # Apply the attention mask attention_mask = tf.cast(attention_mask, dtype=scaled_attention_logits.dtype) scaled_attention_logits = scaled_attention_logits + attention_mask attention_weights = stable_softmax(scaled_attention_logits, axis=-1) # Mask heads if we want to if head_mask is not None: attention_weights = attention_weights * head_mask output = tf.matmul(attention_weights, v) return output, attention_weights class TFMultiHeadAttention(keras.layers.Layer): def __init__(self, d_model_size, num_heads, output_attentions=False, **kwargs): super().__init__(**kwargs) self.num_heads = num_heads self.d_model_size = d_model_size self.output_attentions = output_attentions self.depth = int(d_model_size / self.num_heads) self.Wq = keras.layers.Dense(d_model_size, name="Wq") self.Wk = keras.layers.Dense(d_model_size, name="Wk") self.Wv = keras.layers.Dense(d_model_size, name="Wv") self.dense = keras.layers.Dense(d_model_size, name="dense") def split_into_heads(self, x, batch_size): x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, v, k, q, mask, layer_past, 
attention_mask, head_mask, use_cache, output_attentions, training=False): batch_size = shape_list(q)[0] q = self.Wq(q) k = self.Wk(k) v = self.Wv(v) q = self.split_into_heads(q, batch_size) k = self.split_into_heads(k, batch_size) v = self.split_into_heads(v, batch_size) if layer_past is not None: past_key, past_value = tf.unstack(layer_past, axis=0) k = tf.concat((past_key, k), axis=-2) v = tf.concat((past_value, v), axis=-2) if use_cache: present = tf.stack((k, v), axis=0) else: present = (None,) output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask) scaled_attention = tf.transpose(output[0], perm=[0, 2, 1, 3]) attn = output[1] original_size_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model_size)) output = self.dense(original_size_attention) outputs = (output, present) if output_attentions: outputs = outputs + (attn,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "Wq", None) is not None: with tf.name_scope(self.Wq.name): self.Wq.build([None, None, self.d_model_size]) if getattr(self, "Wk", None) is not None: with tf.name_scope(self.Wk.name): self.Wk.build([None, None, self.d_model_size]) if getattr(self, "Wv", None) is not None: with tf.name_scope(self.Wv.name): self.Wv.build([None, None, self.d_model_size]) if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.d_model_size]) class TFPointWiseFeedForwardLayer(keras.layers.Layer): def __init__(self, d_model_size, dff, **kwargs): super().__init__(**kwargs) self.dense_0 = keras.layers.Dense(dff, activation="relu", name="0") self.dense_2 = keras.layers.Dense(d_model_size, name="2") self.d_model_size = d_model_size self.dff = dff def call(self, inputs, trainable=False): dense_0_output = self.dense_0(inputs) dense_2_output = self.dense_2(dense_0_output) return dense_2_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense_0", None) is not None: with tf.name_scope(self.dense_0.name): self.dense_0.build([None, None, self.d_model_size]) if getattr(self, "dense_2", None) is not None: with tf.name_scope(self.dense_2.name): self.dense_2.build([None, None, self.dff]) class TFEncoderLayer(keras.layers.Layer): def __init__( self, d_model_size, num_heads, dff, rate=0.1, layer_norm_epsilon=1e-6, output_attentions=False, **kwargs ): super().__init__(**kwargs) self.output_attentions = output_attentions self.multi_head_attention = TFMultiHeadAttention( d_model_size, num_heads, output_attentions=self.output_attentions, name="multi_head_attention" ) self.ffn = TFPointWiseFeedForwardLayer(d_model_size, dff, name="ffn") self.layernorm1 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm1") self.layernorm2 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm2") self.dropout1 = keras.layers.Dropout(rate) self.dropout2 = keras.layers.Dropout(rate) self.d_model_size = d_model_size def call(self, x, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False): normed = self.layernorm1(x) attn_outputs = self.multi_head_attention( normed, normed, normed, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=training, ) attn_output = attn_outputs[0] attn_output = self.dropout1(attn_output, training=training) out1 = x + attn_output out2 = self.layernorm2(out1) ffn_output = self.ffn(out2) ffn_output = self.dropout2(ffn_output, training=training) 
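        # Second residual connection: add the feed-forward output back onto the attention branch
        # (each sub-layer here normalizes its input before the residual add).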
out2 = out1 + ffn_output outputs = (out2,) + attn_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "multi_head_attention", None) is not None: with tf.name_scope(self.multi_head_attention.name): self.multi_head_attention.build(None) if getattr(self, "ffn", None) is not None: with tf.name_scope(self.ffn.name): self.ffn.build(None) if getattr(self, "layernorm1", None) is not None: with tf.name_scope(self.layernorm1.name): self.layernorm1.build([None, None, self.d_model_size]) if getattr(self, "layernorm2", None) is not None: with tf.name_scope(self.layernorm2.name): self.layernorm2.build([None, None, self.d_model_size]) @keras_serializable class TFCTRLMainLayer(keras.layers.Layer): config_class = CTRLConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.use_cache = config.use_cache self.return_dict = config.use_return_dict self.d_model_size = config.n_embd self.num_layers = config.n_layer self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size) self.w = keras.layers.Embedding( input_dim=config.vocab_size, output_dim=config.n_embd, embeddings_initializer=get_initializer(config.initializer_range), name="w", ) self.dropout = keras.layers.Dropout(config.embd_pdrop) self.h = [ TFEncoderLayer( config.n_embd, config.n_head, config.dff, config.resid_pdrop, config.layer_norm_epsilon, self.output_attentions, name=f"h_._{i}", ) for i in range(config.n_layer) ] self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="layernorm") def get_input_embeddings(self): return self.w def set_input_embeddings(self, new_embeddings): self.w = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ raise NotImplementedError @unpack_inputs def call( self, input_ids: TFModelInputType | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[Tuple, TFBaseModelOutputWithPast]: # If using past key value states, only the last tokens # should be given as an input if past_key_values is not None: if input_ids is not None: input_ids = input_ids[:, -1:] if inputs_embeds is not None: inputs_embeds = inputs_embeds[:, -1:] if token_type_ids is not None: token_type_ids = token_type_ids[:, -1:] if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if past_key_values is None: past_length = 0 past_key_values = [None] * len(self.h) else: past_length = shape_list(past_key_values[0][0])[-2] if position_ids is None: position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32), axis=0) position_ids = tf.tile(position_ids, [input_shape[0], 1]) # Attention mask. if attention_mask is not None: # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1] + past_length)) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
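            # For example (illustrative): a padding mask of [1, 1, 0] for one sequence becomes the additive
            # bias [0.0, 0.0, -10000.0], broadcast to shape [batch_size, 1, 1, seq_length], via the ops below.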
one_cst = tf.constant(1.0) ten_thousand_cst = tf.constant(-10000.0) attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype) attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), ten_thousand_cst) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # head_mask has shape n_layer x batch x n_heads x N x N if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_layers if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) token_type_embeds = self.w(token_type_ids) token_type_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, dtype=token_type_embeds.dtype)) else: token_type_embeds = tf.constant(0.0) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.w.input_dim) inputs_embeds = self.w(input_ids) seq_len = input_shape[-1] mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0) inputs_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, inputs_embeds.dtype)) pos_embeds = tf.gather(self.pos_encoding, position_ids) pos_embeds = tf.cast(pos_embeds, dtype=token_type_embeds.dtype) hidden_states = inputs_embeds + pos_embeds + token_type_embeds hidden_states = self.dropout(hidden_states, training=training) output_shape = input_shape + [shape_list(hidden_states)[-1]] presents = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, (h, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) outputs = h( hidden_states, mask, layer_past, attention_mask, head_mask[i], use_cache, output_attentions, training=training, ) hidden_states, present = outputs[:2] if use_cache: presents = presents + (present,) if output_attentions: all_attentions = all_attentions + (outputs[2],) hidden_states = self.layernorm(hidden_states) hidden_states = tf.reshape(hidden_states, output_shape) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions) if not return_dict: return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "w", None) is not None: with tf.name_scope(self.w.name): self.w.build(None) if getattr(self, "layernorm", None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, None, self.config.n_embd]) if getattr(self, "h", None) is not None: for layer in self.h: with tf.name_scope(layer.name): layer.build(None) class TFCTRLPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = CTRLConfig base_model_prefix = "transformer" CTRL_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. 
    Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports!

    If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when
    creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to
    gather all the input Tensors in the first positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Parameters:
        config ([`CTRLConfig`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CTRL_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of
            input past key value states). Indices of input sequence tokens in the vocabulary.

            If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details.

            [What are input IDs?](../glossary#input-ids)
        past (`List[tf.Tensor]` of length `config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
            `past` output below). Can be used to speed up sequential decoding. The token ids which have their past
            given to this model should not be passed as input ids as they have already been computed.
        attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`tf.Tensor` or `Numpy array` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past` key value states are returned and can be used to speed up decoding (see `past`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
""" @add_start_docstrings( "The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.", CTRL_START_DOCSTRING, ) class TFCTRLModel(TFCTRLPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFCTRLMainLayer(config, name="transformer") @unpack_inputs @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[Tuple, TFBaseModelOutputWithPast]: outputs = self.transformer( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) class TFCTRLBiasLayer(keras.layers.Layer): """ Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis, so all weights have to be registered in a layer. """ def __init__(self, shape, initializer, trainable, name, **kwargs): super().__init__(name=name, **kwargs) self.shape = shape self.initializer = initializer self.trainable = trainable def build(self, input_shape): self.bias = self.add_weight( name="bias", shape=self.shape, initializer=self.initializer, trainable=self.trainable ) super().build(input_shape) def call(self, x): return x + self.bias @add_start_docstrings( """ The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, CTRL_START_DOCSTRING, ) class TFCTRLLMHeadModel(TFCTRLPreTrainedModel, TFCausalLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFCTRLMainLayer(config, name="transformer") self.bias_layer = TFCTRLBiasLayer( name="lm_head", shape=[1, config.vocab_size], initializer="zeros", trainable=True ) def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) def get_bias(self): return {"lm_head.bias": self.bias_layer.bias} def set_bias(self, value): # Replaces the existing layers containing bias for correct (de)serialization. 
vocab_size = value["lm_head.bias"].shape[-1] self.bias_layer = TFCTRLBiasLayer( name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=True ) self.bias_layer.build(None) self.bias_layer.bias.assign(value["lm_head.bias"]) # Copied from transformers.models.gpt2.modeling_tf_gpt2.TFGPT2LMHeadModel.prepare_inputs_for_generation def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs): token_type_ids = kwargs.get("token_type_ids", None) # only last token for inputs_ids if past is defined in kwargs if past_key_values: inputs = tf.expand_dims(inputs[:, -1], -1) if token_type_ids is not None: token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1) position_ids = kwargs.get("position_ids", None) attention_mask = kwargs.get("attention_mask", None) if attention_mask is not None and position_ids is None: position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True) if past_key_values: position_ids = tf.expand_dims(position_ids[:, -1], -1) return { "input_ids": inputs, "attention_mask": attention_mask, "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": use_cache, "token_type_ids": token_type_ids, } @unpack_inputs @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: np.ndarray | tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFCausalLMOutputWithPast]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., config.vocab_size - 1]`. 
""" transformer_outputs = self.transformer( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = transformer_outputs[0] logits = tf.matmul(hidden_states, self.transformer.w.weights, transpose_b=True) logits = self.bias_layer(logits) loss = None if labels is not None: # shift labels to the left and cut last logit token shifted_logits = logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels, shifted_logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, "bias_layer", None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) @add_start_docstrings( """ The CTRL Model transformer with a sequence classification head on top (linear layer). [`TFCTRLForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1, GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """, CTRL_START_DOCSTRING, ) class TFCTRLForSequenceClassification(TFCTRLPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier", use_bias=False, ) self.transformer = TFCTRLMainLayer(config, name="transformer") self.config = config def get_output_embeddings(self): # Remove after transformers v4.32. Fix this model's `test_model_common_attributes` test too. logger.warning( "Sequence classification models do not have output embeddings. `.get_output_embeddings` will be removed " "in transformers v4.32." 
        )
        return self.transformer.w

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFSequenceClassifierOutput]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification loss. Indices should be in `[0, ...,
            config.num_labels - 1]`.
        """
        transformer_outputs = self.transformer(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        hidden_states = transformer_outputs[0]
        logits = self.classifier(hidden_states)
        logits_shape = shape_list(logits)
        batch_size = logits_shape[0]

        if self.config.pad_token_id is None:
            last_non_pad_token = tf.fill((batch_size,), value=logits_shape[1] - 1)
        else:
            if input_ids is not None:
                token_indices = tf.range(shape_list(input_ids)[-1])
                non_pad_mask = tf.cast(input_ids != self.config.pad_token_id, token_indices.dtype)
                last_non_pad_token = tf.reduce_max(token_indices * non_pad_mask, axis=-1)
            else:
                last_non_pad_token = tf.fill((batch_size,), value=logits_shape[1] - 1)
                logger.warning_once(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )

        loss = None

        pooled_logits = tf.gather(logits, last_non_pad_token, batch_dims=1, axis=1)

        if labels is not None:
            if self.config.pad_token_id is None and logits_shape[0] != 1:
                raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")

            loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(pooled_logits, [-1, self.num_labels]))

        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=pooled_logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.n_embd])
        if getattr(self, "transformer", None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)


__all__ = ["TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", "TFCTRLPreTrainedModel"]
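
# Cached-decoding sketch (illustrative; the checkpoint name matches the docstrings above):
#
#     import tensorflow as tf
#     from transformers import AutoTokenizer, TFCTRLLMHeadModel
#
#     tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
#     model = TFCTRLLMHeadModel.from_pretrained("Salesforce/ctrl")
#     inputs = tokenizer("Links Hello world", return_tensors="tf")
#     out = model(**inputs, use_cache=True)
#     # On the next step, feed only the newest token; `past_key_values` carries the cached keys/values.
#     next_token = tf.argmax(out.logits[:, -1, :], axis=-1)[:, None]
#     out = model(input_ids=next_token, past_key_values=out.past_key_values, use_cache=True)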
transformers/src/transformers/models/ctrl/modeling_tf_ctrl.py/0
{ "file_path": "transformers/src/transformers/models/ctrl/modeling_tf_ctrl.py", "repo_id": "transformers", "token_count": 17110 }
# coding=utf-8
# Copyright 2020 Microsoft and the Hugging Face Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch DeBERTa model."""

from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutput,
    MaskedLMOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_deberta import DebertaConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "DebertaConfig"
_CHECKPOINT_FOR_DOC = "microsoft/deberta-base"

# Masked LM docstring
_CHECKPOINT_FOR_MASKED_LM = "lsanochkin/deberta-large-feedback"
_MASKED_LM_EXPECTED_OUTPUT = "' Paris'"
_MASKED_LM_EXPECTED_LOSS = "0.54"

# QuestionAnswering docstring
_CHECKPOINT_FOR_QA = "Palak/microsoft_deberta-large_squad"
_QA_EXPECTED_OUTPUT = "' a nice puppet'"
_QA_EXPECTED_LOSS = 0.14
_QA_TARGET_START_INDEX = 12
_QA_TARGET_END_INDEX = 14


class DebertaLayerNorm(nn.Module):
    """LayerNorm module in the TF style (epsilon inside the square root)."""

    def __init__(self, size, eps=1e-12):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(size))
        self.bias = nn.Parameter(torch.zeros(size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_type = hidden_states.dtype
        hidden_states = hidden_states.float()
        mean = hidden_states.mean(-1, keepdim=True)
        variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
        hidden_states = (hidden_states - mean) / torch.sqrt(variance + self.variance_epsilon)
        hidden_states = hidden_states.to(input_type)
        y = self.weight * hidden_states + self.bias
        return y


class DebertaSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


@torch.jit.script
def build_relative_position(query_layer, key_layer):
    """
    Build relative position according to the query and key

    We assume the absolute position of the query \\(P_q\\) ranges over (0, query_size) and the absolute position of
    the key \\(P_k\\) ranges over (0, key_size). The relative position from query to key is
    \\(R_{q \\rightarrow k} = P_q - P_k\\).

    Args:
        query_layer (`torch.Tensor`): the query layer; its second-to-last dimension gives `query_size`
        key_layer (`torch.Tensor`): the key layer; its second-to-last dimension gives `key_size`

    Return:
        `torch.LongTensor`: A tensor with shape [1, query_size, key_size]
    """
    query_size = query_layer.size(-2)
    key_size = key_layer.size(-2)

    q_ids = torch.arange(query_size, dtype=torch.long, device=query_layer.device)
    k_ids = torch.arange(key_size, dtype=torch.long, device=key_layer.device)
    rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1)
    rel_pos_ids = rel_pos_ids[:query_size, :]
    rel_pos_ids = rel_pos_ids.unsqueeze(0)
    return rel_pos_ids


@torch.jit.script
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])


@torch.jit.script
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])


@torch.jit.script
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
    return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))


###### To support a general trace, we have to define these operations as they use python objects (sizes) ##############
# which are not supported by torch.jit.trace.
# Full credits to @Szustarol
@torch.jit.script
def scaled_size_sqrt(query_layer: torch.Tensor, scale_factor: int):
    return torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor)


@torch.jit.script
def build_rpos(query_layer: torch.Tensor, key_layer: torch.Tensor, relative_pos):
    if query_layer.size(-2) != key_layer.size(-2):
        return build_relative_position(query_layer, key_layer)
    else:
        return relative_pos


@torch.jit.script
def compute_attention_span(query_layer: torch.Tensor, key_layer: torch.Tensor, max_relative_positions: int):
    return torch.tensor(min(max(query_layer.size(-2), key_layer.size(-2)), max_relative_positions))


@torch.jit.script
def uneven_size_corrected(p2c_att, query_layer: torch.Tensor, key_layer: torch.Tensor, relative_pos):
    if query_layer.size(-2) != key_layer.size(-2):
        pos_index = relative_pos[:, :, :, 0].unsqueeze(-1)
        return torch.gather(p2c_att, dim=2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer))
    else:
        return p2c_att


########################################################################################################################


class DisentangledSelfAttention(nn.Module):
    """
    Disentangled self-attention module

    Parameters:
        config ([`DebertaConfig`]):
            A model config class instance with the configuration to build a new model.
            The schema is similar to *BertConfig*; for more details, please refer to [`DebertaConfig`].

    """

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.in_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False)
        self.q_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))
        self.v_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))
        self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []

        self.relative_attention = getattr(config, "relative_attention", False)
        self.talking_head = getattr(config, "talking_head", False)

        if self.talking_head:
            self.head_logits_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
            self.head_weights_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
        else:
            self.head_logits_proj = None
            self.head_weights_proj = None

        if self.relative_attention:
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.pos_dropout = nn.Dropout(config.hidden_dropout_prob)

            if "c2p" in self.pos_att_type:
                self.pos_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
            if "p2c" in self.pos_att_type:
                self.pos_q_proj = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_attentions: bool = False,
        query_states: Optional[torch.Tensor] = None,
        relative_pos: Optional[torch.Tensor] = None,
        rel_embeddings: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        Call the module

        Args:
            hidden_states (`torch.FloatTensor`):
                Input states to the module, usually the output from the previous layer; these will be the Q, K and V
                in *Attention(Q,K,V)*

            attention_mask (`torch.BoolTensor`):
                An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
                sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
                th token.

            output_attentions (`bool`, *optional*):
                Whether to return the attention matrix.

            query_states (`torch.FloatTensor`, *optional*):
                The *Q* state in *Attention(Q,K,V)*.

            relative_pos (`torch.LongTensor`):
                The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
                values ranging in [*-max_relative_positions*, *max_relative_positions*].

            rel_embeddings (`torch.FloatTensor`):
                The embedding of relative distances. It's a tensor of shape [\\(2 \\times
                \\text{max_relative_positions}\\), *hidden_size*].
""" if query_states is None: qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1) query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1) else: ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0) qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)] q = torch.matmul(qkvw[0], query_states.t().to(dtype=qkvw[0].dtype)) k = torch.matmul(qkvw[1], hidden_states.t().to(dtype=qkvw[1].dtype)) v = torch.matmul(qkvw[2], hidden_states.t().to(dtype=qkvw[2].dtype)) query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]] query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :]) value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :]) rel_att: int = 0 # Take the dot product between "query" and "key" to get the raw attention scores. scale_factor = 1 + len(self.pos_att_type) scale = scaled_size_sqrt(query_layer, scale_factor) query_layer = query_layer / scale.to(dtype=query_layer.dtype) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.relative_attention and rel_embeddings is not None and relative_pos is not None: rel_embeddings = self.pos_dropout(rel_embeddings) rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor) if rel_att is not None: attention_scores = attention_scores + rel_att # bxhxlxd if self.head_logits_proj is not None: attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) attention_mask = attention_mask.bool() attention_scores = attention_scores.masked_fill(~(attention_mask), torch.finfo(query_layer.dtype).min) # bsz x height x length x dimension attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if self.head_weights_proj is not None: attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (-1,) context_layer = context_layer.view(new_context_layer_shape) if not output_attentions: return (context_layer, None) return (context_layer, attention_probs) def disentangled_att_bias( self, query_layer: torch.Tensor, key_layer: torch.Tensor, relative_pos: torch.Tensor, rel_embeddings: torch.Tensor, scale_factor: int, ): if relative_pos is None: relative_pos = build_relative_position(query_layer, key_layer, query_layer.device) if relative_pos.dim() == 2: relative_pos = relative_pos.unsqueeze(0).unsqueeze(0) elif relative_pos.dim() == 3: relative_pos = relative_pos.unsqueeze(1) # bxhxqxk elif relative_pos.dim() != 4: raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. 
{relative_pos.dim()}") att_span = compute_attention_span(query_layer, key_layer, self.max_relative_positions) relative_pos = relative_pos.long() rel_embeddings = rel_embeddings[ self.max_relative_positions - att_span : self.max_relative_positions + att_span, : ].unsqueeze(0) score = 0 # content->position if "c2p" in self.pos_att_type: pos_key_layer = self.pos_proj(rel_embeddings) pos_key_layer = self.transpose_for_scores(pos_key_layer) c2p_att = torch.matmul(query_layer, pos_key_layer.transpose(-1, -2)) c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1) c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_dynamic_expand(c2p_pos, query_layer, relative_pos)) score += c2p_att # position->content if "p2c" in self.pos_att_type: pos_query_layer = self.pos_q_proj(rel_embeddings) pos_query_layer = self.transpose_for_scores(pos_query_layer) pos_query_layer /= scaled_size_sqrt(pos_query_layer, scale_factor) r_pos = build_rpos( query_layer, key_layer, relative_pos, ) p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1) p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2).to(dtype=key_layer.dtype)) p2c_att = torch.gather( p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer) ).transpose(-1, -2) p2c_att = uneven_size_corrected(p2c_att, query_layer, key_layer, relative_pos) score += p2c_att return score class DebertaEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() pad_token_id = getattr(config, "pad_token_id", 0) self.embedding_size = getattr(config, "embedding_size", config.hidden_size) self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id) self.position_biased_input = getattr(config, "position_biased_input", True) if not self.position_biased_input: self.position_embeddings = None else: self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size) if config.type_vocab_size > 0: self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size) else: self.token_type_embeddings = None if self.embedding_size != config.hidden_size: self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False) else: self.embed_proj = None self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if self.position_embeddings is not None: position_embeddings = self.position_embeddings(position_ids.long()) else: position_embeddings = torch.zeros_like(inputs_embeds) embeddings = inputs_embeds if self.position_biased_input: embeddings += position_embeddings if self.token_type_embeddings is not None: token_type_embeddings = self.token_type_embeddings(token_type_ids) 
embeddings += token_type_embeddings if self.embed_proj is not None: embeddings = self.embed_proj(embeddings) embeddings = self.LayerNorm(embeddings) if mask is not None: if mask.dim() != embeddings.dim(): if mask.dim() == 4: mask = mask.squeeze(1).squeeze(1) mask = mask.unsqueeze(2) mask = mask.to(embeddings.dtype) embeddings = embeddings * mask embeddings = self.dropout(embeddings) return embeddings class DebertaAttention(nn.Module): def __init__(self, config): super().__init__() self.self = DisentangledSelfAttention(config) self.output = DebertaSelfOutput(config) self.config = config def forward( self, hidden_states, attention_mask, output_attentions: bool = False, query_states=None, relative_pos=None, rel_embeddings=None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: self_output, att_matrix = self.self( hidden_states, attention_mask, output_attentions, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, ) if query_states is None: query_states = hidden_states attention_output = self.output(self_output, query_states) if output_attentions: return (attention_output, att_matrix) else: return (attention_output, None) # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Deberta class DebertaIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class DebertaOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class DebertaLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = DebertaAttention(config) self.intermediate = DebertaIntermediate(config) self.output = DebertaOutput(config) def forward( self, hidden_states, attention_mask, query_states=None, relative_pos=None, rel_embeddings=None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: attention_output, att_matrix = self.attention( hidden_states, attention_mask, output_attentions=output_attentions, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, ) intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) if output_attentions: return (layer_output, att_matrix) else: return (layer_output, None) class DebertaEncoder(PreTrainedModel): """Modified BertEncoder with relative position bias support""" def __init__(self, config): super().__init__(config) self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)]) self.relative_attention = getattr(config, "relative_attention", False) if self.relative_attention: self.max_relative_positions = getattr(config, "max_relative_positions", -1) if self.max_relative_positions < 1: self.max_relative_positions = 
config.max_position_embeddings self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size) self.gradient_checkpointing = False def get_rel_embedding(self): rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None return rel_embeddings def get_attention_mask(self, attention_mask): if attention_mask.dim() <= 2: extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1) elif attention_mask.dim() == 3: attention_mask = attention_mask.unsqueeze(1) return attention_mask def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None): if self.relative_attention and relative_pos is None: if query_states is not None: relative_pos = build_relative_position(query_states, hidden_states) else: relative_pos = build_relative_position(hidden_states, hidden_states) return relative_pos def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_hidden_states: bool = True, output_attentions: bool = False, query_states=None, relative_pos=None, return_dict: bool = True, ): attention_mask = self.get_attention_mask(attention_mask) relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos) all_hidden_states: Optional[Tuple[torch.Tensor]] = (hidden_states,) if output_hidden_states else None all_attentions = () if output_attentions else None next_kv = hidden_states rel_embeddings = self.get_rel_embedding() for i, layer_module in enumerate(self.layer): if self.gradient_checkpointing and self.training: hidden_states, att_m = self._gradient_checkpointing_func( layer_module.__call__, next_kv, attention_mask, query_states, relative_pos, rel_embeddings, output_attentions, ) else: hidden_states, att_m = layer_module( next_kv, attention_mask, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, output_attentions=output_attentions, ) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if query_states is not None: query_states = hidden_states else: next_kv = hidden_states if output_attentions: all_attentions = all_attentions + (att_m,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class DebertaPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DebertaConfig base_model_prefix = "deberta" _keys_to_ignore_on_load_unexpected = ["position_embeddings"] supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() DEBERTA_START_DOCSTRING = r""" The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 
It's built on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder.
    With those two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB pretraining data.

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.


    Parameters:
        config ([`DebertaConfig`]): Model configuration class with all the parameters of the model. Initializing with
            a config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

DEBERTA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in
            `[0, 1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert *input_ids* indices into associated vectors
            than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
    DEBERTA_START_DOCSTRING,
)
class DebertaModel(DebertaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embeddings = DebertaEmbeddings(config)
        self.encoder = DebertaEncoder(config)
        self.z_steps = 0
        self.config = config
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError("The prune function is not implemented in DeBERTa model.") @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) embedding_output = self.embeddings( input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids, mask=attention_mask, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, attention_mask, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict, ) encoded_layers = encoder_outputs[1] if self.z_steps > 1: hidden_states = encoded_layers[-2] layers = [self.encoder.layer[-1] for _ in range(self.z_steps)] query_states = encoded_layers[-1] rel_embeddings = self.encoder.get_rel_embedding() attention_mask = self.encoder.get_attention_mask(attention_mask) rel_pos = self.encoder.get_rel_pos(embedding_output) for layer in layers[1:]: query_states = layer( hidden_states, attention_mask, output_attentions=False, query_states=query_states, relative_pos=rel_pos, rel_embeddings=rel_embeddings, ) encoded_layers.append(query_states) sequence_output = encoded_layers[-1] if not return_dict: return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states if output_hidden_states else None, attentions=encoder_outputs.attentions, ) class LegacyDebertaPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.embedding_size = getattr(config, "embedding_size", config.hidden_size) self.dense = nn.Linear(config.hidden_size, self.embedding_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = 
self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class LegacyDebertaLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = LegacyDebertaPredictionHeadTransform(config) self.embedding_size = getattr(config, "embedding_size", config.hidden_size) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->LegacyDeberta class LegacyDebertaOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = LegacyDebertaLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class DebertaLMPredictionHead(nn.Module): """https://github.com/microsoft/DeBERTa/blob/master/DeBERTa/deberta/bert.py#L270""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, elementwise_affine=True) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # note that the input embeddings must be passed as an argument def forward(self, hidden_states, word_embeddings): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm( hidden_states ) # original used MaskedLayerNorm, but passed no mask. This is equivalent. 
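        # Editor's note: no output projection matrix is learned here; the logits
        # below are a dot product with the (tied) input embedding table passed in
        # as `word_embeddings`, plus a per-token output bias.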
hidden_states = torch.matmul(hidden_states, word_embeddings.weight.t()) + self.bias return hidden_states class DebertaOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.lm_head = DebertaLMPredictionHead(config) # note that the input embeddings must be passed as an argument def forward(self, sequence_output, word_embeddings): prediction_scores = self.lm_head(sequence_output, word_embeddings) return prediction_scores @add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING) class DebertaForMaskedLM(DebertaPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.legacy = config.legacy self.deberta = DebertaModel(config) if self.legacy: self.cls = LegacyDebertaOnlyMLMHead(config) else: self._tied_weights_keys = ["lm_predictions.lm_head.weight", "deberta.embeddings.word_embeddings.weight"] self.lm_predictions = DebertaOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): if self.legacy: return self.cls.predictions.decoder else: return self.lm_predictions.lm_head.dense def set_output_embeddings(self, new_embeddings): if self.legacy: self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias else: self.lm_predictions.lm_head.dense = new_embeddings self.lm_predictions.lm_head.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_MASKED_LM, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask="[MASK]", expected_output=_MASKED_LM_EXPECTED_OUTPUT, expected_loss=_MASKED_LM_EXPECTED_LOSS, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
            Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices
            set to `-100` are ignored (masked); the loss is only computed for the tokens with labels in
            `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        if self.legacy:
            prediction_scores = self.cls(sequence_output)
        else:
            prediction_scores = self.lm_predictions(sequence_output, self.deberta.embeddings.word_embeddings)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class ContextPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
        self.dropout = nn.Dropout(config.pooler_dropout)
        self.config = config

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        context_token = hidden_states[:, 0]
        context_token = self.dropout(context_token)
        pooled_output = self.dense(context_token)
        pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
        return pooled_output

    @property
    def output_dim(self):
        return self.config.hidden_size


@add_start_docstrings(
    """
    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
""", DEBERTA_START_DOCSTRING, ) class DebertaForSequenceClassification(DebertaPreTrainedModel): def __init__(self, config): super().__init__(config) num_labels = getattr(config, "num_labels", 2) self.num_labels = num_labels self.deberta = DebertaModel(config) self.pooler = ContextPooler(config) output_dim = self.pooler.output_dim self.classifier = nn.Linear(output_dim, num_labels) drop_out = getattr(config, "cls_dropout", None) drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out self.dropout = nn.Dropout(drop_out) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.deberta.get_input_embeddings() def set_input_embeddings(self, new_embeddings): self.deberta.set_input_embeddings(new_embeddings) @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.deberta( input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) encoder_layer = outputs[0] pooled_output = self.pooler(encoder_layer) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: # regression task loss_fn = nn.MSELoss() logits = logits.view(-1).to(labels.dtype) loss = loss_fn(logits, labels.view(-1)) elif labels.dim() == 1 or labels.size(-1) == 1: label_index = (labels >= 0).nonzero() labels = labels.long() if label_index.size(0) > 0: labeled_logits = torch.gather( logits, 0, label_index.expand(label_index.size(0), logits.size(1)) ) labels = torch.gather(labels, 0, label_index.view(-1)) loss_fct = CrossEntropyLoss() loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1)) else: loss = torch.tensor(0).to(logits) else: log_softmax = nn.LogSoftmax(-1) loss = -((log_softmax(logits) * labels).sum(-1)).mean() elif self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) @add_start_docstrings( """ DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, DEBERTA_START_DOCSTRING, ) class DebertaForTokenClassification(DebertaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.deberta = DebertaModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.deberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) @add_start_docstrings( """ DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, DEBERTA_START_DOCSTRING, ) class DebertaForQuestionAnswering(DebertaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.deberta = DebertaModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_QA, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, expected_output=_QA_EXPECTED_OUTPUT, expected_loss=_QA_EXPECTED_LOSS, qa_target_start_index=_QA_TARGET_START_INDEX, qa_target_end_index=_QA_TARGET_END_INDEX, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.deberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "DebertaForMaskedLM", "DebertaForQuestionAnswering", "DebertaForSequenceClassification", "DebertaForTokenClassification", "DebertaModel", "DebertaPreTrainedModel", ]
transformers/src/transformers/models/deberta/modeling_deberta.py/0
{ "file_path": "transformers/src/transformers/models/deberta/modeling_deberta.py", "repo_id": "transformers", "token_count": 24514 }
# coding=utf-8
# Copyright 2020, The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Bort checkpoint."""

import argparse
import os

import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn

from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Convert the original Bort checkpoint (based on MXNet and GluonNLP) to our BERT structure.
    """
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
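        # Bort is converted as a bare encoder: pooler, token-type embeddings,
        # classifier and decoder heads are all disabled.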
use_decoder=False, ) original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True) params = original_bort._collect_params_with_prefix() # Build our config 🤗 hf_bort_config_json = { "architectures": ["BertForMaskedLM"], "attention_probs_dropout_prob": predefined_args["dropout"], "hidden_act": "gelu", "hidden_dropout_prob": predefined_args["dropout"], "hidden_size": predefined_args["embed_size"], "initializer_range": 0.02, "intermediate_size": predefined_args["hidden_size"], "layer_norm_eps": predefined_args["layer_norm_eps"], "max_position_embeddings": predefined_args["max_length"], "model_type": "bort", "num_attention_heads": predefined_args["num_heads"], "num_hidden_layers": predefined_args["num_layers"], "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa "vocab_size": len(bort_vocab), } hf_bort_config = BertConfig.from_dict(hf_bort_config_json) hf_bort_model = BertForMaskedLM(hf_bort_config) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(mx_array) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy())) # Check param shapes and map new HF param back def check_and_map_params(hf_param, gluon_param): shape_hf = hf_param.shape gluon_param = to_torch(params[gluon_param]) shape_gluon = gluon_param.shape 
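        # Fail fast on shape mismatches so a wrong Gluon->HF mapping is caught early.
        # (Note: `gluon_param` is rebound to the tensor above, so the assertion
        # message prints the tensor itself rather than the original parameter name.)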
assert ( shape_hf == shape_gluon ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers" return gluon_param hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight" ) hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight" ) hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta" ) hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers): layer: BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention self_attn: BertSelfAttention = layer.attention.self self_attn.key.bias.data = check_and_map_params( self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" ) self_attn.key.weight.data = check_and_map_params( self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" ) self_attn.query.bias.data = check_and_map_params( self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" ) self_attn.query.weight.data = check_and_map_params( self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" ) self_attn.value.bias.data = check_and_map_params( self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" ) self_attn.value.weight.data = check_and_map_params( self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" ) # self attention output self_output: BertSelfOutput = layer.attention.output self_output.dense.bias = check_and_map_params( self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias" ) self_output.dense.weight = check_and_map_params( self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight" ) self_output.LayerNorm.bias = check_and_map_params( self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta" ) self_output.LayerNorm.weight = check_and_map_params( self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma" ) # intermediate intermediate: BertIntermediate = layer.intermediate intermediate.dense.bias = check_and_map_params( intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias" ) intermediate.dense.weight = check_and_map_params( intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight" ) # output bert_output: BertOutput = layer.output bert_output.dense.bias = check_and_map_params( bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias" ) bert_output.dense.weight = check_and_map_params( bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight" ) bert_output.LayerNorm.bias = check_and_map_params( bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta" ) bert_output.LayerNorm.weight = check_and_map_params( bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" ) # Save space and energy 🎄 
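    # Editor's note: the checkpoint is saved in fp16 to halve its size, which is
    # presumably why the numerical comparison below uses a loose tolerance
    # (atol=1e-3) rather than exact equality.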
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("FacebookAI/roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path to the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
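
# Example invocation (illustrative; both paths are placeholders):
#
#     python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#         --bort_checkpoint_path /path/to/bort.params \
#         --pytorch_dump_folder_path /path/to/pytorch_dump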
transformers/src/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 6181 }
# coding=utf-8
# Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Ernie-M."""

import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ....tokenization_utils import PreTrainedTokenizer
from ....utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}


# Adapted from paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
class ErnieMTokenizer(PreTrainedTokenizer):
    r"""
    Constructs an Ernie-M tokenizer. It uses `sentencepiece` to split words into sub-word pieces.

    Args:
        sentencepiece_model_file (`str`):
            The file path of the sentencepiece model.
        vocab_file (`str`, *optional*):
            The file path of the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether or not to lowercase the input when tokenizing.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            A special token representing the `unknown (out-of-vocabulary)` token. An unknown token is mapped to
            `unk_token` in order to be converted to an ID.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            A special token separating two different sentences in the same input.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            A special token used to make arrays of tokens the same size for batching purposes.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            A special token used for sequence classification. It is the last token of the sequence when built with
            special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            A special token representing a masked token. This is the token used in the masked language modeling task,
            for which the model tries to predict the original unmasked token.
    """

    # Ernie-M model doesn't have token_type embedding.
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.do_lower_case = do_lower_case self.sentencepiece_model_ckpt = sentencepiece_model_ckpt self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(sentencepiece_model_ckpt) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: self.vocab = self.load_vocab(filepath=vocab_file) else: self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())} self.reverse_vocab = {v: k for k, v in self.vocab.items()} super().__init__( do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) def get_offset_mapping(self, text): if text is None: return None split_tokens = self.tokenize(text) normalized_text, char_mapping = "", [] for i, ch in enumerate(text): if ch in self.SP_CHAR_MAPPING: ch = self.SP_CHAR_MAPPING.get(ch) else: ch = unicodedata.normalize("NFKC", ch) if self.is_whitespace(ch): continue normalized_text += ch char_mapping.extend([i] * len(ch)) text, token_mapping, offset = normalized_text, [], 0 if self.do_lower_case: text = text.lower() for token in split_tokens: if token[:1] == "▁": token = token[1:] start = text[offset:].index(token) + offset end = start + len(token) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1)) offset = end return token_mapping @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.sentencepiece_model_ckpt) def clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text)) def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1): """Tokenize a string.""" if self.sp_model_kwargs.get("enable_sampling") is True: enable_sampling = True if self.sp_model_kwargs.get("alpha") is not None: alpha = self.sp_model_kwargs.get("alpha") if self.sp_model_kwargs.get("nbest_size") is not None: nbest_size = self.sp_model_kwargs.get("nbest_size") if not enable_sampling: pieces = self.sp_model.EncodeAsPieces(text) else: pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha) new_pieces = [] for pi, piece in enumerate(pieces): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0: new_pieces.append(SPIECE_UNDERLINE) continue else: continue lst_i = 0 for i, chunk in enumerate(piece): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(chunk) or self.is_punct(chunk): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i]) new_pieces.append(chunk) lst_i = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i]) lst_i = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i]) lst_i = i if len(piece) > lst_i: 
new_pieces.append(piece[lst_i:]) return new_pieces def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def convert_ids_to_string(self, ids): """ Converts a sequence of tokens (strings for sub-words) in a single string. """ tokens = self.convert_ids_to_tokens(ids) out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning def _convert_token_to_id(self, token): return self.vocab.get(token, self.vocab.get(self.unk_token)) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.reverse_vocab.get(index, self.unk_token) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): r""" Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An ErnieM sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of input_id with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] _cls = [self.cls_token_id] _sep = [self.sep_token_id] return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None): r""" Build offset map from a pair of offset map by concatenating and adding offsets of special tokens. An Ernie-M offset_mapping has the following format: - single sequence: `(0,0) X (0,0)` - pair of sequences: `(0,0) A (0,0) (0,0) B (0,0)` Args: offset_mapping_ids_0 (`List[tuple]`): List of char offsets to which the special tokens will be added. offset_mapping_ids_1 (`List[tuple]`, *optional*): Optional second list of wordpiece offsets for offset mapping pairs. Returns: `List[tuple]`: List of wordpiece offsets with the appropriate offsets of special tokens. """ if offset_mapping_1 is None: return [(0, 0)] + offset_mapping_0 + [(0, 0)] return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)] def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): r""" Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `encode` method. Args: token_ids_0 (`List[int]`): List of ids of the first sequence. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`str`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." 
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create the token type IDs corresponding to the sequences passed. [What are token type
        IDs?](../glossary#token-type-ids) Should be overridden in a subclass if the model has a special way of
        building those.

        Args:
            token_ids_0 (`List[int]`):
                The first tokenized sequence.
            token_ids_1 (`List[int]`, *optional*):
                The second tokenized sequence.

        Returns:
            `List[int]`: The token type ids.
        """
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        """
        Checks whether `char` is a Chinese character (CJK Unified Ideographs block).
        """
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        """
        Checks whether `char` is an ASCII alphabetic character.
        """
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        """
        Checks whether `char` is one of the punctuation characters handled by this tokenizer.
        """
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        """
        Checks whether `char` is a whitespace character.
        """
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)

        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1

        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)

        return (vocab_file,)
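
# Worked example (editor's addition) of the special-token helpers above, using
# hypothetical token ids 10 and 11 for sequence A and 20 for sequence B:
#
#     tokenizer.build_inputs_with_special_tokens([10, 11], [20])
#     # -> [cls_id, 10, 11, sep_id, sep_id, 20, sep_id]
#     tokenizer.create_token_type_ids_from_sequences([10, 11], [20])
#     # -> [0, 0, 0, 1, 1, 1, 1]  (the first [SEP] is already counted in segment 1)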
transformers/src/transformers/models/deprecated/ernie_m/tokenization_ernie_m.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/ernie_m/tokenization_ernie_m.py", "repo_id": "transformers", "token_count": 7502 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_mctct": ["MCTCTConfig"], "feature_extraction_mctct": ["MCTCTFeatureExtractor"], "processing_mctct": ["MCTCTProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_mctct"] = [ "MCTCTForCTC", "MCTCTModel", "MCTCTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mctct import MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
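
# Editor's note: with the `_LazyModule` indirection above, importing `MCTCTConfig`
# from this package only pulls in `configuration_mctct`, while importing
# `MCTCTModel` resolves `modeling_mctct` (and therefore requires torch) at that
# point.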
transformers/src/transformers/models/deprecated/mctct/__init__.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/mctct/__init__.py", "repo_id": "transformers", "token_count": 601 }
from .... import PretrainedConfig


class NezhaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`NezhaModel`]. It is used to instantiate an
    Nezha model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Nezha
    [sijunhe/nezha-cn-base](https://huggingface.co/sijunhe/nezha-cn-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 21128):
            Vocabulary size of the NEZHA model. Defines the number of different tokens that can be represented by the
            *inputs_ids* passed to the forward method of [`NezhaModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            The dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            (e.g., 512 or 1024 or 2048).
        max_relative_position (`int`, *optional*, defaults to 64):
            The maximum relative position distance used when computing the relative position encodings.
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the *token_type_ids* passed into [`NezhaModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        classifier_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for attached classifiers.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
    Example:

    ```python
    >>> from transformers import NezhaConfig, NezhaModel

    >>> # Initializing a Nezha configuration
    >>> configuration = NezhaConfig()

    >>> # Initializing a model (with random weights) from the Nezha-base style configuration
    >>> model = NezhaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
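

# Illustrative helper (added for exposition, not part of the original module; the
# clip-and-shift scheme and shapes are assumptions about how a `max_relative_position`
# of 64 is typically consumed by a relative position encoding):
def _example_clipped_relative_positions(seq_len: int = 8, max_relative_position: int = 64):
    import torch

    positions = torch.arange(seq_len)
    # signed pairwise distances between every pair of positions, shape (seq_len, seq_len)
    relative = positions[None, :] - positions[:, None]
    # clip to [-max_relative_position, max_relative_position] and shift into
    # [0, 2 * max_relative_position] so the result can index an embedding table
    return relative.clamp(-max_relative_position, max_relative_position) + max_relative_position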
transformers/src/transformers/models/deprecated/nezha/configuration_nezha.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/nezha/configuration_nezha.py", "repo_id": "transformers", "token_count": 1864 }
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetriBERT model"""

import math
from typing import Optional

import torch
import torch.utils.checkpoint as checkpoint
from torch import nn

from ....modeling_utils import PreTrainedModel
from ....utils import add_start_docstrings, logging
from ...bert.modeling_bert import BertModel
from .configuration_retribert import RetriBertConfig


logger = logging.get_logger(__name__)


# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
class RetriBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RetriBertConfig
    load_tf_weights = None
    base_model_prefix = "retribert"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


RETRIBERT_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`RetriBertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
""" @add_start_docstrings( """Bert Based model to embed queries or document for document retrieval.""", RETRIBERT_START_DOCSTRING, ) class RetriBertModel(RetriBertPreTrainedModel): def __init__(self, config: RetriBertConfig) -> None: super().__init__(config) self.projection_dim = config.projection_dim self.bert_query = BertModel(config) self.bert_doc = None if config.share_encoders else BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False) self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False) self.ce_loss = nn.CrossEntropyLoss(reduction="mean") # Initialize weights and apply final processing self.post_init() def embed_sentences_checkpointed( self, input_ids, attention_mask, sent_encoder, checkpoint_batch_size=-1, ): # reproduces BERT forward pass with checkpointing if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size: return sent_encoder(input_ids, attention_mask=attention_mask)[1] else: # prepare implicit variables device = input_ids.device input_shape = input_ids.size() token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) head_mask = [None] * sent_encoder.config.num_hidden_layers extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask( attention_mask, input_shape ) # define function for checkpointing def partial_encode(*inputs): encoder_outputs = sent_encoder.encoder( inputs[0], attention_mask=inputs[1], head_mask=head_mask, ) sequence_output = encoder_outputs[0] pooled_output = sent_encoder.pooler(sequence_output) return pooled_output # run embedding layer on everything at once embedding_output = sent_encoder.embeddings( input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None ) # run encoding and pooling on one mini-batch at a time pooled_output_list = [] for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)): b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size] b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size] pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask) pooled_output_list.append(pooled_output) return torch.cat(pooled_output_list, dim=0) def embed_questions( self, input_ids, attention_mask=None, checkpoint_batch_size=-1, ): q_reps = self.embed_sentences_checkpointed( input_ids, attention_mask, self.bert_query, checkpoint_batch_size, ) return self.project_query(q_reps) def embed_answers( self, input_ids, attention_mask=None, checkpoint_batch_size=-1, ): a_reps = self.embed_sentences_checkpointed( input_ids, attention_mask, self.bert_query if self.bert_doc is None else self.bert_doc, checkpoint_batch_size, ) return self.project_doc(a_reps) def forward( self, input_ids_query: torch.LongTensor, attention_mask_query: Optional[torch.FloatTensor], input_ids_doc: torch.LongTensor, attention_mask_doc: Optional[torch.FloatTensor], checkpoint_batch_size: int = -1, ) -> torch.FloatTensor: r""" Args: input_ids_query (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary for the queries in a batch. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
                [What are input IDs?](../glossary#input-ids)
            attention_mask_query (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            input_ids_doc (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary for the documents in a batch.
            attention_mask_doc (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on document padding token indices.
            checkpoint_batch_size (`int`, *optional*, defaults to `-1`):
                If greater than 0, uses gradient checkpointing to only compute sequence representation on
                `checkpoint_batch_size` examples at a time on the GPU. All query representations are still compared to
                all document representations in the batch.

        Returns:
            `torch.FloatTensor`: The bidirectional cross-entropy loss obtained while trying to match each query to its
            corresponding document and each document to its corresponding query in the batch.
        """
        device = input_ids_query.device
        q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size)
        a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size)
        compare_scores = torch.mm(q_reps, a_reps.t())
        loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
        loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
        loss = (loss_qa + loss_aq) / 2
        return loss
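

# Illustrative training-step helper (added for exposition, not part of the original
# module; `q_batch` and `d_batch` are assumed to be tokenizer outputs holding
# "input_ids" and "attention_mask" tensors for queries and documents):
def _example_training_step(model: RetriBertModel, q_batch: dict, d_batch: dict) -> torch.Tensor:
    # `forward` returns the symmetric in-batch retrieval loss directly
    loss = model(
        input_ids_query=q_batch["input_ids"],
        attention_mask_query=q_batch["attention_mask"],
        input_ids_doc=d_batch["input_ids"],
        attention_mask_doc=d_batch["attention_mask"],
        checkpoint_batch_size=8,  # embed documents 8 at a time to bound activation memory
    )
    loss.backward()
    return loss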
transformers/src/transformers/models/deprecated/retribert/modeling_retribert.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/retribert/modeling_retribert.py", "repo_id": "transformers", "token_count": 3800 }
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Transformer XL checkpoint and datasets.""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.deprecated.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.deprecated.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 data_utils.Vocab = data_utils.TransfoXLTokenizer data_utils.Corpus = data_utils.TransfoXLCorpus sys.modules["data_utils"] = data_utils sys.modules["vocabulary"] = data_utils def convert_transfo_xl_checkpoint_to_pytorch( tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file ): if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(transfo_xl_dataset_file, "rb") as fp: corpus = pickle.load(fp, encoding="latin1") # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(f"Save vocabulary to {pytorch_vocab_dump_path}") corpus_vocab_dict = corpus.vocab.__dict__ torch.save(corpus_vocab_dict, pytorch_vocab_dump_path) corpus_dict_no_vocab = corpus.__dict__ corpus_dict_no_vocab.pop("vocab", None) pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME print(f"Save dataset to {pytorch_dataset_dump_path}") torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model config_path = os.path.abspath(transfo_xl_config_file) tf_path = os.path.abspath(tf_checkpoint_path) print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.") # Initialise PyTorch model if transfo_xl_config_file == "": config = TransfoXLConfig() else: config = TransfoXLConfig.from_json_file(transfo_xl_config_file) print(f"Building PyTorch model from configuration: {config}") model = TransfoXLLMHeadModel(config) model = load_tf_weights_in_transfo_xl(model, config, tf_path) # Save pytorch-model pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME) pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME) print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}") torch.save(model.state_dict(), pytorch_weights_dump_path) print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}") with open(pytorch_config_dump_path, "w", encoding="utf-8") as f: f.write(config.to_json_string()) if __name__ == "__main__": parser = argparse.ArgumentParser() 
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained Transformer-XL model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted into a vocabulary.\n"
        "Given the files are in the pickle format, only pass it files from sources you trust.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
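

# Example invocation (all paths are placeholders; supply your own checkpoint,
# config and output folder):
#
#     python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/transfo-xl-checkpoint \
#         --transfo_xl_config_file /path/to/config.json \
#         --pytorch_dump_folder_path /path/to/output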
transformers/src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 2023 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert DETR checkpoints with native (Transformers) backbone.""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_detr_config(model_name): # initialize config if "resnet-50" in model_name: backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50") elif "resnet-101" in model_name: backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101") else: raise ValueError("Model name should include either resnet50 or resnet101") config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config) # set label attributes is_panoptic = "panoptic" in model_name if is_panoptic: config.num_labels = 250 else: config.num_labels = 91 repo_id = "huggingface/label-files" filename = "coco-detection-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config, is_panoptic def create_rename_keys(config): # here we list all keys to be renamed (original name on the left, our name on the right) rename_keys = [] # stem # fmt: off rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight")) rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight")) rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias")) rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean")) rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var")) # stages for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): # shortcut if layer_idx == 0: rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 
1}.{layer_idx}.downsample.1.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var", ) ) # 3 convs for i in range(3): rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean", ) ) rename_keys.append( ( f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var", f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var", ) ) # fmt: on for i in range(config.encoder_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", 
f"decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias")) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) return rename_keys def rename_key(state_dict, old, new): val = state_dict.pop(old) state_dict[new] = val def read_in_q_k_v(state_dict, is_panoptic=False): prefix = "" if is_panoptic: prefix = "detr." 
# first: transformer encoder for i in range(6): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6): # read in weights + bias of input projection layer of self-attention in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention in_proj_weight_cross_attn = state_dict.pop( f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" ) in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias") # next, add query, keys and values (in that order) of cross-attention to the state dict state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :] state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256] state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :] state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512] state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :] state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:] # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False): """ Copy/paste/tweak model's weights to our DETR structure. 
""" # load default config config, is_panoptic = get_detr_config(model_name) # load original model from torch hub model_name_to_original_name = { "detr-resnet-50": "detr_resnet50", "detr-resnet-101": "detr_resnet101", } logger.info(f"Converting model {model_name}...") detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval() state_dict = detr.state_dict() # rename keys for src, dest in create_rename_keys(config): if is_panoptic: src = "detr." + src rename_key(state_dict, src, dest) # query, key and value matrices need special treatment read_in_q_k_v(state_dict, is_panoptic=is_panoptic) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them prefix = "detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("detr") and not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor") ): val = state_dict.pop(key) state_dict["detr.model" + key[4:]] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: val = state_dict.pop(key) state_dict["detr." + key] = val elif key.startswith("bbox_attention") or key.startswith("mask_head"): continue else: val = state_dict.pop(key) state_dict[prefix + key] = val else: if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"): val = state_dict.pop(key) state_dict[prefix + key] = val # finally, create HuggingFace model and load state dict model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config) model.load_state_dict(state_dict) model.eval() # verify our conversion on an image format = "coco_panoptic" if is_panoptic else "coco_detection" processor = DetrImageProcessor(format=format) encoding = processor(images=prepare_img(), return_tensors="pt") pixel_values = encoding["pixel_values"] original_outputs = detr(pixel_values) outputs = model(pixel_values) assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3) assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3) if is_panoptic: assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4) print("Looks ok!") if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: # Upload model and image processor to the hub logger.info("Uploading PyTorch model and image processor to the hub...") model.push_to_hub(f"nielsr/{model_name}") processor.push_to_hub(f"nielsr/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_name", default="detr-resnet-50", type=str, choices=["detr-resnet-50", "detr-resnet-101"], help="Name of the DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.") args = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/detr/convert_detr_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/detr/convert_detr_to_pytorch.py", "repo_id": "transformers", "token_count": 9079 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert DINOv2 checkpoints from the original repository. URL: https://github.com/facebookresearch/dinov2/tree/main """ import argparse import json from pathlib import Path import requests import torch import torch.nn as nn from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, Dinov2Config, Dinov2ForImageClassification, Dinov2Model from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_dinov2_config(model_name, image_classifier=False): config = Dinov2Config(image_size=518, patch_size=14) # size of the architecture if "vits" in model_name: config.hidden_size = 384 config.num_attention_heads = 6 elif "vitb" in model_name: pass elif "vitl" in model_name: config.hidden_size = 1024 config.num_hidden_layers = 24 config.num_attention_heads = 16 elif "vitg" in model_name: config.use_swiglu_ffn = True config.hidden_size = 1536 config.num_hidden_layers = 40 config.num_attention_heads = 24 else: raise ValueError("Model not supported") if image_classifier: repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" config.num_labels = 1000 config.id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) config.id2label = {int(k): v for k, v in config.id2label.items()} return config def create_rename_keys(config): rename_keys = [] # fmt: off # patch embedding layer rename_keys.append(("cls_token", "embeddings.cls_token")) rename_keys.append(("mask_token", "embeddings.mask_token")) rename_keys.append(("pos_embed", "embeddings.position_embeddings")) rename_keys.append(("patch_embed.proj.weight", "embeddings.patch_embeddings.projection.weight")) rename_keys.append(("patch_embed.proj.bias", "embeddings.patch_embeddings.projection.bias")) for i in range(config.num_hidden_layers): # layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"encoder.layer.{i}.norm1.weight")) rename_keys.append((f"blocks.{i}.norm1.bias", f"encoder.layer.{i}.norm1.bias")) rename_keys.append((f"blocks.{i}.norm2.weight", f"encoder.layer.{i}.norm2.weight")) rename_keys.append((f"blocks.{i}.norm2.bias", f"encoder.layer.{i}.norm2.bias")) # MLP if config.use_swiglu_ffn: rename_keys.append((f"blocks.{i}.mlp.w12.weight", f"encoder.layer.{i}.mlp.w12.weight")) rename_keys.append((f"blocks.{i}.mlp.w12.bias", f"encoder.layer.{i}.mlp.w12.bias")) rename_keys.append((f"blocks.{i}.mlp.w3.weight", f"encoder.layer.{i}.mlp.w3.weight")) rename_keys.append((f"blocks.{i}.mlp.w3.bias", f"encoder.layer.{i}.mlp.w3.bias")) else: rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"encoder.layer.{i}.mlp.fc1.weight")) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"encoder.layer.{i}.mlp.fc1.bias")) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", 
f"encoder.layer.{i}.mlp.fc2.weight")) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"encoder.layer.{i}.mlp.fc2.bias")) # layerscale rename_keys.append((f"blocks.{i}.ls1.gamma", f"encoder.layer.{i}.layer_scale1.lambda1")) rename_keys.append((f"blocks.{i}.ls2.gamma", f"encoder.layer.{i}.layer_scale2.lambda1")) # attention projection layer rename_keys.append((f"blocks.{i}.attn.proj.weight", f"encoder.layer.{i}.attention.output.dense.weight")) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"encoder.layer.{i}.attention.output.dense.bias")) # final layernorm rename_keys.append(("norm.weight", "layernorm.weight")) rename_keys.append(("norm.bias", "layernorm.bias")) # fmt: on return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config): for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :] state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :] state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") return image @torch.no_grad() def convert_dinov2_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False): """ Copy/paste/tweak model's weights to our DINOv2 structure. 
""" # define default Dinov2 configuration image_classifier = "1layer" in model_name config = get_dinov2_config(model_name, image_classifier=image_classifier) # load original model from torch hub original_model = torch.hub.load("facebookresearch/dinov2", model_name.replace("_1layer", "")) original_model.eval() # load state_dict of original model, remove and rename some keys state_dict = original_model.state_dict() rename_keys = create_rename_keys(config) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config) for key, val in state_dict.copy().items(): val = state_dict.pop(key) if "w12" in key: key = key.replace("w12", "weights_in") if "w3" in key: key = key.replace("w3", "weights_out") state_dict[key] = val # load HuggingFace model if image_classifier: model = Dinov2ForImageClassification(config).eval() model.dinov2.load_state_dict(state_dict) model_name_to_classifier_dict_url = { "dinov2_vits14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_linear_head.pth", "dinov2_vitb14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_linear_head.pth", "dinov2_vitl14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_linear_head.pth", "dinov2_vitg14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_linear_head.pth", } url = model_name_to_classifier_dict_url[model_name] classifier_state_dict = torch.hub.load_state_dict_from_url(url, map_location="cpu") model.classifier.weight = nn.Parameter(classifier_state_dict["weight"]) model.classifier.bias = nn.Parameter(classifier_state_dict["bias"]) else: model = Dinov2Model(config).eval() model.load_state_dict(state_dict) # load image image = prepare_img() # preprocess image transformations = transforms.Compose( [ transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=IMAGENET_DEFAULT_MEAN, # these are RGB mean+std values std=IMAGENET_DEFAULT_STD, # across a large photo dataset. 
), ] ) original_pixel_values = transformations(image).unsqueeze(0) # insert batch dimension processor = BitImageProcessor( size={"shortest_edge": 256}, resample=PILImageResampling.BICUBIC, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, ) pixel_values = processor(image, return_tensors="pt").pixel_values assert torch.allclose(original_pixel_values, pixel_values) with torch.no_grad(): outputs = model(pixel_values, output_hidden_states=True) original_outputs = original_model(pixel_values) # assert values if image_classifier: print("Predicted class:") class_idx = outputs.logits.argmax(-1).item() print(model.config.id2label[class_idx]) else: assert outputs.last_hidden_state[:, 0].shape == original_outputs.shape assert torch.allclose(outputs.last_hidden_state[:, 0], original_outputs, atol=1e-3) print("Looks ok!") if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model_name_to_hf_name = { "dinov2_vits14": "dinov2-small", "dinov2_vitb14": "dinov2-base", "dinov2_vitl14": "dinov2-large", "dinov2_vitg14": "dinov2-giant", "dinov2_vits14_1layer": "dinov2-small-imagenet1k-1-layer", "dinov2_vitb14_1layer": "dinov2-base-imagenet1k-1-layer", "dinov2_vitl14_1layer": "dinov2-large-imagenet1k-1-layer", "dinov2_vitg14_1layer": "dinov2-giant-imagenet1k-1-layer", } name = model_name_to_hf_name[model_name] model.push_to_hub(f"facebook/{name}") processor.push_to_hub(f"facebook/{name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="dinov2_vitb14", type=str, choices=[ "dinov2_vits14", "dinov2_vitb14", "dinov2_vitl14", "dinov2_vitg14", "dinov2_vits14_1layer", "dinov2_vitb14_1layer", "dinov2_vitl14_1layer", "dinov2_vitg14_1layer", ], help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_dinov2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
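

# Note on `read_in_q_k_v` above: the fused `qkv` weight is sliced using
# `config.hidden_size`, so the same code covers every variant (384 for vits14
# up to 1536 for vitg14).
#
# Example invocation (dump folder is a placeholder):
#
#     python convert_dinov2_to_hf.py --model_name dinov2_vitb14 \
#         --pytorch_dump_folder_path /path/to/dinov2-base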
transformers/src/transformers/models/dinov2/convert_dinov2_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/dinov2/convert_dinov2_to_hf.py", "repo_id": "transformers", "token_count": 5213 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert DiT checkpoints from the unilm repository.""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config, has_lm_head=False, is_semantic=False): prefix = "backbone." if is_semantic else "" rename_keys = [] for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight")) rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append( (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight")) rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias")) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight")) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias")) # projection layer + position embeddings rename_keys.extend( [ (f"{prefix}cls_token", "beit.embeddings.cls_token"), (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"), (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"), (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ("mask_token", "beit.embeddings.mask_token"), ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) else: # layernorm + classification head rename_keys.extend( [ ("fc_norm.weight", "beit.pooler.layernorm.weight"), ("fc_norm.bias", "beit.pooler.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False): for i in range(config.num_hidden_layers): prefix = "backbone." 
if is_semantic else "" # queries, keys and values in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight") q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias") v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias") state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[ : config.hidden_size, : ] state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[ -config.hidden_size :, : ] state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1") gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2") state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1 state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2 def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False): """ Copy/paste/tweak model's weights to our BEiT structure. """ # define default BEiT configuration has_lm_head = False if "rvlcdip" in checkpoint_url else True config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 # labels if "rvlcdip" in checkpoint_url: config.num_labels = 16 repo_id = "huggingface/label-files" filename = "rvlcdip-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} # load state_dict of original model, remove and rename some keys state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"] rename_keys = create_rename_keys(config, has_lm_head=has_lm_head) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head) # load HuggingFace model model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config) model.eval() model.load_state_dict(state_dict) # Check outputs on an image image_processor = BeitImageProcessor( size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False ) image = prepare_img() encoding = image_processor(images=image, return_tensors="pt") pixel_values = encoding["pixel_values"] outputs = model(pixel_values) logits = outputs.logits # verify logits expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected" Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") 
image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: if has_lm_head: model_name = "dit-base" if "base" in checkpoint_url else "dit-large" else: model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip" image_processor.push_to_hub( repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, ) model.push_to_hub( repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) args = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
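

# Example invocation (the checkpoint URL is the script's default; the dump folder
# is a placeholder):
#
#     python convert_dit_unilm_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#         --pytorch_dump_folder_path /path/to/dit-base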
transformers/src/transformers/models/dit/convert_dit_unilm_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/dit/convert_dit_unilm_to_pytorch.py", "repo_id": "transformers", "token_count": 4018 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """DPT model configuration""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto.configuration_auto import CONFIG_MAPPING from ..bit import BitConfig logger = logging.get_logger(__name__) class DPTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DPTModel`]. It is used to instantiate an DPT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DPT [Intel/dpt-large](https://huggingface.co/Intel/dpt-large) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 384): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. is_hybrid (`bool`, *optional*, defaults to `False`): Whether to use a hybrid backbone. Useful in the context of loading DPT-Hybrid models. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. backbone_out_indices (`List[int]`, *optional*, defaults to `[2, 5, 8, 11]`): Indices of the intermediate hidden states to use from backbone. 
readout_type (`str`, *optional*, defaults to `"project"`): The readout type to use when processing the readout token (CLS token) of the intermediate hidden states of the ViT backbone. Can be one of [`"ignore"`, `"add"`, `"project"`]. - "ignore" simply ignores the CLS token. - "add" passes the information from the CLS token to all other tokens by adding the representations. - "project" passes information to the other tokens by concatenating the readout to all other tokens before projecting the representation to the original feature dimension D using a linear layer followed by a GELU non-linearity. reassemble_factors (`List[int]`, *optional*, defaults to `[4, 2, 1, 0.5]`): The up/downsampling factors of the reassemble layers. neck_hidden_sizes (`List[str]`, *optional*, defaults to `[96, 192, 384, 768]`): The hidden sizes to project to for the feature maps of the backbone. fusion_hidden_size (`int`, *optional*, defaults to 256): The number of channels before fusion. head_in_index (`int`, *optional*, defaults to -1): The index of the features to use in the heads. use_batch_norm_in_fusion_residual (`bool`, *optional*, defaults to `False`): Whether to use batch normalization in the pre-activate residual units of the fusion blocks. use_bias_in_fusion_residual (`bool`, *optional*, defaults to `True`): Whether to use bias in the pre-activate residual units of the fusion blocks. add_projection (`bool`, *optional*, defaults to `False`): Whether to add a projection layer before the depth estimation head. use_auxiliary_head (`bool`, *optional*, defaults to `True`): Whether to use an auxiliary head during training. auxiliary_loss_weight (`float`, *optional*, defaults to 0.4): Weight of the cross-entropy loss of the auxiliary head. semantic_loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function of the semantic segmentation model. semantic_classifier_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the semantic classification head. backbone_featmap_shape (`List[int]`, *optional*, defaults to `[1, 1024, 24, 24]`): Used only for the `hybrid` embedding type. The shape of the feature maps of the backbone. neck_ignore_stages (`List[int]`, *optional*, defaults to `[0, 1]`): Used only for the `hybrid` embedding type. The stages of the readout layers to ignore. backbone_config (`Union[Dict[str, Any], PretrainedConfig]`, *optional*): The configuration of the backbone model. Only used in case `is_hybrid` is `True` or in case you want to leverage the [`AutoBackbone`] API. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `False`): Whether to use pretrained weights for the backbone. use_timm_backbone (`bool`, *optional*, defaults to `False`): Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers library. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. 
Example: ```python >>> from transformers import DPTModel, DPTConfig >>> # Initializing a DPT dpt-large style configuration >>> configuration = DPTConfig() >>> # Initializing a model from the dpt-large style configuration >>> model = DPTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "dpt" def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=16, num_channels=3, is_hybrid=False, qkv_bias=True, backbone_out_indices=[2, 5, 8, 11], readout_type="project", reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False, use_bias_in_fusion_residual=None, add_projection=False, use_auxiliary_head=True, auxiliary_loss_weight=0.4, semantic_loss_ignore_index=255, semantic_classifier_dropout=0.1, backbone_featmap_shape=[1, 1024, 24, 24], neck_ignore_stages=[0, 1], backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.is_hybrid = is_hybrid use_autobackbone = False if self.is_hybrid: if backbone_config is None: backbone_config = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, } if isinstance(backbone_config, dict): logger.info("Initializing the config with a `BiT` backbone.") backbone_config = BitConfig(**backbone_config) elif isinstance(backbone_config, PretrainedConfig): backbone_config = backbone_config else: raise ValueError( f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}." 
                )

            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")

        elif backbone is not None or backbone_config is not None:
            use_autobackbone = True
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

            self.backbone_config = backbone_config
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

            # We only use load_backbone when config.is_hybrid is False
            verify_backbone_config_arguments(
                use_timm_backbone=use_timm_backbone,
                use_pretrained_backbone=use_pretrained_backbone,
                backbone=backbone,
                backbone_config=backbone_config,
                backbone_kwargs=backbone_kwargs,
            )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = use_timm_backbone
        self.backbone_kwargs = backbone_kwargs

        # ViT parameters used if not using a hybrid backbone
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.use_autobackbone = use_autobackbone
        self.backbone_out_indices = None if use_autobackbone else backbone_out_indices

        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("`readout_type` must be one of ['ignore', 'add', 'project']")
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        self.use_bias_in_fusion_residual = use_bias_in_fusion_residual
        self.add_projection = add_projection

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output


__all__ = ["DPTConfig"]
transformers/src/transformers/models/dpt/configuration_dpt.py/0
{ "file_path": "transformers/src/transformers/models/dpt/configuration_dpt.py", "repo_id": "transformers", "token_count": 5635 }
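The two configuration paths in `__init__` above (hybrid BiT embeddings vs. the `AutoBackbone` API) are easy to mix up, so here is a minimal sketch of both. This is illustrative only: the `swinv2` backbone is just one registered `model_type` chosen for the example, not something the file prescribes.

```python
from transformers import DPTConfig

# Hybrid mode: a BiT backbone supplies the patch embeddings. Note that
# readout_type must stay "project" here, or __init__ raises a ValueError.
hybrid_config = DPTConfig(is_hybrid=True)
assert hybrid_config.backbone_config.model_type == "bit"

# AutoBackbone mode: pass a backbone config dict carrying a "model_type" key.
auto_config = DPTConfig(
    is_hybrid=False,
    backbone_config={"model_type": "swinv2"},  # any registered backbone type
)
assert auto_config.use_autobackbone
assert auto_config.backbone_out_indices is None  # ViT indices unused here
```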
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ELECTRA checkpoint.""" import argparse import torch from transformers import ElectraConfig, ElectraForMaskedLM, ElectraForPreTraining, load_tf_weights_in_electra from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, discriminator_or_generator): # Initialise PyTorch model config = ElectraConfig.from_json_file(config_file) print(f"Building PyTorch model from configuration: {config}") if discriminator_or_generator == "discriminator": model = ElectraForPreTraining(config) elif discriminator_or_generator == "generator": model = ElectraForMaskedLM(config) else: raise ValueError("The discriminator_or_generator argument should be either 'discriminator' or 'generator'") # Load weights from tf checkpoint load_tf_weights_in_electra( model, config, tf_checkpoint_path, discriminator_or_generator=discriminator_or_generator ) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--discriminator_or_generator", default=None, type=str, required=True, help=( "Whether to export the generator or the discriminator. Should be a string, either 'discriminator' or " "'generator'." ), ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.discriminator_or_generator )
transformers/src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 1018 }
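Besides the CLI entry point above, the converter can be driven programmatically. A hedged sketch follows; all file paths are hypothetical placeholders, and running it requires TensorFlow to be installed since `load_tf_weights_in_electra` reads a TF checkpoint.

```python
from transformers.models.electra.convert_electra_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

# All paths below are placeholders for illustration only.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/electra_small/model.ckpt",
    config_file="/path/to/electra_small/config.json",
    pytorch_dump_path="/path/to/out/pytorch_model.bin",
    discriminator_or_generator="discriminator",  # or "generator"
)
```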
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for EnCodec."""

from typing import List, Optional, Union

import numpy as np

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs an EnCodec feature extractor.

    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which
    contains most of the main methods. Users should refer to this superclass for more information regarding those
    methods.

    Instantiating a feature extractor with the defaults will yield a similar configuration to that of the
    [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture.

    Args:
        feature_size (`int`, *optional*, defaults to 1):
            The feature dimension of the extracted features. Use 1 for mono, 2 for stereo.
        sampling_rate (`int`, *optional*, defaults to 24000):
            The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
        padding_value (`float`, *optional*, defaults to 0.0):
            The value that is used to fill the padding values.
        chunk_length_s (`float`, *optional*):
            If defined, the audio is pre-processed into chunks of length `chunk_length_s` and then encoded.
        overlap (`float`, *optional*):
            Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following
            formula: `int((1.0 - self.overlap) * self.chunk_length)`.
    """

    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        """
        Main method to featurize and prepare for the model one or several sequence(s).
        Args:
            raw_audio (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
                The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of
                float values, a list of numpy arrays or a list of list of float values. The numpy array must be of
                shape `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio
                (`feature_size = 2`).
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
                  different lengths).
            truncation (`bool`, *optional*, defaults to `False`):
                Activates truncation to cut input sequences longer than `max_length` to `max_length`.
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. 
Make sure you only set one.") elif padding is None: # by default let's pad the inputs padding = True is_batched = bool( isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))) ) if is_batched: raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio] elif not is_batched and not isinstance(raw_audio, np.ndarray): raw_audio = np.asarray(raw_audio, dtype=np.float32) elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64): raw_audio = raw_audio.astype(np.float32) # always return batch if not is_batched: raw_audio = [np.asarray(raw_audio).T] # verify inputs are valid for idx, example in enumerate(raw_audio): if example.ndim > 2: raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}") if self.feature_size == 1 and example.ndim != 1: raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels") if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels") padded_inputs = None input_values = BatchFeature({"input_values": raw_audio}) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: max_length = min(array.shape[0] for array in raw_audio) nb_step = int(np.floor(max_length / self.chunk_stride)) max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: max_length = max(array.shape[0] for array in raw_audio) nb_step = int(np.ceil(max_length / self.chunk_stride)) max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length padding = "max_length" else: padded_inputs = input_values # normal padding on batch if padded_inputs is None: padded_inputs = self.pad( input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, ) if padding: padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask") input_values = [] for example in padded_inputs.pop("input_values"): if self.feature_size == 1: example = example[..., None] input_values.append(example.T) padded_inputs["input_values"] = input_values if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs __all__ = ["EncodecFeatureExtractor"]
transformers/src/transformers/models/encodec/feature_extraction_encodec.py/0
{ "file_path": "transformers/src/transformers/models/encodec/feature_extraction_encodec.py", "repo_id": "transformers", "token_count": 4087 }
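To make the chunking arithmetic in `chunk_length` / `chunk_stride` concrete, here is a small sketch. The 1-second chunks and 25% overlap are illustrative values chosen for the example, not values read from a real checkpoint.

```python
import numpy as np
from transformers import EncodecFeatureExtractor

# Illustrative values: 1-second chunks with 25% overlap at 24 kHz.
fe = EncodecFeatureExtractor(sampling_rate=24000, chunk_length_s=1.0, overlap=0.25)
assert fe.chunk_length == 24000                      # 1.0 s * 24000 Hz
assert fe.chunk_stride == int((1.0 - 0.25) * 24000)  # 18000 samples

audio = np.random.randn(30000).astype(np.float32)    # 1.25 s of mono audio
batch = fe(raw_audio=audio, sampling_rate=24000, return_tensors="np")
# Padded up to a whole number of chunks: (nb_step - 1) * stride + chunk_length
# = 1 * 18000 + 24000 = 42000 samples.
print(batch["input_values"].shape)  # (1, 1, 42000)
```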
# coding=utf-8 # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def count_parameters(state_dict): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items()) def upgrade_state_dict(state_dict, codebook_state_dict): upgrade = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head") key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head") key = key.replace("heads.cmd.itm_head.cls", "itm_head") key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler") key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale") key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head") key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head") key = key.replace("mm_text_projection", "flava.text_to_mm_projection") key = key.replace("mm_image_projection", "flava.image_to_mm_projection") key = key.replace("image_encoder.module", "flava.image_model") key = key.replace("text_encoder.module", "flava.text_model") key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token") key = key.replace("mm_encoder.module", "flava.multimodal_model") key = key.replace("text_projection", "flava.text_projection") key = key.replace("image_projection", "flava.image_projection") upgrade[key] = value.float() for key, value in codebook_state_dict.items(): upgrade[f"image_codebook.{key}"] = value return upgrade @torch.no_grad() def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None): """ Copy/paste/tweak model's weights to transformers design. 
""" if config_path is not None: config = FlavaConfig.from_pretrained(config_path) else: config = FlavaConfig() hf_model = FlavaForPreTraining(config).eval() codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False) if os.path.exists(checkpoint_path): state_dict = torch.load(checkpoint_path, map_location="cpu") else: state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu") hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict) hf_model.load_state_dict(hf_state_dict) hf_state_dict = hf_model.state_dict() hf_count = count_parameters(hf_state_dict) state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict) assert torch.allclose(hf_count, state_dict_count, atol=1e-3) hf_model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") args = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
transformers/src/transformers/models/flava/convert_flava_original_pytorch_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/flava/convert_flava_original_pytorch_to_hf.py", "repo_id": "transformers", "token_count": 1622 }
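To illustrate the renaming scheme of `upgrade_state_dict` in isolation, here is a toy sketch with fabricated keys and zero tensors; it is not a real FLAVA checkpoint, only a demonstration of how the key rewriting and codebook prefixing behave.

```python
import torch
from transformers.models.flava.convert_flava_original_pytorch_to_hf import upgrade_state_dict

# Fabricated keys, chosen only to exercise two of the rename rules above.
toy_state_dict = {
    "image_encoder.module.patch_embed.weight": torch.zeros(1),
    "heads.fairseq_mlm.cls.predictions.bias": torch.zeros(1),
}
toy_codebook = {"blocks.0.weight": torch.zeros(1)}

upgraded = upgrade_state_dict(toy_state_dict, toy_codebook)
print(sorted(upgraded))
# ['flava.image_model.patch_embed.weight',
#  'image_codebook.blocks.0.weight',
#  'mlm_head.bias']
```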
# coding=utf-8
# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FSMT configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class DecoderConfig(PretrainedConfig):
    r"""
    Configuration class for FSMT's decoder specific things. note: this is a private helper class
    """

    model_type = "fsmt_decoder"

    def __init__(self, vocab_size=0, bos_token_id=0):
        super().__init__()
        self.vocab_size = vocab_size
        self.bos_token_id = bos_token_id


class FSMTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`FSMTModel`]. It is used to instantiate a FSMT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the FSMT
    [facebook/wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        langs (`List[str]`):
            A list with source language and target language (e.g., ['en', 'ru']).
        src_vocab_size (`int`):
            Vocabulary size of the encoder. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed to the forward method in the encoder.
        tgt_vocab_size (`int`):
            Vocabulary size of the decoder. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed to the forward method in the decoder.
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
        activation_function (`str` or `Callable`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_embedding (`bool`, *optional*, defaults to `True`):
            Scale embeddings by dividing by sqrt(d_model).
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning of stream token id.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        decoder_start_token_id (`int`, *optional*):
            This model starts decoding with `eos_token_id`.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether this is an encoder/decoder model.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie input and output embeddings.
        num_beams (`int`, *optional*, defaults to 5):
            Number of beams for beam search that will be used by default in the `generate` method of the model. 1
            means no beam search.
        length_penalty (`float`, *optional*, defaults to 1.0):
            Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
            the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
            likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
            `length_penalty` < 0.0 encourages shorter sequences.
        early_stopping (`bool`, *optional*, defaults to `False`):
            Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
            when at least `num_beams` sentences are finished per batch or not.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*, defaults to 2):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
Examples: ```python >>> from transformers import FSMTConfig, FSMTModel >>> # Initializing a FSMT facebook/wmt19-en-ru style configuration >>> config = FSMTConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = FSMTModel(config) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "fsmt" attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} # update the defaults from config file def __init__( self, langs=["en", "de"], src_vocab_size=42024, tgt_vocab_size=42024, activation_function="relu", d_model=1024, max_length=200, max_position_embeddings=1024, encoder_ffn_dim=4096, encoder_layers=12, encoder_attention_heads=16, encoder_layerdrop=0.0, decoder_ffn_dim=4096, decoder_layers=12, decoder_attention_heads=16, decoder_layerdrop=0.0, attention_dropout=0.0, dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, is_encoder_decoder=True, scale_embedding=True, tie_word_embeddings=False, num_beams=5, length_penalty=1.0, early_stopping=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, forced_eos_token_id=2, **common_kwargs, ): self.langs = langs self.src_vocab_size = src_vocab_size self.tgt_vocab_size = tgt_vocab_size self.d_model = d_model # encoder_embed_dim and decoder_embed_dim self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = self.num_hidden_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.max_position_embeddings = max_position_embeddings self.init_std = init_std # Normal(0, this parameter) self.activation_function = activation_function self.decoder = DecoderConfig(vocab_size=tgt_vocab_size, bos_token_id=eos_token_id) if "decoder" in common_kwargs: del common_kwargs["decoder"] self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True # 3 Types of Dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.dropout = dropout self.use_cache = use_cache super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, is_encoder_decoder=is_encoder_decoder, tie_word_embeddings=tie_word_embeddings, forced_eos_token_id=forced_eos_token_id, max_length=max_length, num_beams=num_beams, length_penalty=length_penalty, early_stopping=early_stopping, **common_kwargs, ) __all__ = ["FSMTConfig"]
transformers/src/transformers/models/fsmt/configuration_fsmt.py/0
{ "file_path": "transformers/src/transformers/models/fsmt/configuration_fsmt.py", "repo_id": "transformers", "token_count": 4021 }
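One subtlety worth calling out from the config above: the private `DecoderConfig` is seeded with `bos_token_id=eos_token_id`, which is why FSMT decoding starts from the end-of-stream token. A minimal sketch using only the defaults shown in the file:

```python
from transformers import FSMTConfig

config = FSMTConfig()
# The decoder sub-config's BOS is deliberately the top-level EOS (id 2).
assert config.decoder.bos_token_id == config.eos_token_id == 2
# FSMT keeps separate source/target vocabularies, unlike most single-vocab models.
print(config.src_vocab_size, config.tgt_vocab_size)  # 42024 42024
```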
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for Fuyu
"""

import re
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_utils import ImageInput
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack, _validate_images_text_input_order
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import is_torch_available, logging, requires_backends


if is_torch_available():
    from .image_processing_fuyu import FuyuBatchFeature


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch


TEXT_REPR_BBOX_OPEN = "<box>"
TEXT_REPR_BBOX_CLOSE = "</box>"
TEXT_REPR_POINT_OPEN = "<point>"
TEXT_REPR_POINT_CLOSE = "</point>"

TOKEN_BBOX_OPEN_STRING = "<0x00>"  # <bbox>
TOKEN_BBOX_CLOSE_STRING = "<0x01>"  # </bbox>
TOKEN_POINT_OPEN_STRING = "<0x02>"  # <point>
TOKEN_POINT_CLOSE_STRING = "<0x03>"  # </point>
BEGINNING_OF_ANSWER_STRING = "<0x04>"  # <boa>


class FuyuProcessorKwargs(ProcessingKwargs, total=False):
    _defaults = {
        "text_kwargs": {
            "add_special_tokens": True,
            "padding": False,
            "stride": 0,
            "return_attention_mask": True,
            "return_overflowing_tokens": False,
            "return_special_tokens_mask": False,
            "return_offsets_mapping": False,
            "return_token_type_ids": False,
            "return_length": False,
            "verbose": True,
        },
        "images_kwargs": {},
    }


def full_unpacked_stream_to_tensor(
    all_bi_tokens_to_place: List[int],
    full_unpacked_stream: List["torch.Tensor"],
    fill_value: int,
    batch_size: int,
    new_seq_len: int,
    offset: int,
) -> "torch.Tensor":
    """Takes an unpacked stream of tokens (i.e. a list of tensors, one for each item in the batch) and does the
    required padding to create a single tensor for the batch of shape batch_size x new_seq_len.
    """
    assert len(all_bi_tokens_to_place) == batch_size
    assert len(full_unpacked_stream) == batch_size

    # Create padded tensors for the full batch.
    new_padded_tensor = torch.full(
        [batch_size, new_seq_len],
        fill_value=fill_value,
        dtype=full_unpacked_stream[0].dtype,
        device=full_unpacked_stream[0].device,
    )

    # Place each batch entry into the batch tensor.
    for bi in range(batch_size):
        tokens_to_place = all_bi_tokens_to_place[bi]
        new_padded_tensor[bi, :tokens_to_place] = full_unpacked_stream[bi][offset : tokens_to_place + offset]

    return new_padded_tensor


def construct_full_unpacked_stream(
    num_real_text_tokens: Union[List[List[int]], "torch.Tensor"],
    input_stream: "torch.Tensor",
    image_tokens: List[List["torch.Tensor"]],
    batch_size: int,
    num_sub_sequences: int,
) -> List["torch.Tensor"]:
    """Takes an input_stream tensor of shape B x S x ?. For each subsequence, adds any required padding to account for
    images and then unpacks the subsequences to create a single sequence per item in the batch.
    Returns a list of tensors, one for each item in the batch."""

    all_bi_stream = []

    for batch_index in range(batch_size):
        all_si_stream = []

        # First, construct full token stream (including image placeholder tokens) and loss mask for each subsequence
        # and append to lists. We use lists rather than tensors because each subsequence is variable-sized.
        # TODO Remove this logic in a subsequent release since subsequences are not supported.
        image_adjustment = image_tokens[batch_index][0]
        subsequence_stream = torch.cat([image_adjustment, input_stream[batch_index, 0]], dim=0)
        num_real_tokens = image_adjustment.shape[0] + num_real_text_tokens[batch_index][0]
        all_si_stream.append(subsequence_stream[:num_real_tokens])
        all_bi_stream.append(torch.cat(all_si_stream, dim=0))

    return all_bi_stream


def _replace_string_repr_with_token_tags(prompt: str) -> str:
    prompt = prompt.replace(TEXT_REPR_POINT_OPEN, TOKEN_POINT_OPEN_STRING)
    prompt = prompt.replace(TEXT_REPR_POINT_CLOSE, TOKEN_POINT_CLOSE_STRING)
    prompt = prompt.replace(TEXT_REPR_BBOX_OPEN, TOKEN_BBOX_OPEN_STRING)
    prompt = prompt.replace(TEXT_REPR_BBOX_CLOSE, TOKEN_BBOX_CLOSE_STRING)
    return prompt


def _segment_prompt_into_text_token_conversions(prompt: str) -> List:
    """
    Given a string prompt, converts the prompt into a list of TextTokenConversions.
    """
    # Wherever we notice one of [TOKEN_OPEN_STRING, TOKEN_CLOSE_STRING], we split the prompt
    prompt_text_list: List = []
    regex_pattern = re.compile(
        f"({TOKEN_BBOX_OPEN_STRING}|{TOKEN_BBOX_CLOSE_STRING}|{TOKEN_POINT_OPEN_STRING}|{TOKEN_POINT_CLOSE_STRING})"
    )
    # Split by the regex pattern
    prompt_split = regex_pattern.split(prompt)
    for i, elem in enumerate(prompt_split):
        if len(elem) == 0 or elem in [
            TOKEN_BBOX_OPEN_STRING,
            TOKEN_BBOX_CLOSE_STRING,
            TOKEN_POINT_OPEN_STRING,
            TOKEN_POINT_CLOSE_STRING,
        ]:
            continue
        prompt_text_list.append(
            (elem, i > 1 and prompt_split[i - 1] in [TOKEN_BBOX_OPEN_STRING, TOKEN_POINT_OPEN_STRING])
        )
    return prompt_text_list


def _transform_coordinates_and_tokenize(prompt: str, scale_factor: float, tokenizer) -> List[int]:
    """
    This function transforms the prompt in the following fashion:
    - <box> <point> and </box> </point> to their respective token mappings
    - extract the coordinates from the tag
    - transform the coordinates into the transformed image space
    - return the prompt tokens with the transformed coordinates and new tags

    Bounding boxes and points MUST be in the following format: <box>y1, x1, y2, x2</box> <point>x, y</point> The
    spaces and punctuation added above are NOT optional.
""" # Make a namedtuple that stores "text" and "is_bbox" # We want to do the following: Tokenize the code normally -> when we see a point or box, tokenize using the tokenize_within_tag function # When point or box close tag, continue tokenizing normally # First, we replace the point and box tags with their respective tokens prompt = _replace_string_repr_with_token_tags(prompt) # Tokenize the prompt # Convert prompt into a list split prompt_text_list = _segment_prompt_into_text_token_conversions(prompt) transformed_prompt_tokens: List[int] = [] for elem in prompt_text_list: if elem[1]: # This is a location, we need to tokenize it within_tag_tokenized = _transform_within_tags(elem[0], scale_factor, tokenizer) # Surround the text with the open and close tags transformed_prompt_tokens.extend(within_tag_tokenized) else: transformed_prompt_tokens.extend(tokenizer(elem[0], add_special_tokens=False).input_ids) return transformed_prompt_tokens def _transform_within_tags(text: str, scale_factor: float, tokenizer) -> List[int]: """ Given a bounding box of the fashion <box>1, 2, 3, 4</box> | <point>1, 2</point> This function is responsible for converting 1, 2, 3, 4 into tokens of 1 2 3 4 without any commas. """ # Convert the text into a list of strings. num_int_strs = text.split(",") if len(num_int_strs) == 2: # If there are any open or close tags, remove them. token_space_open_string = tokenizer.vocab[TOKEN_POINT_OPEN_STRING] token_space_close_string = tokenizer.vocab[TOKEN_POINT_CLOSE_STRING] else: token_space_open_string = tokenizer.vocab[TOKEN_BBOX_OPEN_STRING] token_space_close_string = tokenizer.vocab[TOKEN_BBOX_CLOSE_STRING] # Remove all spaces from num_ints num_ints = [float(num.strip()) for num in num_int_strs] # scale to transformed image siz if len(num_ints) == 2: num_ints_translated = scale_point_to_transformed_image(x=num_ints[0], y=num_ints[1], scale_factor=scale_factor) elif len(num_ints) == 4: num_ints_translated = scale_bbox_to_transformed_image( top=num_ints[0], left=num_ints[1], bottom=num_ints[2], right=num_ints[3], scale_factor=scale_factor, ) else: raise ValueError(f"Invalid number of ints: {len(num_ints)}") # Tokenize the text, skipping the tokens = [tokenizer.vocab[str(num)] for num in num_ints_translated] return [token_space_open_string] + tokens + [token_space_close_string] def _tokenize_prompts_with_image_and_batch( tokenizer, prompts: List[List[str]], scale_factors: Optional[List[List["torch.Tensor"]]], max_tokens_to_generate: int, max_position_embeddings: int, add_BOS: bool, # Same issue with types as above add_beginning_of_answer_token: bool, ) -> Tuple["torch.Tensor", "torch.Tensor"]: """ Given a set of prompts and number of tokens to generate: - tokenize prompts - set the sequence length to be the max of length of prompts plus the number of tokens we would like to generate - pad all the sequences to this length so we can convert them into a 3D tensor. 
""" # If not tool use, tranform the coordinates while tokenizing if scale_factors is not None: transformed_prompt_tokens = [] for prompt_seq, scale_factor_seq in zip(prompts, scale_factors): transformed_prompt_tokens.append( [ _transform_coordinates_and_tokenize(prompt, scale_factor.item(), tokenizer) for prompt, scale_factor in zip(prompt_seq, scale_factor_seq) ] ) else: transformed_prompt_tokens = [[tokenizer.tokenize(prompt) for prompt in prompt_seq] for prompt_seq in prompts] prompts_tokens = transformed_prompt_tokens if add_BOS: bos_token = tokenizer.vocab["<s>"] else: bos_token = tokenizer.vocab["|ENDOFTEXT|"] prompts_tokens = [[[bos_token] + x for x in prompt_seq] for prompt_seq in prompts_tokens] if add_beginning_of_answer_token: beginning_of_answer = tokenizer.vocab[BEGINNING_OF_ANSWER_STRING] # Only add bbox open token to the last subsequence since that is what will be completed for token_seq in prompts_tokens: token_seq[-1].append(beginning_of_answer) # Now we have a list of list of tokens which each list has a different # size. We want to extend this list to: # - incorporate the tokens that need to be generated # - make all the sequences equal length. # Get the prompts length. prompts_length = [[len(x) for x in prompts_tokens_seq] for prompts_tokens_seq in prompts_tokens] # Get the max prompts length. max_prompt_len: int = np.max(prompts_length) # Number of tokens in the each sample of the batch. samples_length = min(max_prompt_len + max_tokens_to_generate, max_position_embeddings) if max_prompt_len + max_tokens_to_generate > max_position_embeddings: logger.warning( f"Max subsequence prompt length of {max_prompt_len} + max tokens to generate {max_tokens_to_generate}", f"exceeds context length of {max_position_embeddings}. Will generate as many tokens as possible.", ) # Now update the list of list to be of the same size: samples_length. for prompt_tokens_seq, prompts_length_seq in zip(prompts_tokens, prompts_length): for prompt_tokens, prompt_length in zip(prompt_tokens_seq, prompts_length_seq): if len(prompt_tokens) > samples_length: raise ValueError("Length of subsequence prompt exceeds sequence length.") padding_size = samples_length - prompt_length prompt_tokens.extend([tokenizer.vocab["|ENDOFTEXT|"]] * padding_size) # Now we are in a structured format, we can convert to tensors. 
prompts_tokens_tensor = torch.tensor(prompts_tokens, dtype=torch.int64) prompts_length_tensor = torch.tensor(prompts_length, dtype=torch.int64) return prompts_tokens_tensor, prompts_length_tensor # Simplified assuming self.crop_top = self.padding_top = 0 def original_to_transformed_h_coords(original_coords, scale_h): return np.round(original_coords * scale_h).astype(np.int32) # Simplified assuming self.crop_left = self.padding_left = 0 def original_to_transformed_w_coords(original_coords, scale_w): return np.round(original_coords * scale_w).astype(np.int32) def scale_point_to_transformed_image(x: float, y: float, scale_factor: float) -> List[int]: x_scaled = original_to_transformed_w_coords(np.array([x / 2]), scale_factor)[0] y_scaled = original_to_transformed_h_coords(np.array([y / 2]), scale_factor)[0] return [x_scaled, y_scaled] def scale_bbox_to_transformed_image( top: float, left: float, bottom: float, right: float, scale_factor: float ) -> List[int]: top_scaled = original_to_transformed_w_coords(np.array([top / 2]), scale_factor)[0] left_scaled = original_to_transformed_h_coords(np.array([left / 2]), scale_factor)[0] bottom_scaled = original_to_transformed_w_coords(np.array([bottom / 2]), scale_factor)[0] right_scaled = original_to_transformed_h_coords(np.array([right / 2]), scale_factor)[0] return [top_scaled, left_scaled, bottom_scaled, right_scaled] class FuyuProcessor(ProcessorMixin): r""" Constructs a Fuyu processor which wraps a Fuyu image processor and a Llama tokenizer into a single processor. [`FuyuProcessor`] offers all the functionalities of [`FuyuImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~FuyuProcessor.__call__`] and [`~FuyuProcessor.decode`] for more information. Args: image_processor ([`FuyuImageProcessor`]): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`]): The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] valid_kwargs = [] image_processor_class = "FuyuImageProcessor" tokenizer_class = "AutoTokenizer" def __init__(self, image_processor, tokenizer, **kwargs): super().__init__(image_processor=image_processor, tokenizer=tokenizer) self.image_processor = image_processor self.tokenizer = tokenizer self.max_tokens_to_generate = 10 self.max_position_embeddings = 16384 # TODO Can't derive this from model files: where to set it? self.pad_token_id = 0 self.dummy_image_index = -1 def _left_pad_inputs_with_attention_mask(self, model_inputs: List[Dict], return_attention_mask: bool): max_length_input_ids = max(entry["input_ids"].shape[1] for entry in model_inputs) max_length_image_patch_indices = max(entry["image_patches_indices"].shape[1] for entry in model_inputs) batched_inputs = {"input_ids": [], "image_patches": [], "image_patches_indices": [], "attention_mask": []} for entry in model_inputs: for key, tensor in entry.items(): if key == "input_ids": num_padding_tokens = max_length_input_ids - tensor.shape[1] padded_input_ids = torch.cat( [ torch.full((tensor.shape[0], num_padding_tokens), self.pad_token_id, dtype=torch.long), tensor, ], dim=1, ) batched_inputs[key].append(padded_input_ids) attention_mask = torch.cat( [torch.zeros(tensor.shape[0], num_padding_tokens, dtype=torch.long), torch.ones_like(tensor)], dim=1, ) batched_inputs["attention_mask"].append(attention_mask) elif key == "image_patches": # For image_patches, we don't pad but just append them to the list. 
                    batched_inputs[key].append(tensor)
                else:
                    # for image_patches_indices
                    num_padding_indices = max_length_image_patch_indices - tensor.shape[1]
                    padded_indices = torch.cat(
                        [
                            torch.full(
                                (tensor.shape[0], num_padding_indices), self.dummy_image_index, dtype=torch.long
                            ),
                            tensor,
                        ],
                        dim=1,
                    )
                    batched_inputs[key].append(padded_indices)
        batched_keys = ["input_ids", "image_patches_indices"]
        if return_attention_mask:
            batched_keys.append("attention_mask")
        for key in batched_keys:
            batched_inputs[key] = torch.cat(batched_inputs[key], dim=0)

        return batched_inputs

    def get_sample_encoding(
        self,
        prompts,
        scale_factors,
        image_unpadded_heights,
        image_unpadded_widths,
        image_placeholder_id,
        image_newline_id,
        tensor_batch_images,
    ):
        image_present = torch.ones(1, 1, 1)
        model_image_input = self.image_processor.preprocess_with_tokenizer_info(
            image_input=tensor_batch_images,
            image_present=image_present,
            image_unpadded_h=image_unpadded_heights,
            image_unpadded_w=image_unpadded_widths,
            image_placeholder_id=image_placeholder_id,
            image_newline_id=image_newline_id,
            variable_sized=True,
        )
        # FIXME max_tokens_to_generate is embedded into this processor's call.
        prompt_tokens, prompts_length = _tokenize_prompts_with_image_and_batch(
            tokenizer=self.tokenizer,
            prompts=prompts,
            scale_factors=scale_factors,
            max_tokens_to_generate=self.max_tokens_to_generate,
            max_position_embeddings=self.max_position_embeddings,
            add_BOS=True,
            add_beginning_of_answer_token=True,
        )
        image_padded_unpacked_tokens = construct_full_unpacked_stream(
            num_real_text_tokens=prompts_length,
            input_stream=prompt_tokens,
            image_tokens=model_image_input["image_input_ids"],
            batch_size=1,
            num_sub_sequences=self.subsequence_length,
        )
        # Construct inputs for image patch indices.
        unpacked_image_patch_indices_per_batch = construct_full_unpacked_stream(
            num_real_text_tokens=prompts_length,
            input_stream=torch.full_like(prompt_tokens, -1),
            image_tokens=model_image_input["image_patch_indices_per_batch"],
            batch_size=1,
            num_sub_sequences=self.subsequence_length,
        )
        max_prompt_length = max(x.shape[-1] for x in image_padded_unpacked_tokens)
        max_seq_len_batch = min(max_prompt_length + self.max_tokens_to_generate, self.max_position_embeddings)
        tokens_to_place = min(max_seq_len_batch, max(0, image_padded_unpacked_tokens[0].shape[0]))

        # Use same packing logic for the image patch indices.
        image_patch_input_indices = full_unpacked_stream_to_tensor(
            all_bi_tokens_to_place=[tokens_to_place],
            full_unpacked_stream=unpacked_image_patch_indices_per_batch,
            fill_value=-1,
            batch_size=1,
            new_seq_len=max_seq_len_batch,
            offset=0,
        )
        image_patches_tensor = torch.stack([img[0] for img in model_image_input["image_patches"]])
        batch_encoding = {
            "input_ids": image_padded_unpacked_tokens[0].unsqueeze(0),
            "image_patches": image_patches_tensor,
            "image_patches_indices": image_patch_input_indices,
        }
        return batch_encoding

    def __call__(
        self,
        images: ImageInput = None,
        text: Optional[Union[str, List[str], TextInput, PreTokenizedInput]] = None,
        audio=None,
        videos=None,
        **kwargs: Unpack[FuyuProcessorKwargs],
    ) -> "FuyuBatchFeature":
        """
        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to
        encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        FuyuImageProcessor's [`~FuyuImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.
        Args:
            images (`PIL.Image.Image`, `List[PIL.Image.Image]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `List[str]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).

        Returns:
            [`FuyuBatchFeature`]: A [`FuyuBatchFeature`] with the following fields:

            - **input_ids** -- Tensor of token ids to be fed to a model. Returned when `text` is not `None`.
            - **image_patches** -- List of Tensor of image patches. Returned when `images` is not `None`.
            - **image_patches_indices** -- Tensor of indices where patch embeddings have to be inserted by the model.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model when
              `return_attention_mask=True`.
        """
        requires_backends(self, ["torch"])

        # --- Check input validity ---
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be None.")
        # check if images and text inputs are reversed for BC
        images, text = _validate_images_text_input_order(images, text)

        output_kwargs = self._merge_kwargs(
            FuyuProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        if not output_kwargs["text_kwargs"].setdefault("return_attention_mask", True):
            raise ValueError("`return_attention_mask=False` is not supported for this model.")

        if text is not None and images is None:
            logger.warning("You are processing a text with no associated image. Make sure it is intended.")
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text, **output_kwargs["text_kwargs"])
            return text_encoding

        if text is None and images is not None:
            logger.warning("You are processing an image with no associated text. Make sure it is intended.")
            prompts = [[""]]
        if text is not None and images is not None:
            if isinstance(text, str):
                prompts = [[text]]
            elif isinstance(text, list):
                prompts = [[text_seq] for text_seq in text]

        # --- Preprocess images using self.image_processor ---
        # FIXME - We hard code "pt" here because the rest of the processing assumes torch tensors
        output_kwargs["images_kwargs"]["return_tensors"] = "pt"
        image_encoding = self.image_processor.preprocess(images, **output_kwargs["images_kwargs"])
        batch_images = image_encoding["images"]
        image_unpadded_heights = image_encoding["image_unpadded_heights"]
        image_unpadded_widths = image_encoding["image_unpadded_widths"]
        scale_factors = image_encoding["image_scale_factors"]

        self.subsequence_length = 1  # Each batch contains only one sequence.
self.batch_size = len(batch_images) # --- Use self.tokenizer to get the ids of special tokens to insert into image ids --- image_placeholder_id = self.tokenizer("|SPEAKER|", add_special_tokens=False)["input_ids"][1] image_newline_id = self.tokenizer("|NEWLINE|", add_special_tokens=False)["input_ids"][1] tensor_batch_images = torch.stack([img[0] for img in batch_images]).unsqueeze(1) # --- Use self.image_processor again to obtain the full token ids and batch inputs --- all_encodings = [] for prompt, scale_factor, image_unpadded_height, image_unpadded_width, tensor_batch_image in zip( prompts, scale_factors, image_unpadded_heights, image_unpadded_widths, tensor_batch_images ): sample_encoding = self.get_sample_encoding( prompts=[prompt], scale_factors=[scale_factor], image_unpadded_heights=torch.tensor([image_unpadded_height]), image_unpadded_widths=torch.tensor([image_unpadded_width]), image_placeholder_id=image_placeholder_id, image_newline_id=image_newline_id, tensor_batch_images=tensor_batch_image.unsqueeze(0), ) all_encodings.append(sample_encoding) batch_encoding = self._left_pad_inputs_with_attention_mask( model_inputs=all_encodings, return_attention_mask=True ) return FuyuBatchFeature(data=batch_encoding) def post_process_box_coordinates(self, outputs, target_sizes=None): """ Transforms raw coordinates detected by [`FuyuForCausalLM`] to the original images' coordinate space. Coordinates will be returned in "box" format, with the following pattern: `<box>top, left, bottom, right</box>` Point coordinates are not supported yet. Args: outputs ([`GenerateOutput`]): Raw outputs from `generate`. target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, found coordinates in the output sequence are rescaled to the target sizes. If left to None, coordinates will not be rescaled. Returns: `GenerateOutput`: Same output type returned by `generate`, with output token ids replaced with boxed and possible rescaled coordinates. 
""" def scale_factor_to_fit(original_size, target_size=None): height, width = original_size if target_size is None: max_height = self.image_processor.size["height"] max_width = self.image_processor.size["width"] else: max_height, max_width = target_size if width <= max_width and height <= max_height: return 1.0 return min(max_height / height, max_width / width) def find_delimiters_pair(tokens, start_token, end_token): start_id = self.tokenizer.convert_tokens_to_ids(start_token) end_id = self.tokenizer.convert_tokens_to_ids(end_token) starting_positions = (tokens == start_id).nonzero(as_tuple=True)[0] ending_positions = (tokens == end_id).nonzero(as_tuple=True)[0] if torch.any(starting_positions) and torch.any(ending_positions): return (starting_positions[0], ending_positions[0]) return (None, None) def tokens_to_boxes(tokens, original_size): while (pair := find_delimiters_pair(tokens, TOKEN_BBOX_OPEN_STRING, TOKEN_BBOX_CLOSE_STRING)) != ( None, None, ): start, end = pair if end != start + 5: continue # Retrieve transformed coordinates from tokens coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end]) # Scale back to original image size and multiply by 2 scale = scale_factor_to_fit(original_size) top, left, bottom, right = [2 * int(float(c) / scale) for c in coords] # Replace the IDs so they get detokenized right replacement = f" {TEXT_REPR_BBOX_OPEN}{top}, {left}, {bottom}, {right}{TEXT_REPR_BBOX_CLOSE}" replacement = self.tokenizer.tokenize(replacement)[1:] replacement = self.tokenizer.convert_tokens_to_ids(replacement) replacement = torch.tensor(replacement).to(tokens) tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0) return tokens def tokens_to_points(tokens, original_size): while (pair := find_delimiters_pair(tokens, TOKEN_POINT_OPEN_STRING, TOKEN_POINT_CLOSE_STRING)) != ( None, None, ): start, end = pair if end != start + 3: continue # Retrieve transformed coordinates from tokens coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end]) # Scale back to original image size and multiply by 2 scale = scale_factor_to_fit(original_size) x, y = [2 * int(float(c) / scale) for c in coords] # Replace the IDs so they get detokenized right replacement = f" {TEXT_REPR_POINT_OPEN}{x}, {y}{TEXT_REPR_POINT_CLOSE}" replacement = self.tokenizer.tokenize(replacement)[1:] replacement = self.tokenizer.convert_tokens_to_ids(replacement) replacement = torch.tensor(replacement).to(tokens) tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0) return tokens if target_sizes is None: target_sizes = ((self.image_processor.size["height"], self.image_processor.size["width"]),) * len(outputs) elif target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") if len(outputs) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as output sequences") results = [] for seq, size in zip(outputs, target_sizes): seq = tokens_to_boxes(seq, size) seq = tokens_to_points(seq, size) results.append(seq) return results def post_process_image_text_to_text(self, generated_outputs): """ Post-processes the output of `FuyuForConditionalGeneration` to only return the text output. Args: generated_outputs (`torch.Tensor` or `np.ndarray`): The output of the model. The output is expected to be a tensor of shape `(batch_size, sequence_length)` containing the token ids of the generated sequences. Returns: `List[str]`: The decoded text output. 
""" beginning_of_answer = self.tokenizer.convert_tokens_to_ids(BEGINNING_OF_ANSWER_STRING) # get boa index for each outputted sequence tensor # start all generated sequences from the beginning of the answer token, pad to have consistent length unpadded_output_sequences = [ seq[(seq == beginning_of_answer).nonzero(as_tuple=True)[0] + 1 :] for seq in generated_outputs ] max_len = max(len(seq) for seq in unpadded_output_sequences) # convert to torch and pad sequences padded_output_sequences = torch.full((len(unpadded_output_sequences), max_len), self.pad_token_id) for i, seq in enumerate(unpadded_output_sequences): padded_output_sequences[i, : len(seq)] = torch.tensor(seq) return self.batch_decode(padded_output_sequences, skip_special_tokens=True) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) __all__ = ["FuyuProcessor"]
transformers/src/transformers/models/fuyu/processing_fuyu.py/0
{ "file_path": "transformers/src/transformers/models/fuyu/processing_fuyu.py", "repo_id": "transformers", "token_count": 14228 }
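The halving in `scale_point_to_transformed_image` and friends (the `x / 2`, `y / 2` above) reflects the processor's convention that prompt coordinates arrive in a doubled coordinate space. A small sketch of the module-level helpers with made-up coordinates; note `np.round` uses banker's rounding, so 12.5 rounds down to 12:

```python
from transformers.models.fuyu.processing_fuyu import (
    scale_bbox_to_transformed_image,
    scale_point_to_transformed_image,
)

# A point (x=100, y=50) in the doubled coordinate space, with the
# image downscaled by a factor of 0.5: (100/2)*0.5 = 25, (50/2)*0.5 = 12.5 -> 12.
print(scale_point_to_transformed_image(x=100, y=50, scale_factor=0.5))  # [25, 12]

# With scale_factor=1.0 the helpers just halve each coordinate.
print(scale_bbox_to_transformed_image(top=10, left=20, bottom=30, right=40, scale_factor=1.0))
# [5, 10, 15, 20]
```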
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert GIT checkpoints from the original repository.

URL: https://github.com/microsoft/GenerativeImage2Text/tree/main"""

import argparse
from pathlib import Path

import av
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    AutoTokenizer,
    CLIPImageProcessor,
    GitConfig,
    GitForCausalLM,
    GitProcessor,
    GitVisionConfig,
    VideoMAEImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_git_config(model_name):
    if "base" in model_name and "vqa" in model_name:
        image_size = 480
    elif "large" in model_name and "vqa" in model_name:
        image_size = 420
    else:
        image_size = 224

    vision_config = GitVisionConfig(image_size=image_size)

    if "large" in model_name:
        vision_config.patch_size = 14
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_hidden_layers = 24
        vision_config.num_attention_heads = 16

    is_video = "vatex" in model_name or "msrvtt" in model_name
    num_image_with_embedding = 6 if is_video else None
    config = GitConfig(vision_config=vision_config.to_dict(), num_image_with_embedding=num_image_with_embedding)

    return config, image_size, is_video


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, prefix=""):
    rename_keys = []

    # image encoder
    # fmt: off
    rename_keys.append(
        (f"{prefix}image_encoder.class_embedding", "git.image_encoder.vision_model.embeddings.class_embedding")
    )
    rename_keys.append(
        (
            f"{prefix}image_encoder.positional_embedding",
            "git.image_encoder.vision_model.embeddings.position_embedding.weight",
        )
    )
    rename_keys.append(
        (f"{prefix}image_encoder.conv1.weight", "git.image_encoder.vision_model.embeddings.patch_embedding.weight")
    )
    rename_keys.append((f"{prefix}image_encoder.ln_pre.weight", "git.image_encoder.vision_model.pre_layrnorm.weight"))
    rename_keys.append((f"{prefix}image_encoder.ln_pre.bias", "git.image_encoder.vision_model.pre_layrnorm.bias"))
    rename_keys.append(
        (f"{prefix}image_encoder.ln_post.weight", "git.image_encoder.vision_model.post_layernorm.weight")
    )
    rename_keys.append((f"{prefix}image_encoder.ln_post.bias", "git.image_encoder.vision_model.post_layernorm.bias"))
    # fmt: on
    rename_keys.append((f"{prefix}image_encoder.proj", "git.image_encoder.visual_projection.weight"))

    # fmt: off
    for i in range(config.vision_config.num_hidden_layers):
        # image encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.attn.out_proj.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.out_proj.weight"))
        rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.attn.out_proj.bias",
f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.out_proj.bias")) rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_1.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm1.weight")) rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_1.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm1.bias")) rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_fc.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc1.weight")) rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_fc.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc1.bias")) rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_proj.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc2.weight")) rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_proj.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc2.bias")) rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_2.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm2.weight")) rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_2.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm2.bias")) # fmt: on # text decoder # fmt: off rename_keys.append((f"{prefix}textual.embedding.words.weight", "git.embeddings.word_embeddings.weight")) rename_keys.append((f"{prefix}textual.embedding.positions.weight", "git.embeddings.position_embeddings.weight")) rename_keys.append((f"{prefix}textual.visual_projection.0.weight", "git.visual_projection.visual_projection.0.weight")) rename_keys.append((f"{prefix}textual.visual_projection.0.bias", "git.visual_projection.visual_projection.0.bias")) rename_keys.append((f"{prefix}textual.visual_projection.1.weight", "git.visual_projection.visual_projection.1.weight")) rename_keys.append((f"{prefix}textual.visual_projection.1.bias", "git.visual_projection.visual_projection.1.bias")) rename_keys.append((f"{prefix}textual.embedding.layer_norm.weight", "git.embeddings.LayerNorm.weight")) rename_keys.append((f"{prefix}textual.embedding.layer_norm.bias", "git.embeddings.LayerNorm.bias")) rename_keys.append((f"{prefix}textual.output.weight", "output.weight")) rename_keys.append((f"{prefix}textual.output.bias", "output.bias")) for i in range(config.num_hidden_layers): rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.query.weight", f"git.encoder.layer.{i}.attention.self.query.weight")) rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.query.bias", f"git.encoder.layer.{i}.attention.self.query.bias")) rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.key.weight", f"git.encoder.layer.{i}.attention.self.key.weight")) rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.key.bias", f"git.encoder.layer.{i}.attention.self.key.bias")) rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.value.weight", f"git.encoder.layer.{i}.attention.self.value.weight")) rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.value.bias", f"git.encoder.layer.{i}.attention.self.value.bias")) rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.dense.weight", f"git.encoder.layer.{i}.attention.output.dense.weight")) 
rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.dense.bias", f"git.encoder.layer.{i}.attention.output.dense.bias")) rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.LayerNorm.weight", f"git.encoder.layer.{i}.attention.output.LayerNorm.weight")) rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.LayerNorm.bias", f"git.encoder.layer.{i}.attention.output.LayerNorm.bias")) rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.intermediate.dense.weight", f"git.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.intermediate.dense.bias", f"git.encoder.layer.{i}.intermediate.dense.bias")) rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.dense.weight", f"git.encoder.layer.{i}.output.dense.weight")) rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.dense.bias", f"git.encoder.layer.{i}.output.dense.bias")) rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.LayerNorm.weight", f"git.encoder.layer.{i}.output.LayerNorm.weight")) rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.LayerNorm.bias", f"git.encoder.layer.{i}.output.LayerNorm.bias")) # fmt: on if config.num_image_with_embedding is not None: rename_keys.append(("img_temperal_embedding.0", "git.img_temperal_embedding.0")) rename_keys.append(("img_temperal_embedding.1", "git.img_temperal_embedding.1")) rename_keys.append(("img_temperal_embedding.2", "git.img_temperal_embedding.2")) rename_keys.append(("img_temperal_embedding.3", "git.img_temperal_embedding.3")) rename_keys.append(("img_temperal_embedding.4", "git.img_temperal_embedding.4")) rename_keys.append(("img_temperal_embedding.5", "git.img_temperal_embedding.5")) return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val.T if "image_encoder.visual_projection" in new else val # we split up the matrix of each CLIP encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config, prefix=""): dim = config.vision_config.hidden_size for i in range(config.vision_config.num_hidden_layers): # read in weights + bias of input projection layer (in the original implementation, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"{prefix}image_encoder.transformer.resblocks.{i}.attn.in_proj_weight") in_proj_bias = state_dict.pop(f"{prefix}image_encoder.transformer.resblocks.{i}.attn.in_proj_bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[ :dim, : ] state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:dim] state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[ dim : dim * 2, : ] state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[ dim : dim * 2 ] state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[ -dim:, : ] state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-dim:] # We will verify our results on an image def prepare_img(model_name): if "textvqa" in model_name: filepath = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset") image = 
Image.open(filepath).convert("RGB") else: url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) return image def prepare_video(): def read_video_pyav(container, indices): """ Decode the video with PyAV decoder. Args: container (`av.container.input.InputContainer`): PyAV container. indices (`List[int]`): List of frame indices to decode. Returns: result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). """ frames = [] container.seek(0) start_index = indices[0] end_index = indices[-1] for i, frame in enumerate(container.decode(video=0)): if i > end_index: break if i >= start_index and i in indices: frames.append(frame) return np.stack([x.to_ndarray(format="rgb24") for x in frames]) def sample_frame_indices(clip_len, frame_sample_rate, seg_len): """ Sample a given number of frame indices from the video. Args: clip_len (`int`): Total number of frames to sample. frame_sample_rate (`int`): Sample every n-th frame. seg_len (`int`): Maximum allowed index of sample's last frame. Returns: indices (`List[int]`): List of sampled frame indices """ converted_len = int(clip_len * frame_sample_rate) end_idx = np.random.randint(converted_len, seg_len) start_idx = end_idx - converted_len indices = np.linspace(start_idx, end_idx, num=clip_len) indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) return indices # set seed for reproducibility np.random.seed(0) file_path = hf_hub_download(repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset") with av.open(file_path) as container: # sample 6 frames num_frames = 6 indices = sample_frame_indices( clip_len=num_frames, frame_sample_rate=4, seg_len=container.streams.video[0].frames ) frames = read_video_pyav(container, indices) return frames @torch.no_grad() def convert_git_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False): """ Copy/paste/tweak model's weights to our GIT structure. 
""" model_name_to_url = { "git-base": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE/snapshot/model.pt", "git-base-coco": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_COCO/snapshot/model.pt", "git-base-textcaps": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_TEXTCAPS/snapshot/model.pt", "git-base-vqav2": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_VQAv2/snapshot/model.pt", "git-base-textvqa": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_TEXTVQA/snapshot/model.pt", # todo "git-base-vatex": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_VATEX/snapshot/model.pt", "git-base-msrvtt-qa": ( "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_MSRVTT_QA/snapshot/model.pt" ), "git-large": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE/snapshot/model.pt", "git-large-coco": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_COCO/snapshot/model.pt", "git-large-textcaps": ( "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_TEXTCAPS/snapshot/model.pt" ), "git-large-vqav2": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_VQAv2/snapshot/model.pt", "git-large-textvqa": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_TEXTVQA/snapshot/model.pt", "git-large-vatex": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_VATEX/snapshot/model.pt", "git-large-msrvtt-qa": ( "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_MSRVTT_QA/snapshot/model.pt" ), "git-large-r": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R/snapshot/model.pt", "git-large-r-coco": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R_COCO/snapshot/model.pt", "git-large-r-textcaps": ( "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R_TEXTCAPS/snapshot/model.pt" ), } model_name_to_path = { "git-large": "/Users/nielsrogge/Documents/GIT/git_large_model.pt", "git-large-coco": "/Users/nielsrogge/Documents/GIT/git_large_coco_model.pt", "git-large-textcaps": "/Users/nielsrogge/Documents/GIT/git_large_textcaps_model.pt", "git-large-vqav2": "/Users/nielsrogge/Documents/GIT/git_large_vqav2_model.pt", "git-large-textvqa": "/Users/nielsrogge/Documents/GIT/git_large_textvqa_model.pt", } # define GIT configuration based on model name config, image_size, is_video = get_git_config(model_name) if "large" in model_name and not is_video and "large-r" not in model_name: # large checkpoints take way too long to download checkpoint_path = model_name_to_path[model_name] state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] else: checkpoint_url = model_name_to_url[model_name] state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[ "model" ] # rename keys prefix = "module." 
if model_name == "git-base" else "" rename_keys = create_rename_keys(config, prefix=prefix) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, prefix=prefix) # load HuggingFace model model = GitForCausalLM(config) missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) model.eval() print("Missing keys:", missing_keys) print("Unexpected keys:", unexpected_keys) assert missing_keys == ["git.embeddings.position_ids", "git.image_encoder.vision_model.embeddings.position_ids"] assert unexpected_keys == ["git.image_encoder.visual_projection.weight"] # verify results image_processor = ( VideoMAEImageProcessor( size={"shortest_edge": image_size}, crop_size={"height": image_size, "width": image_size} ) if is_video else CLIPImageProcessor( size={"shortest_edge": image_size}, crop_size={"height": image_size, "width": image_size} ) ) tokenizer = AutoTokenizer.from_pretrained( "google-bert/bert-base-uncased", model_input_names=["input_ids", "attention_mask"] ) processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor) if is_video: video = prepare_video() pixel_values = processor(images=list(video), return_tensors="pt").pixel_values else: image = prepare_img(model_name) image_transforms = Compose( [ Resize(image_size, interpolation=Image.BICUBIC), CenterCrop(image_size), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), ] ) original_pixel_values = image_transforms(image).unsqueeze(0) pixel_values = processor(images=image, return_tensors="pt").pixel_values assert torch.allclose(pixel_values, original_pixel_values) input_ids = torch.tensor([[101]]) outputs = model(input_ids, pixel_values=pixel_values) logits = outputs.logits print("Logits:", logits[0, -1, :3]) if model_name == "git-base": expected_slice_logits = torch.tensor([-1.2832, -1.2835, -1.2840]) elif model_name == "git-base-coco": expected_slice_logits = torch.tensor([-0.9925, -0.9930, -0.9935]) elif model_name == "git-base-textcaps": expected_slice_logits = torch.tensor([-1.2980, -1.2983, -1.2985]) elif model_name == "git-base-vqav2": expected_slice_logits = torch.tensor([-0.8570, -0.8568, -0.8561]) elif model_name == "git-base-textvqa": expected_slice_logits = torch.tensor([-1.4085, -1.4083, -1.4082]) elif model_name == "git-base-vatex": expected_slice_logits = torch.tensor([-1.3451, -1.3447, -1.3447]) elif model_name == "git-base-msrvtt-qa": expected_slice_logits = torch.tensor([-0.8554, -0.8550, -0.8540]) elif model_name == "git-large": expected_slice_logits = torch.tensor([-1.1708, -1.1707, -1.1705]) elif model_name == "git-large-coco": expected_slice_logits = torch.tensor([-1.0425, -1.0423, -1.0422]) elif model_name == "git-large-textcaps": expected_slice_logits = torch.tensor([-1.2705, -1.2708, -1.2706]) elif model_name == "git-large-vqav2": expected_slice_logits = torch.tensor([-0.7042, -0.7043, -0.7043]) elif model_name == "git-large-textvqa": expected_slice_logits = torch.tensor([-0.8590, -0.8592, -0.8590]) elif model_name == "git-large-vatex": expected_slice_logits = torch.tensor([-1.0113, -1.0114, -1.0113]) elif model_name == "git-large-msrvtt-qa": expected_slice_logits = torch.tensor([0.0130, 0.0134, 0.0131]) elif model_name == "git-large-r": expected_slice_logits = torch.tensor([-1.1283, -1.1285, -1.1286]) elif model_name == "git-large-r-coco": expected_slice_logits = torch.tensor([-0.9641, -0.9641, -0.9641]) elif model_name == "git-large-r-textcaps": expected_slice_logits = torch.tensor([-1.1121, 
-1.1120, -1.1124]) assert torch.allclose(logits[0, -1, :3], expected_slice_logits, atol=1e-4) print("Looks ok!") prompt = "" if "textvqa" in model_name: prompt = "what does the front of the bus say at the top?" elif "msrvtt-qa" in model_name: prompt = "what does the woman eat?" elif "vqa" in model_name: prompt = "what are the cats doing?" input_ids = tokenizer(prompt, add_special_tokens=False).input_ids input_ids = [processor.tokenizer.cls_token_id] + input_ids input_ids = torch.tensor(input_ids).unsqueeze(0) print("Generating caption...") generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50) print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True)) if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model and processor of {model_name} to the hub...") model.push_to_hub(f"microsoft/{model_name}") processor.push_to_hub(f"microsoft/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="git-base", type=str, help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the hub.", ) args = parser.parse_args() convert_git_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/git/convert_git_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/git/convert_git_to_pytorch.py", "repo_id": "transformers", "token_count": 9854 }
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import gc import glob import os from typing import List, Optional import regex as re import torch from huggingface_hub import snapshot_download from safetensors import safe_open from transformers import ( GotOcr2Config, GotOcr2ForConditionalGeneration, GotOcr2ImageProcessor, GotOcr2Processor, PreTrainedTokenizerFast, is_vision_available, ) from transformers.convert_slow_tokenizer import TikTokenConverter from transformers.tokenization_utils import AddedToken if is_vision_available(): from transformers.image_utils import load_image # fmt: off ORIGINAL_TO_CONVERTED_KEY_MAPPING = { # Vision encoder mapping r"model.vision_tower_high.pos_embed": r"vision_tower.pos_embed", r"model.vision_tower_high.patch_embed.proj": r"vision_tower.patch_embed.projection", r"model.vision_tower_high.blocks.(\d+).norm": r"vision_tower.layers.\1.layer_norm", r"model.vision_tower_high.blocks.(\d+).attn": r"vision_tower.layers.\1.attn", r"model.vision_tower_high.blocks.(\d+).mlp": r"vision_tower.layers.\1.mlp", r"model.vision_tower_high.neck.0": r"vision_tower.neck.conv1", r"model.vision_tower_high.neck.1": r"vision_tower.neck.layer_norm1", r"model.vision_tower_high.neck.2": r"vision_tower.neck.conv2", r"model.vision_tower_high.neck.3": r"vision_tower.neck.layer_norm2", r"model.vision_tower_high.net_(\d+)": lambda m: f"multi_modal_projector.conv_upsampler{int(m.group(1)) - 1}", r"model.mm_projector_vary" : r"multi_modal_projector.multimodal_projector", r"model.": r"language_model.model.", r"lm_head": r"language_model.lm_head", } # fmt: on CONTEXT_LENGTH = 8000 def convert_old_keys_to_new_keys(state_dict_keys: dict = None): """ This function should be applied only once, on the concatenated keys to efficiently rename using the key mappings. 
""" output_dict = {} if state_dict_keys is not None: old_text = "\n".join(state_dict_keys) new_text = old_text for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items(): new_text = re.sub(pattern, replacement, new_text) output_dict = dict(zip(old_text.split("\n"), new_text.split("\n"))) return output_dict def load_original_state_dict(model_id): directory_path = snapshot_download(repo_id=model_id, allow_patterns=["*.safetensors"]) original_state_dict = {} for path in glob.glob(f"{directory_path}/*"): if path.endswith(".safetensors"): with safe_open(path, framework="pt", device="cpu") as f: for key in f.keys(): original_state_dict[key] = f.get_tensor(key) return original_state_dict def get_got_ocr2_config(): config = GotOcr2Config() return config def write_model( model_path, input_base_path, push_to_hub=False, ): os.makedirs(model_path, exist_ok=True) config = get_got_ocr2_config() config.architectures = ["GotOcr2ForConditionalGeneration"] config.save_pretrained(model_path) print("Model config saved successfully...") # ------------------------------------------------------------ # Convert weights # ------------------------------------------------------------ print(f"Fetching all parameters from the checkpoint at {input_base_path}...") state_dict_old = load_original_state_dict(input_base_path) print("Converting model...") all_keys = list(state_dict_old.keys()) new_keys = convert_old_keys_to_new_keys(all_keys) state_dict = {} for key in all_keys: new_key = new_keys[key] state_dict[new_key] = state_dict_old[key] del state_dict_old gc.collect() print("Loading the checkpoint in a GotOcr2ForConditionalGeneration model.") model = GotOcr2ForConditionalGeneration(config) missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) model = model.to(torch.bfloat16) print("model dtype:", model.dtype) print("Missing keys:", missing_keys) print("Unexpected keys:", unexpected_keys) print("Saving the model.") model.save_pretrained(model_path) if push_to_hub: model.push_to_hub("stepfun-ai/GOT-OCR-2.0-hf", use_temp_dir=True) del state_dict, model # Safety check: reload the converted model gc.collect() print("Reloading the model to check if it's saved correctly.") model = GotOcr2ForConditionalGeneration.from_pretrained(model_path, device_map="auto") processor = GotOcr2Processor.from_pretrained(model_path) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/image_ocr.jpg" ) inputs = processor(image, return_tensors="pt", format=True).to(model.device, dtype=model.dtype) generate_ids = model.generate(**inputs, do_sample=False, num_beams=1, max_new_tokens=4) decoded_output = processor.decode(generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True) expected_output = "\\title{\nR" print("Decoded output:", decoded_output) assert decoded_output == expected_output print("Model reloaded successfully.") del model class GotOcr2Converter(TikTokenConverter): def __init__( self, vocab_file, special_tokens: List[str], pattern: str, model_max_length: int, chat_template: Optional[str] = None, **kwargs, ): super().__init__(vocab_file, pattern=pattern) self.additional_special_tokens = special_tokens tokenizer = self.converted() if chat_template is not None: kwargs["chat_template"] = chat_template self.tokenizer = PreTrainedTokenizerFast( tokenizer_object=tokenizer, model_input_names=["input_ids", "attention_mask"], model_max_length=model_max_length, **kwargs, ) def write_tokenizer(tokenizer_path: str, save_dir: str, push_to_hub: 
bool = False):
    model_max_length = CONTEXT_LENGTH
    pattern = r"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"  # noqa: W605

    # Special tokens
    special_tokens = (
        ["<|endoftext|>", "<|im_start|>", "<|im_end|>"]
        + [f"<|extra_{i}|>" for i in range(205)]
        + [
            "<ref>",
            "</ref>",
            "<box>",
            "</box>",
            "<quad>",
            "</quad>",
            "<img>",
            "</img>",
            "<imgpad>",
        ]
    )

    pad_token = "<|endoftext|>"
    pad_token = AddedToken(pad_token, lstrip=False, rstrip=False, normalized=False, single_word=False)

    converter = GotOcr2Converter(
        vocab_file=tokenizer_path,
        pattern=pattern,
        special_tokens=special_tokens,
        model_max_length=model_max_length,
        pad_token=pad_token,
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        clean_up_tokenization_spaces=True,
    )
    tokenizer = converter.tokenizer
    tokenizer.save_pretrained(save_dir)

    if push_to_hub:
        tokenizer.push_to_hub("stepfun-ai/GOT-OCR-2.0-hf", use_temp_dir=True)


def write_image_processor(save_dir: str, push_to_hub: bool = False):
    image_processor = GotOcr2ImageProcessor(
        do_resize=True,
        size={"height": 1024, "width": 1024},
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
    )
    image_processor.save_pretrained(save_dir)

    if push_to_hub:
        image_processor.push_to_hub("stepfun-ai/GOT-OCR-2.0-hf", use_temp_dir=True)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        default="stepfun-ai/GOT-OCR2_0",
        help="Location of the original GOT-OCR2.0 weights, given as a Hub repo id or a local checkpoint directory",
    )
    parser.add_argument(
        "--output_dir",
        default="GotOcr2",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()

    write_tokenizer(
        tokenizer_path="qwen.tiktoken",
        save_dir=args.output_dir,
        push_to_hub=args.push_to_hub,
    )

    write_image_processor(
        save_dir=args.output_dir,
        push_to_hub=args.push_to_hub,
    )
    write_model(
        model_path=args.output_dir,
        input_base_path=args.input_dir,
        push_to_hub=args.push_to_hub,
    )


if __name__ == "__main__":
    main()
transformers/src/transformers/models/got_ocr2/convert_got_ocr2_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/got_ocr2/convert_got_ocr2_weights_to_hf.py", "repo_id": "transformers", "token_count": 4291 }
# coding=utf-8
# Copyright 2023 The BigCode team and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPTBigCode configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class GPTBigCodeConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`GPTBigCodeModel`]. It is used to instantiate a
    GPTBigCode model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the GPTBigCode
    [gpt_bigcode](https://huggingface.co/gpt_bigcode) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50257):
            Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`GPTBigCodeModel`].
        n_positions (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_embd (`int`, *optional*, defaults to 768):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_inner (`int`, *optional*, defaults to None):
            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd.
        activation_function (`str`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new",
            "gelu_pytorch_tanh"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_attn_weights (`bool`, *optional*, defaults to `True`):
            Scale attention weights by dividing by sqrt(hidden_size).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
            Whether to call the fused softmax in float32.
        scale_attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
            Whether to scale the attention softmax in float32.
        multi_query (`bool`, *optional*, defaults to `True`):
            Whether to use Multi-Query Attention (`True`) or Multi-Head Attention (`False`).

    Example:

    ```python
    >>> from transformers import GPTBigCodeConfig, GPTBigCodeModel

    >>> # Initializing a GPTBigCode configuration
    >>> configuration = GPTBigCodeConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = GPTBigCodeModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gpt_bigcode"

    keys_to_ignore_at_inference = ["past_key_values"]

    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)


__all__ = ["GPTBigCodeConfig"]
transformers/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py/0
{ "file_path": "transformers/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py", "repo_id": "transformers", "token_count": 2468 }
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Grounding DINO model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import verify_backbone_config_arguments
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class GroundingDinoConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GroundingDinoModel`]. It is used to instantiate
    a Grounding DINO model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Grounding DINO
    [IDEA-Research/grounding-dino-tiny](https://huggingface.co/IDEA-Research/grounding-dino-tiny) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `SwinConfig()`):
            The configuration of the backbone model.
        backbone (`str`, *optional*):
            Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
            will load the corresponding pretrained weights from the timm or transformers library. If
            `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the
            backbone with random weights.
        use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
            Whether to use pretrained weights for the backbone.
        use_timm_backbone (`bool`, *optional*, defaults to `False`):
            Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
            library.
        backbone_kwargs (`dict`, *optional*):
            Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
            e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `BertConfig`):
            The config object or dictionary of the text backbone.
        num_queries (`int`, *optional*, defaults to 900):
            Number of object queries, i.e. detection slots. This is the maximal number of objects
            [`GroundingDinoModel`] can detect in a single image.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        encoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in encoder.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        decoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether the model is used as an encoder/decoder or not. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. d_model (`int`, *optional*, defaults to 256): Dimension of the layers. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. num_feature_levels (`int`, *optional*, defaults to 4): The number of input feature levels. encoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the encoder. decoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the decoder. two_stage (`bool`, *optional*, defaults to `True`): Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of Grounding DINO, which are further fed into the decoder for iterative bounding box refinement. class_cost (`float`, *optional*, defaults to 1.0): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5.0): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2.0): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. bbox_loss_coefficient (`float`, *optional*, defaults to 5.0): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2.0): Relative weight of the generalized IoU loss in the object detection loss. focal_alpha (`float`, *optional*, defaults to 0.25): Alpha parameter in the focal loss. disable_custom_kernels (`bool`, *optional*, defaults to `False`): Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom kernels are not supported by PyTorch ONNX export. max_text_len (`int`, *optional*, defaults to 256): The maximum length of the text input. text_enhancer_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the text enhancer. fusion_droppath (`float`, *optional*, defaults to 0.1): The droppath ratio for the fusion module. fusion_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the fusion module. embedding_init_target (`bool`, *optional*, defaults to `True`): Whether to initialize the target with Embedding weights. query_dim (`int`, *optional*, defaults to 4): The dimension of the query vector. decoder_bbox_embed_share (`bool`, *optional*, defaults to `True`): Whether to share the bbox regression head for all decoder layers. 
two_stage_bbox_embed_share (`bool`, *optional*, defaults to `False`): Whether to share the bbox embedding between the two-stage bbox generator and the region proposal generation. positional_embedding_temperature (`float`, *optional*, defaults to 20): The temperature for Sine Positional Embedding that is used together with vision backbone. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. Examples: ```python >>> from transformers import GroundingDinoConfig, GroundingDinoModel >>> # Initializing a Grounding DINO IDEA-Research/grounding-dino-tiny style configuration >>> configuration = GroundingDinoConfig() >>> # Initializing a model (with random weights) from the IDEA-Research/grounding-dino-tiny style configuration >>> model = GroundingDinoModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "grounding-dino" attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, text_config=None, num_queries=900, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=True, class_cost=1.0, bbox_cost=5.0, giou_cost=2.0, bbox_loss_coefficient=5.0, giou_loss_coefficient=2.0, focal_alpha=0.25, disable_custom_kernels=False, # other parameters max_text_len=256, text_enhancer_dropout=0.0, fusion_droppath=0.1, fusion_dropout=0.0, embedding_init_target=True, query_dim=4, decoder_bbox_embed_share=True, two_stage_bbox_embed_share=False, positional_embedding_temperature=20, init_std=0.02, layer_norm_eps=1e-5, **kwargs, ): if backbone_config is None and backbone is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.") backbone_config = CONFIG_MAPPING["swin"]( window_size=7, image_size=224, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], out_indices=[2, 3, 4], ) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.pop("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments( use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs, ) if text_config is None: text_config = {} logger.info("text_config is None. 
Initializing the text config with default values (`BertConfig`).") self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs self.num_queries = num_queries self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type # deformable attributes self.num_feature_levels = num_feature_levels self.encoder_n_points = encoder_n_points self.decoder_n_points = decoder_n_points self.two_stage = two_stage # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.focal_alpha = focal_alpha self.disable_custom_kernels = disable_custom_kernels # Text backbone if isinstance(text_config, dict): text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "bert" text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["bert"]() self.text_config = text_config self.max_text_len = max_text_len # Text Enhancer self.text_enhancer_dropout = text_enhancer_dropout # Fusion self.fusion_droppath = fusion_droppath self.fusion_dropout = fusion_dropout # Others self.embedding_init_target = embedding_init_target self.query_dim = query_dim self.decoder_bbox_embed_share = decoder_bbox_embed_share self.two_stage_bbox_embed_share = two_stage_bbox_embed_share if two_stage_bbox_embed_share and not decoder_bbox_embed_share: raise ValueError("If two_stage_bbox_embed_share is True, decoder_bbox_embed_share must be True.") self.positional_embedding_temperature = positional_embedding_temperature self.init_std = init_std self.layer_norm_eps = layer_norm_eps super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model __all__ = ["GroundingDinoConfig"]
transformers/src/transformers/models/grounding_dino/configuration_grounding_dino.py/0
{ "file_path": "transformers/src/transformers/models/grounding_dino/configuration_grounding_dino.py", "repo_id": "transformers", "token_count": 5839 }
# coding=utf-8
# Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "Fast" BPE tokenizer for HerBERT (backed by HuggingFace's *tokenizers* library).

    Peculiarities:

    - uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of
      a punctuation character will be treated separately.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the methods. Users should refer to
    the superclass for more information regarding methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A HerBERT sequence, like a BERT sequence, has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """

        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A HerBERT
        sequence-pair mask, like BERT's, has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)


__all__ = ["HerbertTokenizerFast"]
transformers/src/transformers/models/herbert/tokenization_herbert_fast.py/0
{ "file_path": "transformers/src/transformers/models/herbert/tokenization_herbert_fast.py", "repo_id": "transformers", "token_count": 2528 }
# coding=utf-8
# Copyright 2021 The OpenAI Team Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI ImageGPT model."""

import math
import os
import warnings
from typing import Any, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.cuda.amp import autocast
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...generation import GenerationMixin
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    SequenceClassifierOutputWithPast,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from ...utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
    torch_float,
)
from .configuration_imagegpt import ImageGPTConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "openai/imagegpt-small"
_CONFIG_FOR_DOC = "ImageGPTConfig"


def load_tf_weights_in_imagegpt(model, config, imagegpt_checkpoint_path):
    """
    Load tf checkpoints in a pytorch model
    """
    try:
        import re

        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    tf_path = os.path.abspath(imagegpt_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []

    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())

    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split("/")

        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ) or name[-1] in ["_step"]:
            logger.info("Skipping {}".format("/".join(name)))
            continue

        pointer = model
        if name[-1] not in ["wtet"]:
            pointer = getattr(pointer, "transformer")

        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+\d+", m_name):
                scope_names = re.split(r"(\d+)", m_name)
            else:
                scope_names = [m_name]

            if scope_names[0] == "w" or scope_names[0] == "g":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "b":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "wpe" or scope_names[0] == "wte":
                pointer = getattr(pointer, scope_names[0])
                pointer = getattr(pointer, "weight")
            elif scope_names[0] in ["q_proj", "k_proj", "v_proj"]:
                pointer = getattr(pointer, "c_attn")
                pointer = getattr(pointer, "weight")
            elif len(name) == 3 and name[1] == "attn" and scope_names[0] == "c_proj":
                pointer = getattr(pointer, scope_names[0])
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "wtet":
                pointer = getattr(pointer, "lm_head")
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "sos":
                pointer = getattr(pointer, "wte")
                pointer = getattr(pointer, "weight")
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]

        if len(name) > 1 and name[1] == "attn" or name[-1] == "wtet" or name[-1] == "sos" or name[-1] == "wte":
            pass  # array is used to initialize only part of the pointer so sizes won't match
        else:
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise

        logger.info("Initialize PyTorch weight {}".format(name))

        if name[-1] == "q_proj":
            pointer.data[:, : config.n_embd] = torch.from_numpy(array.reshape(config.n_embd, config.n_embd)).T
        elif name[-1] == "k_proj":
            pointer.data[:, config.n_embd : 2 * config.n_embd] = torch.from_numpy(
                array.reshape(config.n_embd, config.n_embd)
            ).T
        elif name[-1] == "v_proj":
            pointer.data[:, 2 * config.n_embd :] = torch.from_numpy(array.reshape(config.n_embd, config.n_embd)).T
        elif len(name) == 3 and name[1] == "attn" and name[2] == "c_proj":
            pointer.data = torch.from_numpy(array.reshape(config.n_embd, config.n_embd))
        elif name[-1] == "wtet":
            pointer.data = torch.from_numpy(array)
        elif name[-1] == "wte":
            pointer.data[: config.vocab_size - 1, :] = torch.from_numpy(array)
        elif name[-1] == "sos":
            pointer.data[-1] = torch.from_numpy(array)
        else:
            pointer.data = torch.from_numpy(array)

    return model


class ImageGPTLayerNorm(nn.Module):
    def __init__(self, hidden_size: Tuple[int], eps: float = 1e-5):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.Tensor(hidden_size))

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        # input is not mean centered
        return (
            tensor
            / torch.sqrt(torch.mean(torch.square(tensor), axis=-1, keepdim=True) + self.eps)
            * 
self.weight.data[..., :] ) class ImageGPTAttention(nn.Module): def __init__(self, config, is_cross_attention: Optional[bool] = False, layer_idx: Optional[int] = None): super().__init__() max_positions = config.max_position_embeddings self.register_buffer( "bias", torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view( 1, 1, max_positions, max_positions ), persistent=False, ) self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False) self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.split_size = self.embed_dim if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale_attn_weights = config.scale_attn_weights self.is_cross_attention = is_cross_attention # Layer-wise attention scaling, reordering, and upcasting self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx self.layer_idx = layer_idx self.reorder_and_upcast_attn = config.reorder_and_upcast_attn if self.is_cross_attention: self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim) self.q_attn = Conv1D(self.embed_dim, self.embed_dim) else: self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim) self.c_proj = Conv1D(self.embed_dim, self.embed_dim) self.attn_dropout = nn.Dropout(config.attn_pdrop) self.resid_dropout = nn.Dropout(config.resid_pdrop) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads) index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)]) # Prune conv1d layers self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) # Update hyper params self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads)) self.num_heads = self.num_heads - len(heads) self.pruned_heads = self.pruned_heads.union(heads) def _attn(self, query, key, value, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / torch_float(value.size(-1) ** 0.5) # Layer-wise attention scaling if self.scale_attn_by_inverse_layer_idx: attn_weights = attn_weights / float(self.layer_idx + 1) if not self.is_cross_attention: # if only "normal" attention layer implements causal mask query_length, key_length = query.size(-2), key.size(-2) causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length] mask_value = torch.finfo(attn_weights.dtype).min # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. 
# Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device) attn_weights = torch.where(causal_mask, attn_weights, mask_value) if attention_mask is not None: # Apply the attention mask attn_weights = attn_weights + attention_mask attn_weights = nn.Softmax(dim=-1)(attn_weights) # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return attn_output, attn_weights def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None): # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM) bsz, num_heads, q_seq_len, dk = query.size() _, _, k_seq_len, _ = key.size() # Preallocate attn_weights for `baddbmm` attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device) # Compute Scale Factor scale_factor = 1.0 if self.scale_attn_weights: scale_factor /= float(value.size(-1)) ** 0.5 if self.scale_attn_by_inverse_layer_idx: scale_factor /= float(self.layer_idx + 1) # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk)) with autocast(enabled=False): q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len) attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor) attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) if not self.is_cross_attention: # if only "normal" attention layer implements causal mask query_length, key_length = query.size(-2), key.size(-2) causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length] mask_value = torch.finfo(attn_weights.dtype).min # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. 
# Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.Softmax(dim=-1)(attn_weights)

        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
        if attn_weights.dtype != torch.float32:
            raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def _split_heads(self, tensor, num_heads, attn_head_size):
        """
        Splits hidden_size dim into attn_head_size and num_heads
        """
        new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
        tensor = tensor.view(*new_shape)
        return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def _merge_heads(self, tensor, num_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden_size
        """
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
        return tensor.view(new_shape)

    def forward(
        self,
        hidden_states: torch.Tensor,
        layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> tuple:
        if encoder_hidden_states is not None:
            if not hasattr(self, "q_attn"):
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. "
                    "Please make sure to instantiate class with `ImageGPTAttention(..., is_cross_attention=True)`."
) query = self.q_attn(hidden_states) key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) attention_mask = encoder_attention_mask else: query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if layer_past is not None: past_key, past_value = layer_past key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) if use_cache is True: present = (key, value) else: present = None if self.reorder_and_upcast_attn: attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask) else: attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs # a, present, (attentions) class ImageGPTMLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.hidden_size self.c_fc = Conv1D(intermediate_size, embed_dim) self.c_proj = Conv1D(embed_dim, intermediate_size) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class ImageGPTBlock(nn.Module): def __init__(self, config, layer_idx=None): super().__init__() hidden_size = config.hidden_size inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size self.ln_1 = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = ImageGPTAttention(config, layer_idx=layer_idx) self.ln_2 = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon) if config.add_cross_attention: self.crossattention = ImageGPTAttention(config, is_cross_attention=True, layer_idx=layer_idx) self.ln_cross_attn = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = ImageGPTMLP(inner_dim, config) def forward( self, hidden_states: torch.Tensor, layer_past: Optional[bool] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ) -> tuple: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn( hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attn_output = attn_outputs[0] # output_attn: a, present, (attentions) outputs = attn_outputs[1:] # residual connection hidden_states = attn_output + residual if encoder_hidden_states is not None: # add one self-attention block for cross-attention if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " "cross-attention layers by setting `config.add_cross_attention=True`" ) residual = hidden_states hidden_states = self.ln_cross_attn(hidden_states) cross_attn_outputs = 
self.crossattention( hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) attn_output = cross_attn_outputs[0] # residual connection hidden_states = residual + attn_output outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) # residual connection hidden_states = residual + feed_forward_hidden_states outputs = (hidden_states,) + (outputs if use_cache else outputs[1:]) return outputs # hidden_states, present, (attentions, cross_attentions) class ImageGPTPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ImageGPTConfig load_tf_weights = load_tf_weights_in_imagegpt base_model_prefix = "transformer" main_input_name = "input_ids" supports_gradient_checkpointing = True _no_split_modules = ["ImageGPTBlock"] def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear, Conv1D)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, ImageGPTLayerNorm): module.weight.data.fill_(1.0) # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. # > -- GPT-2 :: https://openai.com/blog/better-language-models/ # # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py for name, p in module.named_parameters(): if "c_proj" in name and "weight" in name: # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer))) IMAGEGPT_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ImageGPTConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
""" IMAGEGPT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details. past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`): Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have their past given to this model should not be passed as `input_ids` as they have already been computed. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see `past_key_values`). use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare ImageGPT Model transformer outputting raw hidden-states without any specific head on top.", IMAGEGPT_START_DOCSTRING, ) class ImageGPTModel(ImageGPTPreTrainedModel): def __init__(self, config: ImageGPTConfig): super().__init__(config) self.embed_dim = config.hidden_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([ImageGPTBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.ln_f = ImageGPTLayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ for layer, heads in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) @add_start_docstrings_to_model_forward(IMAGEGPT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs: Any, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoImageProcessor, ImageGPTModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small") >>> model = ImageGPTModel.from_pretrained("openai/imagegpt-small") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" if "pixel_values" in kwargs: warnings.warn( "The `pixel_values` argument is deprecated and will be removed in v4.47, use `input_ids` instead.", FutureWarning, ) if input_ids is not None: raise ValueError( "You cannot pass both `pixel_values` and `input_ids`. Please make sure to only pass `input_ids`." 
) input_ids = kwargs.pop("pixel_values") output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.h)) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) # ImageGPTAttention mask. if attention_mask is not None: if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") attention_mask = attention_mask.view(batch_size, -1) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask = attention_mask[:, None, None, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
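            # Concretely: a mask row of [1, 1, 0] is turned below into
            # [0.0, 0.0, torch.finfo(self.dtype).min], and adding that huge negative value
            # to the raw scores drives the masked position's softmax weight to (numerically) zero.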
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.add_cross_attention and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # head_mask has shape n_layer x batch x n_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.n_layer) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds if token_type_ids is not None: token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = input_shape + (hidden_states.size(-1),) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): # Model parallel if self.model_parallel: torch.cuda.set_device(hidden_states.device) # Ensure layer_past is on same device as hidden_states (might not be correct) if layer_past is not None: layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past) # Ensure that attention_mask is always on the same device as hidden_states if attention_mask is not None: attention_mask = attention_mask.to(hidden_states.device) if isinstance(head_mask, torch.Tensor): head_mask = head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func( block.__call__, hidden_states, None, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, ) else: outputs = block( hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) # Model Parallel: If it's the last layer for that device, put things on the next device if self.model_parallel: for k, v in self.device_map.items(): if i == v[-1] and "cuda:" + str(k) != self.last_device: hidden_states = hidden_states.to("cuda:" + str(k + 1)) hidden_states 
= self.ln_f(hidden_states) hidden_states = hidden_states.view(*output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) @add_start_docstrings( """ The ImageGPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, IMAGEGPT_START_DOCSTRING, ) class ImageGPTForCausalImageModeling(ImageGPTPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: ImageGPTConfig): super().__init__(config) self.transformer = ImageGPTModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size - 1, bias=False) # Model parallel self.model_parallel = False self.device_map = None # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(IMAGEGPT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs: Any, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoImageProcessor, ImageGPTForCausalImageModeling >>> import torch >>> import matplotlib.pyplot as plt >>> import numpy as np >>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small") >>> model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small") >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") >>> model.to(device) # doctest: +IGNORE_RESULT >>> # unconditional generation of 8 images >>> batch_size = 4 >>> context = torch.full((batch_size, 1), model.config.vocab_size - 1) # initialize with SOS token >>> context = context.to(device) >>> output = model.generate( ... input_ids=context, max_length=model.config.n_positions + 1, temperature=1.0, do_sample=True, top_k=40 ... 
) >>> clusters = image_processor.clusters >>> height = image_processor.size["height"] >>> width = image_processor.size["width"] >>> samples = output[:, 1:].cpu().detach().numpy() >>> samples_img = [ ... np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [height, width, 3]).astype(np.uint8) for s in samples ... ] # convert color cluster tokens back to pixels >>> f, axes = plt.subplots(1, batch_size, dpi=300) >>> for img, ax in zip(samples_img, axes): # doctest: +IGNORE_RESULT ... ax.axis("off") ... ax.imshow(img) ```""" if "pixel_values" in kwargs: warnings.warn( "The `pixel_values` argument is deprecated and will be removed in v4.47, use `input_ids` instead.", FutureWarning, ) if input_ids is not None: raise ValueError( "You cannot pass both `pixel_values` and `input_ids`. Please make sure to only pass `input_ids`." ) input_ids = kwargs.pop("pixel_values") return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions, ) @staticmethod def _reorder_cache( past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor ) -> Tuple[Tuple[torch.Tensor]]: """ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct beam_idx at every generation step. """ return tuple( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) for layer_past in past_key_values ) @add_start_docstrings( """ The ImageGPT Model transformer with an image classification head on top (linear layer). [`ImageGPTForImageClassification`] average-pools the hidden states in order to do the classification. 
""", IMAGEGPT_START_DOCSTRING, ) class ImageGPTForImageClassification(ImageGPTPreTrainedModel): def __init__(self, config: ImageGPTConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = ImageGPTModel(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(IMAGEGPT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs: Any, ) -> Union[Tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Examples: ```python >>> from transformers import AutoImageProcessor, ImageGPTForImageClassification >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small") >>> model = ImageGPTForImageClassification.from_pretrained("openai/imagegpt-small") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits ```""" if "pixel_values" in kwargs: warnings.warn( "The `pixel_values` argument is deprecated and will be removed in v4.47, use `input_ids` instead.", FutureWarning, ) if input_ids is not None: raise ValueError( "You cannot pass both `pixel_values` and `input_ids`. Please make sure to only pass `input_ids`." 
) input_ids = kwargs.pop("pixel_values") return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] # average-pool the hidden states along the sequence dimension pooled_hidden_states = hidden_states.mean(dim=1) # project from (batch_size, hidden_size) to (batch_size, num_labels) logits = self.score(pooled_hidden_states) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) __all__ = [ "ImageGPTForCausalImageModeling", "ImageGPTForImageClassification", "ImageGPTModel", "ImageGPTPreTrainedModel", "load_tf_weights_in_imagegpt", ]
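if __name__ == "__main__":
    # Minimal sanity-check sketch (illustrative, not part of the upstream module): it verifies
    # that `ImageGPTLayerNorm` above is an RMS-style norm -- the input is rescaled by its root
    # mean square along the feature axis, with no mean-centering and no bias term, unlike
    # nn.LayerNorm. The shapes below are arbitrary.
    torch.manual_seed(0)
    ln = ImageGPTLayerNorm(hidden_size=8)
    # The raw parameter is uninitialized until `_init_weights` runs, so set it explicitly here.
    ln.weight.data.fill_(1.0)
    x = torch.randn(2, 4, 8)
    out = ln(x)
    rms = torch.sqrt(torch.mean(x**2, dim=-1, keepdim=True) + ln.eps)
    assert torch.allclose(out, x / rms)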
transformers/src/transformers/models/imagegpt/modeling_imagegpt.py/0
{ "file_path": "transformers/src/transformers/models/imagegpt/modeling_imagegpt.py", "repo_id": "transformers", "token_count": 22929 }
# coding=utf-8 # Copyright The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for LayoutLMv3. Same as LayoutLMv2, but RoBERTa-like BPE tokenization instead of WordPiece.""" import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import ( BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy, ) from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (`int`, *optional*, defaults to 0): If set to a number along with `max_length`, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. """ LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. 
stride (`int`, *optional*, defaults to 0): If set to a number along with `max_length`, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. """ @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def bytes_to_unicode(): """ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. """ bs = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) # Copied from transformers.models.roberta.tokenization_roberta.get_pairs def get_pairs(word): """ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class LayoutLMv3Tokenizer(PreTrainedTokenizer): r""" Construct a LayoutLMv3 tokenizer. Based on [`RoBERTatokenizer`] (Byte Pair Encoding or BPE). [`LayoutLMv3Tokenizer`] can be used to turn words, word-level bounding boxes and optional word labels to token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`, and optional `labels` (for token classification). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. [`LayoutLMv3Tokenizer`] runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the word-level bounding boxes into token-level bounding boxes. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. 
</Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `True`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (RoBERTa tokenizer detect beginning of words by the preceding space). cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [CLS] token. sep_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [SEP] token. pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [PAD] token. pad_token_label (`int`, *optional*, defaults to -100): The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask", "bbox"] def __init__( self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=True, cls_token_box=[0, 0, 0, 0], sep_token_box=[0, 0, 0, 0], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, **kwargs, ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: bpe_merges = merges_handle.read().split("\n")[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") # additional properties self.cls_token_box = cls_token_box self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword super().__init__( errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, **kwargs, ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size def vocab_size(self): return len(self.encoder) # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_vocab def get_vocab(self): vocab = dict(self.encoder).copy() vocab.update(self.added_tokens_encoder) return vocab # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.bpe def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._tokenize def _tokenize(self, text): """Tokenize a string.""" bpe_tokens = [] for token in re.findall(self.pat, text): token = "".join( self.byte_encoder[b] for b in token.encode("utf-8") ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) return bpe_tokens # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_token_to_id def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) # Copied from 
transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_id_to_token def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.convert_tokens_to_string def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" text = "".join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) return text # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.save_vocabulary def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.build_inputs_with_special_tokens def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.create_token_type_ids_from_sequences def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) # If the text starts with a token that should not be split, no space is added before the text in any case. # It's necessary to match the fast tokenization if ( (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()) and sum([text.startswith(no_split_token) for no_split_token in self.added_tokens_encoder]) == 0 ): text = " " + text return (text, kwargs) @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.__call__ def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`List[List[int]]`, `List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. 
word_labels (`List[int]`, `List[List[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). """ # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # Lists are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: # in case text + text_pair are provided, text = questions, text_pair = words if not _is_valid_text_input(text): raise ValueError("text input must be of type `str` (single example) or `List[str]` (batch of examples).") if not isinstance(text_pair, (list, tuple)): raise ValueError( "Words must be of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) else: # in case only text is provided => must be words if not isinstance(text, (list, tuple)): raise ValueError( "Words must be of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError("You must provide corresponding bounding boxes") if is_batched: if len(words) != len(boxes): raise ValueError("You must provide words and boxes for an equal amount of examples") for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError("You must provide as many words as there are bounding boxes") else: if len(words) != len(boxes): raise ValueError("You must provide as many words as there are bounding boxes") if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" f" {len(text_pair)}."
) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.batch_encode_plus def batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: Optional[bool] = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) # Copied from
transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._batch_encode_plus def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: Optional[bool] = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." ) batch_outputs = self._batch_prepare_for_model( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._batch_prepare_for_model def _batch_prepare_for_model( self, batch_text_or_text_pairs, is_pair: Optional[bool] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: """ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens.
Args: batch_text_or_text_pairs: list of texts or text pairs to prepare for the model """ batch_outputs = {} for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)): batch_text_or_text_pair, boxes_example = example outputs = self.prepare_for_model( batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, # we pad in batch afterward padding_side=None, # we pad in batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING) # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.encode def encode( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> List[int]: encoded_inputs = self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) return encoded_inputs["input_ids"] @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.encode_plus def encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens:
bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated, `__call__` should be used instead. Args: text (`str`, `List[str]`, `List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus( text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._encode_plus def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast.
" "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) return self.prepare_for_model( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def prepare_for_model( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs, ) -> BatchEncoding: """ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for *text_pair* different than `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error. Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into token-level `labels`. The word label is used for the first token of the word, while remaining tokens are labeled with -100, such that they will be ignored by the loss function. Args: text (`str`, `List[str]`, `List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) tokens = [] pair_tokens = [] token_boxes = [] pair_token_boxes = [] labels = [] if text_pair is None: if word_labels is None: # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference) for word, box in zip(text, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) else: # CASE 2: token classification (training) for word, box, label in zip(text, boxes, word_labels): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) if self.only_label_first_subword: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1)) else: labels.extend([label] * len(word_tokens)) else: # CASE 3: document visual question answering (inference) # text = question # text_pair = words tokens = self.tokenize(text) token_boxes = [self.pad_token_box for _ in range(len(tokens))] for word, box in zip(text_pair, boxes): if len(word) < 1: # skip empty words continue word_tokens = self.tokenize(word) pair_tokens.extend(word_tokens) pair_token_boxes.extend([box] * len(word_tokens)) # Create ids + pair_ids ids = self.convert_tokens_to_ids(tokens) pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None if ( return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is not None ): raise ValueError( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." ) # Compute the total size of the returned encodings pair = bool(pair_ids is not None) len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) = self.truncate_sequences( ids, token_boxes, pair_ids=pair_ids, pair_token_boxes=pair_token_boxes, labels=labels, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." 
) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes encoded_inputs["overflowing_labels"] = overflowing_labels encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box] if pair_token_boxes: pair_token_boxes = [self.sep_token_box] + pair_token_boxes + [self.sep_token_box] token_boxes = token_boxes + pair_token_boxes if pair else token_boxes if labels: labels = [self.pad_token_label] + labels + [self.pad_token_label] else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) token_boxes = token_boxes + pair_token_boxes if pair else token_boxes # Build output dictionary encoded_inputs["input_ids"] = sequence encoded_inputs["bbox"] = token_boxes if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) if labels: encoded_inputs["labels"] = labels # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.truncate_sequences def truncate_sequences( self, ids: List[int], token_boxes: List[List[int]], pair_ids: Optional[List[int]] = None, pair_token_boxes: Optional[List[List[int]]] = None, labels: Optional[List[int]] = None, num_tokens_to_remove: int = 0, truncation_strategy: Union[str, TruncationStrategy] = "longest_first", stride: int = 0, ) -> Tuple[List[int], List[int], List[int]]: """ Truncates a sequence pair in-place following the strategy. Args: ids (`List[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. token_boxes (`List[List[int]]`): Bounding boxes of the first sequence. pair_ids (`List[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_token_boxes (`List[List[int]]`, *optional*): Bounding boxes of the second sequence. labels (`List[int]`, *optional*): Labels of the first sequence (for token classification tasks). num_tokens_to_remove (`int`, *optional*, defaults to 0): Number of tokens to remove using the truncation strategy. 
truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`): The strategy to follow for truncation. Can be: - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). stride (`int`, *optional*, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of overflowing tokens. Note: The *longest_first* strategy returns an empty list of overflowing tokens if a pair of sequences (or a batch of pairs) is provided. """ if num_tokens_to_remove <= 0: return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], [] if not isinstance(truncation_strategy, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation_strategy) overflowing_tokens = [] overflowing_token_boxes = [] overflowing_labels = [] if truncation_strategy == TruncationStrategy.ONLY_FIRST or ( truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None ): if len(ids) > num_tokens_to_remove: window_len = min(len(ids), stride + num_tokens_to_remove) overflowing_tokens = ids[-window_len:] overflowing_token_boxes = token_boxes[-window_len:] overflowing_labels = labels[-window_len:] ids = ids[:-num_tokens_to_remove] token_boxes = token_boxes[:-num_tokens_to_remove] labels = labels[:-num_tokens_to_remove] else: error_msg = ( f"We need to remove {num_tokens_to_remove} tokens to truncate the input " f"but the first sequence has a length {len(ids)}. " ) if truncation_strategy == TruncationStrategy.ONLY_FIRST: error_msg = ( error_msg + "Please select another truncation strategy than " f"{truncation_strategy}, for instance 'longest_first' or 'only_second'." ) logger.error(error_msg) elif truncation_strategy == TruncationStrategy.LONGEST_FIRST: logger.warning( "Be aware, overflowing tokens are not returned for the setting you have chosen," f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' " "truncation strategy. So the returned list will always be empty even if some " "tokens have been removed."
) for _ in range(num_tokens_to_remove): if pair_ids is None or len(ids) > len(pair_ids): ids = ids[:-1] token_boxes = token_boxes[:-1] labels = labels[:-1] else: pair_ids = pair_ids[:-1] pair_token_boxes = pair_token_boxes[:-1] elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: if len(pair_ids) > num_tokens_to_remove: window_len = min(len(pair_ids), stride + num_tokens_to_remove) overflowing_tokens = pair_ids[-window_len:] overflowing_token_boxes = pair_token_boxes[-window_len:] pair_ids = pair_ids[:-num_tokens_to_remove] pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove] else: logger.error( f"We need to remove {num_tokens_to_remove} tokens to truncate the input " f"but the second sequence has a length {len(pair_ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " "for instance 'longest_first' or 'only_first'." ) return ( ids, token_boxes, pair_ids, pair_token_boxes, labels, overflowing_tokens, overflowing_token_boxes, overflowing_labels, ) # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._pad def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length - PaddingStrategy.DO_NOT_PAD: Do not pad (default) The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side: The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present.
if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) padding_side = padding_side if padding_side is not None else self.padding_side if padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "bbox" in encoded_inputs: encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "bbox" in encoded_inputs: encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"] if "labels" in encoded_inputs: encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(padding_side)) return encoded_inputs __all__ = ["LayoutLMv3Tokenizer"]
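# Illustrative usage sketch (added for this edit, not part of the upstream module).
# It assumes the publicly hosted "microsoft/layoutlmv3-base" checkpoint is reachable
# and shows how word-level boxes and labels flow through `__call__` into token-level
# `input_ids`, `bbox` and `labels`: with `only_label_first_subword`, only the first
# subword of each word keeps the word label, the rest receive `pad_token_label`.
if __name__ == "__main__":
    tokenizer = LayoutLMv3Tokenizer.from_pretrained("microsoft/layoutlmv3-base")
    words = ["weirdly", "world"]
    boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]  # normalized 0-1000 boxes
    encoding = tokenizer(words, boxes=boxes, word_labels=[1, 2])
    print(encoding["input_ids"])  # token ids including <s> ... </s>
    print(encoding["bbox"])  # one box per token, plus the special-token boxes
    print(encoding["labels"])  # word label on the first subword, -100 elsewhere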
transformers/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py/0
{ "file_path": "transformers/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py", "repo_id": "transformers", "token_count": 33019 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for LeViT.""" from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( get_resize_output_image_size, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, logging logger = logging.get_logger(__name__) class LevitImageProcessor(BaseImageProcessor): r""" Constructs a LeViT image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the shortest edge of the input to int(256/224 * `size`). Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`): Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will be resized to `(size["height"], size["width"])`. If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to `int(c * (256/224))`. The smaller edge of the image will be matched to this value i.e., if height > width, then image will be rescaled to `(size["shortest_edge"] * height / width, size["shortest_edge"])`. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether or not to center crop the input to `(crop_size["height"], crop_size["width"])`. Can be overridden by the `do_center_crop` parameter in the `preprocess` method. crop_size (`Dict`, *optional*, defaults to `{"height": 224, "width": 224}`): Desired image size after `center_crop`. Can be overridden by the `crop_size` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`List[int]`, *optional*, defaults to `[0.485, 0.456, 0.406]`): Mean to use if normalizing the image.
This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`List[int]`, *optional*, defaults to `[0.229, 0.224, 0.225]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size, param_name="crop_size") self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. If size is a dict with keys "width" and "height", the image will be resized to `(size["height"], size["width"])`. If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to `int(c * (256/224))`. The smaller edge of the image will be matched to this value i.e., if height > width, then image will be rescaled to `(size["shortest_edge"] * height / width, size["shortest_edge"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value i.e., if height > width, then image will be rescaled to (size * height / width, size). resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred.
""" size_dict = get_size_dict(size, default_to_square=False) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: shortest_edge = int((256 / 224) * size["shortest_edge"]) output_size = get_resize_output_image_size( image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format ) size_dict = {"height": output_size[0], "width": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" ) return resize( image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, return_tensors: Optional[TensorType] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> BatchFeature: """ Preprocess an image or batch of images to be used as input to a LeViT model. Args: images (`ImageInput`): Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value i.e, if height > width, then image will be rescaled to (size * height / width, size). resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the output image after center cropping. Crops images to (crop_size["height"], crop_size["width"]). do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image pixel values by `rescaling_factor` - typical to values between 0 and 1. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Factor to rescale the image pixel values by. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image pixel values by `image_mean` and `image_std`. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Mean to normalize the image pixel values by. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Standard deviation to normalize the image pixel values by. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. 
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [self.resize(image, size, resample, input_data_format=input_data_format) for image in images] if do_center_crop: images = [self.center_crop(image, crop_size, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) __all__ = ["LevitImageProcessor"]
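# Minimal usage sketch (added for illustration, not in the upstream file). It runs
# the default pipeline on a random PIL image: the shortest edge is scaled by
# 256/224 before resizing, then a 224x224 center crop, rescale and ImageNet
# normalization, so the output batch has shape (1, 3, 224, 224).
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    image_processor = LevitImageProcessor()
    image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
    inputs = image_processor(image, return_tensors="np")
    print(inputs["pixel_values"].shape)  # (1, 3, 224, 224)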
transformers/src/transformers/models/levit/image_processing_levit.py/0
{ "file_path": "transformers/src/transformers/models/levit/image_processing_levit.py", "repo_id": "transformers", "token_count": 6862 }
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for LLaVa.""" from typing import List, Optional, Tuple, Union from ...image_processing_utils import ( BatchFeature, ) from ...image_processing_utils_fast import ( BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, BASE_IMAGE_PROCESSOR_FAST_DOCSTRING_PREPROCESS, BaseImageProcessorFast, DefaultFastImageProcessorInitKwargs, DefaultFastImageProcessorPreprocessKwargs, group_images_by_shape, reorder_images, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, SizeDict, get_image_size, ) from ...processing_utils import Unpack from ...utils import ( TensorType, add_start_docstrings, is_torch_available, is_torchvision_available, is_torchvision_v2_available, is_vision_available, ) if is_vision_available(): from ...image_utils import PILImageResampling if is_torch_available(): import torch if is_torchvision_available(): if is_torchvision_v2_available(): from torchvision.transforms.v2 import functional as F else: from torchvision.transforms import functional as F class LlavaFastImageProcessorInitKwargs(DefaultFastImageProcessorInitKwargs): do_pad: Optional[bool] class LlavaFastImageProcessorPreprocessKwargs(DefaultFastImageProcessorPreprocessKwargs): do_pad: Optional[bool] @add_start_docstrings( "Constructs a fast Llava image processor.", BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, """ do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image to a square based on the longest edge. Can be overridden by the `do_pad` parameter in the `preprocess` method. """, ) class LlavaImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {"shortest_edge": 224} default_to_square = False crop_size = {"height": 224, "width": 224} do_pad = False do_resize = True do_center_crop = True do_rescale = True do_normalize = True do_convert_rgb = True valid_init_kwargs = LlavaFastImageProcessorInitKwargs valid_preprocess_kwargs = LlavaFastImageProcessorPreprocessKwargs def __init__(self, **kwargs: Unpack[LlavaFastImageProcessorInitKwargs]) -> None: super().__init__(**kwargs) @add_start_docstrings( BASE_IMAGE_PROCESSOR_FAST_DOCSTRING_PREPROCESS, """ do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image to a square based on the longest edge. Can be overridden by the `do_pad` parameter in the `preprocess` method. """, ) def preprocess( self, images: ImageInput, **kwargs: Unpack[LlavaFastImageProcessorPreprocessKwargs] ) -> BatchFeature: return super().preprocess(images, **kwargs) def pad_to_square( self, images: "torch.Tensor", background_color: Union[int, Tuple[int, int, int]] = 0, ) -> "torch.Tensor": """ Pads an image to a square based on the longest edge. Args: images (`torch.Tensor`): The images to pad. background_color (`int` or `Tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding.
Can be an integer for a single channel, or a tuple of integers for multi-channel images. If passed as an integer in multi-channel mode, the remaining channels will default to `0`. Returns: `torch.Tensor`: The padded images. """ height, width = get_image_size(images, ChannelDimension.FIRST) if height == width: return images num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0] if isinstance(background_color, int): background_color = [background_color] + [0] * (num_channels - 1) elif len(background_color) != num_channels: raise ValueError( f"background_color must have exactly {num_channels} elements to match the number of channels" ) max_dim = max(height, width) paste_x_left = (max_dim - width) // 2 paste_y_left = (max_dim - height) // 2 paste_x_right = max_dim - width - paste_x_left paste_y_right = max_dim - height - paste_y_left padded_images = F.pad( images, padding=[paste_x_left, paste_y_left, paste_x_right, paste_y_right], fill=background_color ) return padded_images def _preprocess( self, images: List["torch.Tensor"], do_resize: bool, size: SizeDict, interpolation: Optional["F.InterpolationMode"], do_pad: bool, do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, List[float]]], image_std: Optional[Union[float, List[float]]], return_tensors: Optional[Union[str, TensorType]], ) -> BatchFeature: # Group images by shape for batched padding grouped_images, grouped_images_index = group_images_by_shape(images) padded_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_pad: stacked_images = self.pad_to_square( images=stacked_images, background_color=tuple(int(x * 255) for x in self.image_mean) ) padded_images_grouped[shape] = stacked_images padded_images = reorder_images(padded_images_grouped, grouped_images_index) # Group images by size for batched resizing # Needed in case do_pad is False, or padding returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape(padded_images) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_resize: stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for further processing # Needed in case do_resize is False, or resize returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape(resized_images) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_center_crop: stacked_images = self.center_crop(stacked_images, crop_size) # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors) __all__ = ["LlavaImageProcessorFast"]
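# Minimal sketch of `pad_to_square` (added for illustration, not part of the
# upstream file). It assumes a torchvision version whose `F.pad` accepts a
# per-channel `fill` sequence (torchvision v2 does): a 300x500 channels-first
# image is padded symmetrically to 500x500 before the resize/crop pipeline.
if __name__ == "__main__":
    import torch

    processor = LlavaImageProcessorFast()
    image = torch.zeros(3, 300, 500, dtype=torch.uint8)
    padded = processor.pad_to_square(image, background_color=(127, 0, 0))
    print(padded.shape)  # torch.Size([3, 500, 500])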
transformers/src/transformers/models/llava/image_processing_llava_fast.py/0
{ "file_path": "transformers/src/transformers/models/llava/image_processing_llava_fast.py", "repo_id": "transformers", "token_count": 3201 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for LLaVa-NeXT-Video. """ from typing import TYPE_CHECKING, List, Optional, Union import numpy as np from ...feature_extraction_utils import BatchFeature from ...image_processing_utils import select_best_resolution from ...image_utils import ImageInput, VideoInput, get_image_size, to_numpy_array from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType, logging if TYPE_CHECKING: pass logger = logging.get_logger(__name__) class LlavaNextVideoProcessor(ProcessorMixin): r""" Constructs a LLaVa-NeXT-Video processor which wraps a LLaVa-NeXT image processor, LLaVa-NeXT-Video video processor and a LLaMa tokenizer into a single processor. [`LlavaNextVideoProcessor`] offers all the functionalities of [`LlavaNextImageProcessor`], [`LlavaNextVideoImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~LlavaNextVideoProcessor.__call__`] and [`~LlavaNextVideoProcessor.decode`] for more information. Args: video_processor ([`LlavaNextVideoImageProcessor`], *optional*): The video processor is a required input. image_processor ([`LlavaNextImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. chat_template (`str`, *optional*): Jinja chat template that will be used in tokenizer's `apply_chat_template`. patch_size (`int`, *optional*): Patch size from the vision tower. vision_feature_select_strategy (`str`, *optional*): The feature selection strategy used to select the vision feature from the vision backbone. Should be the same as in the model's config. video_token (`str`, *optional*, defaults to `"<video>"`): Special token used to denote video location. image_token (`str`, *optional*, defaults to `"<image>"`): Special token used to denote image location. num_additional_image_tokens (`int`, *optional*, defaults to 0): Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS or other extra tokens appended, no need to set this arg.
""" # video and image processor share same args, but have different processing logic # only image processor config is saved in the hub attributes = ["video_processor", "image_processor", "tokenizer"] valid_kwargs = [ "chat_template", "patch_size", "vision_feature_select_strategy", "image_token", "video_token", "num_additional_image_tokens", ] image_processor_class = "LlavaNextImageProcessor" video_processor_class = "LlavaNextVideoImageProcessor" tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast") def __init__( self, video_processor=None, image_processor=None, tokenizer=None, chat_template=None, patch_size=None, vision_feature_select_strategy=None, video_token="<video>", image_token="<image>", num_additional_image_tokens=0, **kwargs, ): self.patch_size = patch_size self.num_additional_image_tokens = num_additional_image_tokens self.vision_feature_select_strategy = vision_feature_select_strategy self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token self.video_token = tokenizer.video_token if hasattr(tokenizer, "video_token") else video_token super().__init__(video_processor, image_processor, tokenizer, chat_template=chat_template) def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], images: ImageInput = None, videos: VideoInput = None, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: int = None, return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH, ) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. To prepare the video(s), this method forwards the `videos` and `kwrags` arguments to LlavaNextVideoImageProcessor's [`~LlavaNextVideoImageProcessor.__call__`] if `videos` is not `None`. Please refer to the doctsring of the above two methods for more information. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). 
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            truncation (`bool`, *optional*):
                Activates truncation to cut input sequences longer than `max_length` to `max_length`.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        if images is not None:
            image_inputs = self.image_processor(images, return_tensors=return_tensors)
        else:
            image_inputs = {}

        if videos is not None:
            videos_inputs = self.video_processor(videos, return_tensors=return_tensors)
        else:
            videos_inputs = {}

        if isinstance(text, str):
            text = [text]
        elif not isinstance(text, list) or not isinstance(text[0], str):
            raise ValueError("Invalid input text. Please provide a string, or a list of strings")

        if image_inputs:
            image_sizes = iter(image_inputs["image_sizes"])
            height, width = get_image_size(to_numpy_array(image_inputs["pixel_values"][0][0]))

            prompt_strings = []
            for sample in text:
                while self.image_token in sample:
                    image_size = next(image_sizes)
                    if not isinstance(image_size, (list, tuple)):
                        # cast to list to avoid numerical precision errors when calculating unpadding
                        image_size = image_size.tolist()
                    orig_height, orig_width = image_size
                    num_image_tokens = self._get_number_of_features(orig_height, orig_width, height, width)
                    if self.vision_feature_select_strategy == "default":
                        num_image_tokens -= 1
                    sample = sample.replace(self.image_token, "<placeholder>" * num_image_tokens, 1)
                prompt_strings.append(sample)
            text = [sample.replace("<placeholder>", self.image_token) for sample in prompt_strings]

        # videos are easier: simply get the frame count and multiply
        if videos_inputs:
            one_video = videos_inputs.get("pixel_values_videos")[0]
            if isinstance(one_video, (list, tuple)):
                one_video = np.array(one_video)
            else:
                one_video = to_numpy_array(one_video)
            height, width = get_image_size(one_video[0])
            num_frames = one_video.shape[0]  # frame dim is always after batch dim

            # no `self.num_additional_image_tokens` added because video always has a default feature selection strategy
            num_image_tokens = (height // self.patch_size) * (width // self.patch_size)
            num_video_tokens = num_image_tokens // 4 * num_frames  # divide by 4 needed for avg pooling layer
            prompt_strings = []
            for sample in text:
                sample = sample.replace(self.video_token, self.video_token * num_video_tokens)
                prompt_strings.append(sample)
            text = prompt_strings

        text_inputs = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
        )

        return
BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs})

    # Copied from transformers.models.llava_next.processing_llava_next.LlavaNextProcessor._get_number_of_features
    def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int:
        image_grid_pinpoints = self.image_processor.image_grid_pinpoints

        height_best_resolution, width_best_resolution = select_best_resolution(
            [orig_height, orig_width], image_grid_pinpoints
        )
        scale_height, scale_width = height_best_resolution // height, width_best_resolution // width

        patches_height = height // self.patch_size
        patches_width = width // self.patch_size
        unpadded_features, newline_features = self._get_unpadded_features(
            orig_height, orig_width, patches_height, patches_width, scale_height, scale_width
        )
        # The base patch covers the entire image (+1 for the CLS)
        base_features = patches_height * patches_width + self.num_additional_image_tokens
        num_image_tokens = unpadded_features + newline_features + base_features
        return num_image_tokens

    # Copied from transformers.models.llava_next.processing_llava_next.LlavaNextProcessor._get_unpadded_features
    def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width):
        """
        Get number of features for a given image with height/width. LLaVA-NeXT is different from LLaVA
        because it divides each image into patches depending on its resolution. Therefore we need to calculate how
        many patches an image is divided into and get the number of features from that.
        """
        current_height = patches_height * scale_height
        current_width = patches_width * scale_width

        original_aspect_ratio = width / height
        current_aspect_ratio = current_width / current_height
        if original_aspect_ratio > current_aspect_ratio:
            new_height = int(round(height * (current_width / width), 7))
            padding = (current_height - new_height) // 2
            current_height -= padding * 2
        else:
            new_width = int(round(width * (current_height / height), 7))
            padding = (current_width - new_width) // 2
            current_width -= padding * 2

        unpadded_features = current_height * current_width
        newline_features = current_height
        return (unpadded_features, newline_features)

    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))


__all__ = ["LlavaNextVideoProcessor"]
transformers/src/transformers/models/llava_next_video/processing_llava_next_video.py/0
{ "file_path": "transformers/src/transformers/models/llava_next_video/processing_llava_next_video.py", "repo_id": "transformers", "token_count": 6070 }
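To make the token arithmetic in `__call__` above concrete, here is a worked example of `num_video_tokens`. The frame resolution, patch size, and frame count are illustrative assumptions, not values from the file; only the formula is taken from the processor.

# Worked example of the video token count computed in __call__ (illustrative numbers).
height = width = 336  # assumed frame resolution
patch_size = 14       # assumed vision tower patch size
num_frames = 8        # assumed number of sampled frames

num_image_tokens = (height // patch_size) * (width // patch_size)  # 24 * 24 = 576
# Divided by 4 because the model average-pools video features over 2x2 patch windows.
num_video_tokens = num_image_tokens // 4 * num_frames  # 144 * 8 = 1152
print(num_video_tokens)  # 1152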
# coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
from argparse import ArgumentParser
from dataclasses import dataclass
from pathlib import Path
from pprint import pformat
from typing import Any, Dict, Iterator, List, Set, Tuple

import requests
import torch
import torchvision.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.projects.deeplab import add_deeplab_config
from huggingface_hub import hf_hub_download
from PIL import Image
from torch import Tensor, nn

from transformers import (
    Mask2FormerConfig,
    Mask2FormerForUniversalSegmentation,
    Mask2FormerImageProcessor,
    Mask2FormerModel,
    SwinConfig,
)
from transformers.models.mask2former.modeling_mask2former import (
    Mask2FormerForUniversalSegmentationOutput,
    Mask2FormerModelOutput,
)
from transformers.utils import logging


StateDict = Dict[str, Tensor]

logging.set_verbosity_info()
logger = logging.get_logger()

torch.manual_seed(0)


class TrackedStateDict:
    def __init__(self, to_track: Dict):
        """This class "tracks" a python dictionary by keeping track of which item is accessed.

        Args:
            to_track (Dict): The dictionary we wish to track
        """
        self.to_track = to_track
        self._seen: Set[str] = set()

    def __getitem__(self, key: str) -> Any:
        return self.to_track[key]

    def __setitem__(self, key: str, item: Any):
        self._seen.add(key)
        self.to_track[key] = item

    def diff(self) -> List[str]:
        """This method returns a set difference between the keys in the tracked state dict and the ones we have
        accessed so far.
This is an effective method to check whether we have updated all the keys.

        Returns:
            List[str]: List of keys not yet updated
        """
        return set(self.to_track.keys()) - self._seen

    def copy(self) -> Dict:
        # proxy the call to the internal dictionary
        return self.to_track.copy()


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    img_data = requests.get(url, stream=True).raw
    im = Image.open(img_data)
    return im


@dataclass
class Args:
    """Fake command line arguments needed by mask2former/detectron implementation"""

    config_file: str


def setup_cfg(args: Args):
    # load config from file and command-line arguments
    cfg = get_cfg()
    add_deeplab_config(cfg)
    add_maskformer2_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.freeze()
    return cfg


class OriginalMask2FormerConfigToOursConverter:
    def __call__(self, original_config: object) -> Mask2FormerConfig:
        model = original_config.MODEL

        repo_id = "huggingface/label-files"
        if model.SEM_SEG_HEAD.NUM_CLASSES == 847:
            filename = "mask2former-ade20k-full-id2label.json"
        elif model.SEM_SEG_HEAD.NUM_CLASSES == 150:
            filename = "ade20k-id2label.json"
        elif model.SEM_SEG_HEAD.NUM_CLASSES == 80:
            filename = "coco-detection-mmdet-id2label.json"
        elif model.SEM_SEG_HEAD.NUM_CLASSES == 171:
            filename = "mask2former-coco-stuff-id2label.json"
        elif model.SEM_SEG_HEAD.NUM_CLASSES == 133:
            filename = "coco-panoptic-id2label.json"
        elif model.SEM_SEG_HEAD.NUM_CLASSES == 19:
            filename = "cityscapes-id2label.json"
        elif model.SEM_SEG_HEAD.NUM_CLASSES == 8:
            filename = "cityscapes-instance-id2label.json"
        elif model.SEM_SEG_HEAD.NUM_CLASSES == 65:
            filename = "mapillary-vistas-id2label.json"

        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        label2id = {label: idx for idx, label in id2label.items()}

        if model.SWIN.EMBED_DIM == 96:
            backbone_config = SwinConfig.from_pretrained(
                "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
            )
        elif model.SWIN.EMBED_DIM == 128:
            backbone_config = SwinConfig(
                embed_dim=128,
                window_size=12,
                depths=(2, 2, 18, 2),
                num_heads=(4, 8, 16, 32),
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        elif model.SWIN.EMBED_DIM == 192:
            backbone_config = SwinConfig.from_pretrained(
                "microsoft/swin-large-patch4-window12-384", out_features=["stage1", "stage2", "stage3", "stage4"]
            )
        else:
            raise ValueError(f"embed dim {model.SWIN.EMBED_DIM} not supported for Swin!")

        backbone_config.drop_path_rate = model.SWIN.DROP_PATH_RATE
        backbone_config.attention_probs_dropout_prob = model.SWIN.ATTN_DROP_RATE
        backbone_config.depths = model.SWIN.DEPTHS

        config: Mask2FormerConfig = Mask2FormerConfig(
            ignore_value=model.SEM_SEG_HEAD.IGNORE_VALUE,
            num_labels=model.SEM_SEG_HEAD.NUM_CLASSES,
            num_queries=model.MASK_FORMER.NUM_OBJECT_QUERIES,
            no_object_weight=model.MASK_FORMER.NO_OBJECT_WEIGHT,
            class_weight=model.MASK_FORMER.CLASS_WEIGHT,
            mask_weight=model.MASK_FORMER.MASK_WEIGHT,
            dice_weight=model.MASK_FORMER.DICE_WEIGHT,
            train_num_points=model.MASK_FORMER.TRAIN_NUM_POINTS,
            oversample_ratio=model.MASK_FORMER.OVERSAMPLE_RATIO,
            importance_sample_ratio=model.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO,
            init_std=0.02,
            init_xavier_std=1.0,
            use_auxiliary_loss=model.MASK_FORMER.DEEP_SUPERVISION,
            feature_strides=[4, 8, 16, 32],
            backbone_config=backbone_config,
            id2label=id2label,
            label2id=label2id,
            feature_size=model.SEM_SEG_HEAD.CONVS_DIM,
            mask_feature_size=model.SEM_SEG_HEAD.MASK_DIM,
hidden_dim=model.MASK_FORMER.HIDDEN_DIM, encoder_layers=model.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS, encoder_feedforward_dim=1024, decoder_layers=model.MASK_FORMER.DEC_LAYERS, num_attention_heads=model.MASK_FORMER.NHEADS, dropout=model.MASK_FORMER.DROPOUT, dim_feedforward=model.MASK_FORMER.DIM_FEEDFORWARD, pre_norm=model.MASK_FORMER.PRE_NORM, enforce_input_proj=model.MASK_FORMER.ENFORCE_INPUT_PROJ, common_stride=model.SEM_SEG_HEAD.COMMON_STRIDE, ) return config class OriginalMask2FormerConfigToImageProcessorConverter: def __call__(self, original_config: object) -> Mask2FormerImageProcessor: model = original_config.MODEL model_input = original_config.INPUT return Mask2FormerImageProcessor( image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(), image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(), size=model_input.MIN_SIZE_TEST, max_size=model_input.MAX_SIZE_TEST, num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, ignore_index=model.SEM_SEG_HEAD.IGNORE_VALUE, size_divisibility=32, ) class OriginalMask2FormerCheckpointToOursConverter: def __init__(self, original_model: nn.Module, config: Mask2FormerConfig): self.original_model = original_model self.config = config def pop_all(self, renamed_keys: List[Tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict): for src_key, dst_key in renamed_keys: dst_state_dict[dst_key] = src_state_dict.pop(src_key) def replace_maskformer_swin_backbone( self, dst_state_dict: StateDict, src_state_dict: StateDict, config: Mask2FormerConfig ): dst_prefix: str = "pixel_level_module.encoder" src_prefix: str = "backbone" renamed_keys = [ ( f"{src_prefix}.patch_embed.proj.weight", f"{dst_prefix}.model.embeddings.patch_embeddings.projection.weight", ), (f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.model.embeddings.patch_embeddings.projection.bias"), (f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.model.embeddings.norm.weight"), (f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.model.embeddings.norm.bias"), ] num_layers = len(config.backbone_config.depths) for layer_idx in range(num_layers): for block_idx in range(config.backbone_config.depths[layer_idx]): renamed_keys.extend( [ # src, dst ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table", ), ] ) # now we need to handle the attentions # read in weights + bias of input projection layer of cross-attention src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"] src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"] size = src_att_weight.shape[0] offset = size // 3 dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight" ] = src_att_weight[:offset, :] dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias" ] = src_att_bias[:offset] dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight" ] = src_att_weight[offset : offset * 2, :] dst_state_dict[ 
f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias" ] = src_att_bias[offset : offset * 2] dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight" ] = src_att_weight[-offset:, :] dst_state_dict[ f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias" ] = src_att_bias[-offset:] # let's pop them src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight") src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias") # proj renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias", ), ] ) # second norm renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias", ), ] ) # mlp renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias", ), ] ) renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index", f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index", ) ] ) if layer_idx < num_layers - 1: # patch merging renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.reduction.weight", ), ( f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight", f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.weight", ), ( f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias", f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.bias", ), ] ) # hidden states norms renamed_keys.extend( [ ( f"{src_prefix}.norm{layer_idx}.weight", f"{dst_prefix}.hidden_states_norms.{layer_idx}.weight", ), ( f"{src_prefix}.norm{layer_idx}.bias", f"{dst_prefix}.hidden_states_norms.{layer_idx}.bias", ), ] ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def replace_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: Mask2FormerConfig): dst_prefix: str = "pixel_level_module.encoder" src_prefix: str = "backbone" renamed_keys = [ ( f"{src_prefix}.patch_embed.proj.weight", f"{dst_prefix}.embeddings.patch_embeddings.projection.weight", ), (f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.embeddings.patch_embeddings.projection.bias"), (f"{src_prefix}.patch_embed.norm.weight", 
f"{dst_prefix}.embeddings.norm.weight"), (f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.embeddings.norm.bias"), ] for layer_idx in range(len(config.backbone_config.depths)): for block_idx in range(config.backbone_config.depths[layer_idx]): renamed_keys.extend( [ # src, dst ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table", ), ] ) # now we need to handle the attentions # read in weights + bias of input projection layer of cross-attention src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"] src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"] size = src_att_weight.shape[0] offset = size // 3 dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight" ] = src_att_weight[:offset, :] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias" ] = src_att_bias[:offset] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight" ] = src_att_weight[offset : offset * 2, :] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias" ] = src_att_bias[offset : offset * 2] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight" ] = src_att_weight[-offset:, :] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias" ] = src_att_bias[-offset:] # let's pop them src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight") src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias") # proj renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias", ), ] ) # second norm renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias", ), ] ) # mlp renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias", 
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias", ), ] ) renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index", ) ] ) if layer_idx < 3: # patch merging renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.reduction.weight", ), ( f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.weight", ), ( f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.bias", ), ] ) # hidden states norms renamed_keys.extend( [ ( f"{src_prefix}.norm{layer_idx}.weight", f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.weight", ), ( f"{src_prefix}.norm{layer_idx}.bias", f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.bias", ), ] ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) # Backbone + Pixel Decoder def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "pixel_level_module.decoder" src_prefix: str = "sem_seg_head.pixel_decoder" self.replace_swin_backbone(dst_state_dict, src_state_dict, self.config) def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): return [ (f"{src_prefix}.weight", f"{dst_prefix}.weight"), (f"{src_prefix}.bias", f"{dst_prefix}.bias"), ] def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str): self_attn_keys = [] self_attn_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.attention_weights", f"{dst_prefix}.attention_weights") ) self_attn_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.output_proj", f"{dst_prefix}.output_proj") ) self_attn_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.sampling_offsets", f"{dst_prefix}.sampling_offsets") ) self_attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.value_proj", f"{dst_prefix}.value_proj")) return self_attn_keys def rename_keys_for_encoder_layer(src_prefix: str, dst_prefix: str): encoder_keys = [] encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.fc1")) encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.fc2")) encoder_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.self_attn_layer_norm") ) encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.final_layer_norm")) encoder_keys.extend(rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn")) return encoder_keys # convolution layer for final features renamed_keys = [ (f"{src_prefix}.adapter_1.weight", f"{dst_prefix}.adapter_1.0.weight"), (f"{src_prefix}.adapter_1.norm.weight", f"{dst_prefix}.adapter_1.1.weight"), (f"{src_prefix}.adapter_1.norm.bias", f"{dst_prefix}.adapter_1.1.bias"), ] renamed_keys.extend( [ (f"{src_prefix}.layer_1.weight", f"{dst_prefix}.layer_1.0.weight"), (f"{src_prefix}.layer_1.norm.weight", f"{dst_prefix}.layer_1.1.weight"), (f"{src_prefix}.layer_1.norm.bias", f"{dst_prefix}.layer_1.1.bias"), ] ) # proj layers for i in range(3): for j in range(2): renamed_keys.extend( [ (f"{src_prefix}.input_proj.{i}.{j}.weight", f"{dst_prefix}.input_projections.{i}.{j}.weight"), (f"{src_prefix}.input_proj.{i}.{j}.bias", f"{dst_prefix}.input_projections.{i}.{j}.bias"), ] ) 
renamed_keys.extend([(f"{src_prefix}.transformer.level_embed", f"{dst_prefix}.level_embed")]) # layers for layer_idx in range(self.config.encoder_layers): renamed_keys.extend( rename_keys_for_encoder_layer( f"{src_prefix}.transformer.encoder.layers.{layer_idx}", f"{dst_prefix}.encoder.layers.{layer_idx}" ) ) # proj renamed_keys.extend( [ (f"{src_prefix}.mask_features.weight", f"{dst_prefix}.mask_projection.weight"), (f"{src_prefix}.mask_features.bias", f"{dst_prefix}.mask_projection.bias"), ] ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) # Transformer Decoder def rename_keys_in_masked_attention_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module.decoder" src_prefix: str = "sem_seg_head.predictor" rename_keys = [] for i in range(self.config.decoder_layers - 1): rename_keys.append( ( f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.out_proj.weight", f"{dst_prefix}.layers.{i}.self_attn.out_proj.weight", ) ) rename_keys.append( ( f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.out_proj.bias", f"{dst_prefix}.layers.{i}.self_attn.out_proj.bias", ) ) rename_keys.append( ( f"{src_prefix}.transformer_self_attention_layers.{i}.norm.weight", f"{dst_prefix}.layers.{i}.self_attn_layer_norm.weight", ) ) rename_keys.append( ( f"{src_prefix}.transformer_self_attention_layers.{i}.norm.bias", f"{dst_prefix}.layers.{i}.self_attn_layer_norm.bias", ) ) rename_keys.append( ( f"{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.in_proj_weight", f"{dst_prefix}.layers.{i}.cross_attn.in_proj_weight", ) ) rename_keys.append( ( f"{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.in_proj_bias", f"{dst_prefix}.layers.{i}.cross_attn.in_proj_bias", ) ) rename_keys.append( ( f"{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.out_proj.weight", f"{dst_prefix}.layers.{i}.cross_attn.out_proj.weight", ) ) rename_keys.append( ( f"{src_prefix}.transformer_cross_attention_layers.{i}.multihead_attn.out_proj.bias", f"{dst_prefix}.layers.{i}.cross_attn.out_proj.bias", ) ) rename_keys.append( ( f"{src_prefix}.transformer_cross_attention_layers.{i}.norm.weight", f"{dst_prefix}.layers.{i}.cross_attn_layer_norm.weight", ) ) rename_keys.append( ( f"{src_prefix}.transformer_cross_attention_layers.{i}.norm.bias", f"{dst_prefix}.layers.{i}.cross_attn_layer_norm.bias", ) ) rename_keys.append( (f"{src_prefix}.transformer_ffn_layers.{i}.linear1.weight", f"{dst_prefix}.layers.{i}.fc1.weight") ) rename_keys.append( (f"{src_prefix}.transformer_ffn_layers.{i}.linear1.bias", f"{dst_prefix}.layers.{i}.fc1.bias") ) rename_keys.append( (f"{src_prefix}.transformer_ffn_layers.{i}.linear2.weight", f"{dst_prefix}.layers.{i}.fc2.weight") ) rename_keys.append( (f"{src_prefix}.transformer_ffn_layers.{i}.linear2.bias", f"{dst_prefix}.layers.{i}.fc2.bias") ) rename_keys.append( ( f"{src_prefix}.transformer_ffn_layers.{i}.norm.weight", f"{dst_prefix}.layers.{i}.final_layer_norm.weight", ) ) rename_keys.append( ( f"{src_prefix}.transformer_ffn_layers.{i}.norm.bias", f"{dst_prefix}.layers.{i}.final_layer_norm.bias", ) ) return rename_keys def replace_masked_attention_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module.decoder" src_prefix: str = "sem_seg_head.predictor" renamed_keys = self.rename_keys_in_masked_attention_decoder(dst_state_dict, src_state_dict) # add more renamed_keys.extend( [ (f"{src_prefix}.decoder_norm.weight", 
f"{dst_prefix}.layernorm.weight"), (f"{src_prefix}.decoder_norm.bias", f"{dst_prefix}.layernorm.bias"), ] ) mlp_len = 3 for i in range(mlp_len): renamed_keys.extend( [ ( f"{src_prefix}.mask_embed.layers.{i}.weight", f"{dst_prefix}.mask_predictor.mask_embedder.{i}.0.weight", ), ( f"{src_prefix}.mask_embed.layers.{i}.bias", f"{dst_prefix}.mask_predictor.mask_embedder.{i}.0.bias", ), ] ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def replace_keys_qkv_transformer_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module.decoder.layers" src_prefix: str = "sem_seg_head.predictor" for i in range(self.config.decoder_layers - 1): # read in weights + bias of input projection layer of self-attention in_proj_weight = src_state_dict.pop( f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_weight" ) in_proj_bias = src_state_dict.pop( f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict dst_state_dict[f"{dst_prefix}.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] dst_state_dict[f"{dst_prefix}.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] dst_state_dict[f"{dst_prefix}.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] dst_state_dict[f"{dst_prefix}.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] dst_state_dict[f"{dst_prefix}.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] dst_state_dict[f"{dst_prefix}.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module" src_prefix: str = "sem_seg_head.predictor" self.replace_masked_attention_decoder(dst_state_dict, src_state_dict) renamed_keys = [ (f"{src_prefix}.query_embed.weight", f"{dst_prefix}.queries_embedder.weight"), (f"{src_prefix}.query_feat.weight", f"{dst_prefix}.queries_features.weight"), (f"{src_prefix}.level_embed.weight", f"{dst_prefix}.level_embed.weight"), ] self.pop_all(renamed_keys, dst_state_dict, src_state_dict) self.replace_keys_qkv_transformer_decoder(dst_state_dict, src_state_dict) def replace_universal_segmentation_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "" src_prefix: str = "sem_seg_head.predictor" renamed_keys = [ (f"{src_prefix}.class_embed.weight", f"{dst_prefix}class_predictor.weight"), (f"{src_prefix}.class_embed.bias", f"{dst_prefix}class_predictor.bias"), ] logger.info(f"Replacing keys {pformat(renamed_keys)}") self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def convert(self, mask2former: Mask2FormerModel) -> Mask2FormerModel: dst_state_dict = TrackedStateDict(mask2former.state_dict()) src_state_dict = self.original_model.state_dict() self.replace_pixel_module(dst_state_dict, src_state_dict) self.replace_transformer_module(dst_state_dict, src_state_dict) logger.info(f"Missed keys are {pformat(dst_state_dict.diff())}") logger.info(f"Not copied keys are {pformat(src_state_dict.keys())}") logger.info("🙌 Done") state_dict = {key: dst_state_dict[key] for key in dst_state_dict.to_track.keys()} mask2former.load_state_dict(state_dict) return mask2former def convert_universal_segmentation( self, mask2former: Mask2FormerForUniversalSegmentation ) -> Mask2FormerForUniversalSegmentation: dst_state_dict = TrackedStateDict(mask2former.state_dict()) src_state_dict = self.original_model.state_dict() self.replace_universal_segmentation_module(dst_state_dict, 
src_state_dict)

        state_dict = {key: dst_state_dict[key] for key in dst_state_dict.to_track.keys()}
        mask2former.load_state_dict(state_dict)
        return mask2former

    @staticmethod
    def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[Tuple[object, Path, Path]]:
        checkpoints: List[Path] = checkpoints_dir.glob("**/*.pkl")

        for checkpoint in checkpoints:
            logger.info(f"💪 Converting {checkpoint.stem}")
            # find associated config file
            # dataset_name, e.g. 'coco'
            dataset_name = checkpoint.parents[2].stem
            if dataset_name == "ade":
                dataset_name = dataset_name.replace("ade", "ade20k")

            # task type, e.g. 'instance-segmentation'
            segmentation_task = checkpoint.parents[1].stem

            # config file corresponding to checkpoint
            config_file_name = f"{checkpoint.parents[0].stem}.yaml"

            config: Path = config_dir / dataset_name / segmentation_task / "swin" / config_file_name

            yield config, checkpoint


def test(
    original_model,
    our_model: Mask2FormerForUniversalSegmentation,
    image_processor: Mask2FormerImageProcessor,
    tolerance: float,
):
    with torch.no_grad():
        original_model = original_model.eval()
        our_model = our_model.eval()

        im = prepare_img()
        x = image_processor(images=im, return_tensors="pt")["pixel_values"]

        original_model_backbone_features = original_model.backbone(x.clone())
        our_model_output: Mask2FormerModelOutput = our_model.model(x.clone(), output_hidden_states=True)

        # Test backbone
        for original_model_feature, our_model_feature in zip(
            original_model_backbone_features.values(), our_model_output.encoder_hidden_states
        ):
            assert torch.allclose(
                original_model_feature, our_model_feature, atol=tolerance
            ), "The backbone features are not the same."

        # Test pixel decoder
        mask_features, _, multi_scale_features = original_model.sem_seg_head.pixel_decoder.forward_features(
            original_model_backbone_features
        )

        for original_model_feature, our_model_feature in zip(
            multi_scale_features, our_model_output.pixel_decoder_hidden_states
        ):
            assert torch.allclose(
                original_model_feature, our_model_feature, atol=tolerance
            ), "The pixel decoder features are not the same."

        # Let's test the full model
        tr_complete = T.Compose(
            [T.Resize((384, 384)), T.ToTensor()],
        )
        y = (tr_complete(im) * 255.0).to(torch.int).float()

        # modify original Mask2Former code to return mask and class logits
        original_class_logits, original_mask_logits = original_model([{"image": y.clone().squeeze(0)}])

        our_model_out: Mask2FormerForUniversalSegmentationOutput = our_model(x.clone())
        our_mask_logits = our_model_out.masks_queries_logits
        our_class_logits = our_model_out.class_queries_logits

        assert original_mask_logits.shape == our_mask_logits.shape, "Output mask shapes are not matching."
        assert original_class_logits.shape == our_class_logits.shape, "Output class logits shapes are not matching."
        assert torch.allclose(
            original_class_logits, our_class_logits, atol=tolerance
        ), "The class logits are not the same."
        assert torch.allclose(
            original_mask_logits, our_mask_logits, atol=tolerance
        ), "The predicted masks are not the same."
logger.info("✅ Test passed!")


def get_model_name(checkpoint_file: Path):
    # model_name_raw is something like maskformer2_swin_small_bs16_50ep
    model_name_raw: str = checkpoint_file.parents[0].stem

    # `segmentation_task_name` must be one of the following: `instance-segmentation`, `panoptic-segmentation`, `semantic-segmentation`
    segmentation_task_name: str = checkpoint_file.parents[1].stem
    if segmentation_task_name not in ["instance-segmentation", "panoptic-segmentation", "semantic-segmentation"]:
        raise ValueError(
            f"{segmentation_task_name} is not a valid segmentation task name; acceptable values are:"
            " instance-segmentation, panoptic-segmentation, semantic-segmentation."
        )

    # dataset name must be one of the following: `coco`, `ade`, `cityscapes`, `mapillary-vistas`
    dataset_name: str = checkpoint_file.parents[2].stem
    if dataset_name not in ["coco", "ade", "cityscapes", "mapillary-vistas"]:
        raise ValueError(
            f"{dataset_name} is not a valid dataset name; acceptable values are: coco, ade, cityscapes,"
            " mapillary-vistas."
        )

    backbone = "swin"
    backbone_types = ["tiny", "small", "base_IN21k", "base", "large"]
    backbone_type = list(filter(lambda x: x in model_name_raw, backbone_types))[0].replace("_", "-")

    model_name = f"mask2former-{backbone}-{backbone_type}-{dataset_name}-{segmentation_task_name.split('-')[0]}"

    return model_name


if __name__ == "__main__":
    parser = ArgumentParser(
        description="Command line tool to convert the original Mask2Former checkpoints (with Swin backbone) to our implementation."
    )

    parser.add_argument(
        "--checkpoints_dir",
        type=Path,
        help=(
            "A directory containing the model's checkpoints. The directory has to have the following structure:"
            " <DIR_NAME>/<DATASET_NAME>/<SEGMENTATION_TASK_NAME>/<CONFIG_NAME>.pkl"
        ),
    )
    parser.add_argument(
        "--configs_dir",
        type=Path,
        help=(
            "A directory containing the model's configs, see detectron2 doc. The directory has to have the following"
            " structure: <DIR_NAME>/<DATASET_NAME>/<SEGMENTATION_TASK_NAME>/<CONFIG_NAME>.yaml"
        ),
    )
    parser.add_argument(
        "--mask2former_dir",
        required=True,
        type=Path,
        help=(
            "A path to Mask2Former's original implementation directory. 
You can download from here:"
            " https://github.com/facebookresearch/Mask2Former"
        ),
    )

    args = parser.parse_args()

    checkpoints_dir: Path = args.checkpoints_dir
    config_dir: Path = args.configs_dir
    mask2former_dir: Path = args.mask2former_dir
    # append the parent of the Mask2Former directory to sys.path so the original code can be imported
    sys.path.append(str(mask2former_dir.parent))
    # import original Mask2Former config and model from original source code repo
    from Mask2Former.mask2former.config import add_maskformer2_config
    from Mask2Former.mask2former.maskformer_model import MaskFormer as OriginalMask2Former

    for config_file, checkpoint_file in OriginalMask2FormerCheckpointToOursConverter.using_dirs(
        checkpoints_dir, config_dir
    ):
        model_name = get_model_name(checkpoint_file)
        image_processor = OriginalMask2FormerConfigToImageProcessorConverter()(
            setup_cfg(Args(config_file=config_file))
        )
        image_processor.size = {"height": 384, "width": 384}

        original_config = setup_cfg(Args(config_file=config_file))
        mask2former_kwargs = OriginalMask2Former.from_config(original_config)
        original_model = OriginalMask2Former(**mask2former_kwargs).eval()

        DetectionCheckpointer(original_model).load(str(checkpoint_file))

        config: Mask2FormerConfig = OriginalMask2FormerConfigToOursConverter()(original_config)
        mask2former = Mask2FormerModel(config=config).eval()

        converter = OriginalMask2FormerCheckpointToOursConverter(original_model, config)
        mask2former = converter.convert(mask2former)

        mask2former_for_segmentation = Mask2FormerForUniversalSegmentation(config=config).eval()
        mask2former_for_segmentation.model = mask2former

        mask2former_for_segmentation = converter.convert_universal_segmentation(mask2former_for_segmentation)

        tolerance = 3e-4
        high_tolerance_models = [
            "mask2former-swin-base-IN21k-coco-instance",
            "mask2former-swin-base-coco-instance",
            "mask2former-swin-small-cityscapes-semantic",
        ]

        if model_name in high_tolerance_models:
            tolerance = 3e-1

        logger.info(f"🪄 Testing {model_name}...")
        test(original_model, mask2former_for_segmentation, image_processor, tolerance)
        logger.info(f"🪄 Pushing {model_name} to hub...")

        image_processor.push_to_hub(model_name)
        mask2former_for_segmentation.push_to_hub(model_name)
transformers/src/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/mask2former/convert_mask2former_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 24038 }
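The converter above repeatedly slices fused qkv projections into separate query, key, and value weights. The following self-contained sketch demonstrates the same slicing on synthetic tensors; the toy hidden size is an assumption chosen for readability, not a value from any checkpoint.

# Toy demonstration of splitting a fused qkv projection, mirroring the converter's slicing.
import torch

hidden = 8  # toy hidden size; real checkpoints use larger dimensions
qkv_weight = torch.randn(3 * hidden, hidden)  # rows stacked as [q; k; v]
qkv_bias = torch.randn(3 * hidden)

offset = qkv_weight.shape[0] // 3
q_w, q_b = qkv_weight[:offset, :], qkv_bias[:offset]
k_w, k_b = qkv_weight[offset : offset * 2, :], qkv_bias[offset : offset * 2]
v_w, v_b = qkv_weight[-offset:, :], qkv_bias[-offset:]

x = torch.randn(4, hidden)
# Applying the fused projection equals concatenating the three separate projections.
fused = x @ qkv_weight.T + qkv_bias
split = torch.cat([x @ q_w.T + q_b, x @ k_w.T + k_b, x @ v_w.T + v_b], dim=-1)
assert torch.allclose(fused, split, atol=1e-6)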
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MGP-STR model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class MgpstrConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`MgpstrModel`]. It is used to instantiate an
    MGP-STR model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the MGP-STR
    [alibaba-damo/mgp-str-base](https://huggingface.co/alibaba-damo/mgp-str-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`List[int]`, *optional*, defaults to `[32, 128]`):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 4):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        max_token_length (`int`, *optional*, defaults to 27):
            The maximum number of output tokens.
        num_character_labels (`int`, *optional*, defaults to 38):
            The number of classes for the character head.
        num_bpe_labels (`int`, *optional*, defaults to 50257):
            The number of classes for the BPE head.
        num_wordpiece_labels (`int`, *optional*, defaults to 30522):
            The number of classes for the wordpiece head.
        hidden_size (`int`, *optional*, defaults to 768):
            The embedding dimension.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        mlp_ratio (`float`, *optional*, defaults to 4.0):
            The ratio of mlp hidden dim to embedding dim.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        distilled (`bool`, *optional*, defaults to `False`):
            Model includes a distillation token and head as in DeiT models.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        drop_rate (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings and encoder.
        attn_drop_rate (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The stochastic depth rate.
        output_a3_attentions (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return A^3 module attentions.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example: ```python >>> from transformers import MgpstrConfig, MgpstrForSceneTextRecognition >>> # Initializing a Mgpstr mgp-str-base style configuration >>> configuration = MgpstrConfig() >>> # Initializing a model (with random weights) from the mgp-str-base style configuration >>> model = MgpstrForSceneTextRecognition(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mgp-str" def __init__( self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.max_token_length = max_token_length self.num_character_labels = num_character_labels self.num_bpe_labels = num_bpe_labels self.num_wordpiece_labels = num_wordpiece_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.mlp_ratio = mlp_ratio self.distilled = distilled self.layer_norm_eps = layer_norm_eps self.drop_rate = drop_rate self.qkv_bias = qkv_bias self.attn_drop_rate = attn_drop_rate self.drop_path_rate = drop_path_rate self.output_a3_attentions = output_a3_attentions self.initializer_range = initializer_range __all__ = ["MgpstrConfig"]
transformers/src/transformers/models/mgp_str/configuration_mgp_str.py/0
{ "file_path": "transformers/src/transformers/models/mgp_str/configuration_mgp_str.py", "repo_id": "transformers", "token_count": 2254 }
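As a sanity check of the defaults above, the patch grid implied by `image_size=[32, 128]` and `patch_size=4` can be computed directly. Whether extra tokens (e.g. a class or distillation token) are prepended is model-specific, so only the raw patch count is shown here.

# Patch token count implied by the default MGP-STR configuration.
image_height, image_width = 32, 128
patch_size = 4
num_patches = (image_height // patch_size) * (image_width // patch_size)
print(num_patches)  # 8 * 32 = 256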
# coding=utf-8 # Copyright 2023 Mixtral AI and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mixtral model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class MixtralConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MixtralModel`]. It is used to instantiate an Mixtral model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Mixtral-7B-v0.1 or Mixtral-7B-Instruct-v0.1. [mixtralai/Mixtral-8x7B](https://huggingface.co/mixtralai/Mixtral-8x7B) [mixtralai/Mixtral-7B-Instruct-v0.1](https://huggingface.co/mixtralai/Mixtral-7B-Instruct-v0.1) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the Mixtral model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MixtralModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 14336): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`. head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`): The attention head dimension. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to `4096*32`): The maximum sequence length that this model might ever be used with. Mixtral's sliding window attention allows sequence of up to 4096*32 tokens. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. 
use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            The id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the "end-of-sequence" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_theta (`float`, *optional*, defaults to 1000000.0):
            The base period of the RoPE embeddings.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If not specified, will default to `4096`.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        num_experts_per_tok (`int`, *optional*, defaults to 2):
            The number of experts to route per-token, which can also be interpreted as the `top-k` routing
            parameter
        num_local_experts (`int`, *optional*, defaults to 8):
            Number of experts per Sparse MLP layer.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not the router logits should be returned by the model. Enabling this will also allow the model
            to output the auxiliary loss. See [here]() for more details
        router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
            The aux loss factor for the total loss.
        router_jitter_noise (`float`, *optional*, defaults to 0.0):
            Amount of noise to add to the router.

    ```python
    >>> from transformers import MixtralModel, MixtralConfig

    >>> # Initializing a Mixtral 7B style configuration
    >>> configuration = MixtralConfig()

    >>> # Initializing a model from the Mixtral 7B style configuration
    >>> model = MixtralModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "mixtral"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.block_sparse_moe.gate": "colwise_rep",  # we need to replicate here to correctly route experts
        "layers.*.block_sparse_moe.experts.*.w1": "colwise",
        "layers.*.block_sparse_moe.experts.*.w2": "rowwise",
        "layers.*.block_sparse_moe.experts.*.w3": "colwise",
    }

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=14336,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=8,
        head_dim=None,
        hidden_act="silu",
        max_position_embeddings=4096 * 32,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        rope_theta=1e6,
        sliding_window=None,
        attention_dropout=0.0,
        num_experts_per_tok=2,
        num_local_experts=8,
        output_router_logits=False,
        router_aux_loss_coef=0.001,
        router_jitter_noise=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.sliding_window = sliding_window

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
self.rope_theta = rope_theta self.attention_dropout = attention_dropout self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads self.num_experts_per_tok = num_experts_per_tok self.num_local_experts = num_local_experts self.output_router_logits = output_router_logits self.router_aux_loss_coef = router_aux_loss_coef self.router_jitter_noise = router_jitter_noise super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )
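
# --- Editor's note: illustrative sketch, not part of the upstream file. -----
# A minimal example (kept as comments so the module is unchanged) of how the
# grouped-query-attention (GQA) and MoE fields of the config above interact.
# The tiny sizes are hypothetical and chosen only to make the arithmetic
# obvious; running it requires `transformers` to be installed.
#
#     from transformers import MixtralConfig
#
#     cfg = MixtralConfig(
#         hidden_size=512,
#         num_attention_heads=8,
#         num_key_value_heads=2,  # 8 / 2 = 4 query heads share each KV head
#         num_experts_per_tok=2,  # top-2 expert routing per token
#         num_local_experts=8,
#     )
#     assert cfg.head_dim == 64  # defaults to hidden_size // num_attention_heads
#     assert cfg.num_attention_heads % cfg.num_key_value_heads == 0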
transformers/src/transformers/models/mixtral/configuration_mixtral.py/0
{ "file_path": "transformers/src/transformers/models/mixtral/configuration_mixtral.py", "repo_id": "transformers", "token_count": 3540 }
# MIT License
#
# Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    NextSentencePredictorOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_mobilebert import MobileBertConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "google/mobilebert-uncased"
_CONFIG_FOR_DOC = "MobileBertConfig"

# TokenClassification docstring
_CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "mrm8488/mobilebert-finetuned-ner"
_TOKEN_CLASS_EXPECTED_OUTPUT = "['I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC']"
_TOKEN_CLASS_EXPECTED_LOSS = 0.03

# QuestionAnswering docstring
_CHECKPOINT_FOR_QA = "csarron/mobilebert-uncased-squad-v2"
_QA_EXPECTED_OUTPUT = "'a nice puppet'"
_QA_EXPECTED_LOSS = 3.98
_QA_TARGET_START_INDEX = 12
_QA_TARGET_END_INDEX = 13

# SequenceClassification docstring
_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "lordtt13/emo-mobilebert"
_SEQ_CLASS_EXPECTED_OUTPUT = "'others'"
_SEQ_CLASS_EXPECTED_LOSS = "4.72"


def load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model."""
    try:
        import re

        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.replace("ffn_layer", "ffn")
        name = name.replace("FakeLayerNorm", "LayerNorm")
        name = name.replace("extra_output_weights", "dense/kernel")
        name = name.replace("bert", "mobilebert")
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model


class NoNorm(nn.Module):
    def __init__(self, feat_size, eps=None):
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(feat_size))
        self.weight = nn.Parameter(torch.ones(feat_size))

    def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
        return input_tensor * self.weight + self.bias


NORM2FN = {"layer_norm": nn.LayerNorm, "no_norm": NoNorm}


class MobileBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.trigram_input = config.trigram_input
        self.embedding_size = config.embedding_size
        self.hidden_size = config.hidden_size

        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        embed_dim_multiplier = 3 if self.trigram_input else 1
        embedded_input_size = self.embedding_size * embed_dim_multiplier
        self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size)

        self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def
forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if self.trigram_input: # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited # Devices (https://arxiv.org/abs/2004.02984) # # The embedding table in BERT models accounts for a substantial proportion of model size. To compress # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT. # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512 # dimensional output. inputs_embeds = torch.cat( [ nn.functional.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0.0), inputs_embeds, nn.functional.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0.0), ], dim=2, ) if self.trigram_input or self.embedding_size != self.hidden_size: inputs_embeds = self.embedding_transformation(inputs_embeds) # Add positional embeddings and token type embeddings, then layer # normalize and perform dropout. position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class MobileBertSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.true_hidden_size, self.all_head_size) self.key = nn.Linear(config.true_hidden_size, self.all_head_size) self.value = nn.Linear( config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size ) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, query_tensor: torch.Tensor, key_tensor: torch.Tensor, value_tensor: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(query_tensor) mixed_key_layer = self.key(key_tensor) mixed_value_layer = self.value(value_tensor) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
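        # (Editor's note) The resulting scores have shape (batch_size, num_attention_heads, seq_len, seq_len);
        # dividing by sqrt(attention_head_size) below keeps the softmax input well-scaled.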
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the MobileBertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs


class MobileBertSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.use_bottleneck = config.use_bottleneck
        self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
        if not self.use_bottleneck:
            self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
        layer_outputs = self.dense(hidden_states)
        if not self.use_bottleneck:
            layer_outputs = self.dropout(layer_outputs)
        layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
        return layer_outputs


class MobileBertAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = MobileBertSelfAttention(config)
        self.output = MobileBertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        query_tensor: torch.Tensor,
        key_tensor: torch.Tensor,
        value_tensor: torch.Tensor,
        layer_input: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            query_tensor,
            key_tensor,
            value_tensor,
            attention_mask,
            head_mask,
            output_attentions,
        )
        # Run a linear projection of `hidden_size` then add a residual
        # with `layer_input`.
        attention_output = self.output(self_outputs[0], layer_input)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class MobileBertIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class OutputBottleneck(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.true_hidden_size, config.hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
        layer_outputs = self.dense(hidden_states)
        layer_outputs = self.dropout(layer_outputs)
        layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
        return layer_outputs


class MobileBertOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.use_bottleneck = config.use_bottleneck
        self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size)
        if not self.use_bottleneck:
            self.dropout = nn.Dropout(config.hidden_dropout_prob)
        else:
            self.bottleneck = OutputBottleneck(config)

    def forward(
        self, intermediate_states: torch.Tensor, residual_tensor_1: torch.Tensor, residual_tensor_2: torch.Tensor
    ) -> torch.Tensor:
        layer_output = self.dense(intermediate_states)
        if not self.use_bottleneck:
            layer_output = self.dropout(layer_output)
            layer_output = self.LayerNorm(layer_output + residual_tensor_1)
        else:
            layer_output = self.LayerNorm(layer_output + residual_tensor_1)
            layer_output = self.bottleneck(layer_output, residual_tensor_2)
        return layer_output


class BottleneckLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        layer_input = self.dense(hidden_states)
        layer_input = self.LayerNorm(layer_input)
        return layer_input


class Bottleneck(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.key_query_shared_bottleneck = config.key_query_shared_bottleneck
        self.use_bottleneck_attention = config.use_bottleneck_attention
        self.input = BottleneckLayer(config)
        if self.key_query_shared_bottleneck:
            self.attention = BottleneckLayer(config)

    def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
        # This method can return three different tuples of values. These different values make use of bottlenecks,
        # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory
        # usage. These linear layers have weights that are learned during training.
        #
        # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the
        # key, query, value, and "layer input" to be used by the attention layer.
        # This bottleneck is used to project the hidden states.
This last layer input will be used as a residual tensor # in the attention self output, after the attention scores have been computed. # # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return # four values, three of which have been passed through a bottleneck: the query and key, passed through the same # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck. # # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck, # and the residual layer will be this value passed through a bottleneck. bottlenecked_hidden_states = self.input(hidden_states) if self.use_bottleneck_attention: return (bottlenecked_hidden_states,) * 4 elif self.key_query_shared_bottleneck: shared_attention_input = self.attention(hidden_states) return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states) else: return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states) class FFNOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor: layer_outputs = self.dense(hidden_states) layer_outputs = self.LayerNorm(layer_outputs + residual_tensor) return layer_outputs class FFNLayer(nn.Module): def __init__(self, config): super().__init__() self.intermediate = MobileBertIntermediate(config) self.output = FFNOutput(config) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: intermediate_output = self.intermediate(hidden_states) layer_outputs = self.output(intermediate_output, hidden_states) return layer_outputs class MobileBertLayer(nn.Module): def __init__(self, config): super().__init__() self.use_bottleneck = config.use_bottleneck self.num_feedforward_networks = config.num_feedforward_networks self.attention = MobileBertAttention(config) self.intermediate = MobileBertIntermediate(config) self.output = MobileBertOutput(config) if self.use_bottleneck: self.bottleneck = Bottleneck(config) if config.num_feedforward_networks > 1: self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)]) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, ) -> Tuple[torch.Tensor]: if self.use_bottleneck: query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states) else: query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4 self_attention_outputs = self.attention( query_tensor, key_tensor, value_tensor, layer_input, attention_mask, head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] s = (attention_output,) outputs = self_attention_outputs[1:] # add self attentions if we output attention weights if self.num_feedforward_networks != 1: for i, ffn_module in enumerate(self.ffn): attention_output = ffn_module(attention_output) s += (attention_output,) intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output, hidden_states) outputs = ( (layer_output,) + outputs + ( torch.tensor(1000), query_tensor, key_tensor, value_tensor, layer_input, 
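                # (Editor's note) torch.tensor(1000) above appears to act as a constant marker; it and the
                # surrounding intermediate tensors are returned for inspection only, since MobileBertEncoder
                # consumes just layer_outputs[0] (and layer_outputs[1] when attentions are requested).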
attention_output, intermediate_output, ) + s ) return outputs class MobileBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class MobileBertPooler(nn.Module): def __init__(self, config): super().__init__() self.do_activate = config.classifier_activation if self.do_activate: self.dense = nn.Linear(config.hidden_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] if not self.do_activate: return first_token_tensor else: pooled_output = self.dense(first_token_tensor) pooled_output = torch.tanh(pooled_output) return pooled_output class MobileBertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class MobileBertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = MobileBertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
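        # (Editor's note) Because MobileBERT factorizes embeddings (embedding_size < hidden_size), `decoder` is tied
        # to the (vocab_size, embedding_size) input embedding table, while `dense` holds the remaining
        # (hidden_size - embedding_size) rows of the projection; forward() concatenates the two into a full
        # (hidden_size, vocab_size) matrix before adding the bias.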
self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False) self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self) -> None: self.decoder.bias = self.bias def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.transform(hidden_states) hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0)) hidden_states += self.decoder.bias return hidden_states class MobileBertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = MobileBertLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class MobileBertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = MobileBertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output: torch.Tensor, pooled_output: torch.Tensor) -> Tuple[torch.Tensor]: prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class MobileBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MobileBertConfig load_tf_weights = load_tf_weights_in_mobilebert base_model_prefix = "mobilebert" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, (nn.LayerNorm, NoNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class MobileBertForPreTrainingOutput(ModelOutput): """ Output type of [`MobileBertForPreTraining`]. Args: loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when
            `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


MOBILEBERT_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`MobileBertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILEBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers.
See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.", MOBILEBERT_START_DOCSTRING, ) class MobileBertModel(MobileBertPreTrainedModel): """ https://arxiv.org/pdf/2004.02984.pdf """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = MobileBertEmbeddings(config) self.encoder = MobileBertEncoder(config) self.pooler = MobileBertPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
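        # (Editor's note) get_extended_attention_mask broadcasts the 2D mask to shape (batch_size, 1, 1, seq_length)
        # and converts 0 entries into large negative additive biases, so masked positions vanish after the softmax.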
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    """
    MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
    `next sentence prediction (classification)` head.
    """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForPreTraining(MobileBertPreTrainedModel):
    _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config)
        self.cls = MobileBertPreTrainingHeads(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
        # resize the dense output embeddings first
        self.cls.predictions.dense = self._get_resized_lm_head(
            self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
        )

        return super().resize_token_embeddings(new_num_tokens=new_num_tokens)

    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=MobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        next_sentence_label: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MobileBertForPreTrainingOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, MobileBertForPreTraining
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
        >>> model = MobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")

        >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
        >>> # Batch size 1
        >>> outputs = model(input_ids)

        >>> prediction_logits = outputs.prediction_logits
        >>> seq_relationship_logits = outputs.seq_relationship_logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        total_loss = None
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss

        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return MobileBertForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings("""MobileBert Model with a `language modeling` head on top.""", MOBILEBERT_START_DOCSTRING)
class MobileBertForMaskedLM(MobileBertPreTrainedModel):
    _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
        self.cls = MobileBertOnlyMLMHead(config)
        self.config = config

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
        # resize the dense output embeddings first
        self.cls.predictions.dense = self._get_resized_lm_head(
            self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
        )
        return super().resize_token_embeddings(new_num_tokens=new_num_tokens)

    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, expected_output="'paris'", expected_loss=0.57, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class MobileBertOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output: torch.Tensor) -> torch.Tensor: seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score @add_start_docstrings( """MobileBert Model with a `next sentence prediction (classification)` head on top.""", MOBILEBERT_START_DOCSTRING, ) class MobileBertForNextSentencePrediction(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.mobilebert = MobileBertModel(config) self.cls = MobileBertOnlyNSPHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple, NextSentencePredictorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`. - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. Returns: Examples: ```python >>> from transformers import AutoTokenizer, MobileBertForNextSentencePrediction >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased") >>> model = MobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt") >>> outputs = model(**encoding, labels=torch.LongTensor([1])) >>> loss = outputs.loss >>> logits = outputs.logits ```""" if "next_sentence_label" in kwargs: warnings.warn( "The `next_sentence_label` argument is deprecated and will be removed in a future version, use" " `labels` instead.", FutureWarning, ) labels = kwargs.pop("next_sentence_label") return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] seq_relationship_score = self.cls(pooled_output) next_sentence_loss = None if labels is not None: loss_fct = CrossEntropyLoss() next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), labels.view(-1)) if not return_dict: output = (seq_relationship_score,) + outputs[2:] return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output return NextSentencePredictorOutput( loss=next_sentence_loss, logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", MOBILEBERT_START_DOCSTRING, ) # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing class MobileBertForSequenceClassification(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.mobilebert = MobileBertModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, expected_loss=_SEQ_CLASS_EXPECTED_LOSS, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, MOBILEBERT_START_DOCSTRING, ) # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering with Bert->MobileBert all-casing class MobileBertForQuestionAnswering(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mobilebert = MobileBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_QA, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=_QA_TARGET_START_INDEX, qa_target_end_index=_QA_TARGET_END_INDEX, expected_output=_QA_EXPECTED_OUTPUT, expected_loss=_QA_EXPECTED_LOSS, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). 
            Positions outside of the sequence are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the split may add an extra dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
    a softmax) e.g. for RocStories/SWAG tasks.
""", MOBILEBERT_START_DOCSTRING, ) # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice with Bert->MobileBert all-casing class MobileBertForMultipleChoice(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.mobilebert = MobileBertModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", MOBILEBERT_START_DOCSTRING, ) # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification with Bert->MobileBert all-casing class MobileBertForTokenClassification(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mobilebert = MobileBertModel(config, add_pooling_layer=False) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT, expected_loss=_TOKEN_CLASS_EXPECTED_LOSS, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "MobileBertForMaskedLM", "MobileBertForMultipleChoice", "MobileBertForNextSentencePrediction", "MobileBertForPreTraining", "MobileBertForQuestionAnswering", "MobileBertForSequenceClassification", "MobileBertForTokenClassification", "MobileBertLayer", "MobileBertModel", "MobileBertPreTrainedModel", "load_tf_weights_in_mobilebert", ]
transformers/src/transformers/models/mobilebert/modeling_mobilebert.py/0
{ "file_path": "transformers/src/transformers/models/mobilebert/modeling_mobilebert.py", "repo_id": "transformers", "token_count": 29747 }
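The extractive-QA heads above all share the same span-loss recipe: out-of-range start/end labels are clamped to `ignored_index` and masked out of the cross-entropy, and the final loss averages the start and end terms. Below is a minimal standalone sketch of that recipe using only PyTorch; the sizes and label values are illustrative assumptions, not taken from any real checkpoint.

import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len = 2, 16
# Stand-ins for the start/end logits produced by a `qa_outputs` linear layer.
start_logits = torch.randn(batch_size, seq_len)
end_logits = torch.randn(batch_size, seq_len)

# One in-range span and one deliberately out-of-range span label.
start_positions = torch.tensor([3, 50])
end_positions = torch.tensor([7, 60])

# Clamp out-of-range labels to `ignored_index`; CrossEntropyLoss then skips them.
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(float(total_loss))  # averaged span loss, computed only over the in-range labels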
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/moonshine/modular_moonshine.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_moonshine.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import rope_config_validation


class MoonshineConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MoonshineModel`]. It is used to instantiate a
    Moonshine model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Moonshine
    [UsefulSensors/moonshine-tiny](https://huggingface.co/UsefulSensors/moonshine-tiny).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32768):
            Vocabulary size of the Moonshine model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`MoonshineModel`].
        hidden_size (`int`, *optional*, defaults to 288):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 1152):
            Dimension of the MLP representations.
        encoder_num_hidden_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        decoder_num_hidden_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer decoder.
        encoder_num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        encoder_num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `encoder_num_key_value_heads=encoder_num_attention_heads`, the model will use Multi Head Attention (MHA),
            if `encoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA); otherwise GQA is used.
            When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `encoder_num_attention_heads`.
        decoder_num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention.
            If `decoder_num_key_value_heads=decoder_num_attention_heads`, the model will use Multi Head Attention
            (MHA), if `decoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA); otherwise GQA is
            used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `decoder_num_attention_heads`.
        pad_head_dim_to_multiple_of (`int`, *optional*):
            Pad head dimension in encoder and decoder to the next multiple of this value. Necessary for using certain
            optimized attention implementations.
        encoder_hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder.
        decoder_hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        decoder_start_token_id (`int`, *optional*, defaults to 1):
            Corresponds to the "<|startoftranscript|>" token, which is automatically used when no `decoder_input_ids`
            are provided to the `generate` function. It is used to guide the model's generation process depending on
            the task.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope
            type and you expect the model to work on longer `max_position_embeddings`, we recommend updating this
            value accordingly.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                    computation. If unspecified, it defaults to value recommended by the implementation, using the
                    `factor` field to infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                    ramp function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                    ramp function. If unspecified, it defaults to 1.
                `short_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                    `original_max_position_embeddings`).
                    Must be a list of numbers with the same length as the hidden size divided by the number of
                    attention heads divided by 2
                `long_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (>
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
        partial_rotary_factor (`float`, *optional*, defaults to 0.9):
            Percentage of the query and keys which will have rotary embedding.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether the model is used as an encoder/decoder or not.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        bos_token_id (`int`, *optional*, defaults to 1):
            Denotes the beginning-of-sequence token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            Denotes the end-of-sequence token id.

    Example:

    ```python
    >>> from transformers import MoonshineModel, MoonshineConfig

    >>> # Initializing a Moonshine style configuration
    >>> configuration = MoonshineConfig().from_pretrained("UsefulSensors/moonshine-tiny")

    >>> # Initializing a model from the configuration
    >>> model = MoonshineModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "moonshine"
    keys_to_ignore_at_inference = ["past_key_values"]

    attribute_map = {
        "num_key_value_heads": "encoder_num_key_value_heads",
        "num_attention_heads": "encoder_num_attention_heads",
        "num_hidden_layers": "encoder_num_hidden_layers",
    }

    def __init__(
        self,
        vocab_size=32768,
        hidden_size=288,
        intermediate_size=1152,
        encoder_num_hidden_layers=6,
        decoder_num_hidden_layers=6,
        encoder_num_attention_heads=8,
        decoder_num_attention_heads=8,
        encoder_num_key_value_heads=None,
        decoder_num_key_value_heads=None,
        pad_head_dim_to_multiple_of=None,
        encoder_hidden_act="gelu",
        decoder_hidden_act="silu",
        max_position_embeddings=512,
        initializer_range=0.02,
        decoder_start_token_id=1,
        use_cache=True,
        rope_theta=10000.0,
        rope_scaling=None,
        partial_rotary_factor=0.9,
        is_encoder_decoder=True,
        attention_bias=False,
        attention_dropout=0.0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.encoder_num_hidden_layers = encoder_num_hidden_layers
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.encoder_num_attention_heads = encoder_num_attention_heads
        self.decoder_num_attention_heads = decoder_num_attention_heads

        if encoder_num_key_value_heads is None:
            encoder_num_key_value_heads = encoder_num_attention_heads
        self.encoder_num_key_value_heads = encoder_num_key_value_heads

        if decoder_num_key_value_heads is None:
            decoder_num_key_value_heads = decoder_num_attention_heads
        self.decoder_num_key_value_heads = decoder_num_key_value_heads

        self.pad_head_dim_to_multiple_of = pad_head_dim_to_multiple_of

        self.encoder_hidden_act = encoder_hidden_act
        self.decoder_hidden_act = decoder_hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.partial_rotary_factor = partial_rotary_factor self.is_encoder_decoder = is_encoder_decoder self.attention_bias = attention_bias self.attention_dropout = attention_dropout # Validate the correctness of rotary position embeddings parameters rope_config_validation(self) super().__init__( bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, ) __all__ = ["MoonshineConfig"]
transformers/src/transformers/models/moonshine/configuration_moonshine.py/0
{ "file_path": "transformers/src/transformers/models/moonshine/configuration_moonshine.py", "repo_id": "transformers", "token_count": 5435 }
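`MoonshineConfig` resolves an unset `*_num_key_value_heads` to the matching `*_num_attention_heads`, so multi-head attention is the default and grouped-query attention is opt-in. A small sketch of that behavior, assuming a `transformers` release that ships Moonshine; the head counts below are illustrative, not tuned for any released checkpoint.

from transformers import MoonshineConfig

# Explicit GQA: 8 decoder query heads share 2 key/value heads (4 queries per group).
gqa_config = MoonshineConfig(decoder_num_attention_heads=8, decoder_num_key_value_heads=2)
print(gqa_config.decoder_num_key_value_heads)  # 2

# Leaving the argument unset falls back to MHA (key/value heads == attention heads).
mha_config = MoonshineConfig()
print(mha_config.encoder_num_key_value_heads)  # 8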
# coding=utf-8
# Copyright 2023 HuggingFace Inc. team and MosaicML NLP team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MPT model."""

import math
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from torch.nn import functional as F

from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_mpt import MptConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "mosaicml/mpt-7b"
_CONFIG_FOR_DOC = "MptConfig"


def build_mpt_alibi_tensor(num_heads, sequence_length, alibi_bias_max=8, device=None):
    r"""
    Link to paper: https://arxiv.org/abs/2108.12409 - the ALiBi tensor here is not causal as the original paper
    mentions; it relies on a translation invariance of softmax for quick implementation. This implementation has been
    copied from the ALiBi implementation of the MPT source code, which led to slightly different results than the
    Bloom alibi: https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L292
    """
    alibi = torch.arange(1 - sequence_length, 1, dtype=torch.int32, device=device).view(1, 1, 1, sequence_length)
    num_heads_power_of_2 = 2 ** math.ceil(math.log2(num_heads))

    base = torch.arange(1, num_heads_power_of_2 + 1, dtype=torch.int64, device=device).float()
    base = base * (alibi_bias_max / num_heads_power_of_2)

    slopes = 1.0 / torch.pow(2, base)
    slopes = slopes.view(1, num_heads_power_of_2, 1, 1)

    if num_heads_power_of_2 != num_heads:
        slopes = torch.concat([slopes[:, 1::2, ...], slopes[:, ::2, ...]], dim=1)[:, :num_heads, ...]

    alibi = alibi * slopes
    return alibi.squeeze(0)


class MptAttention(nn.Module):
    """Multi-head self attention.
    Using the torch or triton attention implementation enables the user to also use additive bias.
""" def __init__(self, config: MptConfig): super().__init__() self.hidden_size = config.hidden_size self.n_heads = config.n_heads self.max_seq_length = config.max_seq_len self.head_dim = self.hidden_size // self.n_heads self.softmax_scale = config.attn_config.softmax_scale if self.softmax_scale is None: self.softmax_scale = 1 / math.sqrt(self.hidden_size / self.n_heads) self.attn_dropout_p = config.attn_config.attn_pdrop self.clip_qkv = config.attn_config.clip_qkv self.Wqkv = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False) self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False) def forward( self, hidden_states: torch.Tensor, position_bias: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, ): batch_size, seq_length = hidden_states.shape[:2] mixed_qkv = self.Wqkv(hidden_states) if self.clip_qkv: mixed_qkv = mixed_qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv) query_states, key_states, value_states = mixed_qkv.chunk(3, dim=2) query_states = query_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2) key_states = key_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2) value_states = value_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2) if past_key_value is not None: if len(past_key_value) != 0: key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) else: past_key_value = (key_states, value_states) attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) * self.softmax_scale query_length = seq_length if past_key_value is None else seq_length + past_key_value[0].shape[2] if position_bias is not None: if len(position_bias.shape) != 3: raise ValueError(f"Expecting position_bias shape to be 3 dimensions, got {len(position_bias.shape)}") key_length = key_states.shape[-2] position_bias_query_index = max(0, position_bias.size(1) - query_length) position_bias_key_index = max(0, position_bias.size(2) - key_length) position_bias = position_bias[:, position_bias_query_index:, position_bias_key_index:] attention_scores = attention_scores + position_bias if attention_mask is not None: attention_scores = attention_scores.masked_fill(attention_mask, torch.finfo(query_states.dtype).min) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.softmax(attention_scores.float(), dim=-1).to(value_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attn_dropout_p, training=self.training) context_states = torch.matmul(attn_weights, value_states) context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1) attn_output = self.out_proj(context_states) return attn_output, attn_weights, past_key_value class MptMLP(nn.Module): def __init__(self, config: MptConfig): super().__init__() hidden_size = config.hidden_size self.up_proj = nn.Linear(hidden_size, 4 * hidden_size, bias=False) self.act = nn.GELU(approximate="none") self.down_proj = nn.Linear(4 * hidden_size, hidden_size, bias=False) self.hidden_dropout = config.attn_config.attn_pdrop def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor: hidden_states = self.act(self.up_proj(hidden_states)) intermediate_output = self.down_proj(hidden_states) output = F.dropout(intermediate_output, p=self.hidden_dropout, training=self.training) 
output = output + residual return output class MptBlock(nn.Module): def __init__(self, config: MptConfig): super().__init__() hidden_size = config.hidden_size self.norm_1 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) # backward compatibility with weights on the Hub self.norm_1.bias = None self.num_heads = config.n_heads self.attn = MptAttention(config) self.norm_2 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) # backward compatibility with weights on the Hub self.norm_2.bias = None self.ffn = MptMLP(config) self.dropout_rate = config.attn_config.attn_pdrop self.resid_attn_dropout = nn.Dropout(self.dropout_rate) def forward( self, hidden_states: torch.Tensor, position_bias: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, use_cache: bool = False, output_attentions: bool = False, ): # hidden_states: [batch_size, seq_length, hidden_size] # Layer norm at the beginning of the transformer layer. layernorm_output = self.norm_1(hidden_states) residual = hidden_states # Self attention. attn_outputs, attn_weights, past_key_value = self.attn( layernorm_output, position_bias=position_bias, attention_mask=attention_mask, past_key_value=layer_past, ) hidden_states = self.resid_attn_dropout(attn_outputs) + residual layernorm_output = self.norm_2(hidden_states) # Get residual residual = hidden_states # MLP. output = self.ffn(layernorm_output, residual) outputs = (output,) if use_cache: outputs += (past_key_value,) if output_attentions: outputs += (attn_weights,) return outputs # hidden_states, present, attentions class MptPreTrainedModel(PreTrainedModel): config_class = MptConfig base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["MptBlock"] _keys_to_ignore_on_load_missing = [r"lm_head.*."] def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module: nn.Module): """Initialize the weights.""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, LayerNorm): if module.bias is not None: module.bias.data.zero_() module.weight.data.fill_(1.0) @staticmethod def _convert_to_mpt_cache( past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]], ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]: """ Converts the cache to the format expected by Mpt, i.e. to tuple(tuple([batch_size * num_heads, ...])) """ batch_size, num_heads, head_dim, seq_length = past_key_value[0][0].shape batch_size_times_num_heads = batch_size * num_heads # key: [batch_size, num_heads, head_dim, seq_length] -> [batch_size * num_heads, head_dim, seq_length] # value: [batch_size, num_heads, seq_length, head_dim] -> [batch_size * num_heads, seq_length, head_dim] return tuple( ( layer_past[0].reshape(batch_size_times_num_heads, head_dim, seq_length), layer_past[1].reshape(batch_size_times_num_heads, seq_length, head_dim), ) for layer_past in past_key_value ) MPT_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. 
    Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`MptConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MPT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
            `past_key_values[0][0].shape[2]` (`sequence_length` of input past key value states). Indices of input
            sequence tokens in the vocabulary.

            If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
            `input_ids`.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
            Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
            `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
            their past given to this model should not be passed as `input_ids` as they have already been computed.

            Each element of `past_key_values` is a tuple (past_key, past_value):
            - past_key: [batch_size * num_heads, head_dim, kv_length]
            - past_value: [batch_size * num_heads, kv_length, head_dim]
        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.

            If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
            `past_key_values`).
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
""" @add_start_docstrings( "The bare Mpt Model transformer outputting raw hidden-states without any specific head on top.", MPT_START_DOCSTRING, ) class MptModel(MptPreTrainedModel): def __init__(self, config: MptConfig): super().__init__(config) self.hidden_size = config.hidden_size self.num_heads = config.n_heads # Embedding + LN Embedding self.wte = nn.Embedding(config.vocab_size, self.hidden_size) # Transformer blocks self.blocks = nn.ModuleList([MptBlock(config) for _ in range(config.n_layers)]) # Final Layer Norm self.norm_f = LayerNorm(self.hidden_size, eps=config.layer_norm_epsilon) # backward compatibility with weights on the Hub self.norm_f.bias = None self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.wte def build_mpt_alibi_tensor(self, num_heads, sequence_length, alibi_bias_max=8, device=None): return build_mpt_alibi_tensor(num_heads, sequence_length, alibi_bias_max, device) def set_input_embeddings(self, new_embeddings: torch.Tensor): self.wte = new_embeddings @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, # NOOP kwargs, for now ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") if past_key_values is None: past_key_values = tuple([None] * len(self.blocks)) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) hidden_states = inputs_embeds presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # Compute alibi tensor: check build_alibi_tensor documentation seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values[0] is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device) else: attention_mask = attention_mask.to(hidden_states.device) alibi = self.build_mpt_alibi_tensor(self.num_heads, self.config.max_seq_len, device=hidden_states.device) causal_mask = _prepare_4d_causal_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length ) causal_mask = causal_mask.bool() for block, layer_past in zip(self.blocks, past_key_values): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func( block.__call__, hidden_states, alibi, causal_mask, layer_past, use_cache, output_attentions, ) else: outputs = block( hidden_states, layer_past=layer_past, attention_mask=causal_mask, use_cache=use_cache, output_attentions=output_attentions, position_bias=alibi, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) # Add last hidden state hidden_states = self.norm_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @add_start_docstrings( """ The MPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, MPT_START_DOCSTRING, ) class MptForCausalLM(MptPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: MptConfig): super().__init__(config) self.transformer = MptModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings: torch.Tensor): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
            you can set `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels
            set to `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(lm_logits.device)
            # Flatten the tokens
            loss = self.loss_function(
                lm_logits,
                labels,
                vocab_size=self.config.vocab_size,
                **kwargs,
            )

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def _reorder_cache(
        self, past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.

        Output shares the same memory storage as `past`.
        """
        # Get a copy of `beam_idx` on all the devices where we need those indices.
        device_to_beam_idx = {
            past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past
        }
        reordered_past = tuple(
            (
                layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]),
                layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device]),
            )
            for layer_past in past
        )
        return reordered_past


@add_start_docstrings(
    """
    The MPT Model transformer with a sequence classification head on top (linear layer).

    [`MptForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-1) do.

    Since it does classification on the last token, it needs to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row.
    If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess
    the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value
    in each row of the batch).
""", MPT_START_DOCSTRING, ) class MptForSequenceClassification(MptPreTrainedModel): def __init__(self, config: MptConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = MptModel(config) self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds`."
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@add_start_docstrings(
    """
    MPT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    MPT_START_DOCSTRING,
)
class MptForTokenClassification(MptPreTrainedModel):
    def __init__(self, config: MptConfig):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = MptModel(config)
        if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **deprecated_arguments,
    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in
            `[0, ..., config.num_labels - 1]`.
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) batch_size, seq_length = labels.shape loss_fct = CrossEntropyLoss() loss = loss_fct( logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length) ) if not return_dict: output = (logits,) + transformer_outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ The MPT Model transformer with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, MPT_START_DOCSTRING, ) class MptForQuestionAnswering(MptPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = MptModel(config) self.qa_outputs = nn.Linear(config.hidden_size, 2) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "MptForCausalLM", "MptModel", "MptPreTrainedModel", "MptForSequenceClassification", "MptForTokenClassification", "MptForQuestionAnswering", ]
transformers/src/transformers/models/mpt/modeling_mpt.py/0
{ "file_path": "transformers/src/transformers/models/mpt/modeling_mpt.py", "repo_id": "transformers", "token_count": 16886 }
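The `build_mpt_alibi_tensor` helper above assigns each head a geometric bias slope, padding the head count up to the next power of two and interleaving slopes when the real head count is smaller. A quick shape check, assuming a `transformers` install that ships the MPT module (the import path below reflects the file layout shown here):

from transformers.models.mpt.modeling_mpt import build_mpt_alibi_tensor

num_heads, seq_len = 6, 8  # 6 heads: not a power of two, so the interleaving branch runs
alibi = build_mpt_alibi_tensor(num_heads, seq_len)
print(alibi.shape)   # torch.Size([6, 1, 8]) -> (num_heads, 1, sequence_length)
print(alibi[0, 0])   # linearly increasing (negative up to zero) bias for the first head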
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Text/audio processor class for MusicGen
"""

from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class MusicgenProcessor(ProcessorMixin):
    r"""
    Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single
    processor class.

    [`MusicgenProcessor`] offers all the functionalities of [`EncodecFeatureExtractor`] and [`T5Tokenizer`]. See
    [`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information.

    Args:
        feature_extractor (`EncodecFeatureExtractor`):
            An instance of [`EncodecFeatureExtractor`]. The feature extractor is a required input.
        tokenizer (`T5Tokenizer`):
            An instance of [`T5Tokenizer`]. The tokenizer is a required input.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """
        Forwards the `audio` argument to EncodecFeatureExtractor's [`~EncodecFeatureExtractor.__call__`] and the
        `text` argument to [`~T5Tokenizer.__call__`]. Please refer to the docstrings of the above two methods for more
        information.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs

        elif text is None:
            return audio_inputs

        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """
        This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids
        from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's
        [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information.
""" audio_values = kwargs.pop("audio", None) padding_mask = kwargs.pop("padding_mask", None) if len(args) > 0: audio_values = args[0] args = args[1:] if audio_values is not None: return self._decode_audio(audio_values, padding_mask=padding_mask) else: return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to T5Tokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]: """ This method strips any padding from the audio values to return a list of numpy audio arrays. """ audio_values = to_numpy(audio_values) bsz, channels, seq_len = audio_values.shape if padding_mask is None: return list(audio_values) padding_mask = to_numpy(padding_mask) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) difference = seq_len - padding_mask.shape[-1] padding_value = 1 - self.feature_extractor.padding_value padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value) audio_values = audio_values.tolist() for i in range(bsz): sliced_audio = np.asarray(audio_values[i])[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] audio_values[i] = sliced_audio.reshape(channels, -1) return audio_values __all__ = ["MusicgenProcessor"]
transformers/src/transformers/models/musicgen/processing_musicgen.py/0
{ "file_path": "transformers/src/transformers/models/musicgen/processing_musicgen.py", "repo_id": "transformers", "token_count": 2189 }
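`MusicgenProcessor._decode_audio` trims generated waveforms back to their true lengths by keeping only the samples whose padding-mask entry differs from the feature extractor's padding value. The core indexing trick in isolation, with a hand-built mask and padding value (both illustrative assumptions, not taken from a real `EncodecFeatureExtractor`):

import numpy as np

padding_value = 0.0
audio = np.arange(12, dtype=np.float32).reshape(1, 1, 12)  # (batch, channels, seq_len)
padding_mask = np.array([[1.0] * 9 + [0.0] * 3])           # last 3 samples are padding

# Boolean-mask the time axis: only non-padding samples survive.
sliced = audio[0][:, padding_mask[0] != padding_value]
print(sliced.shape)  # (1, 9)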
# coding=utf-8
# Copyright 2022 UW-Madison and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Nystromformer model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class NystromformerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`NystromformerModel`]. It is used to instantiate
    a Nystromformer model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Nystromformer
    [uw-madison/nystromformer-512](https://huggingface.co/uw-madison/nystromformer-512) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30000):
            Vocabulary size of the Nystromformer model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`NystromformerModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 510):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`NystromformerModel`].
        segment_means_seq_len (`int`, *optional*, defaults to 64):
            Sequence length used in segment-means.
        num_landmarks (`int`, *optional*, defaults to 64):
            The number of landmark (or Nystrom) points to use in the Nystrom approximation of the softmax
            self-attention matrix.
        conv_kernel_size (`int`, *optional*, defaults to 65):
            The kernel size of the depthwise convolution used in the Nystrom approximation.
        inv_coeff_init_option (`bool`, *optional*, defaults to `False`):
            Whether or not to use exact coefficient computation for the initial values for the iterative method of
            calculating the Moore-Penrose inverse of a matrix.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.

    Example:

    ```python
    >>> from transformers import NystromformerModel, NystromformerConfig

    >>> # Initializing a Nystromformer uw-madison/nystromformer-512 style configuration
    >>> configuration = NystromformerConfig()

    >>> # Initializing a model from the uw-madison/nystromformer-512 style configuration
    >>> model = NystromformerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "nystromformer"

    def __init__(
        self,
        vocab_size=30000,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=510,
        type_vocab_size=2,
        segment_means_seq_len=64,
        num_landmarks=64,
        conv_kernel_size=65,
        inv_coeff_init_option=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.segment_means_seq_len = segment_means_seq_len
        self.num_landmarks = num_landmarks
        self.conv_kernel_size = conv_kernel_size
        self.inv_coeff_init_option = inv_coeff_init_option
        self.layer_norm_eps = layer_norm_eps
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)


__all__ = ["NystromformerConfig"]
transformers/src/transformers/models/nystromformer/configuration_nystromformer.py/0
{ "file_path": "transformers/src/transformers/models/nystromformer/configuration_nystromformer.py", "repo_id": "transformers", "token_count": 2357 }
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OWL-ViT model configuration"""

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class OwlViTTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`OwlViTTextConfig`]. It is used to instantiate
    an OwlViT text encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the OwlViT
    [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 49408):
            Vocabulary size of the OWL-ViT text model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`OwlViTTextModel`].
        hidden_size (`int`, *optional*, defaults to 512):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 16):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the padding token in the input sequences.
        bos_token_id (`int`, *optional*, defaults to 49406):
            The id of the beginning-of-sequence token in the input sequences.
        eos_token_id (`int`, *optional*, defaults to 49407):
            The id of the end-of-sequence token in the input sequences.

    Example:

    ```python
    >>> from transformers import OwlViTTextConfig, OwlViTTextModel

    >>> # Initializing an OwlViTTextConfig with google/owlvit-base-patch32 style configuration
    >>> configuration = OwlViTTextConfig()

    >>> # Initializing an OwlViTTextModel (with random weights) from the google/owlvit-base-patch32 style configuration
    >>> model = OwlViTTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "owlvit_text_model"
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor


class OwlViTVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`OwlViTVisionModel`]. It is used to instantiate
    an OWL-ViT image encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the OWL-ViT
    [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 768):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).

    Example:

    ```python
    >>> from transformers import OwlViTVisionConfig, OwlViTVisionModel

    >>> # Initializing an OwlViTVisionConfig with google/owlvit-base-patch32 style configuration
    >>> configuration = OwlViTVisionConfig()

    >>> # Initializing an OwlViTVisionModel (with random weights) from the google/owlvit-base-patch32 style configuration
    >>> model = OwlViTVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "owlvit_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor


class OwlViTConfig(PretrainedConfig):
    r"""
    [`OwlViTConfig`] is the configuration class to store the configuration of an [`OwlViTModel`]. It is used to
    instantiate an OWL-ViT model according to the specified arguments, defining the text model and vision model
    configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWL-ViT
    [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`OwlViTTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`OwlViTVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original OWL-ViT
            implementation.
        return_dict (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return a dictionary. If `False`, returns a tuple.
        kwargs (*optional*):
            Dictionary of keyword arguments.
    """

    model_type = "owlvit"
    sub_configs = {"text_config": OwlViTTextConfig, "vision_config": OwlViTVisionConfig}

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        r"""
        Instantiate an [`OwlViTConfig`] (or a derived class) from owlvit text model configuration and owlvit vision
        model configuration.

        Returns:
            [`OwlViTConfig`]: An instance of a configuration object
        """
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14


__all__ = ["OwlViTConfig", "OwlViTOnnxConfig", "OwlViTTextConfig", "OwlViTVisionConfig"]
transformers/src/transformers/models/owlvit/configuration_owlvit.py/0
{ "file_path": "transformers/src/transformers/models/owlvit/configuration_owlvit.py", "repo_id": "transformers", "token_count": 5546 }
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PatchTST model configuration"""

from typing import List, Optional, Union

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)


class PatchTSTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PatchTSTModel`]. It is used to instantiate a
    PatchTST model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the PatchTST
    [ibm/patchtst](https://huggingface.co/ibm/patchtst) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_input_channels (`int`, *optional*, defaults to 1):
            The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
            multivariate targets.
        context_length (`int`, *optional*, defaults to 32):
            The context length of the input sequence.
        distribution_output (`str`, *optional*, defaults to `"student_t"`):
            The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or
            "negative_binomial".
        loss (`str`, *optional*, defaults to `"mse"`):
            The loss function for the model corresponding to the `distribution_output` head. For parametric
            distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared
            error "mse".
        patch_length (`int`, *optional*, defaults to 1):
            Define the patch length of the patchification process.
        patch_stride (`int`, *optional*, defaults to 1):
            Define the stride of the patchification process.
        num_hidden_layers (`int`, *optional*, defaults to 3):
            Number of hidden layers.
        d_model (`int`, *optional*, defaults to 128):
            Dimensionality of the transformer layers.
        num_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        share_embedding (`bool`, *optional*, defaults to `True`):
            Sharing the input embedding across all channels.
        channel_attention (`bool`, *optional*, defaults to `False`):
            Activate the channel attention block in the Transformer to allow channels to attend to each other.
        ffn_dim (`int`, *optional*, defaults to 512):
            Dimension of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        norm_type (`str` , *optional*, defaults to `"batchnorm"`):
            Normalization at each Transformer layer. Can be `"batchnorm"` or `"layernorm"`.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            A value added to the denominator for numerical stability of normalization.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the attention probabilities.
        positional_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability in the positional embedding layer.
        path_dropout (`float`, *optional*, defaults to 0.0):
            The drop path (stochastic depth) rate in the residual blocks.
        ff_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability used between the two layers of the feed-forward networks.
        bias (`bool`, *optional*, defaults to `True`):
            Whether to add bias in the feed-forward networks.
        activation_function (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (string) in the Transformer. `"gelu"` and `"relu"` are supported.
        pre_norm (`bool`, *optional*, defaults to `True`):
            Normalization is applied before self-attention if pre_norm is set to `True`. Otherwise, normalization is
            applied after the residual block.
        positional_encoding_type (`str`, *optional*, defaults to `"sincos"`):
            Positional encodings. Options `"random"` and `"sincos"` are supported.
        use_cls_token (`bool`, *optional*, defaults to `False`):
            Whether a cls token is used.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated normal weight initialization distribution.
        share_projection (`bool`, *optional*, defaults to `True`):
            Sharing the projection layer across different channels in the forecast head.
        scaling (`Union[str, bool]`, *optional*, defaults to `"std"`):
            Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
            scaler is set to "mean".
        do_mask_input (`bool`, *optional*):
            Whether to apply masking during the pretraining.
        mask_type (`str`, *optional*, defaults to `"random"`):
            Masking type. Only `"random"` and `"forecast"` are currently supported.
        random_mask_ratio (`float`, *optional*, defaults to 0.5):
            Masking ratio applied to mask the input data during random pretraining.
        num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`):
            Number of patches to be masked at the end of each batch sample. If it is an integer, all the samples in
            the batch will have the same number of masked patches. If it is a list, samples in the batch will be
            randomly masked by numbers defined in the list. This argument is only used for forecast pretraining.
        channel_consistent_masking (`bool`, *optional*, defaults to `False`):
            If channel consistent masking is True, all the channels will have the same masking pattern.
        unmasked_channel_indices (`list`, *optional*):
            Indices of channels that are not masked during pretraining. Values in the list are numbers between 1 and
            `num_input_channels`.
        mask_value (`int`, *optional*, defaults to 0):
            Values in the masked patches will be filled by `mask_value`.
        pooling_type (`str`, *optional*, defaults to `"mean"`):
            Pooling of the embedding. `"mean"`, `"max"` and `None` are supported.
        head_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the head.
        prediction_length (`int`, *optional*, defaults to 24):
            The prediction horizon that the model will output.
        num_targets (`int`, *optional*, defaults to 1):
            Number of targets for regression and classification tasks. For classification, it is the number of
            classes.
        output_range (`list`, *optional*):
            Output range for the regression task. The range of output values can be set to enforce the model to
            produce values within a range.
        num_parallel_samples (`int`, *optional*, defaults to 100):
            The number of samples generated in parallel for probabilistic prediction.
```python >>> from transformers import PatchTSTConfig, PatchTSTModel >>> # Initializing an PatchTST configuration with 12 time steps for prediction >>> configuration = PatchTSTConfig(prediction_length=12) >>> # Randomly initializing a model (with random weights) from the configuration >>> model = PatchTSTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "patchtst" attribute_map = { "hidden_size": "d_model", "num_attention_heads": "num_attention_heads", "num_hidden_layers": "num_hidden_layers", } def __init__( self, # time series specific configuration num_input_channels: int = 1, context_length: int = 32, distribution_output: str = "student_t", loss: str = "mse", # PatchTST arguments patch_length: int = 1, patch_stride: int = 1, # Transformer architecture configuration num_hidden_layers: int = 3, d_model: int = 128, num_attention_heads: int = 4, share_embedding: bool = True, channel_attention: bool = False, ffn_dim: int = 512, norm_type: str = "batchnorm", norm_eps: float = 1e-05, attention_dropout: float = 0.0, positional_dropout: float = 0.0, path_dropout: float = 0.0, ff_dropout: float = 0.0, bias: bool = True, activation_function: str = "gelu", pre_norm: bool = True, positional_encoding_type: str = "sincos", use_cls_token: bool = False, init_std: float = 0.02, share_projection: bool = True, scaling: Optional[Union[str, bool]] = "std", # mask pretraining do_mask_input: Optional[bool] = None, mask_type: str = "random", random_mask_ratio: float = 0.5, num_forecast_mask_patches: Optional[Union[List[int], int]] = [2], channel_consistent_masking: Optional[bool] = False, unmasked_channel_indices: Optional[List[int]] = None, mask_value: int = 0, # head pooling_type: str = "mean", head_dropout: float = 0.0, prediction_length: int = 24, num_targets: int = 1, output_range: Optional[List] = None, # distribution head num_parallel_samples: int = 100, **kwargs, ): # time series specific configuration self.context_length = context_length self.num_input_channels = num_input_channels # n_vars self.loss = loss self.distribution_output = distribution_output self.num_parallel_samples = num_parallel_samples # Transformer architecture configuration self.d_model = d_model self.num_attention_heads = num_attention_heads self.ffn_dim = ffn_dim self.num_hidden_layers = num_hidden_layers self.attention_dropout = attention_dropout self.share_embedding = share_embedding self.channel_attention = channel_attention self.norm_type = norm_type self.norm_eps = norm_eps self.positional_dropout = positional_dropout self.path_dropout = path_dropout self.ff_dropout = ff_dropout self.bias = bias self.activation_function = activation_function self.pre_norm = pre_norm self.positional_encoding_type = positional_encoding_type self.use_cls_token = use_cls_token self.init_std = init_std self.scaling = scaling # PatchTST parameters self.patch_length = patch_length self.patch_stride = patch_stride # Mask pretraining self.do_mask_input = do_mask_input self.mask_type = mask_type self.random_mask_ratio = random_mask_ratio # for random masking self.num_forecast_mask_patches = num_forecast_mask_patches # for forecast masking self.channel_consistent_masking = channel_consistent_masking self.unmasked_channel_indices = unmasked_channel_indices self.mask_value = mask_value # general head params self.pooling_type = pooling_type self.head_dropout = head_dropout # For prediction head self.share_projection = share_projection self.prediction_length = prediction_length # For prediction 
and regression head self.num_parallel_samples = num_parallel_samples # Regression self.num_targets = num_targets self.output_range = output_range super().__init__(**kwargs) __all__ = ["PatchTSTConfig"]
transformers/src/transformers/models/patchtst/configuration_patchtst.py/0
{ "file_path": "transformers/src/transformers/models/patchtst/configuration_patchtst.py", "repo_id": "transformers", "token_count": 4676 }
# coding=utf-8 # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Phi-3 model.""" from typing import Callable, Optional, Tuple import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...utils import logging from ..mistral.modeling_mistral import ( MistralDecoderLayer, MistralForCausalLM, MistralForSequenceClassification, MistralForTokenClassification, MistralPreTrainedModel, MistralRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward, ) from .configuration_phi3 import Phi3Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "microsoft/Phi-3-mini-4k-instruct" _CONFIG_FOR_DOC = "Phi3Config" class Phi3MLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False) self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False) self.activation_fn = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: up_states = self.gate_up_proj(hidden_states) gate, up_states = up_states.chunk(2, dim=-1) up_states = up_states * self.activation_fn(gate) return self.down_proj(up_states) class Phi3Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.num_key_value_heads = config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True op_size = config.num_attention_heads * self.head_dim + 2 * (config.num_key_value_heads * self.head_dim) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False) self.qkv_proj = nn.Linear(config.hidden_size, op_size, bias=False) def forward( self, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_value: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) qkv = self.qkv_proj(hidden_states) query_pos = self.config.num_attention_heads * self.head_dim query_states = qkv[..., :query_pos] key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim] value_states = qkv[..., query_pos + 
self.num_key_value_heads * self.head_dim :]

        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=getattr(self.config, "sliding_window", None),
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Phi3DecoderLayer(MistralDecoderLayer):
    def __init__(self, config: Phi3Config, layer_idx: int):
        super().__init__(config, layer_idx)
        self.config = config
        self.self_attn = Phi3Attention(config=config, layer_idx=layer_idx)
        self.mlp = Phi3MLP(config)
        self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
        self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indices of positions of each input sequence token in the position embeddings. Selected in the range
                `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
            past_key_value (`Cache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code into the model
        """

        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + self.resid_attn_dropout(hidden_states)  # main diff with Llama

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + self.resid_mlp_dropout(hidden_states)  # main diff with Llama

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


class Phi3RotaryEmbedding(MistralRotaryEmbedding):
    def __init__(self, config: Phi3Config, device=None):
        super().__init__(config, device)

    def _longrope_frequency_update(self, position_ids, device):
        """Longrope uses long factor if sequence is larger than original pretraining length, short otherwise."""
        seq_len = torch.max(position_ids) + 1
        if hasattr(self.config, "original_max_position_embeddings"):
            original_max_position_embeddings = self.config.original_max_position_embeddings
        else:
            original_max_position_embeddings = self.config.max_position_embeddings
        if seq_len > original_max_position_embeddings:
            if not hasattr(self, "long_inv_freq"):
                self.long_inv_freq, _ = self.rope_init_fn(
                    self.config, device, seq_len=original_max_position_embeddings + 1
                )
            self.register_buffer("inv_freq", self.long_inv_freq, persistent=False)
        else:
            # This .to() is needed if the model has been moved to a device after being initialized (because
            # the buffer is automatically moved, but not the original copy)
            self.original_inv_freq = self.original_inv_freq.to(device)
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)

    @torch.no_grad()
    def forward(self, x, position_ids):
        if "dynamic" in self.rope_type:
            self._dynamic_frequency_update(position_ids, device=x.device)
        elif self.rope_type == "longrope":
            self._longrope_frequency_update(position_ids, device=x.device)

        # Core RoPE block
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()

        # Advanced RoPE types (e.g.
yarn) apply a post-processing scaling factor, equivalent to scaling attention cos = cos * self.attention_scaling sin = sin * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) class Phi3PreTrainedModel(MistralPreTrainedModel): _version = "0.0.5" class Phi3ForCausalLM(MistralForCausalLM, Phi3PreTrainedModel): def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, logits_to_keep=None, **kwargs, ): # Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the # process # When the first time input length reached long and short factor switching point, enforce re-compute cache # It will cause downside of slower at this single token position, however, better than current failure. if ( past_key_values and self.config.rope_scaling and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1 ): past_length = cache_position[0] if past_length <= self.config.original_max_position_embeddings: past_key_values = None model_inputs = Phi3PreTrainedModel().prepare_inputs_for_generation( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, cache_position=cache_position, position_ids=position_ids, use_cache=use_cache, logits_to_keep=logits_to_keep, **kwargs, ) return model_inputs class Phi3ForSequenceClassification(MistralForSequenceClassification): pass class Phi3ForTokenClassification(MistralForTokenClassification): pass __all__ = [ "Phi3PreTrainedModel", "Phi3Model", # noqa: F822 "Phi3ForCausalLM", "Phi3ForSequenceClassification", "Phi3ForTokenClassification", ]
transformers/src/transformers/models/phi3/modular_phi3.py/0
{ "file_path": "transformers/src/transformers/models/phi3/modular_phi3.py", "repo_id": "transformers", "token_count": 5864 }
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Pixtral."""

from typing import Dict, List, Optional, Union

from ...image_processing_utils import BatchFeature, get_size_dict
from ...image_processing_utils_fast import (
    BASE_IMAGE_PROCESSOR_FAST_DOCSTRING,
    BASE_IMAGE_PROCESSOR_FAST_DOCSTRING_PREPROCESS,
    BaseImageProcessorFast,
    DefaultFastImageProcessorInitKwargs,
    DefaultFastImageProcessorPreprocessKwargs,
    group_images_by_shape,
    reorder_images,
)
from ...image_utils import (
    ImageInput,
    PILImageResampling,
    SizeDict,
)
from ...processing_utils import Unpack
from ...utils import (
    TensorType,
    add_start_docstrings,
    is_torch_available,
    is_torchvision_available,
    is_torchvision_v2_available,
    is_vision_available,
    logging,
)
from .image_processing_pixtral import (
    get_resize_output_image_size,
)


logger = logging.get_logger(__name__)

if is_torch_available():
    import torch

if is_torchvision_available():
    if is_vision_available():
        pass

    if is_torchvision_v2_available():
        from torchvision.transforms.v2 import functional as F
    else:
        from torchvision.transforms import functional as F


class PixtralFastImageProcessorInitKwargs(DefaultFastImageProcessorInitKwargs):
    patch_size: Optional[Dict[str, int]]


class PixtralFastImageProcessorPreprocessKwargs(DefaultFastImageProcessorPreprocessKwargs):
    patch_size: Optional[Dict[str, int]]


@add_start_docstrings(
    r"Constructs a fast Pixtral image processor.",
    BASE_IMAGE_PROCESSOR_FAST_DOCSTRING,
    """
        patch_size (`Dict[str, int]` *optional*, defaults to `{"height": 16, "width": 16}`):
            Size of the patches in the model, used to calculate the output image size. Can be overridden by
            `patch_size` in the `preprocess` method.
    """,
)
class PixtralImageProcessorFast(BaseImageProcessorFast):
    resample = PILImageResampling.BICUBIC
    image_mean = [0.48145466, 0.4578275, 0.40821073]
    image_std = [0.26862954, 0.26130258, 0.27577711]
    patch_size = {"height": 16, "width": 16}
    size = {"longest_edge": 1024}
    default_to_square = True
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    valid_init_kwargs = PixtralFastImageProcessorInitKwargs
    valid_preprocess_kwargs = PixtralFastImageProcessorPreprocessKwargs

    def __init__(self, **kwargs: Unpack[PixtralFastImageProcessorInitKwargs]):
        super().__init__(**kwargs)

    @add_start_docstrings(
        BASE_IMAGE_PROCESSOR_FAST_DOCSTRING_PREPROCESS,
        """
        patch_size (`Dict[str, int]` *optional*, defaults to `{"height": 16, "width": 16}`):
            Size of the patches in the model, used to calculate the output image size. Can be overridden by
            `patch_size` in the `preprocess` method.
        """,
    )
    def preprocess(
        self, images: ImageInput, **kwargs: Unpack[PixtralFastImageProcessorPreprocessKwargs]
    ) -> BatchFeature:
        return super().preprocess(images, **kwargs)

    def resize(
        self,
        image: torch.Tensor,
        size: SizeDict,
        patch_size: SizeDict,
        interpolation: "F.InterpolationMode" = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Resize an image.
The image is resized so that it fits within `size["longest_edge"]` while keeping the input aspect
        ratio, and the output dimensions are rounded up to multiples of the patch size.

        Args:
            image (`torch.Tensor`):
                Image to resize.
            size (`SizeDict`):
                Dict containing the longest possible edge of the image.
            patch_size (`SizeDict`):
                Patch size used to calculate the size of the output image.
            interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
                Resampling filter to use when resizing the image.
        """
        interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
        if size.longest_edge:
            size = (size.longest_edge, size.longest_edge)
        elif size.height and size.width:
            size = (size.height, size.width)
        else:
            raise ValueError("size must contain either 'longest_edge' or 'height' and 'width'.")

        if patch_size.height and patch_size.width:
            patch_size = (patch_size.height, patch_size.width)
        else:
            raise ValueError("patch_size must contain 'height' and 'width'.")

        output_size = get_resize_output_image_size(image, size=size, patch_size=patch_size)
        return F.resize(image, size=output_size, interpolation=interpolation, **kwargs)

    # Adapted from transformers.models.pixtral.image_processing_pixtral.PixtralImageProcessor._pad_for_batching
    def _pad_for_batching(
        self,
        pixel_values: List[torch.Tensor],
        image_sizes: List[List[int]],
    ):
        """
        Pads images on the `num_of_patches` dimension with zeros to form a batch with the same number of patches.

        Args:
            pixel_values (`List[torch.Tensor]`):
                An array of pixel values of each image of shape (`batch_size`, `channels`, `height`, `width`)
            image_sizes (`List[List[int]]`):
                A list of sizes for each image in `pixel_values` in (height, width) format.

        Returns:
            List[`torch.Tensor`]: The padded images.
        """
        max_shape = (max([size[0] for size in image_sizes]), max([size[1] for size in image_sizes]))
        pixel_values = [
            torch.nn.functional.pad(image, pad=(0, max_shape[1] - size[1], 0, max_shape[0] - size[0]))
            for image, size in zip(pixel_values, image_sizes)
        ]
        return torch.stack(pixel_values)

    def _preprocess(
        self,
        images: List["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        patch_size: Dict[str, int],
        interpolation: Optional["F.InterpolationMode"],
        do_center_crop: bool,
        crop_size: Dict[str, int],
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: Optional[Union[float, List[float]]],
        image_std: Optional[Union[float, List[float]]],
        return_tensors: Optional[Union[str, TensorType]],
    ) -> BatchFeature:
        patch_size = get_size_dict(patch_size, default_to_square=True)
        patch_size = SizeDict(**patch_size)

        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(
                    image=stacked_images, size=size, patch_size=patch_size, interpolation=interpolation
                )
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)

        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(resized_images)
        batch_image_sizes = [grouped_images_index[i][0] for i in range(len(grouped_images_index))]
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_center_crop:
                stacked_images = self.center_crop(stacked_images, crop_size)
            # Fused rescale and normalize
            stacked_images =
self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) padded_images = self._pad_for_batching( pixel_values=processed_images, image_sizes=batch_image_sizes, ) return BatchFeature( data={"pixel_values": padded_images, "image_sizes": batch_image_sizes}, tensor_type=return_tensors ) __all__ = ["PixtralImageProcessorFast"]
transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py/0
{ "file_path": "transformers/src/transformers/models/pixtral/image_processing_pixtral_fast.py", "repo_id": "transformers", "token_count": 3640 }
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for loading the Pop2Piano model weights from the official repository, and for showing how the tokenizer
vocab was constructed."""

import json

import torch

from transformers import Pop2PianoConfig, Pop2PianoForConditionalGeneration


########################## MODEL WEIGHTS ##########################

# These weights were downloaded from the official pop2piano repository
# https://huggingface.co/sweetcocoa/pop2piano/blob/main/model-1999-val_0.67311615.ckpt
official_weights = torch.load("./model-1999-val_0.67311615.ckpt")
state_dict = {}


# load the config and init the model
cfg = Pop2PianoConfig.from_pretrained("sweetcocoa/pop2piano")
model = Pop2PianoForConditionalGeneration(cfg)


# load relative attention bias
state_dict["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = official_weights["state_dict"][
    "transformer.encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"
]
state_dict["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = official_weights["state_dict"][
    "transformer.decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"
]

# load embed tokens and final layer norm for both encoder and decoder
state_dict["encoder.embed_tokens.weight"] = official_weights["state_dict"]["transformer.encoder.embed_tokens.weight"]
state_dict["decoder.embed_tokens.weight"] = official_weights["state_dict"]["transformer.decoder.embed_tokens.weight"]

state_dict["encoder.final_layer_norm.weight"] = official_weights["state_dict"][
    "transformer.encoder.final_layer_norm.weight"
]
state_dict["decoder.final_layer_norm.weight"] = official_weights["state_dict"][
    "transformer.decoder.final_layer_norm.weight"
]

# load lm_head, mel_conditioner.emb and shared
state_dict["lm_head.weight"] = official_weights["state_dict"]["transformer.lm_head.weight"]
state_dict["mel_conditioner.embedding.weight"] = official_weights["state_dict"]["mel_conditioner.embedding.weight"]
state_dict["shared.weight"] = official_weights["state_dict"]["transformer.shared.weight"]

# load each encoder block
for i in range(cfg.num_layers):
    # layer 0
    state_dict[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = official_weights["state_dict"][
        f"transformer.encoder.block.{i}.layer.0.SelfAttention.q.weight"
    ]
    state_dict[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = official_weights["state_dict"][
        f"transformer.encoder.block.{i}.layer.0.SelfAttention.k.weight"
    ]
    state_dict[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = official_weights["state_dict"][
        f"transformer.encoder.block.{i}.layer.0.SelfAttention.v.weight"
    ]
    state_dict[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = official_weights["state_dict"][
        f"transformer.encoder.block.{i}.layer.0.SelfAttention.o.weight"
    ]
    state_dict[f"encoder.block.{i}.layer.0.layer_norm.weight"] = official_weights["state_dict"][
        f"transformer.encoder.block.{i}.layer.0.layer_norm.weight"
    ]

    # layer 1
state_dict[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = official_weights["state_dict"][ f"transformer.encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight" ] state_dict[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = official_weights["state_dict"][ f"transformer.encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight" ] state_dict[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = official_weights["state_dict"][ f"transformer.encoder.block.{i}.layer.1.DenseReluDense.wo.weight" ] state_dict[f"encoder.block.{i}.layer.1.layer_norm.weight"] = official_weights["state_dict"][ f"transformer.encoder.block.{i}.layer.1.layer_norm.weight" ] # load each decoder blocks for i in range(6): # layer 0 state_dict[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = official_weights["state_dict"][ f"transformer.decoder.block.{i}.layer.0.SelfAttention.q.weight" ] state_dict[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = official_weights["state_dict"][ f"transformer.decoder.block.{i}.layer.0.SelfAttention.k.weight" ] state_dict[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = official_weights["state_dict"][ f"transformer.decoder.block.{i}.layer.0.SelfAttention.v.weight" ] state_dict[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = official_weights["state_dict"][ f"transformer.decoder.block.{i}.layer.0.SelfAttention.o.weight" ] state_dict[f"decoder.block.{i}.layer.0.layer_norm.weight"] = official_weights["state_dict"][ f"transformer.decoder.block.{i}.layer.0.layer_norm.weight" ] # layer 1 state_dict[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = official_weights["state_dict"][ f"transformer.decoder.block.{i}.layer.1.EncDecAttention.q.weight" ] state_dict[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = official_weights["state_dict"][ f"transformer.decoder.block.{i}.layer.1.EncDecAttention.k.weight" ] state_dict[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = official_weights["state_dict"][ f"transformer.decoder.block.{i}.layer.1.EncDecAttention.v.weight" ] state_dict[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = official_weights["state_dict"][ f"transformer.decoder.block.{i}.layer.1.EncDecAttention.o.weight" ] state_dict[f"decoder.block.{i}.layer.1.layer_norm.weight"] = official_weights["state_dict"][ f"transformer.decoder.block.{i}.layer.1.layer_norm.weight" ] # layer 2 state_dict[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = official_weights["state_dict"][ f"transformer.decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight" ] state_dict[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = official_weights["state_dict"][ f"transformer.decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight" ] state_dict[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = official_weights["state_dict"][ f"transformer.decoder.block.{i}.layer.2.DenseReluDense.wo.weight" ] state_dict[f"decoder.block.{i}.layer.2.layer_norm.weight"] = official_weights["state_dict"][ f"transformer.decoder.block.{i}.layer.2.layer_norm.weight" ] model.load_state_dict(state_dict, strict=True) # save the weights torch.save(state_dict, "./pytorch_model.bin") ########################## TOKENIZER ########################## # the tokenize and detokenize methods are taken from the official implementation # link : https://github.com/sweetcocoa/pop2piano/blob/fac11e8dcfc73487513f4588e8d0c22a22f2fdc5/midi_tokenizer.py#L34 def tokenize(idx, token_type, n_special=4, n_note=128, n_velocity=2): if token_type == "TOKEN_TIME": return n_special + n_note + 
n_velocity + idx
    elif token_type == "TOKEN_VELOCITY":
        return n_special + n_note + idx
    elif token_type == "TOKEN_NOTE":
        return n_special + idx
    elif token_type == "TOKEN_SPECIAL":
        return idx
    else:
        return -1


# link : https://github.com/sweetcocoa/pop2piano/blob/fac11e8dcfc73487513f4588e8d0c22a22f2fdc5/midi_tokenizer.py#L48
def detokenize(idx, n_special=4, n_note=128, n_velocity=2, time_idx_offset=0):
    if idx >= n_special + n_note + n_velocity:
        return "TOKEN_TIME", (idx - (n_special + n_note + n_velocity)) + time_idx_offset
    elif idx >= n_special + n_note:
        return "TOKEN_VELOCITY", idx - (n_special + n_note)
    elif idx >= n_special:
        return "TOKEN_NOTE", idx - n_special
    else:
        return "TOKEN_SPECIAL", idx


# create the decoder and then the encoder of the tokenizer
decoder = {}
for i in range(cfg.vocab_size):
    decoder.update({i: f"{detokenize(i)[1]}_{detokenize(i)[0]}"})
encoder = {v: k for k, v in decoder.items()}

# save the vocab
with open("./vocab.json", "w") as file:
    file.write(json.dumps(encoder))
transformers/src/transformers/models/pop2piano/convert_pop2piano_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/pop2piano/convert_pop2piano_weights_to_hf.py", "repo_id": "transformers", "token_count": 3447 }
# coding=utf-8
# Copyright 2024 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan,
# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pvt V2 model configuration"""

from typing import Callable, List, Tuple, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class PvtV2Config(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PvtV2Model`]. It is used to instantiate a Pvt V2
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the Pvt V2 B0
    [OpenGVLab/pvt_v2_b0](https://huggingface.co/OpenGVLab/pvt_v2_b0) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`Union[int, Tuple[int, int]]`, *optional*, defaults to 224):
            The input image size. Pass int value for square image, or tuple of (height, width).
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        num_encoder_blocks (`int`, *optional*, defaults to 4):
            The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
        depths (`List[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
            The number of layers in each encoder block.
        sr_ratios (`List[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
            Spatial reduction ratios in each encoder block.
        hidden_sizes (`List[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
            Dimension of each of the encoder blocks.
        patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
            Patch size for overlapping patch embedding before each encoder block.
        strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
            Stride for overlapping patch embedding before each encoder block.
        num_attention_heads (`List[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
            Number of attention heads for each attention layer in each block of the Transformer encoder.
        mlp_ratios (`List[int]`, *optional*, defaults to `[8, 8, 4, 4]`):
            Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
            encoder blocks.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. drop_path_rate (`float`, *optional*, defaults to 0.0): The dropout probability for stochastic depth, used in the blocks of the Transformer encoder. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not a learnable bias should be added to the queries, keys and values. linear_attention (`bool`, *optional*, defaults to `False`): Use linear attention complexity. If set to True, `sr_ratio` is ignored and average pooling is used for dimensionality reduction in the attention layers rather than strided convolution. out_features (`List[str]`, *optional*): If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. (depending on how many stages the model has). If unset and `out_indices` is set, will default to the corresponding stages. If unset and `out_indices` is unset, will default to the last stage. out_indices (`List[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. If unset and `out_features` is unset, will default to the last stage. Example: ```python >>> from transformers import PvtV2Model, PvtV2Config >>> # Initializing a pvt_v2_b0 style configuration >>> configuration = PvtV2Config() >>> # Initializing a model from the OpenGVLab/pvt_v2_b0 style configuration >>> model = PvtV2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "pvt_v2" def __init__( self, image_size: Union[int, Tuple[int, int]] = 224, num_channels: int = 3, num_encoder_blocks: int = 4, depths: List[int] = [2, 2, 2, 2], sr_ratios: List[int] = [8, 4, 2, 1], hidden_sizes: List[int] = [32, 64, 160, 256], patch_sizes: List[int] = [7, 3, 3, 3], strides: List[int] = [4, 2, 2, 2], num_attention_heads: List[int] = [1, 2, 5, 8], mlp_ratios: List[int] = [8, 8, 4, 4], hidden_act: Union[str, Callable] = "gelu", hidden_dropout_prob: float = 0.0, attention_probs_dropout_prob: float = 0.0, initializer_range: float = 0.02, drop_path_rate: float = 0.0, layer_norm_eps: float = 1e-6, qkv_bias: bool = True, linear_attention: bool = False, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) image_size = (image_size, image_size) if isinstance(image_size, int) else image_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.depths = depths self.sr_ratios = sr_ratios self.hidden_sizes = hidden_sizes self.patch_sizes = patch_sizes self.strides = strides self.mlp_ratios = mlp_ratios self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.drop_path_rate = drop_path_rate self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.linear_attention = linear_attention self.stage_names = [f"stage{idx}" for idx in range(1, len(depths) + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) __all__ = ["PvtV2Config"]
transformers/src/transformers/models/pvt_v2/configuration_pvt_v2.py/0
{ "file_path": "transformers/src/transformers/models/pvt_v2/configuration_pvt_v2.py", "repo_id": "transformers", "token_count": 3089 }
# coding=utf-8
# Copyright 2024 Microsoft Research & University of Wisconsin-Madison and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Qwen2Audio model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig


logger = logging.get_logger(__name__)


class Qwen2AudioEncoderConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Qwen2AudioEncoder`]. It is used to instantiate a
    Qwen2-Audio audio encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the audio encoder of the Qwen2-Audio
    architecture.

    e.g. [Qwen/Qwen2-Audio-7B](https://huggingface.co/Qwen/Qwen2-Audio-7B)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_mel_bins (`int`, *optional*, defaults to 128):
            Number of mel features used per input feature. Should correspond to the value used in the
            `Qwen2AudioProcessor` class.
        encoder_layers (`int`, *optional*, defaults to 32):
            Number of encoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the Transformer encoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 5120):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        d_model (`int`, *optional*, defaults to 1280):
            Dimensionality of the layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_function (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by dividing by sqrt(d_model).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        max_source_positions (`int`, *optional*, defaults to 1500):
            The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
    Example:

    ```python
    >>> from transformers import Qwen2AudioEncoderConfig, Qwen2AudioEncoder

    >>> # Initializing a Qwen2AudioEncoderConfig
    >>> configuration = Qwen2AudioEncoderConfig()

    >>> # Initializing a Qwen2AudioEncoder (with random weights)
    >>> model = Qwen2AudioEncoder(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "qwen2_audio_encoder"

    def __init__(
        self,
        num_mel_bins=128,
        encoder_layers=32,
        encoder_attention_heads=20,
        encoder_ffn_dim=5120,
        encoder_layerdrop=0.0,
        d_model=1280,
        dropout=0.0,
        attention_dropout=0.0,
        activation_function="gelu",
        activation_dropout=0.0,
        scale_embedding=False,
        init_std=0.02,
        max_source_positions=1500,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions


class Qwen2AudioConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Qwen2AudioForConditionalGeneration`]. It is used
    to instantiate a Qwen2-Audio model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the Qwen2-Audio.

    e.g. [Qwen/Qwen2-Audio-7B](https://huggingface.co/Qwen/Qwen2-Audio-7B)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        audio_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2AudioEncoderConfig`):
            The config object or dictionary of the audio backbone.
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2Config`):
            The config object or dictionary of the text backbone.
        audio_token_index (`int`, *optional*, defaults to 151646):
            The audio token index used to encode the audio prompt.
Example: ```python >>> from transformers import Qwen2AudioForConditionalGeneration, Qwen2AudioConfig, Qwen2AudioEncoderConfig, Qwen2Config >>> # Initializing a Qwen2AudioEncoder config >>> audio_config = Qwen2AudioEncoderConfig() >>> # Initializing a Qwen2 config >>> text_config = Qwen2Config() >>> # Initializing a Qwen2Audio configuration >>> configuration = Qwen2AudioConfig(audio_config, text_config) >>> # Initializing a model from the qwen2-audio style configuration >>> model = Qwen2AudioForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "qwen2_audio" sub_configs = {"text_config": AutoConfig, "audio_config": AutoConfig} def __init__( self, audio_config=None, text_config=None, audio_token_index=151646, **kwargs, ): self.audio_token_index = audio_token_index if isinstance(audio_config, dict): audio_config["model_type"] = ( audio_config["model_type"] if "model_type" in audio_config else "qwen2_audio_encoder" ) audio_config = CONFIG_MAPPING[audio_config["model_type"]](**audio_config) elif audio_config is None: audio_config = CONFIG_MAPPING["qwen2_audio_encoder"]( d_model=1280, encoder_attention_heads=20, encoder_ffn_dim=5120, encoder_layerdrop=0.0, encoder_layers=32, num_mel_bins=128, max_source_positions=1500, scale_embedding=False, activation_function="gelu", ) self.audio_config = audio_config if isinstance(text_config, dict): text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "qwen2" text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["qwen2"]() self.text_config = text_config super().__init__(**kwargs) __all__ = ["Qwen2AudioConfig", "Qwen2AudioEncoderConfig"]
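
# --- Illustrative usage sketch (not part of the upstream file). It shows how the
# sub-configs compose: a dict is routed through CONFIG_MAPPING via its "model_type"
# key, and `text_config` falls back to a default Qwen2 config. The override values
# (tiny encoder sizes) are arbitrary assumptions made only to keep the demo small.
if __name__ == "__main__":
    audio_cfg = {"model_type": "qwen2_audio_encoder", "encoder_layers": 2, "d_model": 64}
    config = Qwen2AudioConfig(audio_config=audio_cfg)
    print(type(config.audio_config).__name__)  # Qwen2AudioEncoderConfig
    print(type(config.text_config).__name__)  # Qwen2Config
    print(config.audio_token_index)  # 151646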
transformers/src/transformers/models/qwen2_audio/configuration_qwen2_audio.py/0
{ "file_path": "transformers/src/transformers/models/qwen2_audio/configuration_qwen2_audio.py", "repo_id": "transformers", "token_count": 3278 }
# coding=utf-8
# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RAG Retriever model implementation."""

import os
import pickle
import time
from typing import Iterable, List, Optional, Tuple

import numpy as np

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import cached_file, is_datasets_available, is_faiss_available, logging, requires_backends, strtobool
from .configuration_rag import RagConfig
from .tokenization_rag import RagTokenizer


if is_datasets_available():
    from datasets import Dataset, load_dataset, load_from_disk

if is_faiss_available():
    import faiss


logger = logging.get_logger(__name__)


LEGACY_INDEX_PATH = "https://storage.googleapis.com/huggingface-nlp/datasets/wiki_dpr/"


class Index:
    """
    A base class for the Indices encapsulated by the [`RagRetriever`].
    """

    def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
        """
        Returns a list of dictionaries, containing titles and text of the retrieved documents.

        Args:
            doc_ids (`np.ndarray` of shape `(batch_size, n_docs)`):
                A tensor of document indices.
        """
        raise NotImplementedError

    def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
        """
        For each query in the batch, retrieves `n_docs` documents.

        Args:
            question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
                An array of query vectors.
            n_docs (`int`):
                The number of docs retrieved per query.

        Returns:
            `np.ndarray` of shape `(batch_size, n_docs)`: A tensor of indices of retrieved documents.
            `np.ndarray` of shape `(batch_size, n_docs, vector_size)`: A tensor of vector representations of the
            retrieved documents.
        """
        raise NotImplementedError

    def is_initialized(self):
        """
        Returns `True` if index is already initialized.
        """
        raise NotImplementedError

    def init_index(self):
        """
        A function responsible for loading the index into memory. Should be called only once per training run of a RAG
        model. E.g. if the model is trained on multiple GPUs in a distributed setup, only one of the workers will load
        the index.
        """
        raise NotImplementedError


class LegacyIndex(Index):
    """
    An index which can be deserialized from the files built using https://github.com/facebookresearch/DPR. We use
    default faiss index parameters as specified in that repository.

    Args:
        vector_size (`int`):
            The dimension of indexed vectors.
index_path (`str`): A path to a *directory* containing index files compatible with [`~models.rag.retrieval_rag.LegacyIndex`] """ INDEX_FILENAME = "hf_bert_base.hnswSQ8_correct_phi_128.c_index" PASSAGE_FILENAME = "psgs_w100.tsv.pkl" def __init__(self, vector_size, index_path): self.index_id_to_db_id = [] self.index_path = index_path self.passages = self._load_passages() self.vector_size = vector_size self.index = None self._index_initialized = False def _resolve_path(self, index_path, filename): is_local = os.path.isdir(index_path) try: # Load from URL or cache if already cached resolved_archive_file = cached_file(index_path, filename) except EnvironmentError: msg = ( f"Can't load '{filename}'. Make sure that:\n\n" f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}\n\n" f"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n" ) raise EnvironmentError(msg) if is_local: logger.info(f"loading file {resolved_archive_file}") else: logger.info(f"loading file {filename} from cache at {resolved_archive_file}") return resolved_archive_file def _load_passages(self): logger.info(f"Loading passages from {self.index_path}") passages_path = self._resolve_path(self.index_path, self.PASSAGE_FILENAME) if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")): raise ValueError( "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially " "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or " "that could have been tampered with. If you already verified the pickle data and decided to use it, " "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it." ) with open(passages_path, "rb") as passages_file: passages = pickle.load(passages_file) return passages def _deserialize_index(self): logger.info(f"Loading index from {self.index_path}") resolved_index_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index.dpr") self.index = faiss.read_index(resolved_index_path) resolved_meta_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index_meta.dpr") if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")): raise ValueError( "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially " "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or " "that could have been tampered with. If you already verified the pickle data and decided to use it, " "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it." 
        )
        with open(resolved_meta_path, "rb") as metadata_file:
            self.index_id_to_db_id = pickle.load(metadata_file)
        assert (
            len(self.index_id_to_db_id) == self.index.ntotal
        ), "Deserialized index_id_to_db_id should match faiss index size"

    def is_initialized(self):
        return self._index_initialized

    def init_index(self):
        index = faiss.IndexHNSWFlat(self.vector_size + 1, 512)
        index.hnsw.efSearch = 128
        index.hnsw.efConstruction = 200
        self.index = index
        self._deserialize_index()
        self._index_initialized = True

    def get_doc_dicts(self, doc_ids: np.ndarray):
        doc_list = []
        for doc_ids_i in doc_ids:
            ids = [str(int(doc_id)) for doc_id in doc_ids_i]
            docs = [self.passages[doc_id] for doc_id in ids]
            doc_list.append(docs)
        doc_dicts = []
        for docs in doc_list:
            doc_dict = {}
            doc_dict["title"] = [doc[1] for doc in docs]
            doc_dict["text"] = [doc[0] for doc in docs]
            doc_dicts.append(doc_dict)
        return doc_dicts

    def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
        aux_dim = np.zeros(len(question_hidden_states), dtype="float32").reshape(-1, 1)
        query_hnsw_vectors = np.hstack((question_hidden_states, aux_dim))
        _, docs_ids = self.index.search(query_hnsw_vectors, n_docs)
        vectors = [[self.index.reconstruct(int(doc_id))[:-1] for doc_id in doc_ids] for doc_ids in docs_ids]
        ids = [[int(self.index_id_to_db_id[doc_id]) for doc_id in doc_ids] for doc_ids in docs_ids]
        return np.array(ids), np.array(vectors)


class HFIndexBase(Index):
    def __init__(self, vector_size, dataset, index_initialized=False):
        self.vector_size = vector_size
        self.dataset = dataset
        self._index_initialized = index_initialized
        self._check_dataset_format(with_index=index_initialized)
        dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True, dtype="float32")

    def _check_dataset_format(self, with_index: bool):
        if not isinstance(self.dataset, Dataset):
            raise TypeError(f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}")
        if len({"title", "text", "embeddings"} - set(self.dataset.column_names)) > 0:
            raise ValueError(
                "Dataset should be a dataset with the following columns: "
                "title (str), text (str) and embeddings (arrays of dimension vector_size), "
                f"but got columns {self.dataset.column_names}"
            )
        if with_index and "embeddings" not in self.dataset.list_indexes():
            raise ValueError(
                "Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it "
                "or `dataset.load_faiss_index` to load one from the disk."
            )

    def init_index(self):
        raise NotImplementedError()

    def is_initialized(self):
        return self._index_initialized

    def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
        return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])]

    def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
        _, ids = self.dataset.search_batch("embeddings", question_hidden_states, n_docs)
        docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids]
        vectors = [doc["embeddings"] for doc in docs]
        for i in range(len(vectors)):
            if len(vectors[i]) < n_docs:
                vectors[i] = np.vstack([vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))])
        return np.array(ids), np.array(vectors)  # shapes (batch_size, n_docs) and (batch_size, n_docs, d)


class CanonicalHFIndex(HFIndexBase):
    """
    A wrapper around an instance of [`~datasets.Datasets`].
    If `index_path` is set to `None`, we load the pre-computed index available with the
    [`~datasets.arrow_dataset.Dataset`], otherwise, we load the index from the indicated path on disk.

    Args:
        vector_size (`int`): the dimension of the passages embeddings used by the index
        dataset_name (`str`, optional, defaults to `wiki_dpr`):
            A dataset identifier of the indexed dataset on HuggingFace AWS bucket (list all available datasets and ids
            with `datasets.list_datasets()`).
        dataset_split (`str`, optional, defaults to `train`):
            Which split of the `dataset` to load.
        index_name (`str`, optional):
            The index_name of the index associated with the `dataset`. The index loaded from `index_path` will be
            saved under this name.
        index_path (`str`, optional):
            The path to the serialized faiss index on disk.
        use_dummy_dataset (`bool`, optional, defaults to `False`):
            If True, use the dummy configuration of the dataset for tests.
        dataset_revision (`str`, optional):
            The revision of the dataset to load.
    """

    def __init__(
        self,
        vector_size: int,
        dataset_name: str = "wiki_dpr",
        dataset_split: str = "train",
        index_name: Optional[str] = None,
        index_path: Optional[str] = None,
        use_dummy_dataset=False,
        dataset_revision=None,
    ):
        if int(index_path is None) + int(index_name is None) != 1:
            raise ValueError("Please provide exactly one of `index_name` or `index_path`.")
        self.dataset_name = dataset_name
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.dataset_revision = dataset_revision
        logger.info(f"Loading passages from {self.dataset_name}")
        dataset = load_dataset(
            self.dataset_name,
            with_index=False,
            split=self.dataset_split,
            dummy=self.use_dummy_dataset,
            revision=dataset_revision,
        )
        super().__init__(vector_size, dataset, index_initialized=False)

    def init_index(self):
        if self.index_path is not None:
            logger.info(f"Loading index from {self.index_path}")
            self.dataset.load_faiss_index("embeddings", file=self.index_path)
        else:
            logger.info(f"Loading index from {self.dataset_name} with index name {self.index_name}")
            self.dataset = load_dataset(
                self.dataset_name,
                with_embeddings=True,
                with_index=True,
                split=self.dataset_split,
                index_name=self.index_name,
                dummy=self.use_dummy_dataset,
                revision=self.dataset_revision,
            )
            self.dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True)
        self._index_initialized = True


class CustomHFIndex(HFIndexBase):
    """
    A wrapper around an instance of [`~datasets.Datasets`]. The dataset and the index are both loaded from the
    indicated paths on disk.

    Args:
        vector_size (`int`): the dimension of the passages embeddings used by the index
        dataset_path (`str`):
            The path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and
            embeddings (arrays of dimension vector_size)
        index_path (`str`):
            The path to the serialized faiss index on disk.
    """

    def __init__(self, vector_size: int, dataset, index_path=None):
        super().__init__(vector_size, dataset, index_initialized=index_path is None)
        self.index_path = index_path

    @classmethod
    def load_from_disk(cls, vector_size, dataset_path, index_path):
        logger.info(f"Loading passages from {dataset_path}")
        if dataset_path is None or index_path is None:
            raise ValueError(
                "Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` "
                "and `dataset.get_index('embeddings').save(index_path)`."
) dataset = load_from_disk(dataset_path) return cls(vector_size=vector_size, dataset=dataset, index_path=index_path) def init_index(self): if not self.is_initialized(): logger.info(f"Loading index from {self.index_path}") self.dataset.load_faiss_index("embeddings", file=self.index_path) self._index_initialized = True class RagRetriever: """ Retriever used to get documents from vector queries. It retrieves the documents embeddings as well as the documents contents, and it formats them to be used with a RagModel. Args: config ([`RagConfig`]): The configuration of the RAG model this Retriever is used with. Contains parameters indicating which `Index` to build. You can load your own custom dataset with `config.index_name="custom"` or use a canonical one (default) from the datasets library with `config.index_name="wiki_dpr"` for example. question_encoder_tokenizer ([`PreTrainedTokenizer`]): The tokenizer that was used to tokenize the question. It is used to decode the question and then use the generator_tokenizer. generator_tokenizer ([`PreTrainedTokenizer`]): The tokenizer used for the generator part of the RagModel. index ([`~models.rag.retrieval_rag.Index`], optional, defaults to the one defined by the configuration): If specified, use this index instead of the one built using the configuration Examples: ```python >>> # To load the default "wiki_dpr" dataset with 21M passages from wikipedia (index name is 'compressed' or 'exact') >>> from transformers import RagRetriever >>> retriever = RagRetriever.from_pretrained( ... "facebook/dpr-ctx_encoder-single-nq-base", dataset="wiki_dpr", index_name="compressed" ... ) >>> # To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py >>> from transformers import RagRetriever >>> dataset = ( ... ... ... ) # dataset must be a datasets.Datasets object with columns "title", "text" and "embeddings", and it must have a faiss index >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", indexed_dataset=dataset) >>> # To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py >>> from transformers import RagRetriever >>> dataset_path = "path/to/my/dataset" # dataset saved via *dataset.save_to_disk(...)* >>> index_path = "path/to/my/index.faiss" # faiss index saved via *dataset.get_index("embeddings").save(...)* >>> retriever = RagRetriever.from_pretrained( ... "facebook/dpr-ctx_encoder-single-nq-base", ... index_name="custom", ... passages_path=dataset_path, ... index_path=index_path, ... 
) >>> # To load the legacy index built originally for Rag's paper >>> from transformers import RagRetriever >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", index_name="legacy") ```""" def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True): self._init_retrieval = init_retrieval requires_backends(self, ["datasets", "faiss"]) super().__init__() self.index = index or self._build_index(config) self.generator_tokenizer = generator_tokenizer self.question_encoder_tokenizer = question_encoder_tokenizer self.n_docs = config.n_docs self.batch_size = config.retrieval_batch_size self.config = config if self._init_retrieval: self.init_retrieval() self.ctx_encoder_tokenizer = None self.return_tokenized_docs = False @staticmethod def _build_index(config): if config.index_name == "legacy": return LegacyIndex( config.retrieval_vector_size, config.index_path or LEGACY_INDEX_PATH, ) elif config.index_name == "custom": return CustomHFIndex.load_from_disk( vector_size=config.retrieval_vector_size, dataset_path=config.passages_path, index_path=config.index_path, ) else: return CanonicalHFIndex( vector_size=config.retrieval_vector_size, dataset_name=config.dataset, dataset_split=config.dataset_split, index_name=config.index_name, index_path=config.index_path, use_dummy_dataset=config.use_dummy_dataset, dataset_revision=config.dataset_revision, ) @classmethod def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kwargs): requires_backends(cls, ["datasets", "faiss"]) config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs) rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config) question_encoder_tokenizer = rag_tokenizer.question_encoder generator_tokenizer = rag_tokenizer.generator if indexed_dataset is not None: config.index_name = "custom" index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset) else: index = cls._build_index(config) return cls( config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, ) def save_pretrained(self, save_directory): if isinstance(self.index, CustomHFIndex): if self.config.index_path is None: index_path = os.path.join(save_directory, "hf_dataset_index.faiss") self.index.dataset.get_index("embeddings").save(index_path) self.config.index_path = index_path if self.config.passages_path is None: passages_path = os.path.join(save_directory, "hf_dataset") # datasets don't support save_to_disk with indexes right now faiss_index = self.index.dataset._indexes.pop("embeddings") self.index.dataset.save_to_disk(passages_path) self.index.dataset._indexes["embeddings"] = faiss_index self.config.passages_path = passages_path self.config.save_pretrained(save_directory) rag_tokenizer = RagTokenizer( question_encoder=self.question_encoder_tokenizer, generator=self.generator_tokenizer, ) rag_tokenizer.save_pretrained(save_directory) def init_retrieval(self): """ Retriever initialization function. It loads the index into memory. """ logger.info("initializing retrieval") self.index.init_index() def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None): r""" Postprocessing retrieved `docs` and combining them with `input_strings`. Args: docs (`dict`): Retrieved documents. input_strings (`str`): Input strings decoded by `preprocess_query`. 
            prefix (`str`):
                Prefix added at the beginning of each input, typically used with T5-based models.

        Return:
            `tuple(tensors)`: a tuple consisting of two elements: contextualized `input_ids` and a compatible
            `attention_mask`.
        """

        def cat_input_and_doc(doc_title, doc_text, input_string, prefix):
            # TODO(Patrick): if we train more RAG models, I want to put the input first to take advantage of effortless truncation
            # TODO(piktus): better handling of truncation
            if doc_title.startswith('"'):
                doc_title = doc_title[1:]
            if doc_title.endswith('"'):
                doc_title = doc_title[:-1]
            if prefix is None:
                prefix = ""
            out = (prefix + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string).replace(
                "  ", " "
            )
            return out

        rag_input_strings = [
            cat_input_and_doc(
                docs[i]["title"][j],
                docs[i]["text"][j],
                input_strings[i],
                prefix,
            )
            for i in range(len(docs))
            for j in range(n_docs)
        ]

        contextualized_inputs = self.generator_tokenizer.batch_encode_plus(
            rag_input_strings,
            max_length=self.config.max_combined_length,
            return_tensors=return_tensors,
            padding="max_length",
            truncation=True,
        )

        return contextualized_inputs["input_ids"], contextualized_inputs["attention_mask"]

    def _chunk_tensor(self, t: Iterable, chunk_size: int) -> List[Iterable]:
        return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)]

    def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray]:
        question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size)
        ids_batched = []
        vectors_batched = []
        for question_hidden_states in question_hidden_states_batched:
            start_time = time.time()
            ids, vectors = self.index.get_top_docs(question_hidden_states, n_docs)
            logger.debug(
                f"index search time: {time.time() - start_time} sec, batch size {question_hidden_states.shape}"
            )
            ids_batched.extend(ids)
            vectors_batched.extend(vectors)
        return (
            np.array(ids_batched),
            np.array(vectors_batched),
        )  # shapes (batch_size, n_docs) and (batch_size, n_docs, d)

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray, List[dict]]:
        """
        Retrieves documents for specified `question_hidden_states`.

        Args:
            question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
                A batch of query vectors to retrieve with.
            n_docs (`int`):
                The number of docs retrieved per query.

        Return:
            `Tuple[np.ndarray, np.ndarray, List[dict]]`: A tuple with the following objects:

            - **retrieved_doc_embeds** (`np.ndarray` of shape `(batch_size, n_docs, dim)`) -- The retrieval embeddings
              of the retrieved docs per query.
            - **doc_ids** (`np.ndarray` of shape `(batch_size, n_docs)`) -- The ids of the documents in the index
            - **doc_dicts** (`List[dict]`) -- The titles and texts of the retrieved documents per query.
        """

        doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    def set_ctx_encoder_tokenizer(self, ctx_encoder_tokenizer: PreTrainedTokenizer):
        # used in end2end retriever training
        self.ctx_encoder_tokenizer = ctx_encoder_tokenizer
        self.return_tokenized_docs = True

    def __call__(
        self,
        question_input_ids: List[List[int]],
        question_hidden_states: np.ndarray,
        prefix=None,
        n_docs=None,
        return_tensors=None,
    ) -> BatchEncoding:
        """
        Retrieves documents for specified `question_hidden_states`.

        Args:
            question_input_ids (`List[List[int]]`):
                Batch of input ids.
            question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
                A batch of query vectors to retrieve with.
prefix (`str`, *optional*): The prefix used by the generator's tokenizer. n_docs (`int`, *optional*): The number of docs retrieved per query. return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to "pt"): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **context_input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **context_attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **retrieved_doc_embeds** -- List of embeddings of the retrieved documents - **doc_ids** -- List of ids of the retrieved documents """ n_docs = n_docs if n_docs is not None else self.n_docs prefix = prefix if prefix is not None else self.config.generator.prefix retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs) input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True) context_input_ids, context_attention_mask = self.postprocess_docs( docs, input_strings, prefix, n_docs, return_tensors=return_tensors ) if self.return_tokenized_docs: retrieved_doc_text = [] retrieved_doc_title = [] for b_idx in range(len(docs)): for doc_idx in range(n_docs): retrieved_doc_text.append(docs[b_idx]["text"][doc_idx]) retrieved_doc_title.append(docs[b_idx]["title"][doc_idx]) tokenized_docs = self.ctx_encoder_tokenizer( retrieved_doc_title, retrieved_doc_text, truncation=True, padding="longest", return_tensors=return_tensors, ) return BatchEncoding( { "context_input_ids": context_input_ids, "context_attention_mask": context_attention_mask, "retrieved_doc_embeds": retrieved_doc_embeds, "doc_ids": doc_ids, "tokenized_doc_ids": tokenized_docs["input_ids"], "tokenized_doc_attention_mask": tokenized_docs["attention_mask"], }, tensor_type=return_tensors, ) else: return BatchEncoding( { "context_input_ids": context_input_ids, "context_attention_mask": context_attention_mask, "retrieved_doc_embeds": retrieved_doc_embeds, "doc_ids": doc_ids, }, tensor_type=return_tensors, ) __all__ = ["RagRetriever"]
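
# --- Illustrative sketch (not part of the upstream module): a tiny in-memory Index
# subclass showing the `(ids, vectors)` contract of `get_top_docs` and the doc-dict
# shape of `get_doc_dicts` that `RagRetriever` relies on. The brute-force dot-product
# search below is an assumption made only for the demo; real indices use faiss.
if __name__ == "__main__":

    class _ToyIndex(Index):
        def __init__(self, embeddings, docs):
            self._embeddings = embeddings  # (num_docs, vector_size)
            self._docs = docs  # list of (title, text) pairs

        def is_initialized(self):
            return True

        def init_index(self):
            pass

        def get_doc_dicts(self, doc_ids):
            return [
                {"title": [self._docs[i][0] for i in row], "text": [self._docs[i][1] for i in row]}
                for row in doc_ids
            ]

        def get_top_docs(self, question_hidden_states, n_docs=5):
            scores = question_hidden_states @ self._embeddings.T  # (batch, num_docs)
            ids = np.argsort(-scores, axis=1)[:, :n_docs]  # best-first document indices
            vectors = self._embeddings[ids]  # (batch, n_docs, vector_size)
            return ids, vectors

    emb = np.eye(4, dtype="float32")
    index = _ToyIndex(emb, [("t0", "a"), ("t1", "b"), ("t2", "c"), ("t3", "d")])
    ids, vecs = index.get_top_docs(np.asarray([[0.0, 1.0, 0.0, 0.0]], dtype="float32"), n_docs=2)
    print(ids, vecs.shape)  # ids -> [[1 0]] (tie order may vary), vecs.shape -> (1, 2, 4)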
transformers/src/transformers/models/rag/retrieval_rag.py/0
{ "file_path": "transformers/src/transformers/models/rag/retrieval_rag.py", "repo_id": "transformers", "token_count": 13032 }
# coding=utf-8
# Copyright 2023 The Google Flax Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from functools import partial
from typing import Optional, Tuple

import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.traverse_util import flatten_dict, unflatten_dict

from transformers import RegNetConfig
from transformers.modeling_flax_outputs import (
    FlaxBaseModelOutputWithNoAttention,
    FlaxBaseModelOutputWithPooling,
    FlaxBaseModelOutputWithPoolingAndNoAttention,
    FlaxImageClassifierOutputWithNoAttention,
)
from transformers.modeling_flax_utils import (
    ACT2FN,
    FlaxPreTrainedModel,
    append_replace_return_docstrings,
    overwrite_call_docstring,
)
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
)


REGNET_START_DOCSTRING = r"""

    This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading, saving and converting weights from PyTorch models)

    This model is also a
    [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it
    as a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
    behavior.

    Finally, this model supports inherent JAX features such as:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
        dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
            The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
            `jax.numpy.bfloat16` (on TPUs).

            This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
            specified all the computation will be performed with the given `dtype`.

            **Note that this only specifies the dtype of the computation and does not influence the dtype of model
            parameters.**

            If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
            [`~FlaxPreTrainedModel.to_bf16`].
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`RegNetImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # Copied from transformers.models.resnet.modeling_flax_resnet.Identity class Identity(nn.Module): """Identity function.""" @nn.compact def __call__(self, x, **kwargs): return x class FlaxRegNetConvLayer(nn.Module): out_channels: int kernel_size: int = 3 stride: int = 1 groups: int = 1 activation: Optional[str] = "relu" dtype: jnp.dtype = jnp.float32 def setup(self): self.convolution = nn.Conv( self.out_channels, kernel_size=(self.kernel_size, self.kernel_size), strides=self.stride, padding=self.kernel_size // 2, feature_group_count=self.groups, use_bias=False, kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"), dtype=self.dtype, ) self.normalization = nn.BatchNorm(momentum=0.9, epsilon=1e-05, dtype=self.dtype) self.activation_func = ACT2FN[self.activation] if self.activation is not None else Identity() def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: hidden_state = self.convolution(hidden_state) hidden_state = self.normalization(hidden_state, use_running_average=deterministic) hidden_state = self.activation_func(hidden_state) return hidden_state class FlaxRegNetEmbeddings(nn.Module): config: RegNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.embedder = FlaxRegNetConvLayer( self.config.embedding_size, kernel_size=3, stride=2, activation=self.config.hidden_act, dtype=self.dtype, ) def __call__(self, pixel_values: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: num_channels = pixel_values.shape[-1] if num_channels != self.config.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) hidden_state = self.embedder(pixel_values, deterministic=deterministic) return hidden_state # Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetShortCut with ResNet->RegNet class FlaxRegNetShortCut(nn.Module): """ RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to downsample the input using `stride=2`. 
""" out_channels: int stride: int = 2 dtype: jnp.dtype = jnp.float32 def setup(self): self.convolution = nn.Conv( self.out_channels, kernel_size=(1, 1), strides=self.stride, use_bias=False, kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"), dtype=self.dtype, ) self.normalization = nn.BatchNorm(momentum=0.9, epsilon=1e-05, dtype=self.dtype) def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: hidden_state = self.convolution(x) hidden_state = self.normalization(hidden_state, use_running_average=deterministic) return hidden_state class FlaxRegNetSELayerCollection(nn.Module): in_channels: int reduced_channels: int dtype: jnp.dtype = jnp.float32 def setup(self): self.conv_1 = nn.Conv( self.reduced_channels, kernel_size=(1, 1), kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"), dtype=self.dtype, name="0", ) # 0 is the name used in corresponding pytorch implementation self.conv_2 = nn.Conv( self.in_channels, kernel_size=(1, 1), kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"), dtype=self.dtype, name="2", ) # 2 is the name used in corresponding pytorch implementation def __call__(self, hidden_state: jnp.ndarray) -> jnp.ndarray: hidden_state = self.conv_1(hidden_state) hidden_state = nn.relu(hidden_state) hidden_state = self.conv_2(hidden_state) attention = nn.sigmoid(hidden_state) return attention class FlaxRegNetSELayer(nn.Module): """ Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507). """ in_channels: int reduced_channels: int dtype: jnp.dtype = jnp.float32 def setup(self): self.pooler = partial(nn.avg_pool, padding=((0, 0), (0, 0))) self.attention = FlaxRegNetSELayerCollection(self.in_channels, self.reduced_channels, dtype=self.dtype) def __call__(self, hidden_state: jnp.ndarray) -> jnp.ndarray: pooled = self.pooler( hidden_state, window_shape=(hidden_state.shape[1], hidden_state.shape[2]), strides=(hidden_state.shape[1], hidden_state.shape[2]), ) attention = self.attention(pooled) hidden_state = hidden_state * attention return hidden_state class FlaxRegNetXLayerCollection(nn.Module): config: RegNetConfig out_channels: int stride: int = 1 dtype: jnp.dtype = jnp.float32 def setup(self): groups = max(1, self.out_channels // self.config.groups_width) self.layer = [ FlaxRegNetConvLayer( self.out_channels, kernel_size=1, activation=self.config.hidden_act, dtype=self.dtype, name="0", ), FlaxRegNetConvLayer( self.out_channels, stride=self.stride, groups=groups, activation=self.config.hidden_act, dtype=self.dtype, name="1", ), FlaxRegNetConvLayer( self.out_channels, kernel_size=1, activation=None, dtype=self.dtype, name="2", ), ] def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: for layer in self.layer: hidden_state = layer(hidden_state, deterministic=deterministic) return hidden_state class FlaxRegNetXLayer(nn.Module): """ RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1. 
""" config: RegNetConfig in_channels: int out_channels: int stride: int = 1 dtype: jnp.dtype = jnp.float32 def setup(self): should_apply_shortcut = self.in_channels != self.out_channels or self.stride != 1 self.shortcut = ( FlaxRegNetShortCut( self.out_channels, stride=self.stride, dtype=self.dtype, ) if should_apply_shortcut else Identity() ) self.layer = FlaxRegNetXLayerCollection( self.config, in_channels=self.in_channels, out_channels=self.out_channels, stride=self.stride, dtype=self.dtype, ) self.activation_func = ACT2FN[self.config.hidden_act] def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: residual = hidden_state hidden_state = self.layer(hidden_state) residual = self.shortcut(residual, deterministic=deterministic) hidden_state += residual hidden_state = self.activation_func(hidden_state) return hidden_state class FlaxRegNetYLayerCollection(nn.Module): config: RegNetConfig in_channels: int out_channels: int stride: int = 1 dtype: jnp.dtype = jnp.float32 def setup(self): groups = max(1, self.out_channels // self.config.groups_width) self.layer = [ FlaxRegNetConvLayer( self.out_channels, kernel_size=1, activation=self.config.hidden_act, dtype=self.dtype, name="0", ), FlaxRegNetConvLayer( self.out_channels, stride=self.stride, groups=groups, activation=self.config.hidden_act, dtype=self.dtype, name="1", ), FlaxRegNetSELayer( self.out_channels, reduced_channels=int(round(self.in_channels / 4)), dtype=self.dtype, name="2", ), FlaxRegNetConvLayer( self.out_channels, kernel_size=1, activation=None, dtype=self.dtype, name="3", ), ] def __call__(self, hidden_state: jnp.ndarray) -> jnp.ndarray: for layer in self.layer: hidden_state = layer(hidden_state) return hidden_state class FlaxRegNetYLayer(nn.Module): """ RegNet's Y layer: an X layer with Squeeze and Excitation. """ config: RegNetConfig in_channels: int out_channels: int stride: int = 1 dtype: jnp.dtype = jnp.float32 def setup(self): should_apply_shortcut = self.in_channels != self.out_channels or self.stride != 1 self.shortcut = ( FlaxRegNetShortCut( self.out_channels, stride=self.stride, dtype=self.dtype, ) if should_apply_shortcut else Identity() ) self.layer = FlaxRegNetYLayerCollection( self.config, in_channels=self.in_channels, out_channels=self.out_channels, stride=self.stride, dtype=self.dtype, ) self.activation_func = ACT2FN[self.config.hidden_act] def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: residual = hidden_state hidden_state = self.layer(hidden_state) residual = self.shortcut(residual, deterministic=deterministic) hidden_state += residual hidden_state = self.activation_func(hidden_state) return hidden_state class FlaxRegNetStageLayersCollection(nn.Module): """ A RegNet stage composed by stacked layers. 
""" config: RegNetConfig in_channels: int out_channels: int stride: int = 2 depth: int = 2 dtype: jnp.dtype = jnp.float32 def setup(self): layer = FlaxRegNetXLayer if self.config.layer_type == "x" else FlaxRegNetYLayer layers = [ # downsampling is done in the first layer with stride of 2 layer( self.config, self.in_channels, self.out_channels, stride=self.stride, dtype=self.dtype, name="0", ) ] for i in range(self.depth - 1): layers.append( layer( self.config, self.out_channels, self.out_channels, dtype=self.dtype, name=str(i + 1), ) ) self.layers = layers def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: hidden_state = x for layer in self.layers: hidden_state = layer(hidden_state, deterministic=deterministic) return hidden_state # Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetStage with ResNet->RegNet class FlaxRegNetStage(nn.Module): """ A RegNet stage composed by stacked layers. """ config: RegNetConfig in_channels: int out_channels: int stride: int = 2 depth: int = 2 dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = FlaxRegNetStageLayersCollection( self.config, in_channels=self.in_channels, out_channels=self.out_channels, stride=self.stride, depth=self.depth, dtype=self.dtype, ) def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray: return self.layers(x, deterministic=deterministic) # Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetStageCollection with ResNet->RegNet class FlaxRegNetStageCollection(nn.Module): config: RegNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): in_out_channels = zip(self.config.hidden_sizes, self.config.hidden_sizes[1:]) stages = [ FlaxRegNetStage( self.config, self.config.embedding_size, self.config.hidden_sizes[0], stride=2 if self.config.downsample_in_first_stage else 1, depth=self.config.depths[0], dtype=self.dtype, name="0", ) ] for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, self.config.depths[1:])): stages.append( FlaxRegNetStage(self.config, in_channels, out_channels, depth=depth, dtype=self.dtype, name=str(i + 1)) ) self.stages = stages def __call__( self, hidden_state: jnp.ndarray, output_hidden_states: bool = False, deterministic: bool = True, ) -> FlaxBaseModelOutputWithNoAttention: hidden_states = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: hidden_states = hidden_states + (hidden_state.transpose(0, 3, 1, 2),) hidden_state = stage_module(hidden_state, deterministic=deterministic) return hidden_state, hidden_states # Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetEncoder with ResNet->RegNet class FlaxRegNetEncoder(nn.Module): config: RegNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.stages = FlaxRegNetStageCollection(self.config, dtype=self.dtype) def __call__( self, hidden_state: jnp.ndarray, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ) -> FlaxBaseModelOutputWithNoAttention: hidden_state, hidden_states = self.stages( hidden_state, output_hidden_states=output_hidden_states, deterministic=deterministic ) if output_hidden_states: hidden_states = hidden_states + (hidden_state.transpose(0, 3, 1, 2),) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return FlaxBaseModelOutputWithNoAttention( last_hidden_state=hidden_state, hidden_states=hidden_states, ) # Copied from 
# transformers.models.resnet.modeling_flax_resnet.FlaxResNetPreTrainedModel with ResNet->RegNet,resnet->regnet,RESNET->REGNET
class FlaxRegNetPreTrainedModel(FlaxPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    module_class: nn.Module = None

    def __init__(
        self,
        config: RegNetConfig,
        input_shape=(1, 224, 224, 3),
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        if input_shape is None:
            input_shape = (1, config.image_size, config.image_size, config.num_channels)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensors
        pixel_values = jnp.zeros(input_shape, dtype=self.dtype)

        rngs = {"params": rng}

        random_params = self.module.init(rngs, pixel_values, return_dict=False)

        if params is not None:
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    def __call__(
        self,
        pixel_values,
        params: dict = None,
        train: bool = False,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))

        # Handle any PRNG if needed
        rngs = {}

        return self.module.apply(
            {
                "params": params["params"] if params is not None else self.params["params"],
                "batch_stats": params["batch_stats"] if params is not None else self.params["batch_stats"],
            },
            jnp.array(pixel_values, dtype=jnp.float32),
            not train,
            output_hidden_states,
            return_dict,
            rngs=rngs,
            mutable=["batch_stats"] if train else False,  # Returning tuple with batch_stats only when train is True
        )


# Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetModule with ResNet->RegNet
class FlaxRegNetModule(nn.Module):
    config: RegNetConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.embedder = FlaxRegNetEmbeddings(self.config, dtype=self.dtype)
        self.encoder = FlaxRegNetEncoder(self.config, dtype=self.dtype)

        # Adaptive average pooling used in ResNet
        self.pooler = partial(
            nn.avg_pool,
            padding=((0, 0), (0, 0)),
        )

    def __call__(
        self,
        pixel_values,
        deterministic: bool = True,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> FlaxBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, deterministic=deterministic)

        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(
            last_hidden_state,
window_shape=(last_hidden_state.shape[1], last_hidden_state.shape[2]), strides=(last_hidden_state.shape[1], last_hidden_state.shape[2]), ).transpose(0, 3, 1, 2) last_hidden_state = last_hidden_state.transpose(0, 3, 1, 2) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return FlaxBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, ) @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top.", REGNET_START_DOCSTRING, ) class FlaxRegNetModel(FlaxRegNetPreTrainedModel): module_class = FlaxRegNetModule FLAX_VISION_MODEL_DOCSTRING = """ Returns: Examples: ```python >>> from transformers import AutoImageProcessor, FlaxRegNetModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040") >>> model = FlaxRegNetModel.from_pretrained("facebook/regnet-y-040") >>> inputs = image_processor(images=image, return_tensors="np") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ overwrite_call_docstring(FlaxRegNetModel, FLAX_VISION_MODEL_DOCSTRING) append_replace_return_docstrings( FlaxRegNetModel, output_type=FlaxBaseModelOutputWithPooling, config_class=RegNetConfig, ) # Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetClassifierCollection with ResNet->RegNet class FlaxRegNetClassifierCollection(nn.Module): config: RegNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype, name="1") def __call__(self, x: jnp.ndarray) -> jnp.ndarray: return self.classifier(x) # Copied from transformers.models.resnet.modeling_flax_resnet.FlaxResNetForImageClassificationModule with ResNet->RegNet,resnet->regnet,RESNET->REGNET class FlaxRegNetForImageClassificationModule(nn.Module): config: RegNetConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.regnet = FlaxRegNetModule(config=self.config, dtype=self.dtype) if self.config.num_labels > 0: self.classifier = FlaxRegNetClassifierCollection(self.config, dtype=self.dtype) else: self.classifier = Identity() def __call__( self, pixel_values=None, deterministic: bool = True, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.regnet( pixel_values, deterministic=deterministic, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output[:, :, 0, 0]) if not return_dict: output = (logits,) + outputs[2:] return output return FlaxImageClassifierOutputWithNoAttention(logits=logits, hidden_states=outputs.hidden_states) @add_start_docstrings( """ RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""", REGNET_START_DOCSTRING, ) class FlaxRegNetForImageClassification(FlaxRegNetPreTrainedModel): module_class = FlaxRegNetForImageClassificationModule FLAX_VISION_CLASSIF_DOCSTRING = """ Returns: Example: ```python >>> from transformers import AutoImageProcessor, FlaxRegNetForImageClassification >>> from PIL import Image >>> import jax >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040") >>> model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040") >>> inputs = image_processor(images=image, return_tensors="np") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_class_idx = jax.numpy.argmax(logits, axis=-1) >>> print("Predicted class:", model.config.id2label[predicted_class_idx.item()]) ``` """ overwrite_call_docstring(FlaxRegNetForImageClassification, FLAX_VISION_CLASSIF_DOCSTRING) append_replace_return_docstrings( FlaxRegNetForImageClassification, output_type=FlaxImageClassifierOutputWithNoAttention, config_class=RegNetConfig, ) __all__ = ["FlaxRegNetForImageClassification", "FlaxRegNetModel", "FlaxRegNetPreTrainedModel"]
transformers/src/transformers/models/regnet/modeling_flax_regnet.py/0
{ "file_path": "transformers/src/transformers/models/regnet/modeling_flax_regnet.py", "repo_id": "transformers", "token_count": 12500 }
# coding=utf-8
# Copyright 2022 WeChatAI The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoCBert model."""

import math
import os
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...generation import GenerationMixin
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_roc_bert import RoCBertConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "weiweishi/roc-bert-base-zh"
_CONFIG_FOR_DOC = "RoCBertConfig"

# Base model docstring
_EXPECTED_OUTPUT_SHAPE = [1, 8, 768]

# Token Classification output
_CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "ArthurZ/dummy-rocbert-ner"
_TOKEN_CLASS_EXPECTED_OUTPUT = ["S-EVENT", "S-FAC", "I-ORDINAL", "I-ORDINAL", "E-ORG", "E-LANGUAGE", "E-ORG", "E-ORG", "E-ORG", "E-ORG", "I-EVENT", "S-TIME", "S-TIME", "E-LANGUAGE", "S-TIME", "E-DATE", "I-ORDINAL", "E-QUANTITY", "E-LANGUAGE", "S-TIME", "B-ORDINAL", "S-PRODUCT", "E-LANGUAGE", "E-LANGUAGE", "E-ORG", "E-LOC", "S-TIME", "I-ORDINAL", "S-FAC", "O", "S-GPE", "I-EVENT", "S-GPE", "E-LANGUAGE", "E-ORG", "S-EVENT", "S-FAC", "S-FAC", "S-FAC", "E-ORG", "S-FAC", "E-ORG", "S-GPE"]  # fmt: skip
_TOKEN_CLASS_EXPECTED_LOSS = 3.62

# SequenceClassification docstring
_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "ArthurZ/dummy-rocbert-seq"
_SEQ_CLASS_EXPECTED_OUTPUT = "'financial news'"
_SEQ_CLASS_EXPECTED_LOSS = 2.31

# QuestionAnswering docstring
_CHECKPOINT_FOR_QA = "ArthurZ/dummy-rocbert-qa"
_QA_EXPECTED_OUTPUT = "''"
_QA_EXPECTED_LOSS = 3.75
_QA_TARGET_START_INDEX = 14
_QA_TARGET_END_INDEX = 15

# Masked language modeling


# Copied from transformers.models.bert.modeling_bert.load_tf_weights_in_bert with bert->roc_bert
def load_tf_weights_in_roc_bert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model."""
    try:
        import re

        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using a pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            array = np.transpose(array)
        try:
            if pointer.shape != array.shape:
                raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except ValueError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model


class RoCBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position, shape, pronunciation and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.pronunciation_embed = nn.Embedding(
            config.pronunciation_vocab_size, config.pronunciation_embed_dim, padding_idx=config.pad_token_id
        )
        self.shape_embed = nn.Embedding(
            config.shape_vocab_size, config.shape_embed_dim, padding_idx=config.pad_token_id
        )
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        self.enable_pronunciation = config.enable_pronunciation
        self.enable_shape = config.enable_shape

        if config.concat_input:
            input_dim = config.hidden_size
            if self.enable_pronunciation:
                pronunciation_dim = config.pronunciation_embed_dim
                input_dim += pronunciation_dim
            if self.enable_shape:
                shape_dim = config.shape_embed_dim
                input_dim += shape_dim
            self.map_inputs_layer = torch.nn.Linear(input_dim, config.hidden_size)
        else:
            self.map_inputs_layer = None

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
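        # Descriptive note (editor's addition, not in the original source): when
        # `config.concat_input` is True, forward() concatenates the word, shape and
        # pronunciation streams along the feature axis and `map_inputs_layer` projects the
        # combined `hidden_size + shape_embed_dim + pronunciation_embed_dim` features back
        # down to `hidden_size`; otherwise the enabled streams are summed elementwise and
        # averaged, which requires each of them to already have `hidden_size` features.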
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) def forward( self, input_ids=None, input_shape_ids=None, input_pronunciation_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0, ): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if self.map_inputs_layer is None: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) denominator = 1 embedding_in = torch.clone(embeddings) if self.enable_shape and input_shape_ids is not None: embedding_shape = self.shape_embed(input_shape_ids) embedding_in += embedding_shape denominator += 1 if self.enable_pronunciation and input_pronunciation_ids is not None: embedding_pronunciation = self.pronunciation_embed(input_pronunciation_ids) embedding_in += embedding_pronunciation denominator += 1 embedding_in /= denominator return embedding_in else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) # embedding_word device = inputs_embeds.device embedding_in = torch.clone(inputs_embeds) if self.enable_shape: if input_shape_ids is None: input_shape_ids = torch.zeros(input_shape, dtype=torch.long, device=device) embedding_shape = self.shape_embed(input_shape_ids) embedding_in = torch.cat((embedding_in, embedding_shape), -1) if self.enable_pronunciation: if input_pronunciation_ids is None: input_pronunciation_ids = torch.zeros(input_shape, dtype=torch.long, device=device) embedding_pronunciation = self.pronunciation_embed(input_pronunciation_ids) embedding_in = torch.cat((embedding_in, embedding_pronunciation), -1) embedding_in = self.map_inputs_layer(embedding_in) # batch_size * seq_len * hidden_dim token_type_embeddings = self.token_type_embeddings(token_type_ids) embedding_in += token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embedding_in += position_embeddings embedding_in = self.LayerNorm(embedding_in) embedding_in = self.dropout(embedding_in) return embedding_in # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->RoCBert class RoCBertSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if 
config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in RoCBertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
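        # Descriptive note (editor's addition): `nn.Dropout` zeroes each attention
        # probability independently with probability p and rescales the survivors by
        # 1 / (1 - p) during training, so a dropped entry removes that key's entire
        # contribution to the weighted sum for the given query position.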
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->RoCBert class RoCBertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states ROC_BERT_SELF_ATTENTION_CLASSES = { "eager": RoCBertSelfAttention, } # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->RoCBert,BERT->ROC_BERT class RoCBertAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = ROC_BERT_SELF_ATTENTION_CLASSES[config._attn_implementation]( config, position_embedding_type=position_embedding_type ) self.output = RoCBertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->RoCBert class RoCBertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> 
torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->RoCBert class RoCBertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->RoCBert class RoCBertLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = RoCBertAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = RoCBertAttention(config, position_embedding_type="absolute") self.intermediate = RoCBertIntermediate(config) self.output = RoCBertOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, 
self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->RoCBert class RoCBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([RoCBertLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->RoCBert class RoCBertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by 
simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->RoCBert class RoCBertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->RoCBert class RoCBertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = RoCBertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->RoCBert class RoCBertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = RoCBertLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class RoCBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = RoCBertConfig load_tf_weights = load_tf_weights_in_roc_bert base_model_prefix = "roc_bert" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) ROC_BERT_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RoCBertConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ ROC_BERT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) input_shape_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the shape vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input_shape_ids) input_pronunciation_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the pronunciation vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input_pronunciation_ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare RoCBert Model transformer outputting raw hidden-states without any specific head on top.", ROC_BERT_START_DOCSTRING, ) class RoCBertModel(RoCBertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. """ # Copied from transformers.models.clap.modeling_clap.ClapTextModel.__init__ with ClapText->RoCBert def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = RoCBertEmbeddings(config) self.encoder = RoCBertEncoder(config) self.pooler = RoCBertPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.bert.modeling_bert.BertModel.get_input_embeddings def get_input_embeddings(self): return self.embeddings.word_embeddings # Copied from transformers.models.bert.modeling_bert.BertModel.set_input_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def get_pronunciation_embeddings(self): return self.embeddings.pronunciation_embed def set_pronunciation_embeddings(self, value): self.embeddings.pronunciation_embed = value def get_shape_embeddings(self): return self.embeddings.shape_embed def set_shape_embeddings(self, value): self.embeddings.shape_embed = value # Copied from transformers.models.bert.modeling_bert.BertModel._prune_heads def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_ids: Optional[torch.Tensor] = None, input_shape_ids: Optional[torch.Tensor] = None, input_pronunciation_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
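        Example (editor's illustrative addition, abbreviated; output values omitted):

        ```python
        >>> from transformers import AutoTokenizer, RoCBertModel

        >>> tokenizer = AutoTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
        >>> model = RoCBertModel.from_pretrained("weiweishi/roc-bert-base-zh")

        >>> inputs = tokenizer("你好,很高兴认识你", return_tensors="pt")
        >>> last_hidden_state = model(**inputs).last_hidden_state  # (batch_size, sequence_length, hidden_size)
        ```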
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = 
self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings( """ RoCBert Model with contrastive loss and masked_lm_loss during the pretraining. """, ROC_BERT_START_DOCSTRING, ) class RoCBertForPreTraining(RoCBertPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.roc_bert = RoCBertModel(config) self.cls = RoCBertOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.get_output_embeddings def get_output_embeddings(self): return self.cls.predictions.decoder # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.set_output_embeddings def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, input_shape_ids: Optional[torch.Tensor] = None, input_pronunciation_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, attack_input_ids: Optional[torch.Tensor] = None, attack_input_shape_ids: Optional[torch.Tensor] = None, attack_input_pronunciation_ids: Optional[torch.Tensor] = None, attack_attention_mask: Optional[torch.Tensor] = None, attack_token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels_input_ids: Optional[torch.Tensor] = None, labels_input_shape_ids: Optional[torch.Tensor] = None, labels_input_pronunciation_ids: Optional[torch.Tensor] = None, labels_attention_mask: Optional[torch.Tensor] = None, labels_token_type_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: r""" attack_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): attack sample ids for computing the contrastive loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` attack_input_shape_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): attack sample shape ids for computing the contrastive loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` attack_input_pronunciation_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): attack sample pronunciation ids for computing the contrastive loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` labels_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): target ids for computing the contrastive loss and masked_lm_loss . Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` labels_input_shape_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): target shape ids for computing the contrastive loss and masked_lm_loss . Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` labels_input_pronunciation_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): target pronunciation ids for computing the contrastive loss and masked_lm_loss . Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` kwargs (`Dict[str, any]`, *optional*, defaults to *{}*): Used to hide legacy arguments that have been deprecated. Returns: Example: ```python >>> from transformers import AutoTokenizer, RoCBertForPreTraining >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("weiweishi/roc-bert-base-zh") >>> model = RoCBertForPreTraining.from_pretrained("weiweishi/roc-bert-base-zh") >>> inputs = tokenizer("你好,很高兴认识你", return_tensors="pt") >>> attack_inputs = {} >>> for key in list(inputs.keys()): ... attack_inputs[f"attack_{key}"] = inputs[key] >>> label_inputs = {} >>> for key in list(inputs.keys()): ... 
label_inputs[f"labels_{key}"] = inputs[key] >>> inputs.update(label_inputs) >>> inputs.update(attack_inputs) >>> outputs = model(**inputs) >>> logits = outputs.logits >>> logits.shape torch.Size([1, 11, 21128]) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roc_bert( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output, pooled_output = outputs[:2] prediction_scores = self.cls(sequence_output) loss = None if labels_input_ids is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels_input_ids.view(-1)) if attack_input_ids is not None: batch_size, _ = labels_input_ids.shape device = labels_input_ids.device target_inputs = torch.clone(labels_input_ids) target_inputs[target_inputs == -100] = self.config.pad_token_id labels_output = self.roc_bert( target_inputs, input_shape_ids=labels_input_shape_ids, input_pronunciation_ids=labels_input_pronunciation_ids, attention_mask=labels_attention_mask, token_type_ids=labels_token_type_ids, return_dict=return_dict, ) attack_output = self.roc_bert( attack_input_ids, input_shape_ids=attack_input_shape_ids, input_pronunciation_ids=attack_input_pronunciation_ids, attention_mask=attack_attention_mask, token_type_ids=attack_token_type_ids, return_dict=return_dict, ) labels_pooled_output = labels_output[1] attack_pooled_output = attack_output[1] pooled_output_norm = torch.nn.functional.normalize(pooled_output, dim=-1) labels_pooled_output_norm = torch.nn.functional.normalize(labels_pooled_output, dim=-1) attack_pooled_output_norm = torch.nn.functional.normalize(attack_pooled_output, dim=-1) sim_matrix = torch.matmul(pooled_output_norm, attack_pooled_output_norm.T) # batch_size * hidden_dim sim_matrix_target = torch.matmul(labels_pooled_output_norm, attack_pooled_output_norm.T) batch_labels = torch.tensor(list(range(batch_size)), device=device) contrastive_loss = ( loss_fct(100 * sim_matrix.view(batch_size, -1), batch_labels.view(-1)) + loss_fct(100 * sim_matrix_target.view(batch_size, -1), batch_labels.view(-1)) ) / 2 loss = contrastive_loss + masked_lm_loss else: loss = masked_lm_loss if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings("""RoCBert Model with a `language modeling` head on top.""", ROC_BERT_START_DOCSTRING) class RoCBertForMaskedLM(RoCBertPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.__init__ with Bert->RoCBert,bert->roc_bert def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `RoCBertForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." 
) self.roc_bert = RoCBertModel(config, add_pooling_layer=False) self.cls = RoCBertOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.get_output_embeddings def get_output_embeddings(self): return self.cls.predictions.decoder # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.set_output_embeddings def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def forward( self, input_ids: Optional[torch.Tensor] = None, input_shape_ids: Optional[torch.Tensor] = None, input_pronunciation_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, RoCBertForMaskedLM >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("weiweishi/roc-bert-base-zh") >>> model = RoCBertForMaskedLM.from_pretrained("weiweishi/roc-bert-base-zh") >>> inputs = tokenizer("法国是首都[MASK].", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> # retrieve index of {mask} >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> tokenizer.decode(predicted_token_id) '.' 
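        >>> # Illustrative continuation (editor's addition, not part of the original example):
        >>> # passing `labels` returns the masked-LM loss; positions set to -100 are ignored,
        >>> # and here the inputs themselves serve as dummy targets just to show the mechanics.
        >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, inputs.input_ids, -100)
        >>> loss = model(**inputs, labels=labels).loss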
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.roc_bert(
            input_ids,
            input_shape_ids=input_shape_ids,
            input_pronunciation_ids=input_pronunciation_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, input_shape_ids=None, input_pronunciation_ids=None, attention_mask=None, **model_kwargs
    ):
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]

        # add a dummy token
        if self.config.pad_token_id is None:
            raise ValueError("The PAD token should be defined for generation")

        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)
        if input_shape_ids is not None:
            input_shape_ids = torch.cat([input_shape_ids, dummy_token], dim=1)
        if input_pronunciation_ids is not None:
            input_pronunciation_ids = torch.cat([input_pronunciation_ids, dummy_token], dim=1)

        return {
            "input_ids": input_ids,
            "input_shape_ids": input_shape_ids,
            "input_pronunciation_ids": input_pronunciation_ids,
            "attention_mask": attention_mask,
        }


@add_start_docstrings(
    """RoCBert Model with a `language modeling` head on top for CLM fine-tuning.""", ROC_BERT_START_DOCSTRING
)
class RoCBertForCausalLM(RoCBertPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.__init__ with BertLMHeadModel->RoCBertForCausalLM,Bert->RoCBert,bert->roc_bert
    def __init__(self, config):
        super().__init__(config)

        if not config.is_decoder:
            logger.warning("If you want to use `RoCBertForCausalLM` as a standalone, add `is_decoder=True`.")

        self.roc_bert = RoCBertModel(config, add_pooling_layer=False)
        self.cls = RoCBertOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.get_output_embeddings
    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.set_output_embeddings
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        input_shape_ids: Optional[torch.Tensor] = None,
        input_pronunciation_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.Tensor]] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are
            only required when the model is used as a decoder in a Sequence to Sequence model.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
            cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
Returns: Example: ```python >>> from transformers import AutoTokenizer, RoCBertForCausalLM, RoCBertConfig >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("weiweishi/roc-bert-base-zh") >>> config = RoCBertConfig.from_pretrained("weiweishi/roc-bert-base-zh") >>> config.is_decoder = True >>> model = RoCBertForCausalLM.from_pretrained("weiweishi/roc-bert-base-zh", config=config) >>> inputs = tokenizer("你好,很高兴认识你", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roc_bert( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: lm_loss = self.loss_function( prediction_scores, labels, vocab_size=self.config.vocab_size, **kwargs, ) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation( self, input_ids, input_shape_ids=None, input_pronunciation_ids=None, past_key_values=None, attention_mask=None, **model_kwargs, ): # Overwritten -- `input_pronunciation_ids` input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # cut decoder_input_ids if past_key_values is used if past_key_values is not None: past_length = past_key_values[0][0].shape[2] # Some generation methods already pass only the last input ID if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: # Default to old behavior: keep only final ID remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] if input_shape_ids is not None: input_shape_ids = input_shape_ids[:, -1:] if input_pronunciation_ids is not None: input_pronunciation_ids = input_pronunciation_ids[:, -1:] return { "input_ids": input_ids, "input_shape_ids": input_shape_ids, "input_pronunciation_ids": input_pronunciation_ids, "attention_mask": attention_mask, "past_key_values": past_key_values, } # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel._reorder_cache def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past @add_start_docstrings( """RoCBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. 
    for GLUE tasks.""",
    ROC_BERT_START_DOCSTRING,
)
class RoCBertForSequenceClassification(RoCBertPreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->RoCBert,bert->roc_bert
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.roc_bert = RoCBertModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
        expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        input_shape_ids: Optional[torch.Tensor] = None,
        input_pronunciation_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed
            (Mean-Squared loss). If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roc_bert( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """RoCBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks.""", ROC_BERT_START_DOCSTRING, ) class RoCBertForMultipleChoice(RoCBertPreTrainedModel): # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__ with Bert->RoCBert,bert->roc_bert def __init__(self, config): super().__init__(config) self.roc_bert = RoCBertModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( ROC_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, input_shape_ids: Optional[torch.Tensor] = None, input_pronunciation_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None input_shape_ids = input_shape_ids.view(-1, input_shape_ids.size(-1)) if input_shape_ids is not None else None input_pronunciation_ids = ( input_pronunciation_ids.view(-1, input_pronunciation_ids.size(-1)) if input_pronunciation_ids is not None else None ) attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.roc_bert( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """RoCBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.""", ROC_BERT_START_DOCSTRING, ) class RoCBertForTokenClassification(RoCBertPreTrainedModel): # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with Bert->RoCBert,bert->roc_bert def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.roc_bert = RoCBertModel(config, add_pooling_layer=False) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT, expected_loss=_TOKEN_CLASS_EXPECTED_LOSS, ) def forward( self, input_ids: Optional[torch.Tensor] = None, input_shape_ids: Optional[torch.Tensor] = None, input_pronunciation_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roc_bert( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """RoCBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""", ROC_BERT_START_DOCSTRING, ) class RoCBertForQuestionAnswering(RoCBertPreTrainedModel): # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with Bert->RoCBert,bert->roc_bert def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.roc_bert = RoCBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_QA, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=_QA_TARGET_START_INDEX, qa_target_end_index=_QA_TARGET_END_INDEX, expected_output=_QA_EXPECTED_OUTPUT, expected_loss=_QA_EXPECTED_LOSS, ) def forward( self, input_ids: Optional[torch.Tensor] = None, input_shape_ids: Optional[torch.Tensor] = None, input_pronunciation_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roc_bert( input_ids, input_shape_ids=input_shape_ids, input_pronunciation_ids=input_pronunciation_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "RoCBertForCausalLM", "RoCBertForMaskedLM", "RoCBertForMultipleChoice", "RoCBertForPreTraining", "RoCBertForQuestionAnswering", "RoCBertForSequenceClassification", "RoCBertForTokenClassification", "RoCBertLayer", "RoCBertModel", "RoCBertPreTrainedModel", "load_tf_weights_in_roc_bert", ]
transformers/src/transformers/models/roc_bert/modeling_roc_bert.py/0
{ "file_path": "transformers/src/transformers/models/roc_bert/modeling_roc_bert.py", "repo_id": "transformers", "token_count": 40440 }
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/rt_detr/modular_rt_detr.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_rt_detr.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 import pathlib from typing import Any, Dict, List, Optional, Tuple, Union from ...image_processing_utils import BatchFeature from ...image_processing_utils_fast import ( BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, BASE_IMAGE_PROCESSOR_FAST_DOCSTRING_PREPROCESS, BaseImageProcessorFast, DefaultFastImageProcessorInitKwargs, DefaultFastImageProcessorPreprocessKwargs, SizeDict, add_start_docstrings, get_image_size_for_max_height_width, get_max_height_width, safe_squeeze, ) from ...image_transforms import center_to_corners_format, corners_to_center_format from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, AnnotationFormat, AnnotationType, ChannelDimension, ImageInput, PILImageResampling, get_image_size, validate_annotations, ) from ...processing_utils import Unpack from ...utils import ( TensorType, is_torch_available, is_torchvision_available, is_torchvision_v2_available, requires_backends, ) from .image_processing_rt_detr import get_size_with_aspect_ratio if is_torch_available(): import torch if is_torchvision_v2_available(): from torchvision.transforms.v2 import functional as F elif is_torchvision_available(): from torchvision.transforms import functional as F class RTDetrFastImageProcessorInitKwargs(DefaultFastImageProcessorInitKwargs): format: Optional[Union[str, AnnotationFormat]] do_convert_annotations: Optional[bool] do_pad: Optional[bool] pad_size: Optional[Dict[str, int]] class RTDetrFastImageProcessorPreprocessKwargs(DefaultFastImageProcessorPreprocessKwargs): format: Optional[AnnotationFormat] annotations: Optional[Dict] do_convert_annotations: Optional[bool] do_pad: Optional[bool] pad_size: Optional[Dict[str, int]] return_segmentation_masks: Optional[bool] masks_path: Optional[Union[str, pathlib.Path]] SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC) def prepare_coco_detection_annotation( image, target, return_segmentation_masks: bool = False, input_data_format: Optional[Union[ChannelDimension, str]] = None, ): """ Convert the target in COCO format into the format expected by RT-DETR. """ image_height, image_width = image.size()[-2:] image_id = target["image_id"] image_id = torch.as_tensor([image_id], dtype=torch.int64, device=image.device) # Get all COCO annotations for the given image. 
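    # COCO stores boxes as `[top_left_x, top_left_y, width, height]`; below they are
    # converted to `[x0, y0, x1, y1]`, clipped to the image bounds, and degenerate
    # (zero-area) boxes are dropped via the `keep` mask.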
    annotations = target["annotations"]
    classes = []
    area = []
    boxes = []
    keypoints = []
    for obj in annotations:
        if "iscrowd" not in obj or obj["iscrowd"] == 0:
            classes.append(obj["category_id"])
            area.append(obj["area"])
            boxes.append(obj["bbox"])
            if "keypoints" in obj:
                keypoints.append(obj["keypoints"])

    classes = torch.as_tensor(classes, dtype=torch.int64, device=image.device)
    area = torch.as_tensor(area, dtype=torch.float32, device=image.device)
    iscrowd = torch.zeros_like(classes, dtype=torch.int64, device=image.device)
    # guard against no boxes via resizing
    boxes = torch.as_tensor(boxes, dtype=torch.float32, device=image.device).reshape(-1, 4)
    boxes[:, 2:] += boxes[:, :2]
    boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
    boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)

    keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])

    new_target = {
        "image_id": image_id,
        "class_labels": classes[keep],
        "boxes": boxes[keep],
        "area": area[keep],
        "iscrowd": iscrowd[keep],
        "orig_size": torch.as_tensor([int(image_height), int(image_width)], dtype=torch.int64, device=image.device),
    }

    if keypoints:
        keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=image.device)
        # Apply the keep mask here to filter the relevant annotations
        keypoints = keypoints[keep]
        num_keypoints = keypoints.shape[0]
        keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
        new_target["keypoints"] = keypoints

    return new_target


@add_start_docstrings(
    "Constructs a fast RTDetr image processor.",
    BASE_IMAGE_PROCESSOR_FAST_DOCSTRING,
    """
    format (`str`, *optional*, defaults to `AnnotationFormat.COCO_DETECTION`):
        Data format of the annotations. One of "coco_detection" or "coco_panoptic".
    do_convert_annotations (`bool`, *optional*, defaults to `True`):
        Controls whether to convert the annotations to the format expected by the RT_DETR model. Converts the
        bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`. Can be
        overridden by the `do_convert_annotations` parameter in the `preprocess` method.
    do_pad (`bool`, *optional*, defaults to `True`):
        Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`
        method. If `True`, padding will be applied to the bottom and right of the image with zeros. If `pad_size`
        is provided, the image will be padded to the specified dimensions. Otherwise, the image will be padded to
        the maximum height and width of the batch.
    pad_size (`Dict[str, int]`, *optional*):
        The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size
        provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest height
        and width in the batch.
""", ) class RTDetrImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BILINEAR image_mean = IMAGENET_DEFAULT_MEAN image_std = IMAGENET_DEFAULT_STD format = AnnotationFormat.COCO_DETECTION do_resize = True do_rescale = True do_normalize = False do_pad = False size = {"height": 640, "width": 640} default_to_square = False model_input_names = ["pixel_values", "pixel_mask"] valid_init_kwargs = RTDetrFastImageProcessorInitKwargs valid_preprocess_kwargs = RTDetrFastImageProcessorPreprocessKwargs do_convert_annotations = True def __init__(self, **kwargs: Unpack[RTDetrFastImageProcessorInitKwargs]) -> None: # Backwards compatibility do_convert_annotations = kwargs.get("do_convert_annotations", None) do_normalize = kwargs.get("do_normalize", None) if do_convert_annotations is None and getattr(self, "do_convert_annotations", None) is None: self.do_convert_annotations = do_normalize if do_normalize is not None else self.do_normalize super().__init__(**kwargs) def prepare_annotation( self, image: torch.Tensor, target: Dict, format: Optional[AnnotationFormat] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Dict: """ Prepare an annotation for feeding into RT_DETR model. """ format = format if format is not None else self.format if format == AnnotationFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation( image, target, return_segmentation_masks, input_data_format=input_data_format ) else: raise ValueError(f"Format {format} is not supported.") return target def resize( self, image: torch.Tensor, size: SizeDict, interpolation: "F.InterpolationMode" = None, **kwargs, ) -> torch.Tensor: """ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. Args: image (`torch.Tensor`): Image to resize. size (`SizeDict`): Size of the image's `(height, width)` dimensions after resizing. Available options are: - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`. Do NOT keep the aspect ratio. - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge less or equal to `longest_edge`. - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to `max_width`. interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`): Resampling filter to use if resizing the image. """ interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR if size.shortest_edge and size.longest_edge: # Resize the image so that the shortest edge or the longest edge is of the given size # while maintaining the aspect ratio of the original image. 
new_size = get_size_with_aspect_ratio( image.size()[-2:], size["shortest_edge"], size["longest_edge"], ) elif size.max_height and size.max_width: new_size = get_image_size_for_max_height_width(image.size()[-2:], size["max_height"], size["max_width"]) elif size.height and size.width: new_size = (size["height"], size["width"]) else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) image = F.resize( image, size=new_size, interpolation=interpolation, **kwargs, ) return image def resize_annotation( self, annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float = 0.5, interpolation: "F.InterpolationMode" = None, ): """ Resizes an annotation to a target size. Args: annotation (`Dict[str, Any]`): The annotation dictionary. orig_size (`Tuple[int, int]`): The original size of the input image. target_size (`Tuple[int, int]`): The target size of the image, as returned by the preprocessing `resize` step. threshold (`float`, *optional*, defaults to 0.5): The threshold used to binarize the segmentation masks. resample (`InterpolationMode`, defaults to `InterpolationMode.NEAREST`): The resampling filter to use when resizing the masks. """ interpolation = interpolation if interpolation is not None else F.InterpolationMode.NEAREST ratio_height, ratio_width = [target / orig for target, orig in zip(target_size, orig_size)] new_annotation = {} new_annotation["size"] = target_size for key, value in annotation.items(): if key == "boxes": boxes = value scaled_boxes = boxes * torch.as_tensor( [ratio_width, ratio_height, ratio_width, ratio_height], dtype=torch.float32, device=boxes.device ) new_annotation["boxes"] = scaled_boxes elif key == "area": area = value scaled_area = area * (ratio_width * ratio_height) new_annotation["area"] = scaled_area elif key == "masks": masks = value[:, None] masks = [F.resize(mask, target_size, interpolation=interpolation) for mask in masks] masks = torch.stack(masks).to(torch.float32) masks = masks[:, 0] > threshold new_annotation["masks"] = masks elif key == "size": new_annotation["size"] = target_size else: new_annotation[key] = value return new_annotation def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: image_height, image_width = image_size norm_annotation = {} for key, value in annotation.items(): if key == "boxes": boxes = value boxes = corners_to_center_format(boxes) boxes /= torch.as_tensor( [image_width, image_height, image_width, image_height], dtype=torch.float32, device=boxes.device ) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation def _update_annotation_for_padded_image( self, annotation: Dict, input_image_size: Tuple[int, int], output_image_size: Tuple[int, int], padding, update_bboxes, ) -> Dict: """ Update the annotation for a padded image. 
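
        Padding is applied to the bottom and right only, so existing pixel content does not move: masks are
        zero-padded to the new canvas and, when `update_bboxes` is set, the normalized boxes are rescaled by the
        input/output size ratios so that they remain expressed relative to the padded image.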
""" new_annotation = {} new_annotation["size"] = output_image_size ratio_height, ratio_width = (input / output for output, input in zip(output_image_size, input_image_size)) for key, value in annotation.items(): if key == "masks": masks = value masks = F.pad( masks, padding, fill=0, ) masks = safe_squeeze(masks, 1) new_annotation["masks"] = masks elif key == "boxes" and update_bboxes: boxes = value boxes *= torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height], device=boxes.device) new_annotation["boxes"] = boxes elif key == "size": new_annotation["size"] = output_image_size else: new_annotation[key] = value return new_annotation def pad( self, image: torch.Tensor, padded_size: Tuple[int, int], annotation: Optional[Dict[str, Any]] = None, update_bboxes: bool = True, fill: int = 0, ): original_size = image.size()[-2:] padding_bottom = padded_size[0] - original_size[0] padding_right = padded_size[1] - original_size[1] if padding_bottom < 0 or padding_right < 0: raise ValueError( f"Padding dimensions are negative. Please make sure that the padded size is larger than the " f"original size. Got padded size: {padded_size}, original size: {original_size}." ) if original_size != padded_size: padding = [0, 0, padding_right, padding_bottom] image = F.pad(image, padding, fill=fill) if annotation is not None: annotation = self._update_annotation_for_padded_image( annotation, original_size, padded_size, padding, update_bboxes ) # Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. pixel_mask = torch.zeros(padded_size, dtype=torch.int64, device=image.device) pixel_mask[: original_size[0], : original_size[1]] = 1 return image, pixel_mask, annotation @add_start_docstrings( BASE_IMAGE_PROCESSOR_FAST_DOCSTRING_PREPROCESS, """ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*): List of annotations associated with the image or batch of images. If annotation is for object detection, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a dictionary. An image can have no annotations, in which case the list should be empty. If annotation is for segmentation, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary. An image can have no segments, in which case the list should be empty. - "file_name" (`str`): The file name of the image. format (`str`, *optional*, defaults to `AnnotationFormat.COCO_DETECTION`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_convert_annotations (`bool`, *optional*, defaults to `True`): Controls whether to convert the annotations to the format expected by the DETR model. Converts the bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`. Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method. If `True`, padding will be applied to the bottom and right of the image with zeros. If `pad_size` is provided, the image will be padded to the specified dimensions. Otherwise, the image will be padded to the maximum height and width of the batch. 
        pad_size (`Dict[str, int]`, *optional*):
            The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size
            provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest height
            and width in the batch.
        return_segmentation_masks (`bool`, *optional*, defaults to `False`):
            Whether to return segmentation masks.
        masks_path (`str` or `pathlib.Path`, *optional*):
            Path to the directory containing the segmentation masks.
        """,
    )
    def preprocess(
        self, images: ImageInput, **kwargs: Unpack[RTDetrFastImageProcessorPreprocessKwargs]
    ) -> BatchFeature:
        return super().preprocess(images, **kwargs)

    def _preprocess(
        self,
        images: List["torch.Tensor"],
        annotations: Optional[Union[AnnotationType, List[AnnotationType]]],
        return_segmentation_masks: bool,
        masks_path: Optional[Union[str, pathlib.Path]],
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["F.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        do_convert_annotations: bool,
        image_mean: Optional[Union[float, List[float]]],
        image_std: Optional[Union[float, List[float]]],
        do_pad: bool,
        pad_size: Optional[Dict[str, int]],
        format: Optional[Union[str, AnnotationFormat]],
        return_tensors: Optional[Union[str, TensorType]],
    ) -> BatchFeature:
        """
        Preprocess an image or a batch of images so that it can be used by the model.
        """
        if annotations is not None and isinstance(annotations, dict):
            annotations = [annotations]

        if annotations is not None and len(images) != len(annotations):
            raise ValueError(
                f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
            )

        format = AnnotationFormat(format)
        if annotations is not None:
            validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)

        data = {}
        processed_images = []
        processed_annotations = []
        pixel_masks = []  # Initialize pixel_masks here
        for image, annotation in zip(images, annotations if annotations is not None else [None] * len(images)):
            # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
            if annotations is not None:
                annotation = self.prepare_annotation(
                    image,
                    annotation,
                    format,
                    return_segmentation_masks=return_segmentation_masks,
                    masks_path=masks_path,
                    input_data_format=ChannelDimension.FIRST,
                )

            if do_resize:
                resized_image = self.resize(image, size=size, interpolation=interpolation)
                if annotations is not None:
                    annotation = self.resize_annotation(
                        annotation,
                        orig_size=image.size()[-2:],
                        target_size=resized_image.size()[-2:],
                    )
                image = resized_image

            if do_rescale and do_normalize:
                # fused rescale and normalize
                image = F.normalize(image.to(dtype=torch.float32), image_mean, image_std)
            elif do_rescale:
                image = image * rescale_factor
            elif do_normalize:
                image = F.normalize(image, image_mean, image_std)

            if do_convert_annotations and annotations is not None:
                annotation = self.normalize_annotation(annotation, get_image_size(image, ChannelDimension.FIRST))

            processed_images.append(image)
            processed_annotations.append(annotation)
        images = processed_images
        annotations = processed_annotations if annotations is not None else None

        if do_pad:
            # depends on all resized image shapes so we need another loop
            if pad_size is not None:
                padded_size = (pad_size["height"], pad_size["width"])
            else:
                padded_size = get_max_height_width(images)

            padded_images = []
            padded_annotations = []
            for image, annotation in zip(images, annotations if annotations is not None else [None] * len(images)):
                # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
                if padded_size == image.size()[-2:]:
                    padded_images.append(image)
                    pixel_masks.append(torch.ones(padded_size, dtype=torch.int64, device=image.device))
                    padded_annotations.append(annotation)
                    continue
                image, pixel_mask, annotation = self.pad(
                    image, padded_size, annotation=annotation, update_bboxes=do_convert_annotations
                )
                padded_images.append(image)
                padded_annotations.append(annotation)
                pixel_masks.append(pixel_mask)
            images = padded_images
            padded_annotations = padded_annotations if annotations is not None else None
            annotations = padded_annotations
            data.update({"pixel_mask": torch.stack(pixel_masks, dim=0)})

        data.update({"pixel_values": torch.stack(images, dim=0)})
        encoded_inputs = BatchFeature(data, tensor_type=return_tensors)
        if annotations is not None:
            encoded_inputs["labels"] = [
                BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
            ]
        return encoded_inputs

    def post_process_object_detection(
        self,
        outputs,
        threshold: float = 0.5,
        target_sizes: Union[TensorType, List[Tuple]] = None,
        use_focal_loss: bool = True,
    ):
        """
        Converts the raw output of [`RTDetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
        bottom_right_x, bottom_right_y) format. Only supports PyTorch.

        Args:
            outputs ([`RTDetrObjectDetectionOutput`]):
                Raw outputs of the model.
            threshold (`float`, *optional*, defaults to 0.5):
                Score threshold to keep object detection predictions.
            target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
                `(height, width)` of each image in the batch. If unset, predictions will not be resized.
            use_focal_loss (`bool`, *optional*, defaults to `True`):
                Variable informing if the focal loss was used to predict the outputs. If `True`, a sigmoid is applied
                to compute the scores of each detection, otherwise, a softmax function is used.

        Returns:
            `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
            in the batch as predicted by the model.
""" requires_backends(self, ["torch"]) out_logits, out_bbox = outputs.logits, outputs.pred_boxes # convert from relative cxcywh to absolute xyxy boxes = center_to_corners_format(out_bbox) if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if isinstance(target_sizes, List): img_h, img_w = torch.as_tensor(target_sizes).unbind(1) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] num_top_queries = out_logits.shape[1] num_classes = out_logits.shape[2] if use_focal_loss: scores = torch.nn.functional.sigmoid(out_logits) scores, index = torch.topk(scores.flatten(1), num_top_queries, axis=-1) labels = index % num_classes index = index // num_classes boxes = boxes.gather(dim=1, index=index.unsqueeze(-1).repeat(1, 1, boxes.shape[-1])) else: scores = torch.nn.functional.softmax(out_logits)[:, :, :-1] scores, labels = scores.max(dim=-1) if scores.shape[1] > num_top_queries: scores, index = torch.topk(scores, num_top_queries, dim=-1) labels = torch.gather(labels, dim=1, index=index) boxes = torch.gather(boxes, dim=1, index=index.unsqueeze(-1).tile(1, 1, boxes.shape[-1])) results = [] for score, label, box in zip(scores, labels, boxes): results.append( { "scores": score[score > threshold], "labels": label[score > threshold], "boxes": box[score > threshold], } ) return results __all__ = ["RTDetrImageProcessorFast"]
transformers/src/transformers/models/rt_detr/image_processing_rt_detr_fast.py/0
{ "file_path": "transformers/src/transformers/models/rt_detr/image_processing_rt_detr_fast.py", "repo_id": "transformers", "token_count": 12465 }
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for SAM."""

import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, pad, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
    validate_preprocess_arguments,
)
from ...utils import (
    TensorType,
    filter_out_non_signature_kwargs,
    is_tf_available,
    is_torch_available,
    is_torchvision_available,
    logging,
    requires_backends,
)


if is_torch_available():
    import torch
    import torch.nn.functional as F

if is_torchvision_available():
    from torchvision.ops.boxes import batched_nms

if is_tf_available():
    import tensorflow as tf
    from tensorflow.experimental import numpy as tnp

    from ...tf_utils import flatten, shape_list

logger = logging.get_logger(__name__)


class SamImageProcessor(BaseImageProcessor):
    r"""
    Constructs a SAM image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            the `do_resize` parameter in the `preprocess` method.
        size (`dict`, *optional*, defaults to `{"longest_edge": 1024}`):
            Size of the output image after resizing. Resizes the longest edge of the image to match
            `size["longest_edge"]` while maintaining the aspect ratio. Can be overridden by the `size` parameter in
            the `preprocess` method.
        mask_size (`dict`, *optional*, defaults to `{"longest_edge": 256}`):
            Size of the output segmentation map after resizing. Resizes the longest edge of the image to match
            `mask_size["longest_edge"]` while maintaining the aspect ratio. Can be overridden by the `mask_size`
            parameter in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
            `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
            overridden by the `rescale_factor` parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
        do_pad (`bool`, *optional*, defaults to `True`):
            Whether to pad the image to the specified `pad_size`. Can be overridden by the `do_pad` parameter in the
            `preprocess` method.
        pad_size (`dict`, *optional*, defaults to `{"height": 1024, "width": 1024}`):
            Size of the output image after padding. Can be overridden by the `pad_size` parameter in the `preprocess`
            method.
        mask_pad_size (`dict`, *optional*, defaults to `{"height": 256, "width": 256}`):
            Size of the output segmentation map after padding. Can be overridden by the `mask_pad_size` parameter in
            the `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        mask_size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_pad: bool = True,
        pad_size: Optional[Dict[str, int]] = None,
        mask_pad_size: Optional[Dict[str, int]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"longest_edge": 1024}
        size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size

        pad_size = pad_size if pad_size is not None else {"height": 1024, "width": 1024}
        pad_size = get_size_dict(pad_size, default_to_square=True)

        mask_size = mask_size if mask_size is not None else {"longest_edge": 256}
        mask_size = (
            get_size_dict(max_size=mask_size, default_to_square=False)
            if not isinstance(mask_size, dict)
            else mask_size
        )

        mask_pad_size = mask_pad_size if mask_pad_size is not None else {"height": 256, "width": 256}
        mask_pad_size = get_size_dict(mask_pad_size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.mask_size = mask_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
        self.do_pad = do_pad
        self.pad_size = pad_size
        self.mask_pad_size = mask_pad_size
        self.do_convert_rgb = do_convert_rgb

    def pad_image(
        self,
        image: np.ndarray,
        pad_size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Pad an image to `(pad_size["height"], pad_size["width"])` with zeros to the right and bottom.

        Args:
            image (`np.ndarray`):
                Image to pad.
            pad_size (`Dict[str, int]`):
                Size of the output image after padding.
data_format (`str` or `ChannelDimension`, *optional*): The data format of the image. Can be either "channels_first" or "channels_last". If `None`, the `data_format` of the `image` will be used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ output_height, output_width = pad_size["height"], pad_size["width"] input_height, input_width = get_image_size(image, channel_dim=input_data_format) pad_width = output_width - input_width pad_height = output_height - input_height padded_image = pad( image, ((0, pad_height), (0, pad_width)), data_format=data_format, input_data_format=input_data_format, **kwargs, ) return padded_image def _get_preprocess_shape(self, old_shape: Tuple[int, int], longest_edge: int): """ Compute the output size given input size and target long side length. """ oldh, oldw = old_shape scale = longest_edge * 1.0 / max(oldh, oldw) newh, neww = oldh * scale, oldw * scale newh = int(newh + 0.5) neww = int(neww + 0.5) return (newh, neww) def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Dictionary in the format `{"longest_edge": int}` specifying the size of the output image. The longest edge of the image will be resized to the specified size, while the other edge will be resized to maintain the aspect ratio. resample: `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "longest_edge" not in size: raise ValueError(f"The `size` dictionary must contain the key `longest_edge`. 
Got {size.keys()}") input_size = get_image_size(image, channel_dim=input_data_format) output_height, output_width = self._get_preprocess_shape(input_size, size["longest_edge"]) return resize( image, size=(output_height, output_width), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def _preprocess( self, image: ImageInput, do_resize: bool, do_rescale: bool, do_normalize: bool, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, rescale_factor: Optional[float] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, pad_size: Optional[Dict[str, int]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) reshaped_input_size = get_image_size(image, channel_dim=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) if do_pad: image = self.pad_image(image=image, pad_size=pad_size, input_data_format=input_data_format) return image, reshaped_input_size def _preprocess_image( self, image: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, pad_size: Optional[Dict[str, int]] = None, do_convert_rgb: Optional[bool] = None, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]]: image = to_numpy_array(image) # PIL RGBA images are converted to RGB if do_convert_rgb: image = convert_to_rgb(image) # All transformations expect numpy arrays. image = to_numpy_array(image) if do_rescale and is_scaled_image(image): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." 
) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) original_size = get_image_size(image, channel_dim=input_data_format) image, reshaped_input_size = self._preprocess( image=image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, pad_size=pad_size, input_data_format=input_data_format, ) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image, original_size, reshaped_input_size def _preprocess_mask( self, segmentation_map: ImageInput, do_resize: Optional[bool] = None, mask_size: Dict[str, int] = None, do_pad: Optional[bool] = None, mask_pad_size: Optional[Dict[str, int]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: segmentation_map = to_numpy_array(segmentation_map) # Add channel dimension if missing - needed for certain transformations if segmentation_map.ndim == 2: added_channel_dim = True segmentation_map = segmentation_map[None, ...] input_data_format = ChannelDimension.FIRST else: added_channel_dim = False if input_data_format is None: input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1) original_size = get_image_size(segmentation_map, channel_dim=input_data_format) segmentation_map, _ = self._preprocess( image=segmentation_map, do_resize=do_resize, size=mask_size, resample=PILImageResampling.NEAREST, do_rescale=False, do_normalize=False, do_pad=do_pad, pad_size=mask_pad_size, input_data_format=input_data_format, ) # Remove extra channel dimension if added for processing if added_channel_dim: segmentation_map = segmentation_map.squeeze(0) segmentation_map = segmentation_map.astype(np.int64) return segmentation_map, original_size @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, segmentation_maps: Optional[ImageInput] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, mask_size: Optional[Dict[str, int]] = None, resample: Optional["PILImageResampling"] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[Union[int, float]] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, pad_size: Optional[Dict[str, int]] = None, mask_pad_size: Optional[Dict[str, int]] = None, do_convert_rgb: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. segmentation_maps (`ImageInput`, *optional*): Segmentation map to preprocess. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Controls the size of the image after `resize`. The longest edge of the image is resized to `size["longest_edge"]` whilst preserving the aspect ratio. mask_size (`Dict[str, int]`, *optional*, defaults to `self.mask_size`): Controls the size of the segmentation map after `resize`. 
The longest edge of the image is resized to `size["longest_edge"]` whilst preserving the aspect ratio. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image pixel values by rescaling factor. rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to apply to the image pixel values. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to normalize the image by if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to normalize the image by if `do_normalize` is set to `True`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image. pad_size (`Dict[str, int]`, *optional*, defaults to `self.pad_size`): Controls the size of the padding applied to the image. The image is padded to `pad_size["height"]` and `pad_size["width"]` if `do_pad` is set to `True`. mask_pad_size (`Dict[str, int]`, *optional*, defaults to `self.mask_pad_size`): Controls the size of the padding applied to the segmentation map. The image is padded to `mask_pad_size["height"]` and `mask_pad_size["width"]` if `do_pad` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size mask_size = mask_size if mask_size is not None else self.mask_size mask_size = ( get_size_dict(max_size=mask_size, default_to_square=False) if not isinstance(mask_size, dict) else mask_size ) resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_pad = do_pad if do_pad is not None else self.do_pad pad_size = pad_size if pad_size is not None else self.pad_size pad_size = get_size_dict(pad_size, default_to_square=True) mask_pad_size = mask_pad_size if mask_pad_size is not None else self.mask_pad_size mask_pad_size = get_size_dict(mask_pad_size, default_to_square=True) do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if segmentation_maps is not None: segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2) if not valid_images(segmentation_maps): raise ValueError( "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, size_divisibility=pad_size, # Here _preprocess needs do_pad and pad_size. do_resize=do_resize, size=size, resample=resample, ) images, original_sizes, reshaped_input_sizes = zip( *( self._preprocess_image( image=img, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, pad_size=pad_size, do_convert_rgb=do_convert_rgb, data_format=data_format, input_data_format=input_data_format, ) for img in images ) ) data = { "pixel_values": images, "original_sizes": original_sizes, "reshaped_input_sizes": reshaped_input_sizes, } if segmentation_maps is not None: segmentation_maps, original_mask_sizes = zip( *( self._preprocess_mask( segmentation_map=mask, do_resize=do_resize, mask_size=mask_size, do_pad=do_pad, mask_pad_size=mask_pad_size, input_data_format=input_data_format, ) for mask in segmentation_maps ) ) # masks should start out the same size as input images assert all( original_im_size == original_mask_size for original_im_size, original_mask_size in zip(original_sizes, original_mask_sizes) ), "Segmentation maps should be the same size as input images." data["labels"] = segmentation_maps return BatchFeature(data=data, tensor_type=return_tensors) def post_process_masks( self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None, return_tensors="pt", ): """ Remove padding and upscale masks to the original image size. 
        Args:
            masks (`Union[List[torch.Tensor], List[np.ndarray], List[tf.Tensor]]`):
                Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
            original_sizes (`Union[torch.Tensor, tf.Tensor, List[Tuple[int,int]]]`):
                The original sizes of each image before it was resized to the model's expected input shape, in
                (height, width) format.
            reshaped_input_sizes (`Union[torch.Tensor, tf.Tensor, List[Tuple[int,int]]]`):
                The size of each image as it is fed to the model, in (height, width) format. Used to remove padding.
            mask_threshold (`float`, *optional*, defaults to 0.0):
                The threshold to use for binarizing the masks.
            binarize (`bool`, *optional*, defaults to `True`):
                Whether to binarize the masks.
            pad_size (`int`, *optional*, defaults to `self.pad_size`):
                The target size the images were padded to before being passed to the model. If None, the target size is
                assumed to be the processor's `pad_size`.
            return_tensors (`str`, *optional*, defaults to `"pt"`):
                If `"pt"`, return PyTorch tensors. If `"tf"`, return TensorFlow tensors.
        Returns:
            (`Union[torch.Tensor, tf.Tensor]`): Batched masks in (batch_size, num_channels, height, width) format,
            where (height, width) is given by original_size.
        """
        if return_tensors == "pt":
            return self._post_process_masks_pt(
                masks=masks,
                original_sizes=original_sizes,
                reshaped_input_sizes=reshaped_input_sizes,
                mask_threshold=mask_threshold,
                binarize=binarize,
                pad_size=pad_size,
            )
        elif return_tensors == "tf":
            return self._post_process_masks_tf(
                masks=masks,
                original_sizes=original_sizes,
                reshaped_input_sizes=reshaped_input_sizes,
                mask_threshold=mask_threshold,
                binarize=binarize,
                pad_size=pad_size,
            )
        else:
            raise ValueError("return_tensors must be either 'pt' or 'tf'")

    def _post_process_masks_pt(
        self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None
    ):
        """
        Remove padding and upscale masks to the original image size.

        Args:
            masks (`Union[List[torch.Tensor], List[np.ndarray]]`):
                Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
            original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
                The original sizes of each image before it was resized to the model's expected input shape, in
                (height, width) format.
            reshaped_input_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
                The size of each image as it is fed to the model, in (height, width) format. Used to remove padding.
            mask_threshold (`float`, *optional*, defaults to 0.0):
                The threshold to use for binarizing the masks.
            binarize (`bool`, *optional*, defaults to `True`):
                Whether to binarize the masks.
            pad_size (`int`, *optional*, defaults to `self.pad_size`):
                The target size the images were padded to before being passed to the model. If None, the target size is
                assumed to be the processor's `pad_size`.
        Returns:
            (`torch.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width)
            is given by original_size.
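
        Example (a minimal sketch with dummy tensors; assumes the processor's default `pad_size` of 1024 and the
        illustrative sizes below):

        ```python
        >>> import torch
        >>> processor = SamImageProcessor()
        >>> low_res_masks = [torch.randn(1, 3, 256, 256)]
        >>> upscaled = processor._post_process_masks_pt(
        ...     low_res_masks, original_sizes=[(480, 640)], reshaped_input_sizes=[(768, 1024)]
        ... )
        >>> upscaled[0].shape
        torch.Size([1, 3, 480, 640])
        ```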
""" requires_backends(self, ["torch"]) pad_size = self.pad_size if pad_size is None else pad_size target_image_size = (pad_size["height"], pad_size["width"]) if isinstance(original_sizes, (torch.Tensor, np.ndarray)): original_sizes = original_sizes.tolist() if isinstance(reshaped_input_sizes, (torch.Tensor, np.ndarray)): reshaped_input_sizes = reshaped_input_sizes.tolist() output_masks = [] for i, original_size in enumerate(original_sizes): if isinstance(masks[i], np.ndarray): masks[i] = torch.from_numpy(masks[i]) elif not isinstance(masks[i], torch.Tensor): raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`") interpolated_mask = F.interpolate(masks[i], target_image_size, mode="bilinear", align_corners=False) interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]] interpolated_mask = F.interpolate(interpolated_mask, original_size, mode="bilinear", align_corners=False) if binarize: interpolated_mask = interpolated_mask > mask_threshold output_masks.append(interpolated_mask) return output_masks def _post_process_masks_tf( self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None ): """ Remove padding and upscale masks to the original image size. Args: masks (`tf.Tensor`): Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format. original_sizes (`tf.Tensor`): The original size of the images before resizing for input to the model, in (height, width) format. reshaped_input_sizes (`tf.Tensor`): The size of the image input to the model, in (height, width) format. Used to remove padding. mask_threshold (`float`, *optional*, defaults to 0.0): The threshold to use for binarizing the masks. binarize (`bool`, *optional*, defaults to `True`): Whether to binarize the masks. pad_size (`int`, *optional*, defaults to `self.pad_size`): The target size the images were padded to before being passed to the model. If None, the target size is assumed to be the processor's `pad_size`. Returns: (`tf.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width) is given by original_size. """ requires_backends(self, ["tf"]) pad_size = self.pad_size if pad_size is None else pad_size target_image_size = (pad_size["height"], pad_size["width"]) output_masks = [] for i, original_size in enumerate(original_sizes): # tf.image expects NHWC, we transpose the NCHW inputs for it mask = tf.transpose(masks[i], perm=[0, 2, 3, 1]) interpolated_mask = tf.image.resize(mask, target_image_size, method="bilinear") interpolated_mask = interpolated_mask[:, : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1], :] interpolated_mask = tf.image.resize(interpolated_mask, original_size, method="bilinear") if binarize: interpolated_mask = interpolated_mask > mask_threshold # And then we transpose them back at the end output_masks.append(tf.transpose(interpolated_mask, perm=[0, 3, 1, 2])) return output_masks def post_process_for_mask_generation( self, all_masks, all_scores, all_boxes, crops_nms_thresh, return_tensors="pt" ): """ Post processes mask that are generated by calling the Non Maximum Suppression algorithm on the predicted masks. 
Args: all_masks (`Union[List[torch.Tensor], List[tf.Tensor]]`): List of all predicted segmentation masks all_scores (`Union[List[torch.Tensor], List[tf.Tensor]]`): List of all predicted iou scores all_boxes (`Union[List[torch.Tensor], List[tf.Tensor]]`): List of all bounding boxes of the predicted masks crops_nms_thresh (`float`): Threshold for NMS (Non Maximum Suppression) algorithm. return_tensors (`str`, *optional*, defaults to `pt`): If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`. """ if return_tensors == "pt": return _postprocess_for_mg(all_masks, all_scores, all_boxes, crops_nms_thresh) elif return_tensors == "tf": return _postprocess_for_mg_tf(all_masks, all_scores, all_boxes, crops_nms_thresh) def generate_crop_boxes( self, image, target_size, crop_n_layers: int = 0, overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[List[int]] = 1, device: Optional["torch.device"] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, return_tensors: str = "pt", ): """ Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer. Args: image (`np.array`): Input original image target_size (`int`): Target size of the resized image crop_n_layers (`int`, *optional*, defaults to 0): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. overlap_ratio (`float`, *optional*, defaults to 512/1500): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. points_per_crop (`int`, *optional*, defaults to 32): Number of points to sample from each crop. crop_n_points_downscale_factor (`List[int]`, *optional*, defaults to 1): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. device (`torch.device`, *optional*, defaults to None): Device to use for the computation. If None, cpu will be used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. return_tensors (`str`, *optional*, defaults to `pt`): If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`. 
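
        Example (an illustrative sketch with a random image; the expected box count follows from `crop_n_layers=1`):

        ```python
        >>> import numpy as np
        >>> from transformers import SamImageProcessor

        >>> image_processor = SamImageProcessor()
        >>> image = np.random.randint(0, 256, size=(512, 512, 3), dtype=np.uint8)
        >>> crop_boxes, points_per_crop, cropped_images, input_labels = image_processor.generate_crop_boxes(
        ...     image, target_size=1024, crop_n_layers=1, return_tensors="pt"
        ... )
        >>> crop_boxes.shape[0]  # one box for the full image plus (2**1)**2 crops for layer 1
        5
        ```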
""" crop_boxes, points_per_crop, cropped_images, input_labels = _generate_crop_boxes( image, target_size, crop_n_layers, overlap_ratio, points_per_crop, crop_n_points_downscale_factor, input_data_format, ) if return_tensors == "pt": if device is None: device = torch.device("cpu") crop_boxes = torch.tensor(crop_boxes, device=device) points_per_crop = torch.tensor(points_per_crop, device=device) # cropped_images stays as np input_labels = torch.tensor(input_labels, device=device) elif return_tensors == "tf": if device is not None: raise ValueError("device is not a supported argument when return_tensors is tf!") crop_boxes = tf.convert_to_tensor(crop_boxes) points_per_crop = tf.convert_to_tensor(points_per_crop) # cropped_images stays as np input_labels = tf.convert_to_tensor(input_labels) else: raise ValueError("return_tensors must be either 'pt' or 'tf'.") return crop_boxes, points_per_crop, cropped_images, input_labels def filter_masks( self, masks, iou_scores, original_size, cropped_box_image, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, return_tensors="pt", ): """ Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to bounding boxes and pad the predicted masks if necessary. Args: masks (`Union[torch.Tensor, tf.Tensor]`): Input masks. iou_scores (`Union[torch.Tensor, tf.Tensor]`): List of IoU scores. original_size (`Tuple[int,int]`): Size of the orginal image. cropped_box_image (`np.array`): The cropped image. pred_iou_thresh (`float`, *optional*, defaults to 0.88): The threshold for the iou scores. stability_score_thresh (`float`, *optional*, defaults to 0.95): The threshold for the stability score. mask_threshold (`float`, *optional*, defaults to 0): The threshold for the predicted masks. stability_score_offset (`float`, *optional*, defaults to 1): The offset for the stability score used in the `_compute_stability_score` method. return_tensors (`str`, *optional*, defaults to `pt`): If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`. """ if return_tensors == "pt": return self._filter_masks_pt( masks=masks, iou_scores=iou_scores, original_size=original_size, cropped_box_image=cropped_box_image, pred_iou_thresh=pred_iou_thresh, stability_score_thresh=stability_score_thresh, mask_threshold=mask_threshold, stability_score_offset=stability_score_offset, ) elif return_tensors == "tf": return self._filter_masks_tf( masks=masks, iou_scores=iou_scores, original_size=original_size, cropped_box_image=cropped_box_image, pred_iou_thresh=pred_iou_thresh, stability_score_thresh=stability_score_thresh, mask_threshold=mask_threshold, stability_score_offset=stability_score_offset, ) def _filter_masks_pt( self, masks, iou_scores, original_size, cropped_box_image, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ): """ Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to bounding boxes and pad the predicted masks if necessary. Args: masks (`torch.Tensor`): Input masks. 
            iou_scores (`torch.Tensor`):
                List of IoU scores.
            original_size (`Tuple[int,int]`):
                Size of the original image.
            cropped_box_image (`np.array`):
                The cropped image.
            pred_iou_thresh (`float`, *optional*, defaults to 0.88):
                The threshold for the iou scores.
            stability_score_thresh (`float`, *optional*, defaults to 0.95):
                The threshold for the stability score.
            mask_threshold (`float`, *optional*, defaults to 0):
                The threshold for the predicted masks.
            stability_score_offset (`float`, *optional*, defaults to 1):
                The offset for the stability score used in the `_compute_stability_score` method.

        """
        requires_backends(self, ["torch"])
        original_height, original_width = original_size
        iou_scores = iou_scores.flatten(0, 1)
        masks = masks.flatten(0, 1)

        if masks.shape[0] != iou_scores.shape[0]:
            raise ValueError("masks and iou_scores must have the same batch size.")

        if masks.device != iou_scores.device:
            iou_scores = iou_scores.to(masks.device)

        batch_size = masks.shape[0]

        keep_mask = torch.ones(batch_size, dtype=torch.bool, device=masks.device)

        if pred_iou_thresh > 0.0:
            keep_mask = keep_mask & (iou_scores > pred_iou_thresh)

        # compute stability score
        if stability_score_thresh > 0.0:
            stability_scores = _compute_stability_score_pt(masks, mask_threshold, stability_score_offset)
            keep_mask = keep_mask & (stability_scores > stability_score_thresh)

        scores = iou_scores[keep_mask]
        masks = masks[keep_mask]

        # binarize masks
        masks = masks > mask_threshold
        converted_boxes = _batched_mask_to_box(masks)

        keep_mask = ~_is_box_near_crop_edge(
            converted_boxes, cropped_box_image, [0, 0, original_width, original_height]
        )

        scores = scores[keep_mask]
        masks = masks[keep_mask]
        converted_boxes = converted_boxes[keep_mask]

        masks = _pad_masks(masks, cropped_box_image, original_height, original_width)
        # conversion to rle is necessary to run non-maximum suppression
        masks = _mask_to_rle_pytorch(masks)

        return masks, scores, converted_boxes

    def _filter_masks_tf(
        self,
        masks,
        iou_scores,
        original_size,
        cropped_box_image,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        """
        Filters the predicted masks by selecting only the ones that meet several criteria. The first criterion is that
        the IoU score needs to be greater than `pred_iou_thresh`. The second criterion is that the stability score
        needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to bounding
        boxes and pads the predicted masks if necessary.

        Args:
            masks (`tf.Tensor`):
                Input masks.
            iou_scores (`tf.Tensor`):
                List of IoU scores.
            original_size (`Tuple[int,int]`):
                Size of the original image.
            cropped_box_image (`np.array`):
                The cropped image.
            pred_iou_thresh (`float`, *optional*, defaults to 0.88):
                The threshold for the iou scores.
            stability_score_thresh (`float`, *optional*, defaults to 0.95):
                The threshold for the stability score.
            mask_threshold (`float`, *optional*, defaults to 0):
                The threshold for the predicted masks.
            stability_score_offset (`float`, *optional*, defaults to 1):
                The offset for the stability score used in the `_compute_stability_score` method.
""" requires_backends(self, ["tf"]) original_height, original_width = original_size iou_scores = tf.reshape(iou_scores, [iou_scores.shape[0] * iou_scores.shape[1], iou_scores.shape[2:]]) masks = tf.reshape(masks, [masks.shape[0] * masks.shape[1], masks.shape[2:]]) if masks.shape[0] != iou_scores.shape[0]: raise ValueError("masks and iou_scores must have the same batch size.") batch_size = masks.shape[0] keep_mask = tf.ones(batch_size, dtype=tf.bool) if pred_iou_thresh > 0.0: keep_mask = keep_mask & (iou_scores > pred_iou_thresh) # compute stability score if stability_score_thresh > 0.0: stability_scores = _compute_stability_score_tf(masks, mask_threshold, stability_score_offset) keep_mask = keep_mask & (stability_scores > stability_score_thresh) scores = iou_scores[keep_mask] masks = masks[keep_mask] # binarize masks masks = masks > mask_threshold converted_boxes = _batched_mask_to_box_tf(masks) keep_mask = ~_is_box_near_crop_edge_tf( converted_boxes, cropped_box_image, [0, 0, original_width, original_height] ) scores = scores[keep_mask] masks = masks[keep_mask] converted_boxes = converted_boxes[keep_mask] masks = _pad_masks_tf(masks, cropped_box_image, original_height, original_width) # conversion to rle is necessary to run non-maximum suppresion masks = _mask_to_rle_tf(masks) return masks, scores, converted_boxes def _compute_stability_score_pt(masks: "torch.Tensor", mask_threshold: float, stability_score_offset: int): # One mask is always contained inside the other. # Save memory by preventing unnecesary cast to torch.int64 intersections = ( (masks > (mask_threshold + stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32) ) unions = (masks > (mask_threshold - stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32) stability_scores = intersections / unions return stability_scores def _compute_stability_score_tf(masks: "tf.Tensor", mask_threshold: float, stability_score_offset: int): # Torch does Py3-style division but TF does floor division with ints. We cast to float32 in TF to make sure # we get the right division results. intersections = tf.count_nonzero( masks > (mask_threshold + stability_score_offset), axis=[-1, -2], dtype=tf.float32 ) unions = tf.count_nonzero(masks > (mask_threshold - stability_score_offset), axis=[-1, -2], dtype=tf.float32) stability_scores = intersections / unions return stability_scores def _build_point_grid(n_per_side: int) -> np.ndarray: """Generates a 2D grid of points evenly spaced in [0,1]x[0,1].""" offset = 1 / (2 * n_per_side) points_one_side = np.linspace(offset, 1 - offset, n_per_side) points_x = np.tile(points_one_side[None, :], (n_per_side, 1)) points_y = np.tile(points_one_side[:, None], (1, n_per_side)) points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2) return points def _normalize_coordinates( target_size: int, coords: np.ndarray, original_size: Tuple[int, int], is_bounding_box=False ) -> np.ndarray: """ Expects a numpy array of length 2 in the final dimension. Requires the original image size in (height, width) format. 
""" old_height, old_width = original_size scale = target_size * 1.0 / max(old_height, old_width) new_height, new_width = old_height * scale, old_width * scale new_width = int(new_width + 0.5) new_height = int(new_height + 0.5) coords = deepcopy(coords).astype(float) if is_bounding_box: coords = coords.reshape(-1, 2, 2) coords[..., 0] = coords[..., 0] * (new_width / old_width) coords[..., 1] = coords[..., 1] * (new_height / old_height) if is_bounding_box: coords = coords.reshape(-1, 4) return coords def _generate_crop_boxes( image, target_size: int, # Is it tuple here? crop_n_layers: int = 0, overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[List[int]] = 1, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Tuple[List[List[int]], List[int]]: """ Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer. Args: image (Union[`numpy.ndarray`, `PIL.Image`, `torch.Tensor`]): Image to generate crops for. target_size (`int`): Size of the smallest crop. crop_n_layers (`int`, *optional*): If `crops_n_layers>0`, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. overlap_ratio (`int`, *optional*): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. points_per_crop (`int`, *optional*): Number of points to sample per crop. crop_n_points_downscale_factor (`int`, *optional*): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if isinstance(image, list): raise ValueError("Only one image is allowed for crop generation.") image = to_numpy_array(image) original_size = get_image_size(image, input_data_format) points_grid = [] for i in range(crop_n_layers + 1): n_points = int(points_per_crop / (crop_n_points_downscale_factor**i)) points_grid.append(_build_point_grid(n_points)) crop_boxes, layer_idxs = _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size) cropped_images, point_grid_per_crop = _generate_crop_images( crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format ) crop_boxes = np.array(crop_boxes) crop_boxes = crop_boxes.astype(np.float32) points_per_crop = np.array([point_grid_per_crop]) points_per_crop = np.transpose(points_per_crop, axes=(0, 2, 1, 3)) input_labels = np.ones_like(points_per_crop[:, :, :, 0], dtype=np.int64) return crop_boxes, points_per_crop, cropped_images, input_labels def _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size): """ Generates 2 ** (layers idx + 1) crops for each crop_n_layers. 
    Crops are returned in the XYXY format, which consists of the following required indices:
        - X1: X coordinate of the top left of the bounding box
        - Y1: Y coordinate of the top left of the bounding box
        - X2: X coordinate of the bottom right of the bounding box
        - Y2: Y coordinate of the bottom right of the bounding box
    """
    crop_boxes, layer_idxs = [], []
    im_height, im_width = original_size
    short_side = min(im_height, im_width)

    # Original image
    crop_boxes.append([0, 0, im_width, im_height])
    layer_idxs.append(0)
    for i_layer in range(crop_n_layers):
        n_crops_per_side = 2 ** (i_layer + 1)
        overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))

        crop_width = int(math.ceil((overlap * (n_crops_per_side - 1) + im_width) / n_crops_per_side))
        crop_height = int(math.ceil((overlap * (n_crops_per_side - 1) + im_height) / n_crops_per_side))

        crop_box_x0 = [int((crop_width - overlap) * i) for i in range(n_crops_per_side)]
        crop_box_y0 = [int((crop_height - overlap) * i) for i in range(n_crops_per_side)]

        for left, top in product(crop_box_x0, crop_box_y0):
            box = [left, top, min(left + crop_width, im_width), min(top + crop_height, im_height)]
            crop_boxes.append(box)
            layer_idxs.append(i_layer + 1)

    return crop_boxes, layer_idxs


def _generate_crop_images(
    crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format=None
):
    """
    Takes as input bounding boxes that are used to crop the image. Based on the crops, the corresponding points are
    also passed.
    """
    cropped_images = []
    total_points_per_crop = []
    for i, crop_box in enumerate(crop_boxes):
        left, top, right, bottom = crop_box

        channel_dim = infer_channel_dimension_format(image, input_data_format)
        if channel_dim == ChannelDimension.LAST:
            cropped_im = image[top:bottom, left:right, :]
        else:
            cropped_im = image[:, top:bottom, left:right]

        cropped_images.append(cropped_im)

        cropped_im_size = get_image_size(cropped_im, channel_dim)
        points_scale = np.array(cropped_im_size)[None, ::-1]

        points = points_grid[layer_idxs[i]] * points_scale
        normalized_points = _normalize_coordinates(target_size, points, original_size)
        total_points_per_crop.append(normalized_points)

    return cropped_images, total_points_per_crop


def _pad_masks(masks, crop_box: List[int], orig_height: int, orig_width: int):
    left, top, right, bottom = crop_box
    if left == 0 and top == 0 and right == orig_width and bottom == orig_height:
        return masks

    # Coordinate transform masks
    pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top)
    pad = (left, pad_x - left, top, pad_y - top)
    return torch.nn.functional.pad(masks, pad, value=0)


def _pad_masks_tf(masks, crop_box: List[int], orig_height: int, orig_width: int):
    left, top, right, bottom = crop_box
    if left == 0 and top == 0 and right == orig_width and bottom == orig_height:
        return masks

    # Coordinate transform masks. Unlike torch.nn.functional.pad, tf.pad expects explicit (before, after) pairs
    # per dimension; the masks are assumed to be in (num_masks, height, width) format at this point.
    pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top)
    paddings = [[0, 0], [top, pad_y - top], [left, pad_x - left]]
    return tf.pad(masks, paddings, constant_values=0)


def _is_box_near_crop_edge(boxes, crop_box, orig_box, atol=20.0):
    """Filter masks at the edge of a crop, but not at the edge of the original image."""
    crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
    orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)

    left, top, _, _ = crop_box
    offset = torch.tensor([[left, top, left, top]], device=boxes.device)
    # Check if boxes has a channel dimension
    if len(boxes.shape) == 3:
        offset = offset.unsqueeze(1)
    boxes = (boxes + offset).float()

    near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
    near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
    near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
    return torch.any(near_crop_edge, dim=1)


def _is_box_near_crop_edge_tf(boxes, crop_box, orig_box, atol=20.0):
    """Filter masks at the edge of a crop, but not at the edge of the original image."""
    crop_box_tf = tf.convert_to_tensor(crop_box, dtype=tf.float32)
    orig_box_tf = tf.convert_to_tensor(orig_box, dtype=tf.float32)

    left, top, _, _ = crop_box
    offset = tf.convert_to_tensor([[left, top, left, top]])
    # Check if boxes has a channel dimension
    if len(boxes.shape) == 3:
        offset = tf.expand_dims(offset, 1)
    boxes = tf.cast(boxes + offset, tf.float32)

    near_crop_edge = tnp.isclose(boxes, crop_box_tf[None, :], atol=atol, rtol=0)
    near_image_edge = tnp.isclose(boxes, orig_box_tf[None, :], atol=atol, rtol=0)
    near_crop_edge = tf.math.logical_and(near_crop_edge, ~near_image_edge)
    return tf.reduce_any(near_crop_edge, axis=1)


def _batched_mask_to_box(masks: "torch.Tensor"):
    """
    Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format, which
    corresponds to the following required indices:
        - LEFT: left hand side of the bounding box
        - TOP: top of the bounding box
        - RIGHT: right of the bounding box
        - BOTTOM: bottom of the bounding box

    Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape
    is channel_1 x channel_2 x ... x 4.

    Args:
        - masks (`torch.Tensor` of shape `(batch, nb_mask, height, width)`)
    """
    # torch.max below raises an error on empty inputs, just skip in this case
    if torch.numel(masks) == 0:
        return torch.zeros(*masks.shape[:-2], 4, device=masks.device)

    # Normalize shape to Cxheightxwidth
    shape = masks.shape
    height, width = shape[-2:]

    # Get top and bottom edges
    in_height, _ = torch.max(masks, dim=-1)
    in_height_coords = in_height * torch.arange(height, device=in_height.device)[None, :]
    bottom_edges, _ = torch.max(in_height_coords, dim=-1)
    in_height_coords = in_height_coords + height * (~in_height)
    top_edges, _ = torch.min(in_height_coords, dim=-1)

    # Get left and right edges
    in_width, _ = torch.max(masks, dim=-2)
    in_width_coords = in_width * torch.arange(width, device=in_width.device)[None, :]
    right_edges, _ = torch.max(in_width_coords, dim=-1)
    in_width_coords = in_width_coords + width * (~in_width)
    left_edges, _ = torch.min(in_width_coords, dim=-1)

    # If the mask is empty the right edge will be to the left of the left edge.
    # Replace these boxes with [0, 0, 0, 0]
    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
    out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
    out = out * (~empty_filter).unsqueeze(-1)

    # Return to original shape
    out = out.reshape(*shape[:-2], 4)
    return out


def _batched_mask_to_box_tf(masks: "tf.Tensor"):
    """
    Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format, which
    corresponds to the following required indices:
        - LEFT: left hand side of the bounding box
        - TOP: top of the bounding box
        - RIGHT: right of the bounding box
        - BOTTOM: bottom of the bounding box

    Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape
    is channel_1 x channel_2 x ... x 4.

    Args:
        - masks (`tf.Tensor` of shape `(batch, nb_mask, height, width)`)
    """

    if tf.size(masks) == 0:
        return tf.zeros([*masks.shape[:-2], 4])

    # Normalize shape to Cxheightxwidth
    shape = shape_list(masks)
    height, width = shape[-2:]

    # TensorFlow reductions and arithmetic do not implicitly promote bool tensors the way PyTorch does,
    # so we work on an integer view of the masks
    masks_int = tf.cast(masks, tf.int32)

    # Get top and bottom edges
    in_height = tf.reduce_max(masks_int, axis=-1)
    in_height_coords = in_height * tf.range(height)[None, :]
    bottom_edges = tf.reduce_max(in_height_coords, axis=-1)
    in_height_coords = in_height_coords + height * (1 - in_height)
    top_edges = tf.reduce_min(in_height_coords, axis=-1)

    # Get left and right edges
    in_width = tf.reduce_max(masks_int, axis=-2)
    in_width_coords = in_width * tf.range(width)[None, :]
    right_edges = tf.reduce_max(in_width_coords, axis=-1)
    in_width_coords = in_width_coords + width * (1 - in_width)
    left_edges = tf.reduce_min(in_width_coords, axis=-1)

    # If the mask is empty the right edge will be to the left of the left edge.
    # Replace these boxes with [0, 0, 0, 0]
    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
    out = tf.stack([left_edges, top_edges, right_edges, bottom_edges], axis=-1)
    out = out * tf.cast(tf.expand_dims(~empty_filter, -1), out.dtype)

    # Return to original shape
    out = tf.reshape(out, [*shape[:-2], 4])
    return out


def _mask_to_rle_pytorch(input_mask: "torch.Tensor"):
    """
    Encodes masks to run-length encoding (RLE), in the format expected by pycocotools.
    """
    # Put in fortran order and flatten height and width
    batch_size, height, width = input_mask.shape
    input_mask = input_mask.permute(0, 2, 1).flatten(1)

    # Compute change indices
    diff = input_mask[:, 1:] ^ input_mask[:, :-1]
    change_indices = diff.nonzero()

    # Encode run length
    out = []
    for i in range(batch_size):
        cur_idxs = change_indices[change_indices[:, 0] == i, 1] + 1
        if len(cur_idxs) == 0:
            # No changes => either all 0 or all 1
            # If the entire mask is 0, RLE is [height*width] or if the entire mask is 1, RLE is [0, height*width].
            if input_mask[i, 0] == 0:
                out.append({"size": [height, width], "counts": [height * width]})
            else:
                out.append({"size": [height, width], "counts": [0, height * width]})
            continue
        btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
        counts = [] if input_mask[i, 0] == 0 else [0]
        counts += [cur_idxs[0].item()] + btw_idxs.tolist() + [height * width - cur_idxs[-1]]
        out.append({"size": [height, width], "counts": counts})
    return out


def _mask_to_rle_tf(input_mask: "tf.Tensor"):
    """
    Encodes masks to run-length encoding (RLE), in the format expected by pycocotools.
    """
    # Put in fortran order and flatten height and width
    batch_size, height, width = input_mask.shape
    input_mask = flatten(tf.transpose(input_mask, perm=(0, 2, 1)), 1)

    # Compute change indices
    diff = input_mask[:, 1:] ^ input_mask[:, :-1]
    change_indices = tf.where(diff)

    # Encode run length
    out = []
    for i in range(batch_size):
        # TensorFlow tensors do not support NumPy-style boolean indexing, so select the change indices
        # belonging to this mask with a boolean mask instead
        cur_idxs = tf.boolean_mask(change_indices[:, 1], change_indices[:, 0] == i) + 1
        if len(cur_idxs) == 0:
            # No changes => either all 0 or all 1
            # If the entire mask is 0, RLE is [height*width] or if the entire mask is 1, RLE is [0, height*width].
            if input_mask[i, 0] == 0:
                out.append({"size": [height, width], "counts": [height * width]})
            else:
                out.append({"size": [height, width], "counts": [0, height * width]})
            continue
        btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
        counts = [] if input_mask[i, 0] == 0 else [0]
        # tf.Tensor has no `.item()`/`.tolist()`, so convert the run lengths to Python ints explicitly
        counts += [int(cur_idxs[0])] + [int(btw) for btw in btw_idxs] + [height * width - int(cur_idxs[-1])]
        out.append({"size": [height, width], "counts": counts})
    return out


def _rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
    """Compute a binary mask from an uncompressed RLE."""
    height, width = rle["size"]
    mask = np.empty(height * width, dtype=bool)
    idx = 0
    parity = False
    for count in rle["counts"]:
        mask[idx : idx + count] = parity
        idx += count
        parity = not parity
    mask = mask.reshape(width, height)
    return mask.transpose()  # Transpose back to (height, width)


def _postprocess_for_mg(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):
    """
    Perform NMS (Non Maximum Suppression) on the outputs.

    Args:
        rle_masks (`torch.Tensor`):
            binary masks in the RLE format
        iou_scores (`torch.Tensor` of shape (nb_masks, 1)):
            iou_scores predicted by the model
        mask_boxes (`torch.Tensor`):
            The bounding boxes corresponding to segmentation masks
        amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7):
            NMS threshold.
    """
    keep_by_nms = batched_nms(
        boxes=mask_boxes.float(),
        scores=iou_scores,
        idxs=torch.zeros(mask_boxes.shape[0]),
        iou_threshold=amg_crops_nms_thresh,
    )

    iou_scores = iou_scores[keep_by_nms]
    rle_masks = [rle_masks[i] for i in keep_by_nms]
    mask_boxes = mask_boxes[keep_by_nms]
    masks = [_rle_to_mask(rle) for rle in rle_masks]

    return masks, iou_scores, rle_masks, mask_boxes


def _postprocess_for_mg_tf(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):
    """
    Perform NMS (Non Maximum Suppression) on the outputs.

    Args:
        rle_masks (`tf.Tensor`):
            binary masks in the RLE format
        iou_scores (`tf.Tensor` of shape (nb_masks, 1)):
            iou_scores predicted by the model
        mask_boxes (`tf.Tensor`):
            The bounding boxes corresponding to segmentation masks
        amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7):
            NMS threshold.
    """
    # `tf.image.non_max_suppression` plays the role of torchvision's `batched_nms` here; all boxes share a
    # single class, so no per-class offsetting is needed
    keep_by_nms = tf.image.non_max_suppression(
        boxes=tf.cast(mask_boxes, tf.float32),
        scores=tf.reshape(tf.cast(iou_scores, tf.float32), [-1]),
        max_output_size=tf.shape(mask_boxes)[0],
        iou_threshold=amg_crops_nms_thresh,
    )

    # tf tensors do not support fancy indexing with an index tensor, so gather explicitly
    iou_scores = tf.gather(iou_scores, keep_by_nms)
    rle_masks = [rle_masks[int(i)] for i in keep_by_nms]
    mask_boxes = tf.gather(mask_boxes, keep_by_nms)
    masks = [_rle_to_mask(rle) for rle in rle_masks]

    return masks, iou_scores, rle_masks, mask_boxes


__all__ = ["SamImageProcessor"]
transformers/src/transformers/models/sam/image_processing_sam.py/0
{ "file_path": "transformers/src/transformers/models/sam/image_processing_sam.py", "repo_id": "transformers", "token_count": 29707 }
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to support Flax Speech-Encoder-Decoder architectures"""

import os
from typing import Optional, Tuple, Union

import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax import lax
from jax.random import PRNGKey

from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput
from ...modeling_flax_utils import FlaxPreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from ..auto.configuration_auto import AutoConfig
from ..auto.modeling_flax_auto import FlaxAutoModel, FlaxAutoModelForCausalLM
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "SpeechEncoderDecoderConfig"

SPEECH_ENCODER_DECODER_START_DOCSTRING = r"""
    This class can be used to initialize a speech-sequence-to-text-sequence model with any pretrained speech
    autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is
    loaded via the [`~AutoModel.from_pretrained`] function and the decoder is loaded via the
    [`~AutoModelForCausalLM.from_pretrained`] function. Cross-attention layers are automatically added to the decoder
    and should be fine-tuned on a downstream generative task, like summarization.

    The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
    tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
    Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.

    Additionally, in [Large-Scale Self- and Semi-Supervised Learning for Speech
    Translation](https://arxiv.org/abs/2104.06678) it is shown how leveraging large pretrained speech models for
    speech translation yields a significant performance improvement.

    After such a Speech-Encoder-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other
    model (see the examples for more information).

    This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a Flax Linen
    [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
    regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.

    Parameters:
        config ([`SpeechEncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration.
Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ SPEECH_ENCODER_DECODER_INPUTS_DOCSTRING = r""" Args: inputs (`jnp.ndarray` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, feature_dim)`, *optional*): Float values of input raw speech waveform or speech features. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `inputs`, either the [`Wav2Vec2Processor`] or [`Speech2TextProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`. decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.decoder.max_position_embeddings - 1]`. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.FlaxSeq2SeqLMOutput`] instead of a plain tuple. """ SPEECH_ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING = r""" Args: inputs (`jnp.ndarray` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, feature_dim)`, *optional*): Float values of input raw speech waveform or speech features. 
Values can be obtained by loading a *.flac* or *.wav* audio file into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install soundfile*). To prepare the array into *inputs*, either the [`Wav2Vec2Processor`] or [`Speech2TextProcessor`] should be used for padding and conversion into a tensor of type *torch.FloatTensor*. attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.FlaxBaseModelOutput`] instead of a plain tuple. """ SPEECH_ENCODER_DECODER_DECODE_INPUTS_DOCSTRING = r""" Args: decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`. encoder_outputs (`tuple(tuple(jnp.ndarray)`): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.decoder.max_position_embeddings - 1]`. past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.FlaxCausalLMOutputWithCrossAttentions`] instead of a plain tuple. """ class FlaxSpeechEncoderDecoderModule(nn.Module): config: SpeechEncoderDecoderConfig dtype: jnp.dtype = jnp.float32 def setup(self): encoder_config = self.config.encoder decoder_config = self.config.decoder # Copied from `modeling_hybrid_clip.py` with modifications. from ...models.auto.modeling_flax_auto import FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_MAPPING encoder_module = FLAX_MODEL_MAPPING[encoder_config.__class__].module_class decoder_module = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING[decoder_config.__class__].module_class self.encoder = encoder_module(encoder_config, dtype=self.dtype) self.decoder = decoder_module(decoder_config, dtype=self.dtype) # encoder outputs might need to be projected to different dimension for decoder if ( self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None ): self.enc_to_dec_proj = nn.Dense( self.decoder.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.decoder.config.initializer_range), dtype=self.dtype, ) else: self.enc_to_dec_proj = None def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.encoder.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.encoder.conv_kernel, self.config.encoder.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.encoder.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.encoder.adapter_stride) return input_lengths def _get_encoder_module(self): return self.encoder def _get_projection_module(self): return self.enc_to_dec_proj def _get_decoder_module(self): return self.decoder def __call__( self, inputs, attention_mask, decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_outputs=None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, freeze_feature_encoder: bool = False, ): if encoder_outputs is None: encoder_outputs = self.encoder( inputs, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, freeze_feature_encoder=freeze_feature_encoder, ) encoder_hidden_states = encoder_outputs[0] # optionally project encoder_hidden_states if self.enc_to_dec_proj is not None: encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) # compute correct encoder attention mask if attention_mask is not None: encoder_attention_mask = self.encoder._get_feature_vector_attention_mask( encoder_hidden_states.shape[1], attention_mask ) else: encoder_attention_mask = None # flax script modeling_flax_wav2vec2.py decoder_outputs = self.decoder( 
input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqLMOutput( logits=decoder_outputs.logits, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_hidden_states, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings(SPEECH_ENCODER_DECODER_START_DOCSTRING) class FlaxSpeechEncoderDecoderModel(FlaxPreTrainedModel): r""" [`FlaxSpeechEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with the module (flax.nn.Module) of one of the base model classes of the library as encoder module and another one as decoder module when created with the :meth*~transformers.FlaxAutoModel.from_pretrained* class method for the encoder and :meth*~transformers.FlaxAutoModelForCausalLM.from_pretrained* class method for the decoder. """ config_class = SpeechEncoderDecoderConfig base_model_prefix: str = "speech_encoder_decoder" module_class = FlaxSpeechEncoderDecoderModule def __init__( self, config: SpeechEncoderDecoderConfig, input_shape: Optional[Tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): if not _do_init: raise ValueError( "`FlaxSpeechEncoderDecoderModel` cannot be created without initializing, `_do_init` must be `True`." ) if config.decoder.cross_attention_hidden_size is not None: # Raise ValueError or option to project enc to dec hidden_size (eg EncAdapterLayer) if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size: raise ValueError( "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal" f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for" f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for" " `config.encoder.hidden_size`." 
                )

        # make sure input & output embeddings are not tied
        config.tie_word_embeddings = False
        module = self.module_class(config=config, dtype=dtype, **kwargs)

        if input_shape is None:
            # speech encoders almost always downsample the sequence length dimension
            encoder_input_length = 1024
            decoder_input_length = module._get_feat_extract_output_lengths(encoder_input_length)
            input_shape = ((1, encoder_input_length), (1, decoder_input_length))

        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        encoder_input_shape, decoder_input_shape = input_shape

        # init input DeviceArrays
        inputs = jnp.zeros(encoder_input_shape, dtype="f4")
        attention_mask = jnp.ones_like(inputs, dtype="i4")
        decoder_input_ids = jnp.zeros(decoder_input_shape, dtype="i4")
        decoder_attention_mask = jnp.ones_like(decoder_input_ids)

        batch_size, sequence_length = inputs.shape

        decoder_batch_size, decoder_sequence_length = decoder_input_ids.shape
        if decoder_batch_size != batch_size:
            raise ValueError(
                f"The inputs of encoder and decoder should have the same batch size, but got {batch_size} for encoder"
                f" and {decoder_batch_size} for decoder."
            )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_sequence_length)[None, :], (decoder_batch_size, decoder_sequence_length)
        )

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(
            rngs,
            inputs,
            attention_mask,
            decoder_input_ids,
            decoder_attention_mask,
            decoder_position_ids,
        )["params"]

        if params is not None:
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params

    def init_cache(self, batch_size, max_length, encoder_outputs):
        r"""
        Args:
            batch_size (`int`):
                batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
            max_length (`int`):
                maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
                cache.
            encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
                `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
                `attentions`). `last_hidden_state` (of shape `(batch_size, sequence_length, hidden_size)`, *optional*)
                is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
                cross-attention of the decoder.
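
        Example (an illustrative sketch; assumes `model` is an instantiated `FlaxSpeechEncoderDecoderModel` and
        `inputs` is a batch of raw audio as in the `encode` example below):

        ```python
        >>> encoder_outputs = model.encode(inputs)
        >>> past_key_values = model.init_cache(batch_size=1, max_length=32, encoder_outputs=encoder_outputs)
        ```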
""" # init input variables to retrieve cache decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4") decoder_attention_mask = jnp.ones_like(decoder_input_ids) decoder_position_ids = jnp.broadcast_to( jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape ) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, **kwargs, ) init_variables = self.module.init( jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward, # we only need to call the decoder to init the cache ) return unfreeze(init_variables["cache"]) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): return self.module._get_feat_extract_output_lengths(input_lengths, add_adapter=add_adapter) @add_start_docstrings(SPEECH_ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=_CONFIG_FOR_DOC) def encode( self, inputs: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, freeze_feature_encoder: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import FlaxSpeechEncoderDecoderModel >>> # initialize a wav2vec2-2-bart from pretrained wav2vec2 and bart models. Note that the cross-attention layers will be randomly initialized >>> model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( ... "facebook/wav2vec2-large-lv60", "facebook/bart-large" ... 
) >>> inputs = jnp.ones((2, 5000), dtype=jnp.float32) >>> encoder_outputs = model.encode(inputs) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(inputs, dtype="i4") # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _encoder_forward(module, inputs, attention_mask, **kwargs): encode_module = module._get_encoder_module() return encode_module(inputs, attention_mask, **kwargs) outputs = self.module.apply( {"params": params or self.params}, inputs=jnp.array(inputs, dtype="f4"), attention_mask=jnp.array(attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, freeze_feature_encoder=freeze_feature_encoder, rngs=rngs, method=_encoder_forward, ) if return_dict: outputs = FlaxBaseModelOutput( last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) return outputs @add_start_docstrings(SPEECH_ENCODER_DECODER_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, past_key_values: dict = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import FlaxSpeechEncoderDecoderModel >>> import jax.numpy as jnp >>> # initialize a wav2vec2-2-bart from pretrained wav2vec2 and bart models. Note that the cross-attention layers will be randomly initialized >>> model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained( ... "facebook/wav2vec2-large-lv60", "facebook/bart-large" ... 
) >>> inputs = jnp.ones((2, 5000), dtype=jnp.float32) >>> encoder_outputs = model.encode(inputs) >>> decoder_start_token_id = model.config.decoder.bos_token_id >>> decoder_input_ids = jnp.ones((inputs.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng params = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxBartAttention module if past_key_values: params["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward( module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states, **kwargs ): projection_module = module._get_projection_module() decoder_module = module._get_decoder_module() # optionally project encoder_hidden_states if projection_module is not None: encoder_hidden_states = projection_module(encoder_hidden_states) return decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states=encoder_hidden_states, **kwargs, ) outputs = self.module.apply( params, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past = outputs outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past = outputs outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs @add_start_docstrings_to_model_forward(SPEECH_ENCODER_DECODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def __call__( self, inputs: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, decoder_input_ids: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: 
Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, freeze_feature_encoder: bool = False, params: dict = None, dropout_rng: PRNGKey = None, ): r""" Returns: Examples: ```python >>> from transformers import FlaxSpeechEncoderDecoderModel, AutoTokenizer >>> # load a fine-tuned wav2vec2-2-bart model >>> model = FlaxSpeechEncoderDecoderModel.from_pretrained("patrickvonplaten/wav2vec2-2-bart-large") >>> # load output tokenizer >>> tokenizer_output = AutoTokenizer.from_pretrained("facebook/bart-large") >>> inputs = jnp.ones((2, 5000), dtype=jnp.float32) >>> # use bart's special bos, pad and eos tokens >>> model.config.decoder_start_token_id = model.decoder.config.bos_token_id >>> model.config.pad_token_id = model.decoder.config.pad_token_id >>> model.config.eos_token_id = model.decoder.config.eos_token_id >>> outputs = model.generate(inputs) # Assert something? More interesting input? dtype correct? ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # prepare encoder inputs if attention_mask is None: attention_mask = jnp.ones_like(inputs, dtype="i4") # prepare decoder inputs if decoder_input_ids is None: raise ValueError( "`decoder_input_ids` cannot be `None`. For sequence to sequence training, `decoder_position_ids` must" " be specified as an input argument." ) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) if decoder_position_ids is None: batch_size, sequence_length = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) # Handle any PRNG if needed rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} return self.module.apply( {"params": params or self.params}, inputs=jnp.array(inputs, dtype="f4"), attention_mask=jnp.array(attention_mask, dtype="i4"), decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, freeze_feature_encoder=freeze_feature_encoder, rngs=rngs, ) def prepare_inputs_for_generation( self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array] = None, decoder_attention_mask: Optional[jax.Array] = None, encoder_outputs=None, **kwargs, ): # initializing the cache batch_size, seq_length = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) # Note that usually one would have to put 0's in the attention_mask for x > input.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyways. 
# Thus we can create a single static attention_mask here, which is more efficient for compilation
        extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
        if decoder_attention_mask is not None:
            decoder_position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
            extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
        else:
            decoder_position_ids = jnp.broadcast_to(
                jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)
            )

        return {
            "past_key_values": past_key_values,
            "encoder_outputs": encoder_outputs,
            "encoder_attention_mask": attention_mask,
            "decoder_attention_mask": extended_attention_mask,
            "decoder_position_ids": decoder_position_ids,
        }

    def update_inputs_for_generation(self, model_outputs, model_kwargs):
        model_kwargs["past_key_values"] = model_outputs.past_key_values
        model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
        return model_kwargs

    @classmethod
    def from_encoder_decoder_pretrained(
        cls,
        encoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
        decoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
        *model_args,
        **kwargs,
    ) -> FlaxPreTrainedModel:
        r"""
        Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
        checkpoints.

        Params:
            encoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*):
                Information necessary to initiate the encoder. Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.

            decoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*, defaults to `None`):
                Information necessary to initiate the decoder. Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.

            model_args (remaining positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.

            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it has been loaded) and initiate the model
                (e.g., `output_attentions=True`).

                - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
                - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
                - To update the parent model configuration, do not use a prefix for each configuration parameter.

                Behaves differently depending on whether a `config` is provided or automatically loaded.

        Example:

        ```python
        >>> from transformers import FlaxSpeechEncoderDecoderModel

        >>> # initialize a wav2vec2-2-bart from pretrained wav2vec2 and bart models. Note that the cross-attention layers will be randomly initialized
        >>> model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
        ...     "facebook/wav2vec2-large-lv60", "facebook/bart-large"
        ... )
        >>> # saving model after fine-tuning
        >>> model.save_pretrained("./wav2vec2-2-bart-large")
        >>> # load fine-tuned model
        >>> model = FlaxSpeechEncoderDecoderModel.from_pretrained("./wav2vec2-2-bart-large")
        ```"""

        kwargs_encoder = {
            argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
        }

        kwargs_decoder = {
            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
        }

        # remove encoder, decoder kwargs from kwargs
        for key in kwargs_encoder.keys():
            del kwargs["encoder_" + key]
        for key in kwargs_decoder.keys():
            del kwargs["decoder_" + key]

        # Load and initialize the encoder and decoder
        # The distinction between encoder and decoder at the model level is made
        # by the value of the flag `is_decoder` that we need to set correctly.
        encoder = kwargs_encoder.pop("model", None)
        if encoder is None:
            if encoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `encoder_model` is not defined as an argument, an `encoder_pretrained_model_name_or_path` has "
                    "to be defined."
                )

            if "config" not in kwargs_encoder:
                encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
                    encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
                )
                if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
                    logger.info(
                        f"Initializing {encoder_pretrained_model_name_or_path} as an encoder model "
                        "from a decoder model. Cross-attention and causal mask are disabled."
                    )
                    encoder_config.is_decoder = False
                    encoder_config.add_cross_attention = False

                kwargs_encoder["config"] = encoder_config

            encoder = FlaxAutoModel.from_pretrained(
                encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder
            )

        decoder = kwargs_decoder.pop("model", None)
        if decoder is None:
            if decoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
                    "to be defined."
                )

            if "config" not in kwargs_decoder:
                decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
                    decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
                )
                if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
                    logger.info(
                        f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
                        f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
                        f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
                    )
                    decoder_config.is_decoder = True
                    decoder_config.add_cross_attention = True

                kwargs_decoder["config"] = decoder_config

            if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
                logger.warning(
                    f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
                    f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
                    "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
                    "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
                    "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
                )

            decoder = FlaxAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)

        # instantiate config with corresponding kwargs
        dtype = kwargs.pop("dtype", jnp.float32)
        config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)

        # make sure input & output word embeddings are not tied
        config.tie_word_embeddings = False

        # init model
        model = cls(config, dtype=dtype)
        model.params["encoder"] = encoder.params
        model.params["decoder"] = decoder.params

        return model


__all__ = ["FlaxSpeechEncoderDecoderModel"]
transformers/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py/0
{ "file_path": "transformers/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py", "repo_id": "transformers", "token_count": 18916 }
# coding=utf-8 # Copyright 2023 The Fairseq Authors, Microsoft Research, and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Number Normalizer class for SpeechT5.""" import re class EnglishNumberNormalizer: def __init__(self): self.ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] self.teens = [ "", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen", ] self.tens = ["", "ten", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"] self.thousands = [ "", "thousand", "million", "billion", "trillion", "quadrillion", "quintillion", "sextillion", "septillion", "octillion", "nonillion", "decillion", ] # Define a dictionary to map currency symbols to their names # Top most traded currencies according to # https://en.wikipedia.org/wiki/Template:Most_traded_currencies self.currency_symbols = { "$": " dollars", "€": " euros", "£": " pounds", "¢": " cents", "¥": " japanese yen", "﷼": " saudi riyal", "₹": " indian rupees", "₽": " russian rubles", "฿": " thai baht", "₺": " turkish liras", "₴": " ukrainian hryvnia", "₣": " swiss francs", "₡": " costa rican colon", "₱": " philippine peso", "₪": " israeli shekels", "₮": " mongolian tögrög", "₩": " south korean won", "₦": " nigerian naira", "₫": " vietnamese Đồng", } def spell_number(self, num): if num == 0: return "zero" parts = [] for i in range(0, len(self.thousands)): if num % 1000 != 0: part = "" hundreds = num % 1000 // 100 tens_units = num % 100 if hundreds > 0: part += self.ones[hundreds] + " hundred" if tens_units > 0: part += " and " if tens_units > 10 and tens_units < 20: part += self.teens[tens_units - 10] else: tens_digit = self.tens[tens_units // 10] ones_digit = self.ones[tens_units % 10] if tens_digit: part += tens_digit if ones_digit: if tens_digit: part += " " part += ones_digit parts.append(part) num //= 1000 return " ".join(reversed(parts)) def convert(self, number): """ Converts an individual number passed in string form to spelt-out form """ if "." 
in number: integer_part, decimal_part = number.split(".") else: integer_part, decimal_part = number, "00" # Extract currency symbol if present currency_symbol = "" for symbol, name in self.currency_symbols.items(): if integer_part.startswith(symbol): currency_symbol = name integer_part = integer_part[len(symbol) :] break if integer_part.startswith("-"): if integer_part[1:].startswith(symbol): currency_symbol = name integer_part = "-" + integer_part[len(symbol) + 1 :] break # Extract 'minus' prefix for negative numbers minus_prefix = "" if integer_part.startswith("-"): minus_prefix = "minus " integer_part = integer_part[1:] elif integer_part.startswith("minus"): minus_prefix = "minus " integer_part = integer_part[len("minus") :] percent_suffix = "" if "%" in integer_part or "%" in decimal_part: percent_suffix = " percent" integer_part = integer_part.replace("%", "") decimal_part = decimal_part.replace("%", "") integer_part = integer_part.zfill(3 * ((len(integer_part) - 1) // 3 + 1)) parts = [] for i in range(0, len(integer_part), 3): chunk = int(integer_part[i : i + 3]) if chunk > 0: part = self.spell_number(chunk) unit = self.thousands[len(integer_part[i:]) // 3 - 1] if unit: part += " " + unit parts.append(part) spelled_integer = " ".join(parts) # Format the spelt-out number based on conditions, such as: # If it has decimal parts, currency symbol, minus prefix, etc if decimal_part == "00": return ( f"{minus_prefix}{spelled_integer}{percent_suffix}{currency_symbol}" if minus_prefix or currency_symbol else f"{spelled_integer}{percent_suffix}" ) else: spelled_decimal = " ".join([self.spell_number(int(digit)) for digit in decimal_part]) return ( f"{minus_prefix}{spelled_integer} point {spelled_decimal}{percent_suffix}{currency_symbol}" if minus_prefix or currency_symbol else f"{minus_prefix}{spelled_integer} point {spelled_decimal}{percent_suffix}" ) def __call__(self, text): """ Convert numbers / number-like quantities in a string to their spelt-out counterparts """ # Form part of the pattern for all currency symbols pattern = r"(?<!\w)(-?\$?\€?\£?\¢?\¥?\₹?\₽?\฿?\₺?\₴?\₣?\₡?\₱?\₪?\₮?\₩?\₦?\₫?\﷼?\d+(?:\.\d{1,2})?%?)(?!\w)" # Find and replace commas in numbers (15,000 -> 15000, etc) text = re.sub(r"(\d+,\d+)", lambda match: match.group(1).replace(",", ""), text) # Use regex to find and replace numbers in the text converted_text = re.sub(pattern, lambda match: self.convert(match.group(1)), text) converted_text = re.sub(" +", " ", converted_text) return converted_text
transformers/src/transformers/models/speecht5/number_normalizer.py/0
{ "file_path": "transformers/src/transformers/models/speecht5/number_normalizer.py", "repo_id": "transformers", "token_count": 3534 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert SwiftFormer checkpoints from the original implementation.""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) device = torch.device("cpu") # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im def get_expected_output(swiftformer_name): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01]) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01]) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02]) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02]) def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def create_rename_keys(state_dict): rename_keys = [] for k in state_dict.keys(): k_new = k if ".pwconv" in k: k_new = k_new.replace(".pwconv", ".point_wise_conv") if ".dwconv" in k: k_new = k_new.replace(".dwconv", ".depth_wise_conv") if ".Proj." in k: k_new = k_new.replace(".Proj.", ".proj.") if "patch_embed" in k_new: k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding") if "network" in k_new: ls = k_new.split(".") if ls[2].isdigit(): k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:]) else: k_new = k_new.replace("network", "swiftformer.encoder.network") rename_keys.append((k, k_new)) return rename_keys @torch.no_grad() def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt): """ Copy/paste/tweak model's weights to our SwiftFormer structure. 
""" # define default SwiftFormer configuration config = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size config.num_labels = 1000 repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": config.depths = [3, 3, 6, 4] config.embed_dims = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": config.depths = [3, 3, 9, 6] config.embed_dims = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": config.depths = [4, 3, 10, 5] config.embed_dims = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": config.depths = [4, 4, 12, 6] config.embed_dims = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("https"): checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True) else: checkpoint = torch.load(original_ckpt, map_location="cpu") state_dict = checkpoint rename_keys = create_rename_keys(state_dict) for rename_key_src, rename_key_dest in rename_keys: rename_key(state_dict, rename_key_src, rename_key_dest) # load HuggingFace model hf_model = SwiftFormerForImageClassification(config).eval() hf_model.load_state_dict(state_dict) # prepare test inputs image = prepare_img() processor = ViTImageProcessor.from_pretrained("preprocessor_config") inputs = processor(images=image, return_tensors="pt") # compare outputs from both models timm_logits = get_expected_output(swiftformer_name) hf_logits = hf_model(inputs["pixel_values"]).logits assert hf_logits.shape == torch.Size([1, 1000]) assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3) Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}") hf_model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") args = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
transformers/src/transformers/models/swiftformer/convert_swiftformer_original_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/swiftformer/convert_swiftformer_original_to_hf.py", "repo_id": "transformers", "token_count": 2556 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Swinv2 checkpoints from the timm library.""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification def get_swinv2_config(swinv2_name): config = Swinv2Config() name_split = swinv2_name.split("_") model_size = name_split[1] if "to" in name_split[3]: img_size = int(name_split[3][-3:]) else: img_size = int(name_split[3]) if "to" in name_split[2]: window_size = int(name_split[2][-2:]) else: window_size = int(name_split[2][6:]) if model_size == "tiny": embed_dim = 96 depths = (2, 2, 6, 2) num_heads = (3, 6, 12, 24) elif model_size == "small": embed_dim = 96 depths = (2, 2, 18, 2) num_heads = (3, 6, 12, 24) elif model_size == "base": embed_dim = 128 depths = (2, 2, 18, 2) num_heads = (4, 8, 16, 32) else: embed_dim = 192 depths = (2, 2, 18, 2) num_heads = (6, 12, 24, 48) if "to" in swinv2_name: config.pretrained_window_sizes = (12, 12, 12, 6) if ("22k" in swinv2_name) and ("to" not in swinv2_name): num_classes = 21841 repo_id = "huggingface/label-files" filename = "imagenet-22k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} else: num_classes = 1000 repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} config.image_size = img_size config.num_labels = num_classes config.embed_dim = embed_dim config.depths = depths config.num_heads = num_heads config.window_size = window_size return config def rename_key(name): if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection") if "patch_embed.norm" in name: name = name.replace("patch_embed.norm", "embeddings.norm") if "layers" in name: name = "encoder." 
+ name if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name: name = name.replace("attn", "attention.self") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if "q_bias" in name: name = name.replace("q_bias", "query.bias") if "k_bias" in name: name = name.replace("k_bias", "key.bias") if "v_bias" in name: name = name.replace("v_bias", "value.bias") if "cpb_mlp" in name: name = name.replace("cpb_mlp", "continuous_position_bias_mlp") if name == "norm.weight": name = "layernorm.weight" if name == "norm.bias": name = "layernorm.bias" if "head" in name: name = name.replace("head", "classifier") else: name = "swinv2." + name return name def convert_state_dict(orig_state_dict, model): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if "mask" in key: continue elif "qkv" in key: key_split = key.split(".") layer_num = int(key_split[1]) block_num = int(key_split[3]) dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: orig_state_dict[ f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight" ] = val[:dim, :] orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = ( val[dim : dim * 2, :] ) orig_state_dict[ f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight" ] = val[-dim:, :] else: orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = ( val[:dim] ) orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[ dim : dim * 2 ] orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = ( val[-dim:] ) else: orig_state_dict[rename_key(key)] = val return orig_state_dict def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path): timm_model = timm.create_model(swinv2_name, pretrained=True) timm_model.eval() config = get_swinv2_config(swinv2_name) model = Swinv2ForImageClassification(config) model.eval() new_state_dict = convert_state_dict(timm_model.state_dict(), model) model.load_state_dict(new_state_dict) url = "http://images.cocodataset.org/val2017/000000039769.jpg" image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-"))) image = Image.open(requests.get(url, stream=True).raw) inputs = image_processor(images=image, return_tensors="pt") timm_outs = timm_model(inputs["pixel_values"]) hf_outs = model(**inputs).logits assert torch.allclose(timm_outs, hf_outs, atol=1e-3) print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) model.push_to_hub( repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name), organization="nandwalritik", commit_message="Add model", ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swinv2_name", default="swinv2_tiny_patch4_window8_256", type=str, help="Name of the Swinv2 timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the 
output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
transformers/src/transformers/models/swinv2/convert_swinv2_timm_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/swinv2/convert_swinv2_timm_to_pytorch.py", "repo_id": "transformers", "token_count": 3497 }
# coding=utf-8 # Copyright 2018 T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for model T5.""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...convert_slow_tokenizer import import_protobuf from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} # TODO(PVP) - this should be removed in Transformers v5 SPIECE_UNDERLINE = "▁" class T5Tokenizer(PreTrainedTokenizer): """ Construct a T5 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. extra_ids (`int`, *optional*, defaults to 100): Add a number of extra ids added to the vocabulary for use as sentinels. These tokens are accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. These tokens can be retrieved by calling get_sentinel_tokens method and token ids can be by calling get_sentinel_token_ids method additional_special_tokens (`List[str]`, *optional*): Additional special tokens used by the tokenizer. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. 
legacy (`bool`, *optional*): Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622 and #25224 which includes fixes to properly handle tokens that appear after special tokens. A simple example: - `legacy=True`: ```python >>> from transformers import T5Tokenizer >>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=True) >>> tokenizer.encode("Hello <extra_id_0>.") [8774, 32099, 3, 5, 1] ``` - `legacy=False`: ```python >>> from transformers import T5Tokenizer >>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=False) >>> tokenizer.encode("Hello <extra_id_0>.") # the extra space `[3]` is no longer here [8774, 32099, 5, 1] ``` Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, legacy=None, add_prefix_space=True, **kwargs, ) -> None: pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.vocab_file = vocab_file self._extra_ids = extra_ids self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) if additional_special_tokens is not None: extra_tokens = [x for x in additional_special_tokens if "<extra_id_" in str(x)] if len(extra_tokens) < 1: additional_special_tokens += [f"<extra_id_{i}>" for i in range(extra_ids)] elif extra_ids > 0 and extra_ids != len(extra_tokens): raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids" " tokens" ) else: extra_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)] additional_special_tokens = extra_tokens # for legacy purpose, we keep this. Will be removed and tests updated. (when `added_tokens_decoder` is not passed as kwargs) self._added_tokens_decoder = {} for i in range(len(extra_tokens)): self._added_tokens_decoder[len(self.sp_model) - 1 + extra_ids - i] = AddedToken( f"<extra_id_{i}>", single_word=False, lstrip=True, rstrip=True, special=True, normalized=False ) if legacy is None: logger.warning_once( f"You are using the default legacy behaviour of the {self.__class__}. This is" " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you." " If you want to use the new behaviour, set `legacy=False`. 
This should only be set if you understand what it" " means, and thoroughly read the reason why this was added as explained in" " https://github.com/huggingface/transformers/pull/24565" ) legacy = True self.legacy = legacy self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False)) self.vocab_file = vocab_file self._extra_ids = extra_ids self.add_prefix_space = add_prefix_space super().__init__( eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy=legacy, add_prefix_space=add_prefix_space, **kwargs, ) # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor def get_spm_processor(self, from_slow=False): tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs) if self.legacy or from_slow: # no dependency on protobuf tokenizer.Load(self.vocab_file) return tokenizer with open(self.vocab_file, "rb") as f: sp_model = f.read() model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)") model = model_pb2.ModelProto.FromString(sp_model) normalizer_spec = model_pb2.NormalizerSpec() normalizer_spec.add_dummy_prefix = False model.normalizer_spec.MergeFrom(normalizer_spec) sp_model = model.SerializeToString() tokenizer.LoadFromSerializedProto(sp_model) return tokenizer @staticmethod def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length): if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes: deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( "This tokenizer was incorrectly instantiated with a model max length of" f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" " behavior is kept to avoid breaking backwards compatibility when padding/encoding with" " `truncation is True`.\n- Be aware that you SHOULD NOT rely on" f" {pretrained_model_name_or_path} automatically truncating your input to" f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please" " instantiate this tokenizer with `model_max_length` set to your preferred value.", FutureWarning, ) return max_model_length @property def vocab_size(self): return self.sp_model.get_piece_size() def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. 
Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) # normal case: some special tokens if token_ids_1 is None: return ([0] * len(token_ids_0)) + [1] return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def get_sentinel_tokens(self): return list( set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens)) ) def get_sentinel_token_ids(self): return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()] def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]: """Do not add eos again if user already added it.""" if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated" " eos tokens being added." ) return token_ids else: return token_ids + [self.eos_token_id] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ eos = [self.eos_token_id] if token_ids_1 is None: return len(token_ids_0 + eos) * [0] return len(token_ids_0 + eos + token_ids_1 + eos) * [0] def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format: - single sequence: `X </s>` - pair of sequences: `A </s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ token_ids_0 = self._add_eos_if_not_present(token_ids_0) if token_ids_1 is None: return token_ids_0 else: token_ids_1 = self._add_eos_if_not_present(token_ids_1) return token_ids_0 + token_ids_1 def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def tokenize(self, text: "TextInput", **kwargs) -> List[str]: """ Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the first token is special. 
""" if self.legacy or len(text) == 0: return super().tokenize(text, **kwargs) text = text.replace(SPIECE_UNDERLINE, " ") if self.add_prefix_space: text = SPIECE_UNDERLINE + text tokens = super().tokenize(text, **kwargs) if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens: tokens = tokens[1:] return tokens @property def unk_token_length(self): return len(self.sp_model.encode(str(self.unk_token))) def _tokenize(self, text, **kwargs): """ Returns a tokenized string. We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`. `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`. """ if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")): return self.sp_model.encode(text, out_type=str) # 1. Encode string + prefix ex: "<unk> Hey" tokens = self.sp_model.encode(self.unk_token + text, out_type=str) # 2. Remove self.unk_token from ['<','unk','>', '▁Hey'] return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" # since we manually add the prefix space, we have to remove it when decoding if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space: tokens[0] = tokens[0][1:] current_sub_tokens = [] out_string = "" prev_is_special = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) __all__ = ["T5Tokenizer"]
transformers/src/transformers/models/t5/tokenization_t5.py/0
{ "file_path": "transformers/src/transformers/models/t5/tokenization_t5.py", "repo_id": "transformers", "token_count": 8696 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """Tokenization classes for UDOP model.""" import os from shutil import copyfile from typing import Dict, List, Optional, Tuple, Union from ...tokenization_utils_base import ( BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy, ) from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, TensorType, add_end_docstrings, is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_udop import UdopTokenizer else: UdopTokenizer = None VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} logger = logging.get_logger(__name__) UDOP_ENCODE_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (`int`, *optional*, defaults to 0): If set to a number along with `max_length`, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_token_type_ids (`bool`, *optional*): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_overflowing_tokens (`bool`, *optional*, defaults to `False`): Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead of returning overflowing tokens. return_special_tokens_mask (`bool`, *optional*, defaults to `False`): Whether or not to return special tokens mask information. return_offsets_mapping (`bool`, *optional*, defaults to `False`): Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using Python's tokenizer, this method will raise `NotImplementedError`. return_length (`bool`, *optional*, defaults to `False`): Whether or not to return the lengths of the encoded inputs. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. **kwargs: passed to the `self.tokenize()` method Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **bbox** -- List of bounding boxes to be fed to a model. - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or if *"token_type_ids"* is in `self.model_input_names`). [What are token type IDs?](../glossary#token-type-ids) - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified). - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and `return_overflowing_tokens=True`). 
- **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and `return_overflowing_tokens=True`). - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`). - **length** -- The length of the inputs (when `return_length=True`). """ class UdopTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" UDOP tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from [`LayoutXLMTokenizer`] and [`T5Tokenizer`]. Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`, *optional*): Path to the vocabulary file. tokenizer_file (`str`, *optional*): Path to the tokenizer file. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`): The bounding box to use for the special [SEP] token. pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [PAD] token. pad_token_label (`int`, *optional*, defaults to -100): The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = UdopTokenizer def __init__( self, vocab_file=None, tokenizer_file=None, eos_token="</s>", sep_token="</s>", unk_token="<unk>", pad_token="<pad>", sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, additional_special_tokens=None, **kwargs, ): super().__init__( vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, additional_special_tokens=additional_special_tokens, **kwargs, ) self.vocab_file = vocab_file # additional properties self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair_target: Optional[ Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] ] = None, **kwargs, ) -> BatchEncoding: if text is None and text_target is None: raise ValueError("You need to specify either `text` or `text_target`.") if text is not None: # The context manager will send the inputs as normal texts and not text_target, but we shouldn't change the # input mode in this case. 
            if not self._in_target_context_manager:
                self._switch_to_input_mode()
            encodings = self.call_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, **kwargs)
        if text_target is not None:
            self._switch_to_target_mode()
            target_encodings = self._call_one(text=text_target, text_pair=text_pair_target, **kwargs)
        # Leave back tokenizer in input mode
        self._switch_to_input_mode()

        if text_target is None:
            return encodings
        elif text is None:
            return target_encodings
        else:
            encodings["labels"] = target_encodings["input_ids"]
            return encodings

    @add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING)
    def call_boxes(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences with word-level normalized bounding boxes and optional labels.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
                (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
                words).
            text_pair (`List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
                (pretokenized string).
            boxes (`List[List[int]]`, `List[List[List[int]]]`):
                Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
            word_labels (`List[int]`, `List[List[int]]`, *optional*):
                Word-level integer labels (for token classification tasks such as FUNSD, CORD).
        """

        # Input type checking for clearer error
        def _is_valid_text_input(t):
            if isinstance(t, str):
                # Strings are fine
                return True
            elif isinstance(t, (list, tuple)):
                # List are fine as long as they are...
                if len(t) == 0:
                    # ... empty
                    return True
                elif isinstance(t[0], str):
                    # ... list of strings
                    return True
                elif isinstance(t[0], (list, tuple)):
                    # ... list with an empty list or with a list of strings
                    return len(t[0]) == 0 or isinstance(t[0][0], str)
                else:
                    return False
            else:
                return False

        if text_pair is not None:
            # in case text + text_pair are provided, text = questions, text_pair = words
            if not _is_valid_text_input(text):
                raise ValueError("text input must be of type `str` (single example) or `List[str]` (batch of examples).")
            if not isinstance(text_pair, (list, tuple)):
                raise ValueError(
                    "words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )
        else:
            # in case only text is provided => must be words
            if not isinstance(text, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )

        if text_pair is not None:
            is_batched = isinstance(text, (list, tuple))
        else:
            is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))

        words = text if text_pair is None else text_pair
        if boxes is None:
            raise ValueError("You must provide corresponding bounding boxes")
        if is_batched:
            if len(words) != len(boxes):
                raise ValueError("You must provide words and boxes for an equal amount of examples")
            for words_example, boxes_example in zip(words, boxes):
                if len(words_example) != len(boxes_example):
                    raise ValueError("You must provide as many words as there are bounding boxes")
        else:
            if len(words) != len(boxes):
                raise ValueError("You must provide as many words as there are bounding boxes")

        if is_batched:
            if text_pair is not None and len(text) != len(text_pair):
                raise ValueError(
                    f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
                    f" {len(text_pair)}."
                )
            batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
            is_pair = bool(text_pair is not None)
            return self.batch_encode_plus_boxes(
                batch_text_or_text_pairs=batch_text_or_text_pairs,
                is_pair=is_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            return self.encode_plus_boxes(
                text=text,
                text_pair=text_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )

    # Copied from transformers.models.layoutxlm.tokenization_layoutxlm_fast.LayoutXLMTokenizerFast.tokenize
    def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
        batched_input = [(text, pair)] if pair else [text]
        self._tokenizer.encode_special_tokens = kwargs.pop(
            "split_special_tokens", self._tokenizer.encode_special_tokens
        )
        encodings = self._tokenizer.encode_batch(
            batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
        )

        return encodings[0].tokens

    def batch_encode_plus_boxes(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
        ],
        is_pair: bool = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[List[List[int]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int
= 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a list of sequences or a list of pairs of sequences. <Tip warning={true}> This method is deprecated, `__call__` should be used instead. </Tip> Args: batch_text_or_text_pairs (`List[str]`, `List[Tuple[str, str]]`, `List[List[str]]`, `List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also `List[List[int]]`, `List[Tuple[List[int], List[int]]]`): Batch of sequences or pair of sequences to be encoded. This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in `encode_plus`). """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._batch_encode_plus_boxes( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _batch_encode_plus_boxes( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[bool] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if not isinstance(batch_text_or_text_pairs, list): raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})") # Set the truncation and padding strategy and restore the initial configuration self.set_truncation_and_padding( padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, ) if is_pair: batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in 
batch_text_or_text_pairs] encodings = self._tokenizer.encode_batch( batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=True, # we set this to True as LayoutLMv2 always expects pretokenized inputs ) # Convert encoding to dict # `Tokens` has type: Tuple[ # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]], # List[EncodingFast] # ] # with nested dimensions corresponding to batch, overflows, sequence length tokens_and_encodings = [ self._convert_encoding( encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=True if word_labels is not None else return_offsets_mapping, # we use offsets to create the labels return_length=return_length, verbose=verbose, ) for encoding in encodings ] # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length) # (we say ~ because the number of overflow varies with the example in the batch) # # To match each overflowing sample with the original sample in the batch # we add an overflow_to_sample_mapping array (see below) sanitized_tokens = {} for key in tokens_and_encodings[0][0].keys(): stack = [e for item, _ in tokens_and_encodings for e in item[key]] sanitized_tokens[key] = stack sanitized_encodings = [e for _, item in tokens_and_encodings for e in item] # If returning overflowing tokens, we need to return a mapping # from the batch idx to the original sample if return_overflowing_tokens: overflow_to_sample_mapping = [] for i, (toks, _) in enumerate(tokens_and_encodings): overflow_to_sample_mapping += [i] * len(toks["input_ids"]) sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping for input_ids in sanitized_tokens["input_ids"]: self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose) # create the token boxes token_boxes = [] for batch_index in range(len(sanitized_tokens["input_ids"])): if return_overflowing_tokens: original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] else: original_index = batch_index token_boxes_example = [] for id, sequence_id, word_id in zip( sanitized_tokens["input_ids"][batch_index], sanitized_encodings[batch_index].sequence_ids, sanitized_encodings[batch_index].word_ids, ): if word_id is not None: if is_pair and sequence_id == 0: token_boxes_example.append(self.pad_token_box) else: token_boxes_example.append(boxes[original_index][word_id]) else: if id == self.sep_token_id: token_boxes_example.append(self.sep_token_box) elif id == self.pad_token_id: token_boxes_example.append(self.pad_token_box) else: raise ValueError("Id not recognized") token_boxes.append(token_boxes_example) sanitized_tokens["bbox"] = token_boxes # optionally, create the labels if word_labels is not None: labels = [] for batch_index in range(len(sanitized_tokens["input_ids"])): if return_overflowing_tokens: original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] else: original_index = batch_index labels_example = [] previous_token_empty = False for id, offset, word_id in zip( sanitized_tokens["input_ids"][batch_index], sanitized_tokens["offset_mapping"][batch_index], sanitized_encodings[batch_index].word_ids, ): if word_id is not None: if self.only_label_first_subword: if offset[0] == 0 and not previous_token_empty: # 
Use the real label id for the first token of the word, and padding ids for the remaining tokens labels_example.append(word_labels[original_index][word_id]) else: labels_example.append(self.pad_token_label) else: labels_example.append(word_labels[original_index][word_id]) if self.decode(id) == "": previous_token_empty = True else: previous_token_empty = False else: labels_example.append(self.pad_token_label) labels.append(labels_example) sanitized_tokens["labels"] = labels # finally, remove offsets if the user didn't want them if not return_offsets_mapping: del sanitized_tokens["offset_mapping"] return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors) def _encode_plus_boxes( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[bool] = None, return_tensors: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # make it a batched input # 2 options: # 1) only text, in case text must be a list of str # 2) text + text_pair, in which case text = str and text_pair a list of str batched_input = [(text, text_pair)] if text_pair else [text] batched_boxes = [boxes] batched_word_labels = [word_labels] if word_labels is not None else None batched_output = self._batch_encode_plus_boxes( batched_input, is_pair=bool(text_pair is not None), boxes=batched_boxes, word_labels=batched_word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) # Return tensor is None, then we can remove the leading batch axis # Overflowing tokens are returned as a batch of output so we keep them in this case if return_tensors is None and not return_overflowing_tokens: batched_output = BatchEncoding( { key: value[0] if len(value) > 0 and isinstance(value[0], list) else value for key, value in batched_output.items() }, batched_output.encodings, ) self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose) return batched_output def encode_boxes( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, return_tensors: Optional[Union[str, 
TensorType]] = None,
        **kwargs,
    ) -> List[int]:
        """
        Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing
        `self.convert_tokens_to_ids(self.tokenize(text))`.

        Args:
            text (`str`, `List[str]` or `List[int]`):
                The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
                `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
                method).
            text_pair (`str`, `List[str]` or `List[int]`, *optional*):
                Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string
                using the `tokenize` method) or a list of integers (tokenized string ids using the
                `convert_tokens_to_ids` method).
        """
        encoded_inputs = self.encode_plus_boxes(
            text,
            text_pair=text_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            return_tensors=return_tensors,
            **kwargs,
        )

        return encoded_inputs["input_ids"]

    def encode_plus_boxes(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[List[int]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Tokenize and prepare for the model a sequence or a pair of sequences.

        <Tip warning={true}>

        This method is deprecated, `__call__` should be used instead.

        </Tip>

        Args:
            text (`str`, `List[str]` or (for non-fast tokenizers) `List[int]`):
                The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
                `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
                method).
            text_pair (`str`, `List[str]` or `List[int]`, *optional*):
                Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string
                using the `tokenize` method) or a list of integers (tokenized string ids using the
                `convert_tokens_to_ids` method).
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus_boxes( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) # Copied from transformers.models.layoutxlm.tokenization_layoutxlm_fast.LayoutXLMTokenizerFast._pad def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[bool] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. 
if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) padding_side = padding_side if padding_side is not None else self.padding_side if padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "bbox" in encoded_inputs: encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "bbox" in encoded_inputs: encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"] if "labels" in encoded_inputs: encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(padding_side)) return encoded_inputs def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return token_ids_0 + [self.sep_token_id] sep = [self.sep_token_id] return token_ids_0 + sep + token_ids_1 + sep def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. 
""" sep = [self.sep_token_id] if token_ids_1 is None: return len(token_ids_0 + sep) * [0] return len(token_ids_0 + sep + token_ids_1 + sep) * [0] # Copied from transformers.models.layoutxlm.tokenization_layoutxlm_fast.LayoutXLMTokenizerFast.save_vocabulary def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory.") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) __all__ = ["UdopTokenizerFast"]
transformers/src/transformers/models/udop/tokenization_udop_fast.py/0
{ "file_path": "transformers/src/transformers/models/udop/tokenization_udop_fast.py", "repo_id": "transformers", "token_count": 22524 }
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

import torch

from transformers import UnivNetConfig, UnivNetModel, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.univnet")


def get_kernel_predictor_key_mapping(config: UnivNetConfig, old_prefix: str = "", new_prefix: str = ""):
    mapping = {}
    # Initial conv layer
    mapping[f"{old_prefix}.input_conv.0.weight_g"] = f"{new_prefix}.input_conv.weight_g"
    mapping[f"{old_prefix}.input_conv.0.weight_v"] = f"{new_prefix}.input_conv.weight_v"
    mapping[f"{old_prefix}.input_conv.0.bias"] = f"{new_prefix}.input_conv.bias"

    # Kernel predictor resnet blocks
    for i in range(config.kernel_predictor_num_blocks):
        mapping[f"{old_prefix}.residual_convs.{i}.1.weight_g"] = f"{new_prefix}.resblocks.{i}.conv1.weight_g"
        mapping[f"{old_prefix}.residual_convs.{i}.1.weight_v"] = f"{new_prefix}.resblocks.{i}.conv1.weight_v"
        mapping[f"{old_prefix}.residual_convs.{i}.1.bias"] = f"{new_prefix}.resblocks.{i}.conv1.bias"

        mapping[f"{old_prefix}.residual_convs.{i}.3.weight_g"] = f"{new_prefix}.resblocks.{i}.conv2.weight_g"
        mapping[f"{old_prefix}.residual_convs.{i}.3.weight_v"] = f"{new_prefix}.resblocks.{i}.conv2.weight_v"
        mapping[f"{old_prefix}.residual_convs.{i}.3.bias"] = f"{new_prefix}.resblocks.{i}.conv2.bias"

    # Kernel output conv
    mapping[f"{old_prefix}.kernel_conv.weight_g"] = f"{new_prefix}.kernel_conv.weight_g"
    mapping[f"{old_prefix}.kernel_conv.weight_v"] = f"{new_prefix}.kernel_conv.weight_v"
    mapping[f"{old_prefix}.kernel_conv.bias"] = f"{new_prefix}.kernel_conv.bias"

    # Bias output conv
    mapping[f"{old_prefix}.bias_conv.weight_g"] = f"{new_prefix}.bias_conv.weight_g"
    mapping[f"{old_prefix}.bias_conv.weight_v"] = f"{new_prefix}.bias_conv.weight_v"
    mapping[f"{old_prefix}.bias_conv.bias"] = f"{new_prefix}.bias_conv.bias"

    return mapping


def get_key_mapping(config: UnivNetConfig):
    mapping = {}

    # NOTE: initial conv layer keys are the same

    # LVC Residual blocks
    for i in range(len(config.resblock_stride_sizes)):
        # LVCBlock initial convt layer
        mapping[f"res_stack.{i}.convt_pre.1.weight_g"] = f"resblocks.{i}.convt_pre.weight_g"
        mapping[f"res_stack.{i}.convt_pre.1.weight_v"] = f"resblocks.{i}.convt_pre.weight_v"
        mapping[f"res_stack.{i}.convt_pre.1.bias"] = f"resblocks.{i}.convt_pre.bias"

        # Kernel predictor
        kernel_predictor_mapping = get_kernel_predictor_key_mapping(
            config, old_prefix=f"res_stack.{i}.kernel_predictor", new_prefix=f"resblocks.{i}.kernel_predictor"
        )
        mapping.update(kernel_predictor_mapping)

        # LVC Residual blocks
        for j in range(len(config.resblock_dilation_sizes[i])):
            mapping[f"res_stack.{i}.conv_blocks.{j}.1.weight_g"] = f"resblocks.{i}.resblocks.{j}.conv.weight_g"
            mapping[f"res_stack.{i}.conv_blocks.{j}.1.weight_v"] = f"resblocks.{i}.resblocks.{j}.conv.weight_v"
            mapping[f"res_stack.{i}.conv_blocks.{j}.1.bias"] = f"resblocks.{i}.resblocks.{j}.conv.bias"

    # Output conv layer
    mapping["conv_post.1.weight_g"] = "conv_post.weight_g"
    mapping["conv_post.1.weight_v"] = 
"conv_post.weight_v" mapping["conv_post.1.bias"] = "conv_post.bias" return mapping def rename_state_dict(state_dict, keys_to_modify, keys_to_remove): model_state_dict = {} for key, value in state_dict.items(): if key in keys_to_remove: continue if key in keys_to_modify: new_key = keys_to_modify[key] model_state_dict[new_key] = value else: model_state_dict[key] = value return model_state_dict def convert_univnet_checkpoint( checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, safe_serialization=False, ): model_state_dict_base = torch.load(checkpoint_path, map_location="cpu") # Get the generator's state dict state_dict = model_state_dict_base["model_g"] if config_path is not None: config = UnivNetConfig.from_pretrained(config_path) else: config = UnivNetConfig() keys_to_modify = get_key_mapping(config) keys_to_remove = set() hf_state_dict = rename_state_dict(state_dict, keys_to_modify, keys_to_remove) model = UnivNetModel(config) # Apply weight norm since the original checkpoint has weight norm applied model.apply_weight_norm() model.load_state_dict(hf_state_dict) # Remove weight norm in preparation for inference model.remove_weight_norm() model.save_pretrained(pytorch_dump_folder_path, safe_serialization=safe_serialization) if repo_id: print("Pushing to the hub...") model.push_to_hub(repo_id) def main(): parser = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--safe_serialization", action="store_true", help="Whether to save the model using `safetensors`." ) args = parser.parse_args() convert_univnet_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, args.safe_serialization, ) if __name__ == "__main__": main()
transformers/src/transformers/models/univnet/convert_univnet.py/0
{ "file_path": "transformers/src/transformers/models/univnet/convert_univnet.py", "repo_id": "transformers", "token_count": 2613 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert VideoMAE checkpoints from the original repository: https://github.com/MCG-NJU/VideoMAE""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def get_videomae_config(model_name): config = VideoMAEConfig() set_architecture_configs(model_name, config) if "finetuned" not in model_name: config.use_mean_pooling = False if "finetuned" in model_name: repo_id = "huggingface/label-files" if "kinetics" in model_name: config.num_labels = 400 filename = "kinetics400-id2label.json" elif "ssv2" in model_name: config.num_labels = 174 filename = "something-something-v2-id2label.json" else: raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.") id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config def set_architecture_configs(model_name, config): if "small" in model_name: config.hidden_size = 384 config.intermediate_size = 1536 config.num_hidden_layers = 12 config.num_attention_heads = 16 config.decoder_num_hidden_layers = 12 config.decoder_num_attention_heads = 3 config.decoder_hidden_size = 192 config.decoder_intermediate_size = 768 elif "large" in model_name: config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 config.decoder_num_hidden_layers = 12 config.decoder_num_attention_heads = 8 config.decoder_hidden_size = 512 config.decoder_intermediate_size = 2048 elif "huge" in model_name: config.hidden_size = 1280 config.intermediate_size = 5120 config.num_hidden_layers = 32 config.num_attention_heads = 16 config.decoder_num_hidden_layers = 12 config.decoder_num_attention_heads = 8 config.decoder_hidden_size = 640 config.decoder_intermediate_size = 2560 elif "base" not in model_name: raise ValueError('Model name should include either "small", "base", "large", or "huge"') def rename_key(name): if "encoder." 
in name: name = name.replace("encoder.", "") if "cls_token" in name: name = name.replace("cls_token", "videomae.embeddings.cls_token") if "decoder_pos_embed" in name: name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed") if "pos_embed" in name and "decoder" not in name: name = name.replace("pos_embed", "videomae.embeddings.position_embeddings") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection") if "patch_embed.norm" in name: name = name.replace("patch_embed.norm", "videomae.embeddings.norm") if "decoder.blocks" in name: name = name.replace("decoder.blocks", "decoder.decoder_layers") if "blocks" in name: name = name.replace("blocks", "videomae.encoder.layer") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name and "bias" not in name: name = name.replace("attn", "attention.self") if "attn" in name: name = name.replace("attn", "attention.attention") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if "decoder_embed" in name: name = name.replace("decoder_embed", "decoder.decoder_embed") if "decoder_norm" in name: name = name.replace("decoder_norm", "decoder.decoder_norm") if "decoder_pred" in name: name = name.replace("decoder_pred", "decoder.decoder_pred") if "norm.weight" in name and "decoder" not in name and "fc" not in name: name = name.replace("norm.weight", "videomae.layernorm.weight") if "norm.bias" in name and "decoder" not in name and "fc" not in name: name = name.replace("norm.bias", "videomae.layernorm.bias") if "head" in name and "decoder" not in name: name = name.replace("head", "classifier") return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if key.startswith("encoder."): key = key.replace("encoder.", "") if "qkv" in key: key_split = key.split(".") if key.startswith("decoder.blocks"): dim = config.decoder_hidden_size layer_num = int(key_split[2]) prefix = "decoder.decoder_layers." if "weight" in key: orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :] else: dim = config.hidden_size layer_num = int(key_split[1]) prefix = "videomae.encoder.layer." 
if "weight" in key: orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :] else: orig_state_dict[rename_key(key)] = val return orig_state_dict # We will verify our results on a video of eating spaghetti # Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) video = np.load(file) return list(video) def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub): config = get_videomae_config(model_name) if "finetuned" in model_name: model = VideoMAEForVideoClassification(config) else: model = VideoMAEForPreTraining(config) # download original checkpoint, hosted on Google Drive output = "pytorch_model.bin" gdown.cached_download(checkpoint_url, output, quiet=False) files = torch.load(output, map_location="cpu") if "model" in files: state_dict = files["model"] else: state_dict = files["module"] new_state_dict = convert_state_dict(state_dict, config) model.load_state_dict(new_state_dict) model.eval() # verify model on basic input image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) video = prepare_video() inputs = image_processor(video, return_tensors="pt") if "finetuned" not in model_name: local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt") inputs["bool_masked_pos"] = torch.load(local_path) outputs = model(**inputs) logits = outputs.logits model_names = [ "videomae-small-finetuned-kinetics", "videomae-small-finetuned-ssv2", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) "videomae-base-short", "videomae-base-short-finetuned-kinetics", "videomae-base", "videomae-base-finetuned-kinetics", "videomae-large", "videomae-large-finetuned-kinetics", "videomae-huge-finetuned-kinetics", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) "videomae-base-short-ssv2", "videomae-base-short-finetuned-ssv2", "videomae-base-ssv2", "videomae-base-finetuned-ssv2", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307]) elif model_name == "videomae-small-finetuned-ssv2": expected_shape = torch.Size([1, 174]) expected_slice = torch.tensor([0.2671, -0.4689, -0.8235]) elif model_name == "videomae-base": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]]) elif model_name == "videomae-base-short": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]]) # we verified the loss both for normalized and unnormalized targets for this one expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469]) elif model_name == "videomae-large": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]]) elif model_name == 
"videomae-large-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([0.0771, 0.0011, -0.3625]) elif model_name == "videomae-huge-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([0.2433, 0.1632, -0.4894]) elif model_name == "videomae-base-short-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([0.6588, 0.0990, -0.2493]) elif model_name == "videomae-base-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]) elif model_name == "videomae-base-short-ssv2": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]]) elif model_name == "videomae-base-short-finetuned-ssv2": expected_shape = torch.Size([1, 174]) expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266]) elif model_name == "videomae-base-ssv2": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]]) elif model_name == "videomae-base-finetuned-ssv2": expected_shape = torch.Size([1, 174]) expected_slice = torch.tensor([0.1961, -0.8337, -0.6389]) else: raise ValueError(f"Model name not supported. Should be one of {model_names}") # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4) else: print("Logits:", logits[0, :3, :3]) assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4) print("Logits ok!") # verify loss, if applicable if model_name == "videomae-base-short": loss = outputs.loss assert torch.allclose(loss, expected_loss, atol=1e-4) print("Loss ok!") if pytorch_dump_folder_path is not None: print(f"Saving model and image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) model.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print("Pushing to the hub...") model.push_to_hub(model_name, organization="nielsr") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4", type=str, help=( "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct" " download link." ), ) parser.add_argument( "--pytorch_dump_folder_path", default="/Users/nielsrogge/Documents/VideoMAE/Test", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
transformers/src/transformers/models/videomae/convert_videomae_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/videomae/convert_videomae_to_pytorch.py", "repo_id": "transformers", "token_count": 6110 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType logger = logging.get_logger(__name__) class VisionEncoderDecoderConfig(PretrainedConfig): r""" [`VisionEncoderDecoderConfig`] is the configuration class to store the configuration of a [`VisionEncoderDecoderModel`]. It is used to instantiate a Vision-Encoder-Text-Decoder model according to the specified arguments, defining the encoder and decoder configs. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: kwargs (*optional*): Dictionary of keyword arguments. Notably: - **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the encoder config. - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines the decoder config. 
    Examples:

    ```python
    >>> from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig, VisionEncoderDecoderModel

    >>> # Initializing a ViT & BERT style configuration
    >>> config_encoder = ViTConfig()
    >>> config_decoder = BertConfig()

    >>> config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)

    >>> # Initializing a ViTBert model (with random weights) from ViT & google-bert/bert-base-uncased style configurations
    >>> model = VisionEncoderDecoderModel(config=config)

    >>> # Accessing the model configuration
    >>> config_encoder = model.config.encoder
    >>> config_decoder = model.config.decoder
    >>> # set decoder config to causal lm
    >>> config_decoder.is_decoder = True
    >>> config_decoder.add_cross_attention = True

    >>> # Saving the model, including its configuration
    >>> model.save_pretrained("my-model")

    >>> # loading model and config from pretrained folder
    >>> encoder_decoder_config = VisionEncoderDecoderConfig.from_pretrained("my-model")
    >>> model = VisionEncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
    ```"""

    model_type = "vision-encoder-decoder"
    sub_configs = {"encoder": AutoConfig, "decoder": AutoConfig}
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"both `encoder` and `decoder` sub-configurations are required, but only {kwargs} was passed"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        r"""
        Instantiate a [`VisionEncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model
        configuration and decoder model configuration.
Returns: [`VisionEncoderDecoderConfig`]: An instance of a configuration object """ logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config") decoder_config.is_decoder = True decoder_config.add_cross_attention = True return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs) class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation(self) -> float: return 1e-4 @property def outputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}}) class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: common_inputs = OrderedDict() common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"} common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"} return common_inputs def generate_dummy_inputs( self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]: import torch common_inputs = OrderedDict() dummy_input = super().generate_dummy_inputs( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) batch, encoder_sequence = dummy_input["input_ids"].shape encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size) common_inputs["input_ids"] = dummy_input.pop("input_ids") common_inputs["attention_mask"] = dummy_input.pop("attention_mask") common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape) return common_inputs class VisionEncoderDecoderOnnxConfig(OnnxConfig): @property def inputs(self) -> None: pass def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig: r""" Returns ONNX encoder config for `VisionEncoderDecoder` model. Args: encoder_config (`PretrainedConfig`): The encoder model's configuration to use when exporting to ONNX. Returns: [`VisionEncoderDecoderEncoderOnnxConfig`]: An instance of the ONNX configuration object """ return VisionEncoderDecoderEncoderOnnxConfig(encoder_config) def get_decoder_config( self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default" ) -> OnnxConfig: r""" Returns ONNX decoder config for `VisionEncoderDecoder` model. Args: encoder_config (`PretrainedConfig`): The encoder model's configuration to use when exporting to ONNX. decoder_config (`PretrainedConfig`): The decoder model's configuration to use when exporting to ONNX feature (`str`, *optional*): The type of feature to export the model with. Returns: [`VisionEncoderDecoderDecoderOnnxConfig`]: An instance of the ONNX configuration object. """ decoder_config.encoder_hidden_size = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature) __all__ = ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
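

# ---------------------------------------------------------------------------
# Sketch of the ONNX helpers above: the encoder and decoder are exported as
# two separate graphs, so each one gets its own OnnxConfig. A ViT encoder
# paired with a BERT decoder is assumed purely for illustration.

from transformers import BertConfig, ViTConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())

onnx_config = VisionEncoderDecoderOnnxConfig(config)
encoder_onnx = onnx_config.get_encoder_config(config.encoder)
decoder_onnx = onnx_config.get_decoder_config(config.encoder, config.decoder)

print(encoder_onnx.inputs)  # pixel_values with dynamic batch/channel/size axes
print(decoder_onnx.inputs)  # input_ids, attention_mask, encoder_hidden_states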
transformers/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py/0
{ "file_path": "transformers/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py", "repo_id": "transformers", "token_count": 3173 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ViT checkpoints trained with the DINO method.""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config, base_model=False): rename_keys = [] for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight")) rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias")) rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight")) rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias")) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight")) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias")) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config, base_model=False): for i in range(config.num_hidden_layers): if base_model: prefix = "" else: prefix = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[ : config.hidden_size, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[ -config.hidden_size :, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] def remove_classification_head_(state_dict): ignore_keys = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(k, None) def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True): """ Copy/paste/tweak model's weights to our ViT structure. """ # define default ViT configuration config = ViTConfig() # patch_size if model_name[-1] == "8": config.patch_size = 8 # set labels if required if not base_model: config.num_labels = 1000 repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: config.hidden_size = 384 config.intermediate_size = 1536 config.num_hidden_layers = 12 config.num_attention_heads = 6 # load original model from torch hub original_model = torch.hub.load("facebookresearch/dino:main", model_name) original_model.eval() # load state_dict of original model, remove and rename some keys state_dict = original_model.state_dict() if base_model: remove_classification_head_(state_dict) rename_keys = create_rename_keys(config, base_model=base_model) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, base_model) # load HuggingFace model if base_model: model = ViTModel(config, add_pooling_layer=False).eval() else: model = ViTForImageClassification(config).eval() model.load_state_dict(state_dict) # Check outputs on an image, prepared by ViTImageProcessor image_processor = ViTImageProcessor() encoding = image_processor(images=prepare_img(), return_tensors="pt") pixel_values = encoding["pixel_values"] outputs = model(pixel_values) if base_model: final_hidden_state_cls_token = original_model(pixel_values) assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1) else: logits = original_model(pixel_values) assert logits.shape == outputs.logits.shape assert torch.allclose(logits, outputs.logits, atol=1e-3) Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {model_name} to 
{pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="dino_vitb16", type=str, help="Name of the model trained with DINO you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--base_model", action="store_true", help="Whether to only convert the base model (no projection head weights).", ) parser.set_defaults(base_model=True) args = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
transformers/src/transformers/models/vit/convert_dino_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/vit/convert_dino_to_pytorch.py", "repo_id": "transformers", "token_count": 3685 }
# coding=utf-8 # Copyright 2024 University of Sydney and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch VitPose backbone model. This code is the same as the original Vision Transformer (ViT) with 2 modifications: - use of padding=2 in the patch embedding layer - addition of a mixture-of-experts MLP layer """ import collections.abc import math from typing import Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import BackboneOutput, BaseModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_vitpose_backbone import VitPoseBackboneConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "VitPoseBackboneConfig" class VitPoseBackbonePatchEmbeddings(nn.Module): """Image to Patch Embedding.""" def __init__(self, config): super().__init__() image_size = config.image_size patch_size = config.patch_size num_channels = config.num_channels embed_dim = config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size, padding=2) def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: height, width = pixel_values.shape[-2:] if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." ) embeddings = self.projection(pixel_values) embeddings = embeddings.flatten(2).transpose(1, 2) return embeddings class VitPoseBackboneEmbeddings(nn.Module): """ Construct the position and patch embeddings. 
""" def __init__(self, config: VitPoseBackboneConfig) -> None: super().__init__() self.patch_embeddings = VitPoseBackbonePatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: embeddings = self.patch_embeddings(pixel_values) # add positional encoding to each token embeddings = embeddings + self.position_embeddings[:, 1:] + self.position_embeddings[:, :1] embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->VitPoseBackbone class VitPoseBackboneSelfAttention(nn.Module): def __init__(self, config: VitPoseBackboneConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->VitPoseBackbone class VitPoseBackboneSelfOutput(nn.Module): """ The residual connection is defined in VitPoseBackboneLayer instead of here (as is the case with other models), due to the layernorm applied before each block. 
""" def __init__(self, config: VitPoseBackboneConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->VitPoseBackbone class VitPoseBackboneAttention(nn.Module): def __init__(self, config: VitPoseBackboneConfig) -> None: super().__init__() self.attention = VitPoseBackboneSelfAttention(config) self.output = VitPoseBackboneSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class VitPoseBackboneMoeMLP(nn.Module): def __init__(self, config: VitPoseBackboneConfig): super().__init__() in_features = out_features = config.hidden_size hidden_features = int(config.hidden_size * config.mlp_ratio) num_experts = config.num_experts part_features = config.part_features self.part_features = part_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = ACT2FN[config.hidden_act] self.fc2 = nn.Linear(hidden_features, out_features - part_features) self.drop = nn.Dropout(config.hidden_dropout_prob) self.num_experts = num_experts experts = [nn.Linear(hidden_features, part_features) for _ in range(num_experts)] self.experts = nn.ModuleList(experts) def forward(self, hidden_state: torch.Tensor, indices: torch.Tensor) -> torch.Tensor: expert_hidden_state = torch.zeros_like(hidden_state[:, :, -self.part_features :]) hidden_state = self.fc1(hidden_state) hidden_state = self.act(hidden_state) shared_hidden_state = self.fc2(hidden_state) indices = indices.view(-1, 1, 1) # to support ddp training for i in range(self.num_experts): selected_index = indices == i current_hidden_state = self.experts[i](hidden_state) * selected_index expert_hidden_state = expert_hidden_state + current_hidden_state hidden_state = torch.cat([shared_hidden_state, expert_hidden_state], dim=-1) return hidden_state class VitPoseBackboneMLP(nn.Module): def __init__(self, config: VitPoseBackboneConfig) -> None: super().__init__() in_features = out_features = config.hidden_size hidden_features = int(config.hidden_size * config.mlp_ratio) self.fc1 = 
nn.Linear(in_features, hidden_features, bias=True) self.activation = ACT2FN[config.hidden_act] self.fc2 = nn.Linear(hidden_features, out_features, bias=True) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: hidden_state = self.fc1(hidden_state) hidden_state = self.activation(hidden_state) hidden_state = self.fc2(hidden_state) return hidden_state class VitPoseBackboneLayer(nn.Module): def __init__(self, config: VitPoseBackboneConfig) -> None: super().__init__() self.num_experts = config.num_experts self.attention = VitPoseBackboneAttention(config) self.mlp = VitPoseBackboneMLP(config) if self.num_experts == 1 else VitPoseBackboneMoeMLP(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, dataset_index: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: # Validate dataset_index when using multiple experts if self.num_experts > 1 and dataset_index is None: raise ValueError( "dataset_index must be provided when using multiple experts " f"(num_experts={self.num_experts}). Please provide dataset_index " "to the forward pass." ) self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in VitPoseBackbone, layernorm is applied before self-attention head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states layer_output = self.layernorm_after(hidden_states) if self.num_experts == 1: layer_output = self.mlp(layer_output) else: layer_output = self.mlp(layer_output, indices=dataset_index) # second residual connection layer_output = layer_output + hidden_states outputs = (layer_output,) + outputs return outputs # Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->VitPoseBackbone class VitPoseBackboneEncoder(nn.Module): def __init__(self, config: VitPoseBackboneConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([VitPoseBackboneLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False # Ignore copy def forward( self, hidden_states: torch.Tensor, dataset_index: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, dataset_index, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, dataset_index, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in 
[hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class VitPoseBackbonePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = VitPoseBackboneConfig base_model_prefix = "vit" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["VitPoseBackboneEmbeddings", "VitPoseBackboneLayer"] def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm, VitPoseBackboneEmbeddings]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_( module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range ).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, VitPoseBackboneEmbeddings): module.position_embeddings.data = nn.init.trunc_normal_( module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range, ).to(module.position_embeddings.dtype) VITPOSE_BACKBONE_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`VitPoseBackboneConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ VITPOSE_BACKBONE_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. dataset_index (`torch.Tensor` of shape `(batch_size,)`): Index to use in the Mixture-of-Experts (MoE) blocks of the backbone. This corresponds to the dataset index used during training, e.g. index 0 refers to COCO. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The VitPose backbone useful for downstream tasks.", VITPOSE_BACKBONE_START_DOCSTRING, ) class VitPoseBackbone(VitPoseBackbonePreTrainedModel, BackboneMixin): def __init__(self, config: VitPoseBackboneConfig): super().__init__(config) super()._init_backbone(config) self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] self.embeddings = VitPoseBackboneEmbeddings(config) self.encoder = VitPoseBackboneEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(VITPOSE_BACKBONE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.Tensor, dataset_index: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): """ Returns: Examples: ```python >>> from transformers import VitPoseBackboneConfig, VitPoseBackbone >>> import torch >>> config = VitPoseBackboneConfig(out_indices=[-1]) >>> model = VitPoseBackbone(config) >>> pixel_values = torch.randn(1, 3, 256, 192) >>> dataset_index = torch.tensor([1]) >>> outputs = model(pixel_values, dataset_index) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(pixel_values) outputs = self.encoder( embedding_output, dataset_index=dataset_index, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict, ) hidden_states = outputs.hidden_states if return_dict else outputs[1] feature_maps = () for stage, hidden_state in zip(self.stage_names, hidden_states): if stage in self.out_features: hidden_state = self.layernorm(hidden_state) feature_maps += (hidden_state,) if not return_dict: if output_hidden_states: output = (feature_maps,) + outputs[1:] else: output = (feature_maps,) + outputs[2:] return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, )
transformers/src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py/0
{ "file_path": "transformers/src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py", "repo_id": "transformers", "token_count": 9297 }
# coding=utf-8 # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Flax Wav2Vec2 model.""" from functools import partial from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_wav2vec2 import Wav2Vec2Config logger = logging.get_logger(__name__) @flax.struct.dataclass class FlaxWav2Vec2BaseModelOutput(ModelOutput): """ Output type of [`FlaxWav2Vec2BaseModelOutput`], with potential hidden states and attentions. Args: last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. extract_features (`jnp.ndarray` of shape `(batch_size, sequence_length, last_conv_dim)`): Sequence of extracted feature vectors of the last convolutional layer of the model with `last_conv_dim` being the dimension of the last convolutional layer. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: jnp.ndarray = None extract_features: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxWav2Vec2ForPreTrainingOutput(ModelOutput): """ Output type of [`FlaxWav2Vec2ForPreTrainingOutput`], with potential hidden states and attentions. Args: loss (*optional*, returned when model is in train mode, `jnp.ndarray` of shape `(1,)`): Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf) . (classification) loss. 
projected_states (`jnp.ndarray` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked projected quantized states. projected_quantized_states (`jnp.ndarray` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive target vectors for contrastive loss. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ projected_states: jnp.ndarray = None projected_quantized_states: jnp.ndarray = None codevector_perplexity: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[np.ndarray] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. 
            however due to overlaps, the actual number of masked elements will be smaller (spans are allowed to
            overlap)
        mask_length: size of the mask
        min_masks: minimum number of masked spans

    """
    batch_size, sequence_length = shape

    if mask_length < 1:
        raise ValueError("`mask_length` has to be bigger than 0.")

    if mask_length > sequence_length:
        raise ValueError(
            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and"
            f" `sequence_length`: {sequence_length}"
        )

    # compute number of masked spans in batch
    num_masked_spans = int(mask_prob * sequence_length / mask_length + np.random.rand(1).item())
    num_masked_spans = max(num_masked_spans, min_masks)

    # make sure num masked indices <= sequence_length
    if num_masked_spans * mask_length > sequence_length:
        num_masked_spans = sequence_length // mask_length

    # SpecAugment mask to fill
    spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)

    # get random indices to mask
    spec_aug_mask_idxs = np.array(
        [
            np.random.choice(np.arange(sequence_length - (mask_length - 1)), num_masked_spans, replace=False)
            for _ in range(batch_size)
        ]
    )

    # expand masked indices to masked spans
    spec_aug_mask_idxs = np.broadcast_to(spec_aug_mask_idxs[:, :, None], (batch_size, num_masked_spans, mask_length))
    spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, num_masked_spans * mask_length)

    offsets = np.arange(mask_length)[None, None, :]
    offsets = np.broadcast_to(offsets, (batch_size, num_masked_spans, mask_length)).reshape(
        batch_size, num_masked_spans * mask_length
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs + offsets

    # scatter indices to mask
    np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)

    if attention_mask is not None:
        # make sure padded input ids cannot be masked
        spec_aug_mask = np.where(attention_mask, spec_aug_mask, False)

    return spec_aug_mask


def _sample_negative_indices(features_shape: Tuple, num_negatives: int, attention_mask: Optional[np.ndarray] = None):
    """
    Sample `num_negatives` vectors from feature vectors.
    """
    batch_size, sequence_length, hidden_size = features_shape
    if sequence_length <= 1:
        raise ValueError(
            "`features` should have `sequence_length` > 1, but are of shape "
            f"(batch_size, sequence_length, hidden_size) = ({batch_size}, {sequence_length}, {hidden_size})."
        )

    # get `num_negatives` random vector indices from the same utterance
    sampled_negative_indices = []
    for batch_idx in range(batch_size):
        high = attention_mask[batch_idx].sum() - 1 if attention_mask is not None else sequence_length - 1
        sampled_indices_slice = np.random.randint(0, high, size=(num_negatives * sequence_length,))
        sampled_negative_indices.append(sampled_indices_slice)

    sampled_negative_indices = np.asarray(sampled_negative_indices, dtype=np.int32)

    # generate indices of the positive vectors themselves, repeat them `num_negatives` times
    feature_indices = np.broadcast_to(np.arange(sequence_length)[:, None], (sequence_length, num_negatives)).flatten()

    # avoid sampling the same positive vector, but keep the distribution uniform
    sampled_negative_indices[sampled_negative_indices >= feature_indices] += 1

    # correct for batch size
    for batch_idx in range(1, batch_size):
        sampled_negative_indices[batch_idx] += batch_idx * sequence_length

    return sampled_negative_indices


WAV_2_VEC_2_START_DOCSTRING = r"""
    Wav2Vec2 was proposed in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech
    Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael
    Auli.

    This model inherits from [`FlaxPreTrainedModel`].
Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a Flax Linen
    [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
    regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.

    Finally, this model supports inherent JAX features such as:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        config ([`Wav2Vec2Config`]): Model configuration class with all the parameters of the model. Initializing
            with a config file does not load the weights associated with the model, only the configuration. Check out
            the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
        dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
            The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
            `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision
            inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`.

            **Note that this only specifies the dtype of the computation and does not influence the dtype of model
            parameters.**

            If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
            [`~FlaxPreTrainedModel.to_bf16`].
"""

WAV_2_VEC_2_INPUTS_DOCSTRING = r"""
    Args:
        input_values (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio
            file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library
            (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used
            for padding and conversion into a tensor of type `jnp.ndarray`. See [`Wav2Vec2Processor.__call__`] for
            details.
        attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
            `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            .. warning:: `attention_mask` should only be passed if the corresponding processor has
            `config.return_attention_mask == True`. For all models whose processor has
            `config.return_attention_mask == False`, such as
            [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), `attention_mask` should **not** be
            passed to avoid degraded performance when doing batched inference. For such models `input_values` should
            simply be padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly
            different results depending on whether `input_values` is padded or not.
        mask_time_indices (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
            masked extracted features in *config.proj_codevector_dim* space.
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class FlaxWav2Vec2LayerNormConvLayer(nn.Module): config: Wav2Vec2Config layer_id: int = 0 dtype: jnp.dtype = jnp.float32 def setup(self): self.in_conv_dim = self.config.conv_dim[self.layer_id] if self.layer_id > 0 else 1 self.out_conv_dim = self.config.conv_dim[self.layer_id] self.conv = nn.Conv( features=self.config.conv_dim[self.layer_id], kernel_size=(self.config.conv_kernel[self.layer_id],), strides=(self.config.conv_stride[self.layer_id],), use_bias=self.config.conv_bias, kernel_init=jax.nn.initializers.he_normal(), padding="VALID", dtype=self.dtype, ) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.activation = ACT2FN[self.config.feat_extract_activation] def __call__(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class FlaxConvWithWeightNorm(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv( features=self.config.hidden_size, kernel_size=(self.config.num_conv_pos_embeddings,), kernel_init=jax.nn.initializers.he_normal(), padding="VALID", feature_group_count=self.config.num_conv_pos_embedding_groups, dtype=self.dtype, ) weight_shape = ( self.conv.features, self.conv.features // self.conv.feature_group_count, self.conv.kernel_size[0], ) self.weight_v = self.param("weight_v", jax.nn.initializers.he_normal(), weight_shape) self.weight_g = self.param("weight_g", lambda _: jnp.linalg.norm(self.weight_v, axis=(0, 1))[None, None, :]) self.bias = self.param("bias", jax.nn.initializers.zeros, (self.conv.features,)) self.prev_padding = self.conv.kernel_size[0] // 2 def _get_normed_weights(self): weight_v_norm = jnp.linalg.norm(self.weight_v, axis=(0, 1))[None, None, :] normed_weight_v = jnp.divide(self.weight_v, weight_v_norm) normed_kernel = jnp.multiply(normed_weight_v, self.weight_g) return normed_kernel def __call__(self, hidden_states): kernel = self._get_normed_weights() hidden_states = jnp.pad(hidden_states, ((0, 0), (self.prev_padding, self.prev_padding), (0, 0))) hidden_states = self.conv.apply({"params": {"kernel": kernel.T, "bias": self.bias}}, hidden_states) return hidden_states class FlaxWav2Vec2PositionalConvEmbedding(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = FlaxConvWithWeightNorm(self.config, dtype=self.dtype) self.activation = ACT2FN[self.config.feat_extract_activation] self.num_pad_remove = 1 if self.config.num_conv_pos_embeddings % 2 == 0 else 0 def __call__(self, hidden_states): hidden_states = hidden_states.transpose((0, 1, 2)) hidden_states = self.conv(hidden_states) if self.num_pad_remove > 0: hidden_states = hidden_states[:, : -self.num_pad_remove, :] hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose((0, 1, 2)) return hidden_states class FlaxConvLayersCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): if self.config.feat_extract_norm == "layer": self.layers = [ 
FlaxWav2Vec2LayerNormConvLayer(self.config, layer_id=i, name=str(i), dtype=self.dtype)
                for i in range(self.config.num_feat_extract_layers)
            ]
        elif self.config.feat_extract_norm == "group":
            raise NotImplementedError("At the moment only ``config.feat_extract_norm == 'layer'`` is supported")
        else:
            raise ValueError(
                f"`config.feat_extract_norm` is {self.config.feat_extract_norm}, but has to be one of ['group',"
                " 'layer']"
            )

    def __call__(self, hidden_states):
        for i, conv_layer in enumerate(self.layers):
            hidden_states = conv_layer(hidden_states)
        return hidden_states


class FlaxWav2Vec2FeatureEncoder(nn.Module):
    """Construct the features from raw audio waveform"""

    config: Wav2Vec2Config
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_layers = FlaxConvLayersCollection(self.config, dtype=self.dtype)

    def __call__(self, input_values, freeze_feature_encoder=False):
        hidden_states = input_values[:, :, None]
        hidden_states = self.conv_layers(hidden_states)
        if freeze_feature_encoder:
            hidden_states = jax.lax.stop_gradient(hidden_states)
        return hidden_states


class FlaxWav2Vec2FeatureProjection(nn.Module):
    config: Wav2Vec2Config
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.projection = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.dropout = nn.Dropout(rate=self.config.feat_proj_dropout)

    def __call__(self, hidden_states, deterministic=True):
        norm_hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.projection(norm_hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        return hidden_states, norm_hidden_states


class FlaxWav2Vec2Attention(nn.Module):
    config: Wav2Vec2Config
    embed_dim: int
    num_heads: int
    dropout: float = 0.0
    bias: bool = True
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self) -> None:
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )

        dense = partial(
            nn.Dense,
            self.embed_dim,
            use_bias=self.bias,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )

        self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
        self.out_proj = dense()

        self.dropout_layer = nn.Dropout(rate=self.dropout)

    def _split_heads(self, hidden_states):
        return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))

    def _merge_heads(self, hidden_states):
        return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))

    def __call__(
        self,
        hidden_states: jnp.ndarray,
        key_value_states: Optional[jnp.ndarray] = None,
        attention_mask: Optional[jnp.ndarray] = None,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:
        """Input shape: Batch x Time x Channel"""

        # get query proj
        query_states = self.q_proj(hidden_states)

        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        query_states = self._split_heads(query_states)
        key_states = self._split_heads(key_states)
        value_states = self._split_heads(value_states)

        if attention_mask is not None:
            attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))

        # Convert the boolean attention mask to an attention bias.
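        # (a bias of 0.0 keeps a position visible, while a large negative bias drives its
        #  weight to zero after the softmax)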
if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class FlaxWav2Vec2FeedForward(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.intermediate_dropout = nn.Dropout(rate=self.config.activation_dropout) self.intermediate_dense = nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) if isinstance(self.config.hidden_act, str): self.intermediate_act_fn = ACT2FN[self.config.hidden_act] else: self.intermediate_act_fn = self.config.hidden_act self.output_dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.output_dropout = nn.Dropout(rate=self.config.hidden_dropout) def __call__(self, hidden_states, deterministic=True): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states, deterministic=deterministic) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxWav2Vec2EncoderLayerStableLayerNorm(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.attention = FlaxWav2Vec2Attention( config=self.config, embed_dim=self.config.hidden_size, num_heads=self.config.num_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.feed_forward = FlaxWav2Vec2FeedForward(self.config, dtype=self.dtype) self.final_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states, attention_mask=None, deterministic=True, output_attentions=False): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights = self.attention( hidden_states, attention_mask=attention_mask, deterministic=deterministic ) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward( self.final_layer_norm(hidden_states), deterministic=deterministic ) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class FlaxWav2Vec2EncoderLayerStableLayerNormCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [ FlaxWav2Vec2EncoderLayerStableLayerNorm(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask=None, 
deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class FlaxWav2Vec2StableLayerNormEncoder(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.pos_conv_embed = FlaxWav2Vec2PositionalConvEmbedding(self.config, dtype=self.dtype) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout) self.layers = FlaxWav2Vec2EncoderLayerStableLayerNormCollection(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask=None, deterministic=True, output_attentions=False, output_hidden_states=False, return_dict=True, ): if attention_mask is not None: # make sure padded tokens are not attended to hidden_states = jnp.where( jnp.broadcast_to(attention_mask[:, :, None], hidden_states.shape), hidden_states, 0 ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = self.layer_norm(outputs[0]) # update the last element in `hidden_states` after applying `layernorm` above hidden_states = None if output_hidden_states: hidden_states = outputs[1] hidden_states = hidden_states[:-1] + (last_hidden_state,) if not return_dict: outputs = (last_hidden_state, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:]) return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=last_hidden_state, hidden_states=hidden_states, attentions=outputs.attentions ) class FlaxWav2Vec2GumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information. 
""" config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.num_groups = self.config.num_codevector_groups self.num_vars = self.config.num_codevectors_per_group if self.config.codevector_dim % self.num_groups != 0: raise ValueError( f"`config.codevector_dim {self.config.codevector_dim} must be divisible by" f" `config.num_codevector_groups` {self.num_groups} for concatenation" ) # storage for codebook variables (codewords) self.codevectors = self.param( "codevectors", jax.nn.initializers.uniform(), (1, self.num_groups * self.num_vars, self.config.codevector_dim // self.num_groups), ) self.weight_proj = nn.Dense( self.num_groups * self.num_vars, kernel_init=jax.nn.initializers.normal(1.0), dtype=self.dtype, ) @staticmethod def _compute_perplexity(probs, mask=None): if mask is not None: mask_extended = jnp.broadcast_to(mask.flatten()[:, None, None], probs.shape) probs = jnp.where(mask_extended, probs, jnp.zeros_like(probs)) marginal_probs = probs.sum(axis=0) / mask.sum() else: marginal_probs = probs.mean(axis=0) perplexity = jnp.exp(-jnp.sum(marginal_probs * jnp.log(marginal_probs + 1e-7), axis=-1)).sum() return perplexity def __call__(self, hidden_states, mask_time_indices=None, deterministic=True, temperature=1): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.reshape(batch_size * sequence_length * self.num_groups, -1) if not deterministic: # sample code vector probs via gumbel in differentiateable way gumbel_rng = self.make_rng("gumbel") gumbels = jax.random.gumbel(gumbel_rng, hidden_states.shape) codevector_probs = nn.softmax((hidden_states + gumbels) / temperature) # compute perplexity codevector_soft_dist = nn.softmax( hidden_states.reshape(batch_size * sequence_length, self.num_groups, -1), axis=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist, mask_time_indices) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(axis=-1) codevector_probs = jax.nn.one_hot(codevector_idx, hidden_states.shape[-1]) * 1.0 codevector_probs = codevector_probs.reshape(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs, mask_time_indices) codevector_probs = codevector_probs.reshape(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = jnp.expand_dims(codevector_probs, axis=-1) * self.codevectors codevectors = codevectors_per_group.reshape(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).reshape(batch_size, sequence_length, -1) return codevectors, perplexity class FlaxWav2Vec2Adapter(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): # hidden_states require down-projection if feature dims don't match if self.config.output_hidden_size != self.config.hidden_size: self.proj = nn.Dense( self.config.output_hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.proj_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) else: self.proj = self.proj_layer_norm = None self.layers = FlaxWav2Vec2AdapterLayersCollection(self.config, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True): # down-project hidden_states if required if self.proj is not None and self.proj_layer_norm is not None: hidden_states = 
self.proj(hidden_states) hidden_states = self.proj_layer_norm(hidden_states) hidden_states = self.layers(hidden_states) return hidden_states class FlaxWav2Vec2AdapterLayer(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv( features=2 * self.config.output_hidden_size, kernel_size=(self.config.adapter_kernel_size,), strides=(self.config.adapter_stride,), padding=((1, 1),), kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = nn.glu(hidden_states, axis=2) return hidden_states class FlaxWav2Vec2AdapterLayersCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [ FlaxWav2Vec2AdapterLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_adapter_layers) ] def __call__(self, hidden_states): for conv_layer in self.layers: hidden_states = conv_layer(hidden_states) return hidden_states class FlaxWav2Vec2PreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Wav2Vec2Config base_model_prefix: str = "wav2vec2" main_input_name = "input_values" module_class: nn.Module = None def __init__( self, config: Wav2Vec2Config, input_shape: Tuple = (1, 1024), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_values = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_values) params_rng, dropout_rng = jax.random.split(rng, 2) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init(rngs, input_values, attention_mask, return_dict=False)["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, freeze_feature_encoder: bool = False, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict batch_size, sequence_length = input_values.shape if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} return self.module.apply( inputs, jnp.array(input_values, dtype="f4"), jnp.array(attention_mask, dtype="i4"), mask_time_indices, not train, output_attentions, 
output_hidden_states, freeze_feature_encoder, return_dict, rngs=rngs, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): return self.module._get_feat_extract_output_lengths(input_lengths, add_adapter=add_adapter) class FlaxWav2Vec2Module(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.feature_extractor = FlaxWav2Vec2FeatureEncoder(self.config, dtype=self.dtype) self.feature_projection = FlaxWav2Vec2FeatureProjection(self.config, dtype=self.dtype) self.masked_spec_embed = self.param( "masked_spec_embed", jax.nn.initializers.uniform(), (self.config.hidden_size,) ) if self.config.do_stable_layer_norm: self.encoder = FlaxWav2Vec2StableLayerNormEncoder(self.config, dtype=self.dtype) else: raise NotImplementedError("``config.do_stable_layer_norm is False`` is currently not supported.") self.adapter = FlaxWav2Vec2Adapter(self.config, dtype=self.dtype) if self.config.add_adapter else None def __call__( self, input_values, attention_mask=None, mask_time_indices=None, deterministic=True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): extract_features = self.feature_extractor(input_values, freeze_feature_encoder=freeze_feature_encoder) # make sure that no loss is computed on padded inputs if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask( extract_features.shape[1], attention_mask, add_adapter=False ) hidden_states, extract_features = self.feature_projection(extract_features, deterministic=deterministic) if mask_time_indices is not None: # apply SpecAugment along time axis with given indices hidden_states = jnp.where( jnp.broadcast_to(mask_time_indices[:, :, None], hidden_states.shape), jnp.broadcast_to(self.masked_spec_embed[None, None, :], hidden_states.shape), hidden_states, ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if self.adapter is not None: hidden_states = self.adapter(hidden_states) if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return FlaxWav2Vec2BaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths def _get_feature_vector_attention_mask( self, feature_vector_length: int, attention_mask: jnp.ndarray, add_adapter=None ): # Effectively attention_mask.sum(-1), but not inplace to be 
able to run # in inference mode. non_padded_lengths = attention_mask.cumsum(axis=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter) batch_size = attention_mask.shape[0] attention_mask = jnp.zeros((batch_size, feature_vector_length), dtype=attention_mask.dtype) # these two operations make sure that all values # before the output lengths indices are attended to attention_mask = attention_mask.at[jnp.arange(attention_mask.shape[0]), output_lengths - 1].set(1) attention_mask = jnp.flip(jnp.flip(attention_mask, -1).cumsum(-1), -1).astype("bool") return attention_mask @add_start_docstrings( "The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top.", WAV_2_VEC_2_START_DOCSTRING, ) class FlaxWav2Vec2Model(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2Module FLAX_WAV2VEC2_MODEL_DOCSTRING = """ Returns: Example: ```python >>> from transformers import AutoProcessor, FlaxWav2Vec2Model >>> from datasets import load_dataset >>> import soundfile as sf >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-large-lv60") >>> model = FlaxWav2Vec2Model.from_pretrained("facebook/wav2vec2-large-lv60") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor( ... ds["speech"][0], sampling_rate=16_000, return_tensors="np" ... ).input_values # Batch size 1 >>> hidden_states = model(input_values).last_hidden_state ``` """ overwrite_call_docstring( FlaxWav2Vec2Model, WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_MODEL_DOCSTRING, ) append_replace_return_docstrings( FlaxWav2Vec2Model, output_type=FlaxWav2Vec2BaseModelOutput, config_class=Wav2Vec2Config ) class FlaxWav2Vec2ForCTCModule(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.wav2vec2 = FlaxWav2Vec2Module(self.config, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.final_dropout) self.lm_head = nn.Dense( self.config.vocab_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, deterministic=True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): outputs = self.wav2vec2( input_values, attention_mask=attention_mask, mask_time_indices=mask_time_indices, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, freeze_feature_encoder=freeze_feature_encoder, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.lm_head(hidden_states) if not return_dict: return (logits,) + outputs[2:] return FlaxCausalLMOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None, ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for
kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths @add_start_docstrings( "Wav2Vec2 Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).", WAV_2_VEC_2_START_DOCSTRING, ) class FlaxWav2Vec2ForCTC(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2ForCTCModule FLAX_WAV2VEC2_FOR_CTC_DOCSTRING = """ Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import AutoProcessor, FlaxWav2Vec2ForCTC >>> from datasets import load_dataset >>> import soundfile as sf >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-large-960h-lv60") >>> model = FlaxWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor( ... ds["speech"][0], sampling_rate=16_000, return_tensors="np" ... ).input_values # Batch size 1 >>> logits = model(input_values).logits >>> predicted_ids = jnp.argmax(logits, axis=-1) >>> transcription = processor.decode(predicted_ids[0]) >>> # should give: "A MAN SAID TO THE UNIVERSE SIR I EXIST" ``` """ overwrite_call_docstring( FlaxWav2Vec2ForCTC, WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_FOR_CTC_DOCSTRING, ) append_replace_return_docstrings(FlaxWav2Vec2ForCTC, output_type=FlaxCausalLMOutput, config_class=Wav2Vec2Config) class FlaxWav2Vec2ForPreTrainingModule(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.wav2vec2 = FlaxWav2Vec2Module(self.config, dtype=self.dtype) self.dropout_features = nn.Dropout(self.config.feat_quantizer_dropout) self.quantizer = FlaxWav2Vec2GumbelVectorQuantizer(self.config, dtype=self.dtype) self.project_q = nn.Dense( self.config.proj_codevector_dim, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.project_hid = nn.Dense( self.config.proj_codevector_dim, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, gumbel_temperature: int = 1, deterministic: bool = True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): r""" Returns: Example: ```python ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.wav2vec2( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, mask_time_indices=mask_time_indices, deterministic=deterministic, freeze_feature_encoder=freeze_feature_encoder, return_dict=return_dict, ) # project all transformed features (including masked) to final vq dim transformer_features = self.project_hid(outputs[0]) # quantize all (unmasked) extracted features and project to final vq dim extract_features = self.dropout_features(outputs[1], deterministic=deterministic) quantized_features, codevector_perplexity = self.quantizer( extract_features, mask_time_indices, deterministic=deterministic, temperature=gumbel_temperature ) quantized_features = self.project_q(quantized_features) if 
not return_dict: return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:] return FlaxWav2Vec2ForPreTrainingOutput( projected_states=transformer_features, projected_quantized_states=quantized_features, codevector_perplexity=codevector_perplexity, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths @add_start_docstrings("""Wav2Vec2 Model with a quantizer and `VQ` head on top.""", WAV_2_VEC_2_START_DOCSTRING) class FlaxWav2Vec2ForPreTraining(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2ForPreTrainingModule @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) # overwrite since has `gumbel_temperature` input def __call__( self, input_values, attention_mask=None, mask_time_indices=None, gumbel_temperature: int = 1, params: dict = None, dropout_rng: jax.random.PRNGKey = None, gumbel_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, freeze_feature_encoder: bool = False, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict batch_size, sequence_length = input_values.shape if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng if gumbel_rng is not None: rngs["gumbel"] = gumbel_rng inputs = {"params": params or self.params} return self.module.apply( inputs, jnp.array(input_values, dtype="f4"), jnp.array(attention_mask, dtype="i4"), mask_time_indices, gumbel_temperature, not train, output_attentions, output_hidden_states, freeze_feature_encoder, return_dict, rngs=rngs, ) FLAX_WAV2VEC2_FOR_PRETRAINING_DOCSTRING = """ Returns: Example: ```python >>> import optax >>> import numpy as np >>> import jax.numpy as jnp >>> from transformers import AutoFeatureExtractor, FlaxWav2Vec2ForPreTraining >>> from transformers.models.wav2vec2.modeling_flax_wav2vec2 import _compute_mask_indices >>> from datasets import load_dataset >>> import soundfile as sf >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-large-lv60") >>> model = FlaxWav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-large-lv60") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... 
return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = feature_extractor(ds["speech"][0], return_tensors="np").input_values # Batch size 1 >>> # compute masked indices >>> batch_size, raw_sequence_length = input_values.shape >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) >>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2) >>> outputs = model(input_values, mask_time_indices=mask_time_indices) >>> # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states) >>> cosine_sim = optax.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states) >>> # show that cosine similarity is much higher than random >>> assert np.asarray(cosine_sim)[mask_time_indices].mean() > 0.5 ``` """ overwrite_call_docstring( FlaxWav2Vec2ForPreTraining, WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_FOR_PRETRAINING_DOCSTRING, ) append_replace_return_docstrings( FlaxWav2Vec2ForPreTraining, output_type=FlaxWav2Vec2ForPreTrainingOutput, config_class=Wav2Vec2Config ) __all__ = ["FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel"]
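# --- Editor's note: a minimal, hedged sketch, not part of the original file. ---
# The `_get_feat_extract_output_lengths` helpers above apply the standard 1D-conv
# length formula layer by layer. The snippet below works that arithmetic through by
# hand; the kernel/stride tuples are the usual Wav2Vec2 defaults, hard-coded here
# purely for illustration (check your own config before relying on them).
def _sketch_feat_extract_output_length(input_length: int) -> int:
    conv_kernel = (10, 3, 3, 3, 3, 2, 2)  # assumed default config values
    conv_stride = (5, 2, 2, 2, 2, 2, 2)
    for kernel_size, stride in zip(conv_kernel, conv_stride):
        # same formula as `_conv_out_length` above
        input_length = (input_length - kernel_size) // stride + 1
    return input_length

# One second of 16 kHz audio yields 49 feature frames, i.e. roughly 20 ms per frame:
assert _sketch_feat_extract_output_length(16_000) == 49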
transformers/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py/0
{ "file_path": "transformers/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py", "repo_id": "transformers", "token_count": 24432 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Speech processor class for Whisper """ from ...processing_utils import ProcessorMixin class WhisperProcessor(ProcessorMixin): r""" Constructs a Whisper processor which wraps a Whisper feature extractor and a Whisper tokenizer into a single processor. [`WhisperProcessor`] offers all the functionalities of [`WhisperFeatureExtractor`] and [`WhisperTokenizer`]. See the [`~WhisperProcessor.__call__`] and [`~WhisperProcessor.decode`] for more information. Args: feature_extractor (`WhisperFeatureExtractor`): An instance of [`WhisperFeatureExtractor`]. The feature extractor is a required input. tokenizer (`WhisperTokenizer`): An instance of [`WhisperTokenizer`]. The tokenizer is a required input. """ feature_extractor_class = "WhisperFeatureExtractor" tokenizer_class = "WhisperTokenizer" def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) self.current_processor = self.feature_extractor self._in_target_context_manager = False def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True): return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps) def __call__(self, *args, **kwargs): """ Forwards the `audio` argument to WhisperFeatureExtractor's [`~WhisperFeatureExtractor.__call__`] and the `text` argument to [`~WhisperTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information. """ # For backward compatibility if self._in_target_context_manager: return self.current_processor(*args, **kwargs) audio = kwargs.pop("audio", None) sampling_rate = kwargs.pop("sampling_rate", None) text = kwargs.pop("text", None) if len(args) > 0: audio = args[0] args = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") if audio is not None: inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif audio is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to WhisperTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to WhisperTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) def get_prompt_ids(self, text: str, return_tensors="np"): return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors) __all__ = ["WhisperProcessor"]
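# --- Editor's note: a hedged usage sketch, not part of the original file. ---
# Shows how `WhisperProcessor.__call__` above routes `audio` to the feature extractor
# and `text` to the tokenizer, attaching the token ids under "labels". The checkpoint
# name and the padded-to-30s output shape are assumptions based on the standard
# Whisper setup; adjust for your model.
import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=audio, sampling_rate=16_000, text="hello world", return_tensors="np")
print(inputs["input_features"].shape)  # log-mel features, padded to 30 s of frames
print(inputs["labels"])  # tokenized transcript, usable as training labels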
transformers/src/transformers/models/whisper/processing_whisper.py/0
{ "file_path": "transformers/src/transformers/models/whisper/processing_whisper.py", "repo_id": "transformers", "token_count": 1478 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for ZoeDepth.""" import math from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union import numpy as np if TYPE_CHECKING: from .modeling_zoedepth import ZoeDepthDepthEstimatorOutput from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import PaddingMode, pad, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import ( TensorType, filter_out_non_signature_kwargs, is_torch_available, is_vision_available, logging, requires_backends, ) if is_vision_available(): import PIL if is_torch_available(): import torch from torch import nn logger = logging.get_logger(__name__) def get_resize_output_image_size( input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Tuple[int, int]: def constrain_to_multiple_of(val, multiple, min_val=0): x = (np.round(val / multiple) * multiple).astype(int) if x < min_val: x = math.ceil(val / multiple) * multiple return x output_size = (output_size, output_size) if isinstance(output_size, int) else output_size input_height, input_width = get_image_size(input_image, input_data_format) output_height, output_width = output_size # determine new height and width scale_height = output_height / input_height scale_width = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width) < abs(1 - scale_height): # fit width scale_height = scale_width else: # fit height scale_width = scale_height new_height = constrain_to_multiple_of(scale_height * input_height, multiple=multiple) new_width = constrain_to_multiple_of(scale_width * input_width, multiple=multiple) return (new_height, new_width) class ZoeDepthImageProcessor(BaseImageProcessor): r""" Constructs a ZoeDepth image processor. Args: do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the input. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in `preprocess`. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in `preprocess`. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image.
This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions. Can be overridden by `do_resize` in `preprocess`. size (`Dict[str, int]` *optional*, defaults to `{"height": 384, "width": 512}`): Size of the image after resizing. If `keep_aspect_ratio` is `True`, the image is resized by choosing the smaller of the height and width scaling factors and using it for both dimensions. If `ensure_multiple_of` is also set, the image is further resized to a size that is a multiple of this value. Can be overridden by `size` in `preprocess`. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Defines the resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`. keep_aspect_ratio (`bool`, *optional*, defaults to `True`): If `True`, the image is resized by choosing the smaller of the height and width scaling factors and using it for both dimensions. This ensures that the image is scaled down as little as possible while still fitting within the desired output size. In case `ensure_multiple_of` is also set, the image is further resized to a size that is a multiple of this value by rounding the height and width to the nearest multiple of this value. Can be overridden by `keep_aspect_ratio` in `preprocess`. ensure_multiple_of (`int`, *optional*, defaults to 32): If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Works by rounding the height and width to the nearest multiple of this value. Works both with and without `keep_aspect_ratio` being set to `True`. Can be overridden by `ensure_multiple_of` in `preprocess`.
""" model_input_names = ["pixel_values"] def __init__( self, do_pad: bool = True, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = True, ensure_multiple_of: int = 32, **kwargs, ) -> None: super().__init__(**kwargs) self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD size = size if size is not None else {"height": 384, "width": 512} size = get_size_dict(size) self.do_resize = do_resize self.size = size self.keep_aspect_ratio = keep_aspect_ratio self.ensure_multiple_of = ensure_multiple_of self.resample = resample def resize( self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Resize an image to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is set, the image is resized to a size that is a multiple of this value. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Target size of the output image. keep_aspect_ratio (`bool`, *optional*, defaults to `False`): If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. ensure_multiple_of (`int`, *optional*, defaults to 1): The image is resized to a size that is a multiple of this value. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Defines the resampling filter to use if resizing the image. Otherwise, the image is resized to size specified in `size`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(image) data_format = data_format if data_format is not None else input_data_format size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}") output_size = get_resize_output_image_size( image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, input_data_format=input_data_format, ) height, width = output_size torch_image = torch.from_numpy(image).unsqueeze(0) torch_image = torch_image.permute(0, 3, 1, 2) if input_data_format == "channels_last" else torch_image # TODO support align_corners=True in image_transforms.resize requires_backends(self, "torch") resample_to_mode = {PILImageResampling.BILINEAR: "bilinear", PILImageResampling.BICUBIC: "bicubic"} mode = resample_to_mode[resample] resized_image = nn.functional.interpolate( torch_image, (int(height), int(width)), mode=mode, align_corners=True ) resized_image = resized_image.squeeze().numpy() resized_image = to_channel_dimension_format( resized_image, data_format, input_channel_dim=ChannelDimension.FIRST ) return resized_image def pad_image( self, image: np.array, mode: PaddingMode = PaddingMode.REFLECT, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Pad an image as done in the original ZoeDepth implementation. Padding fixes the boundary artifacts in the output depth map. Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset which has a black or white border around the image. This function pads the input image and crops the prediction back to the original size / view. Args: image (`np.ndarray`): Image to pad. mode (`PaddingMode`): The padding mode to use. Can be one of: - `"constant"`: pads with a constant value. - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis. - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" height, width = get_image_size(image, input_data_format) pad_height = int(np.sqrt(height / 2) * 3) pad_width = int(np.sqrt(width / 2) * 3) return pad( image, padding=((pad_height, pad_height), (pad_width, pad_width)), mode=mode, data_format=data_format, input_data_format=input_data_format, ) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_pad: bool = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the input image. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. If `keep_aspect_ratio` is `True`, he image is resized by choosing the smaller of the height and width scaling factors and using it for both dimensions. If `ensure_multiple_of` is also set, the image is further resized to a size that is a multiple of this value. keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`): If `True` and `do_resize=True`, the image is resized by choosing the smaller of the height and width scaling factors and using it for both dimensions. This ensures that the image is scaled down as little as possible while still fitting within the desired output size. In case `ensure_multiple_of` is also set, the image is further resized to a size that is a multiple of this value by flooring the height and width to the nearest multiple of this value. ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`): If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Works by flooring the height and width to the nearest multiple of this value. Works both with and without `keep_aspect_ratio` being set to `True`. Can be overidden by `ensure_multiple_of` in `preprocess`. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. 
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size) keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_pad = do_pad if do_pad is not None else self.do_pad images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_pad: images = [self.pad_image(image=image, input_data_format=input_data_format) for image in images] if do_resize: images = [ self.resize( image=image, size=size, resample=resample, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=ensure_multiple_of, input_data_format=input_data_format, ) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) def post_process_depth_estimation( self, outputs: "ZoeDepthDepthEstimatorOutput", source_sizes: Optional[Union[TensorType, List[Tuple[int, int]], None]] = None, target_sizes: Optional[Union[TensorType, List[Tuple[int, int]], None]] = None, outputs_flipped: Optional[Union["ZoeDepthDepthEstimatorOutput", None]] = None, do_remove_padding: Optional[Union[bool, None]] = None, ) -> List[Dict[str, TensorType]]: """ Converts the raw output of [`ZoeDepthDepthEstimatorOutput`] into final depth predictions and depth PIL images. Only supports PyTorch. Args: outputs ([`ZoeDepthDepthEstimatorOutput`]): Raw outputs of the model. source_sizes (`TensorType` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the source size (height, width) of each image in the batch before preprocessing. This argument should be treated as "required" unless the user passes `do_remove_padding=False` as input to this function. target_sizes (`TensorType` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. outputs_flipped ([`ZoeDepthDepthEstimatorOutput`], *optional*): Raw outputs of the model from flipped input (averaged out in the end). do_remove_padding (`bool`, *optional*): By default ZoeDepth adds padding equal to `int(√(height / 2) * 3)` (and similarly for width) to fix the boundary artifacts in the output depth map, so we need to remove this padding during post-processing. The parameter exists here in case the user changed the image preprocessing to not include padding. Returns: `List[Dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth predictions.
""" requires_backends(self, "torch") predicted_depth = outputs.predicted_depth if (outputs_flipped is not None) and (predicted_depth.shape != outputs_flipped.predicted_depth.shape): raise ValueError("Make sure that `outputs` and `outputs_flipped` have the same shape") if (target_sizes is not None) and (len(predicted_depth) != len(target_sizes)): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the predicted depth" ) if do_remove_padding is None: do_remove_padding = self.do_pad if source_sizes is None and do_remove_padding: raise ValueError( "Either `source_sizes` should be passed in, or `do_remove_padding` should be set to False" ) if (source_sizes is not None) and (len(predicted_depth) != len(source_sizes)): raise ValueError( "Make sure that you pass in as many source image sizes as the batch dimension of the logits" ) if outputs_flipped is not None: predicted_depth = (predicted_depth + torch.flip(outputs_flipped.predicted_depth, dims=[-1])) / 2 predicted_depth = predicted_depth.unsqueeze(1) # Zoe Depth model adds padding around the images to fix the boundary artifacts in the output depth map # The padding length is `int(np.sqrt(img_h/2) * fh)` for the height and similar for the width # fh (and fw respectively) are equal to '3' by default # Check [here](https://github.com/isl-org/ZoeDepth/blob/edb6daf45458569e24f50250ef1ed08c015f17a7/zoedepth/models/depth_model.py#L57) # for the original implementation. # In this section, we remove this padding to get the final depth image and depth prediction padding_factor_h = padding_factor_w = 3 results = [] target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes source_sizes = [None] * len(predicted_depth) if source_sizes is None else source_sizes for depth, target_size, source_size in zip(predicted_depth, target_sizes, source_sizes): # depth.shape = [1, H, W] if source_size is not None: pad_h = pad_w = 0 if do_remove_padding: pad_h = int(np.sqrt(source_size[0] / 2) * padding_factor_h) pad_w = int(np.sqrt(source_size[1] / 2) * padding_factor_w) depth = nn.functional.interpolate( depth.unsqueeze(1), size=[source_size[0] + 2 * pad_h, source_size[1] + 2 * pad_w], mode="bicubic", align_corners=False, ) if pad_h > 0: depth = depth[:, :, pad_h:-pad_h, :] if pad_w > 0: depth = depth[:, :, :, pad_w:-pad_w] depth = depth.squeeze(1) # depth.shape = [1, H, W] if target_size is not None: target_size = [target_size[0], target_size[1]] depth = nn.functional.interpolate( depth.unsqueeze(1), size=target_size, mode="bicubic", align_corners=False ) depth = depth.squeeze() # depth.shape = [H, W] results.append({"predicted_depth": depth}) return results __all__ = ["ZoeDepthImageProcessor"]
transformers/src/transformers/models/zoedepth/image_processing_zoedepth.py/0
{ "file_path": "transformers/src/transformers/models/zoedepth/image_processing_zoedepth.py", "repo_id": "transformers", "token_count": 11819 }
# Copyright 2022 The Impira Team and the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from typing import List, Optional, Tuple, Union import numpy as np from ..utils import ( ExplicitEnum, add_end_docstrings, is_pytesseract_available, is_torch_available, is_vision_available, logging, ) from .base import ChunkPipeline, build_pipeline_init_args from .question_answering import select_starts_ends if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES TESSERACT_LOADED = False if is_pytesseract_available(): TESSERACT_LOADED = True import pytesseract logger = logging.get_logger(__name__) # normalize_bbox() and apply_tesseract() are derived from apply_tesseract in models/layoutlmv3/feature_extraction_layoutlmv3.py. # However, because the pipeline may evolve from what layoutlmv3 currently does, it's copied (vs. imported) to avoid creating an # unnecessary dependency. def normalize_box(box, width, height): return [ int(1000 * (box[0] / width)), int(1000 * (box[1] / height)), int(1000 * (box[2] / width)), int(1000 * (box[3] / height)), ] def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: Optional[str]): """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.""" # apply OCR data = pytesseract.image_to_data(image, lang=lang, output_type="dict", config=tesseract_config) words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()] words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices] left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices] top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices] width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices] height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format actual_boxes = [] for x, y, w, h in zip(left, top, width, height): actual_box = [x, y, x + w, y + h] actual_boxes.append(actual_box) image_width, image_height = image.size # finally, normalize the bounding boxes normalized_boxes = [] for box in actual_boxes: normalized_boxes.append(normalize_box(box, image_width, image_height)) if len(words) != len(normalized_boxes): raise ValueError("Not as many words as there are bounding boxes") return words, normalized_boxes class ModelType(ExplicitEnum): LayoutLM = "layoutlm" LayoutLMv2andv3 = "layoutlmv2andv3" VisionEncoderDecoder = "vision_encoder_decoder" @add_end_docstrings(build_pipeline_init_args(has_image_processor=True, has_tokenizer=True)) class DocumentQuestionAnsweringPipeline(ChunkPipeline): # TODO: Update 
task_summary docs to include an example with document QA and then update the first sentence """ Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`. The inputs/outputs are similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional OCR'd words/boxes) as input instead of text context. Example: ```python >>> from transformers import pipeline >>> document_qa = pipeline(model="impira/layoutlm-document-qa") >>> document_qa( ... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", ... question="What is the invoice number?", ... ) [{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This document question answering pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"document-question-answering"`. The models that this pipeline can use are models that have been fine-tuned on a document question answering task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=document-question-answering). """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.tokenizer is not None and not self.tokenizer.__class__.__name__.endswith("Fast"): raise ValueError( "`DocumentQuestionAnsweringPipeline` requires a fast tokenizer, but a slow tokenizer " f"(`{self.tokenizer.__class__.__name__}`) is provided." ) if self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig": self.model_type = ModelType.VisionEncoderDecoder if self.model.config.encoder.model_type != "donut-swin": raise ValueError("Currently, the only supported VisionEncoderDecoder model is Donut") else: self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES) if self.model.config.__class__.__name__ == "LayoutLMConfig": self.model_type = ModelType.LayoutLM else: self.model_type = ModelType.LayoutLMv2andv3 def _sanitize_parameters( self, padding=None, doc_stride=None, max_question_len=None, lang: Optional[str] = None, tesseract_config: Optional[str] = None, max_answer_len=None, max_seq_len=None, top_k=None, handle_impossible_answer=None, timeout=None, **kwargs, ): preprocess_params, postprocess_params = {}, {} if padding is not None: preprocess_params["padding"] = padding if doc_stride is not None: preprocess_params["doc_stride"] = doc_stride if max_question_len is not None: preprocess_params["max_question_len"] = max_question_len if max_seq_len is not None: preprocess_params["max_seq_len"] = max_seq_len if lang is not None: preprocess_params["lang"] = lang if tesseract_config is not None: preprocess_params["tesseract_config"] = tesseract_config if timeout is not None: preprocess_params["timeout"] = timeout if top_k is not None: if top_k < 1: raise ValueError(f"top_k parameter should be >= 1 (got {top_k})") postprocess_params["top_k"] = top_k if max_answer_len is not None: if max_answer_len < 1: raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len})") postprocess_params["max_answer_len"] = max_answer_len if handle_impossible_answer is not None: postprocess_params["handle_impossible_answer"] = handle_impossible_answer forward_params = {} if self.assistant_model is not None: forward_params["assistant_model"] = self.assistant_model if self.assistant_tokenizer is not None: forward_params["tokenizer"] = self.tokenizer
forward_params["assistant_tokenizer"] = self.assistant_tokenizer return preprocess_params, forward_params, postprocess_params def __call__( self, image: Union["Image.Image", str], question: Optional[str] = None, word_boxes: Tuple[str, List[float]] = None, **kwargs, ): """ Answer the question(s) given as inputs by using the document(s). A document is defined as an image and an optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` are not provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for LayoutLM-like models which require them as input. For Donut, no OCR is run. You can invoke the pipeline several ways: - `pipeline(image=image, question=question)` - `pipeline(image=image, question=question, word_boxes=word_boxes)` - `pipeline([{"image": image, "question": question}])` - `pipeline([{"image": image, "question": question, "word_boxes": word_boxes}])` Args: image (`str` or `PIL.Image`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. If given a single image, it can be broadcasted to multiple questions. question (`str`): A question to ask of the document. word_boxes (`List[str, Tuple[float, float, float, float]]`, *optional*): A list of words and bounding boxes (normalized 0->1000). If you provide this optional input, then the pipeline will use these words and boxes instead of running OCR on the image to derive them for models that need them (e.g. LayoutLM). This allows you to reuse OCR'd results across many invocations of the pipeline without having to re-run it each time. top_k (`int`, *optional*, defaults to 1): The number of answers to return (will be chosen by order of likelihood). Note that we return less than top_k answers if there are not enough options available within the context. doc_stride (`int`, *optional*, defaults to 128): If the words in the document are too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap. max_answer_len (`int`, *optional*, defaults to 15): The maximum length of predicted answers (e.g., only answers with a shorter length are considered). max_seq_len (`int`, *optional*, defaults to 384): The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using `doc_stride` as overlap) if needed. max_question_len (`int`, *optional*, defaults to 64): The maximum length of the question after tokenization. It will be truncated if needed. handle_impossible_answer (`bool`, *optional*, defaults to `False`): Whether or not we accept impossible as an answer. lang (`str`, *optional*): Language to use while running OCR. Defaults to english. tesseract_config (`str`, *optional*): Additional flags to pass to tesseract while running OCR. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys: - **score** (`float`) -- The probability associated to the answer. - **start** (`int`) -- The start word index of the answer (in the OCR'd version of the input or provided `word_boxes`). 
- **end** (`int`) -- The end word index of the answer (in the OCR'd version of the input or provided `word_boxes`). - **answer** (`str`) -- The answer to the question. - **words** (`list[int]`) -- The index of each word/box pair that is in the answer """ if isinstance(question, str): inputs = {"question": question, "image": image} if word_boxes is not None: inputs["word_boxes"] = word_boxes else: inputs = image return super().__call__(inputs, **kwargs) def preprocess( self, input, padding="do_not_pad", doc_stride=None, max_seq_len=None, word_boxes: Tuple[str, List[float]] = None, lang=None, tesseract_config="", timeout=None, ): # NOTE: This code mirrors the code in question answering and will be implemented in a follow up PR # to support documents with enough tokens that overflow the model's window if max_seq_len is None: max_seq_len = self.tokenizer.model_max_length if doc_stride is None: doc_stride = min(max_seq_len // 2, 256) image = None image_features = {} if input.get("image", None) is not None: image = load_image(input["image"], timeout=timeout) if self.image_processor is not None: image_inputs = self.image_processor(images=image, return_tensors=self.framework) if self.framework == "pt": image_inputs = image_inputs.to(self.torch_dtype) image_features.update(image_inputs) elif self.feature_extractor is not None: image_features.update(self.feature_extractor(images=image, return_tensors=self.framework)) elif self.model_type == ModelType.VisionEncoderDecoder: raise ValueError("If you are using a VisionEncoderDecoderModel, you must provide a feature extractor") words, boxes = None, None if not self.model_type == ModelType.VisionEncoderDecoder: if "word_boxes" in input: words = [x[0] for x in input["word_boxes"]] boxes = [x[1] for x in input["word_boxes"]] elif "words" in image_features and "boxes" in image_features: words = image_features.pop("words")[0] boxes = image_features.pop("boxes")[0] elif image is not None: if not TESSERACT_LOADED: raise ValueError( "If you provide an image without word_boxes, then the pipeline will run OCR using Tesseract," " but pytesseract is not available" ) if TESSERACT_LOADED: words, boxes = apply_tesseract(image, lang=lang, tesseract_config=tesseract_config) else: raise ValueError( "You must provide an image or word_boxes. 
If you provide an image, the pipeline will automatically" " run OCR to derive words and boxes" ) if self.tokenizer.padding_side != "right": raise ValueError( "Document question answering only supports tokenizers whose padding side is 'right', not" f" {self.tokenizer.padding_side}" ) if self.model_type == ModelType.VisionEncoderDecoder: task_prompt = f'<s_docvqa><s_question>{input["question"]}</s_question><s_answer>' # Adapted from https://huggingface.co/spaces/nielsr/donut-docvqa/blob/main/app.py encoding = { "inputs": image_features["pixel_values"], "decoder_input_ids": self.tokenizer( task_prompt, add_special_tokens=False, return_tensors=self.framework ).input_ids, "return_dict_in_generate": True, } yield { **encoding, "p_mask": None, "word_ids": None, "words": None, "output_attentions": True, "is_last": True, } else: tokenizer_kwargs = {} if self.model_type == ModelType.LayoutLM: tokenizer_kwargs["text"] = input["question"].split() tokenizer_kwargs["text_pair"] = words tokenizer_kwargs["is_split_into_words"] = True else: tokenizer_kwargs["text"] = [input["question"]] tokenizer_kwargs["text_pair"] = [words] tokenizer_kwargs["boxes"] = [boxes] encoding = self.tokenizer( padding=padding, max_length=max_seq_len, stride=doc_stride, return_token_type_ids=True, truncation="only_second", return_overflowing_tokens=True, **tokenizer_kwargs, ) # TODO: check why slower `LayoutLMTokenizer` and `LayoutLMv2Tokenizer` don't have this key in outputs # FIXME: ydshieh and/or Narsil encoding.pop("overflow_to_sample_mapping", None) # We do not use this num_spans = len(encoding["input_ids"]) # p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens which can be in an answer) # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens) # This logic mirrors the logic in the question_answering pipeline p_mask = [[tok != 1 for tok in encoding.sequence_ids(span_id)] for span_id in range(num_spans)] for span_idx in range(num_spans): if self.framework == "pt": span_encoding = {k: torch.tensor(v[span_idx : span_idx + 1]) for (k, v) in encoding.items()} if "pixel_values" in image_features: span_encoding["image"] = image_features["pixel_values"] else: raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") input_ids_span_idx = encoding["input_ids"][span_idx] # keep the cls_token unmasked (some models use it to indicate unanswerable questions) if self.tokenizer.cls_token_id is not None: cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0] for cls_index in cls_indices: p_mask[span_idx][cls_index] = 0 # For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000] # for SEP tokens, and the word's bounding box for words in the original document.
if "boxes" not in tokenizer_kwargs: bbox = [] for input_id, sequence_id, word_id in zip( encoding.input_ids[span_idx], encoding.sequence_ids(span_idx), encoding.word_ids(span_idx), ): if sequence_id == 1: bbox.append(boxes[word_id]) elif input_id == self.tokenizer.sep_token_id: bbox.append([1000] * 4) else: bbox.append([0] * 4) if self.framework == "pt": span_encoding["bbox"] = torch.tensor(bbox).unsqueeze(0) elif self.framework == "tf": raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") yield { **span_encoding, "p_mask": p_mask[span_idx], "word_ids": encoding.word_ids(span_idx), "words": words, "is_last": span_idx == num_spans - 1, } def _forward(self, model_inputs, **generate_kwargs): p_mask = model_inputs.pop("p_mask", None) word_ids = model_inputs.pop("word_ids", None) words = model_inputs.pop("words", None) is_last = model_inputs.pop("is_last", False) if self.model_type == ModelType.VisionEncoderDecoder: # User-defined `generation_config` passed to the pipeline call take precedence if "generation_config" not in generate_kwargs: generate_kwargs["generation_config"] = self.generation_config model_outputs = self.model.generate(**model_inputs, **generate_kwargs) else: model_outputs = self.model(**model_inputs) model_outputs = dict(model_outputs.items()) model_outputs["p_mask"] = p_mask model_outputs["word_ids"] = word_ids model_outputs["words"] = words model_outputs["attention_mask"] = model_inputs.get("attention_mask", None) model_outputs["is_last"] = is_last return model_outputs def postprocess(self, model_outputs, top_k=1, **kwargs): if self.model_type == ModelType.VisionEncoderDecoder: answers = [self.postprocess_encoder_decoder_single(o) for o in model_outputs] else: answers = self.postprocess_extractive_qa(model_outputs, top_k=top_k, **kwargs) answers = sorted(answers, key=lambda x: x.get("score", 0), reverse=True)[:top_k] return answers def postprocess_encoder_decoder_single(self, model_outputs, **kwargs): sequence = self.tokenizer.batch_decode(model_outputs["sequences"])[0] # TODO: A lot of this logic is specific to Donut and should probably be handled in the tokenizer # (see https://github.com/huggingface/transformers/pull/18414/files#r961747408 for more context). 
sequence = sequence.replace(self.tokenizer.eos_token, "").replace(self.tokenizer.pad_token, "") sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token ret = { "answer": None, } answer = re.search(r"<s_answer>(.*)</s_answer>", sequence) if answer is not None: ret["answer"] = answer.group(1).strip() return ret def postprocess_extractive_qa( self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, **kwargs ): min_null_score = 1000000 # large and positive answers = [] for output in model_outputs: words = output["words"] if self.framework == "pt" and output["start_logits"].dtype in (torch.bfloat16, torch.float16): output["start_logits"] = output["start_logits"].float() if self.framework == "pt" and output["end_logits"].dtype in (torch.bfloat16, torch.float16): output["end_logits"] = output["end_logits"].float() starts, ends, scores, min_null_score = select_starts_ends( start=output["start_logits"], end=output["end_logits"], p_mask=output["p_mask"], attention_mask=output["attention_mask"].numpy() if output.get("attention_mask", None) is not None else None, min_null_score=min_null_score, top_k=top_k, handle_impossible_answer=handle_impossible_answer, max_answer_len=max_answer_len, ) word_ids = output["word_ids"] for start, end, score in zip(starts, ends, scores): word_start, word_end = word_ids[start], word_ids[end] if word_start is not None and word_end is not None: answers.append( { "score": float(score), "answer": " ".join(words[word_start : word_end + 1]), "start": word_start, "end": word_end, } ) if handle_impossible_answer: answers.append({"score": min_null_score, "answer": "", "start": 0, "end": 0}) return answers
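# Usage sketch (illustrative: the checkpoint name, image path, and question below are assumptions
# chosen for demonstration, not pinned by this module):
#
#     >>> from transformers import pipeline
#     >>> dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#     >>> dqa(image="invoice.png", question="What is the invoice number?")
#     # returns a list of dicts with "score", "answer", "start", and "end" keys,
#     # sorted by score (see `postprocess` and `postprocess_extractive_qa` above)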
transformers/src/transformers/pipelines/document_question_answering.py/0
{ "file_path": "transformers/src/transformers/pipelines/document_question_answering.py", "repo_id": "transformers", "token_count": 10814 }
import enum import itertools import types from typing import Dict from ..utils import ModelOutput, add_end_docstrings, is_tf_available, is_torch_available from .base import Pipeline, build_pipeline_init_args if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from .pt_utils import KeyDataset if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES class ReturnType(enum.Enum): TENSORS = 0 NEW_TEXT = 1 FULL_TEXT = 2 class Chat: """This class is intended to just be used internally in this pipeline and not exposed to users. We convert chats to this format because the rest of the pipeline code tends to assume that lists of messages are actually a batch of samples rather than messages in the same conversation.""" def __init__(self, messages: Dict): for message in messages: if not ("role" in message and "content" in message): raise ValueError("When passing chat dicts as input, each dict must have a 'role' and 'content' key.") self.messages = messages @add_end_docstrings(build_pipeline_init_args(has_tokenizer=True)) class TextGenerationPipeline(Pipeline): """ Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a specified text prompt. When the underlying model is a conversational model, it can also accept one or more chats, in which case the pipeline will operate in chat mode and will continue the chat(s) by adding its response(s). Each chat takes the form of a list of dicts, where each dict contains "role" and "content" keys. Examples: ```python >>> from transformers import pipeline >>> generator = pipeline(model="openai-community/gpt2") >>> generator("I can't believe you did such a ", do_sample=False) [{'generated_text': "I can't believe you did such a icky thing to me. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I"}] >>> # These parameters will return suggestions, and only the newly created text making it easier for prompting suggestions. >>> outputs = generator("My tart needs some", num_return_sequences=4, return_full_text=False) ``` ```python >>> from transformers import pipeline >>> generator = pipeline(model="HuggingFaceH4/zephyr-7b-beta") >>> # Zephyr-beta is a conversational model, so let's pass it a chat instead of a single string >>> generator([{"role": "user", "content": "What is the capital of France? Answer in one word."}], do_sample=False, max_new_tokens=2) [{'generated_text': [{'role': 'user', 'content': 'What is the capital of France? Answer in one word.'}, {'role': 'assistant', 'content': 'Paris'}]}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about text generation parameters in [Text generation strategies](../generation_strategies) and [Text generation](text_generation). This language generation pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"text-generation"`. The models that this pipeline can use are models that have been trained with an autoregressive language modeling objective. See the list of available [text completion models](https://huggingface.co/models?filter=text-generation) and the list of [conversational models](https://huggingface.co/models?other=conversational) on [huggingface.co/models]. 
""" # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia # in https://github.com/rusiaaman/XLNet-gen#methodology # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e XL_PREFIX = """ In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING_NAMES ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. prefix = None if self.prefix is not None: prefix = self.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. prefix = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params) self._preprocess_params = {**self._preprocess_params, **preprocess_params} self._forward_params = {**self._forward_params, **forward_params} def _sanitize_parameters( self, return_full_text=None, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, stop_sequence=None, truncation=None, max_length=None, continue_final_message=None, **generate_kwargs, ): preprocess_params = {} add_special_tokens = False if "add_special_tokens" in generate_kwargs: add_special_tokens = preprocess_params["add_special_tokens"] = generate_kwargs.pop("add_special_tokens") if "padding" in generate_kwargs: preprocess_params["padding"] = generate_kwargs.pop("padding") if truncation is not None: preprocess_params["truncation"] = truncation if max_length is not None: preprocess_params["max_length"] = max_length generate_kwargs["max_length"] = max_length if prefix is not None: preprocess_params["prefix"] = prefix if prefix: prefix_inputs = self.tokenizer( prefix, padding=False, add_special_tokens=add_special_tokens, return_tensors=self.framework ) generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" " [None, 'hole']" ) preprocess_params["handle_long_generation"] = handle_long_generation if continue_final_message is not None: preprocess_params["continue_final_message"] = continue_final_message preprocess_params.update(generate_kwargs) forward_params = generate_kwargs postprocess_params = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError("`return_text` is mutually exclusive with `return_full_text`") if return_tensors is not None: raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`") return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError("`return_text` is mutually exclusive with `return_tensors`") return_type = ReturnType.TENSORS if return_type is not None: postprocess_params["return_type"] = return_type if clean_up_tokenization_spaces is not None: postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces if continue_final_message is not None: postprocess_params["continue_final_message"] = continue_final_message if stop_sequence is not None: stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False) generate_kwargs["eos_token_id"] = stop_sequence_ids if self.assistant_model is not None: forward_params["assistant_model"] = self.assistant_model if self.assistant_tokenizer is not None: forward_params["tokenizer"] = self.tokenizer forward_params["assistant_tokenizer"] = self.assistant_tokenizer return preprocess_params, forward_params, postprocess_params # overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments def _parse_and_tokenize(self, *args, **kwargs): """ Parse arguments and tokenize """ # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({"add_space_before_punct_symbol": True}) return super()._parse_and_tokenize(*args, **kwargs) def __call__(self, text_inputs, **kwargs): """ Complete the prompt(s) 
given as inputs.

        Args:
            text_inputs (`str`, `List[str]`, `List[Dict[str, str]]`, or `List[List[Dict[str, str]]]`):
                One or several prompts (or one list of prompts) to complete. If strings or a list of strings are
                passed, this pipeline will continue each prompt. Alternatively, a "chat", in the form of a list
                of dicts with "role" and "content" keys, can be passed, or a list of such chats. When chats are
                passed, the model's chat template will be used to format them before passing them to the model.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Returns the tensors of predictions (as token indices) in the outputs. If set to
                `True`, the decoded text is not returned.
            return_text (`bool`, *optional*):
                Returns the decoded texts in the outputs.
            return_full_text (`bool`, *optional*, defaults to `True`):
                If set to `False` only added text is returned, otherwise the full text is returned. Cannot be
                specified at the same time as `return_text`.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
                Whether or not to clean up the potential extra spaces in the text output.
            continue_final_message (`bool`, *optional*):
                This indicates that you want the model to continue the last message in the input chat rather than
                starting a new one, allowing you to "prefill" its response. By default this is `True` when the
                final message in the input chat has the `assistant` role and `False` otherwise, but you can
                manually override that behaviour by setting this flag.
            prefix (`str`, *optional*):
                Prefix added to the prompt.
            handle_long_generation (`str`, *optional*):
                By default, this pipeline does not handle long generation (inputs that, in one form or another,
                exceed the model's maximum length). There is no perfect way to address this (more info:
                https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227). This provides
                common strategies to work around the problem depending on your use case.

                - `None`: default strategy, where nothing in particular happens
                - `"hole"`: truncates the left of the input and leaves a gap wide enough to let generation happen
                  (this might truncate a lot of the prompt, and is not suitable when the generation itself exceeds
                  the model's capacity)

            generate_kwargs (`dict`, *optional*):
                Additional keyword arguments to pass along to the generate method of the model (see the generate
                method corresponding to your framework [here](./text_generation)).

        Return:
            A list or a list of lists of `dict`: Returns one of the following dictionaries (cannot return a
            combination of both `generated_text` and `generated_token_ids`):

            - **generated_text** (`str`, present when `return_text=True`) -- The generated text.
            - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The
              token ids of the generated text.
""" if isinstance( text_inputs, (list, tuple, types.GeneratorType, KeyDataset) if is_torch_available() else (list, tuple, types.GeneratorType), ): if isinstance(text_inputs, types.GeneratorType): text_inputs, _ = itertools.tee(text_inputs) text_inputs, first_item = (x for x in text_inputs), next(_) else: first_item = text_inputs[0] if isinstance(first_item, (list, tuple, dict)): # We have one or more prompts in list-of-dicts format, so this is chat mode if isinstance(first_item, dict): return super().__call__(Chat(text_inputs), **kwargs) else: chats = (Chat(chat) for chat in text_inputs) # 🐈 🐈 🐈 if isinstance(text_inputs, types.GeneratorType): return super().__call__(chats, **kwargs) else: return super().__call__(list(chats), **kwargs) return super().__call__(text_inputs, **kwargs) def preprocess( self, prompt_text, prefix="", handle_long_generation=None, add_special_tokens=None, truncation=None, padding=None, max_length=None, continue_final_message=None, **generate_kwargs, ): # Only set non-None tokenizer kwargs, so as to rely on the tokenizer's defaults tokenizer_kwargs = { "add_special_tokens": add_special_tokens, "truncation": truncation, "padding": padding, "max_length": max_length, } tokenizer_kwargs = {key: value for key, value in tokenizer_kwargs.items() if value is not None} if isinstance(prompt_text, Chat): tokenizer_kwargs.pop("add_special_tokens", None) # ignore add_special_tokens on chats # If the user passes a chat that ends in an assistant message, we treat it as a prefill by default # because very few models support multiple separate, consecutive assistant messages if continue_final_message is None: continue_final_message = prompt_text.messages[-1]["role"] == "assistant" inputs = self.tokenizer.apply_chat_template( prompt_text.messages, add_generation_prompt=not continue_final_message, continue_final_message=continue_final_message, return_dict=True, return_tensors=self.framework, **tokenizer_kwargs, ) else: inputs = self.tokenizer(prefix + prompt_text, return_tensors=self.framework, **tokenizer_kwargs) inputs["prompt_text"] = prompt_text if handle_long_generation == "hole": cur_len = inputs["input_ids"].shape[-1] if "max_new_tokens" in generate_kwargs: new_tokens = generate_kwargs["max_new_tokens"] else: new_tokens = generate_kwargs.get("max_length", self.generation_config.max_length) - cur_len if new_tokens < 0: raise ValueError("We cannot infer how many new tokens are expected") if cur_len + new_tokens > self.tokenizer.model_max_length: keep_length = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( "We cannot use `hole` to handle this generation the number of desired tokens exceeds the" " models max length" ) inputs["input_ids"] = inputs["input_ids"][:, -keep_length:] if "attention_mask" in inputs: inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:] return inputs def _forward(self, model_inputs, **generate_kwargs): input_ids = model_inputs["input_ids"] attention_mask = model_inputs.get("attention_mask", None) # Allow empty prompts if input_ids.shape[1] == 0: input_ids = None attention_mask = None in_b = 1 else: in_b = input_ids.shape[0] prompt_text = model_inputs.pop("prompt_text") # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
prefix_length = generate_kwargs.pop("prefix_length", 0) if prefix_length > 0: has_max_new_tokens = "max_new_tokens" in generate_kwargs or ( "generation_config" in generate_kwargs and generate_kwargs["generation_config"].max_new_tokens is not None ) if not has_max_new_tokens: generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.generation_config.max_length generate_kwargs["max_length"] += prefix_length has_min_new_tokens = "min_new_tokens" in generate_kwargs or ( "generation_config" in generate_kwargs and generate_kwargs["generation_config"].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # User-defined `generation_config` passed to the pipeline call take precedence if "generation_config" not in generate_kwargs: generate_kwargs["generation_config"] = self.generation_config output = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs) if isinstance(output, ModelOutput): generated_sequence = output.sequences other_outputs = {k: v for k, v in output.items() if k != "sequences"} out_b = generated_sequence.shape[0] if self.framework == "pt": for key, value in other_outputs.items(): if isinstance(value, torch.Tensor) and value.shape[0] == out_b: other_outputs[key] = value.reshape(in_b, out_b // in_b, *value.shape[1:]) if isinstance(value, tuple) and len(value[0]) == out_b: value = torch.stack(value).swapaxes(0, 1) other_outputs[key] = value elif self.framework == "tf": for key, value in other_outputs.items(): if isinstance(value, tf.Tensor) and value.shape[0] == out_b: other_outputs[key] = tf.reshape(value, (in_b, out_b // in_b, *value.shape[1:])) if isinstance(value, tuple) and len(value[0]) == out_b: value = tf.stack(value).swapaxes(0, 1) other_outputs[key] = value else: generated_sequence = output other_outputs = {} out_b = generated_sequence.shape[0] if self.framework == "pt": generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:]) elif self.framework == "tf": generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:])) model_outputs = { "generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text, } model_outputs.update(other_outputs) return model_outputs def postprocess( self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True, continue_final_message=None, ): generated_sequence = model_outputs["generated_sequence"][0] input_ids = model_outputs["input_ids"] prompt_text = model_outputs["prompt_text"] generated_sequence = generated_sequence.numpy().tolist() records = [] other_outputs = model_outputs.get("additional_outputs", {}) splitted_keys = {} if other_outputs: if self.framework == "pt": for k, v in other_outputs.items(): if isinstance(v, torch.Tensor) and v.shape[0] == len(generated_sequence): splitted_keys[k] = v.numpy().tolist() elif self.framework == "tf": for k, v in other_outputs.items(): if isinstance(v, tf.Tensor) and v.shape[0] == len(generated_sequence): splitted_keys[k] = v.numpy().tolist() for idx, sequence in enumerate(generated_sequence): if return_type == ReturnType.TENSORS: record = {"generated_token_ids": sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text text = self.tokenizer.decode( sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) # Remove PADDING prompt of the sequence if XLNet or 
Transfo-XL model is used if input_ids is None: prompt_length = 0 else: prompt_length = len( self.tokenizer.decode( input_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) ) all_text = text[prompt_length:] if return_type == ReturnType.FULL_TEXT: if isinstance(prompt_text, str): all_text = prompt_text + all_text elif isinstance(prompt_text, Chat): if continue_final_message is None: # If the user passes a chat ending in an assistant message, we treat it as a prefill by # default because very few models support multiple separate, consecutive assistant messages continue_final_message = prompt_text.messages[-1]["role"] == "assistant" if continue_final_message: # With assistant prefill, concat onto the end of the last message all_text = list(prompt_text.messages)[:-1] + [ { "role": prompt_text.messages[-1]["role"], "content": prompt_text.messages[-1]["content"] + all_text, } ] else: # When we're not starting from a prefill, the output is a new assistant message all_text = list(prompt_text.messages) + [{"role": "assistant", "content": all_text}] record = {"generated_text": all_text} for key, values in splitted_keys.items(): record[key] = values[idx] records.append(record) return records
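# Usage sketch (illustrative: the checkpoint is the same small model used in the class docstring,
# and `long_prompt` is a placeholder for any over-long input string):
#
#     >>> from transformers import pipeline
#     >>> generator = pipeline("text-generation", model="openai-community/gpt2")
#     >>> # `handle_long_generation="hole"` keeps only the rightmost tokens of the prompt so that
#     >>> # `max_new_tokens` still fits inside the model window (see `preprocess` above).
#     >>> generator(long_prompt, handle_long_generation="hole", max_new_tokens=32, return_full_text=False)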
transformers/src/transformers/pipelines/text_generation.py/0
{ "file_path": "transformers/src/transformers/pipelines/text_generation.py", "repo_id": "transformers", "token_count": 10861 }
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING, Dict, List, Union from .base import HfQuantizer if TYPE_CHECKING: from ..modeling_utils import PreTrainedModel from ..utils import is_accelerate_available, is_torch_available, logging if is_torch_available(): import torch logger = logging.get_logger(__name__) class BitNetHfQuantizer(HfQuantizer): """ 1.58-bit quantization from BitNet quantization method: Before loading: it converts the linear layers into BitLinear layers during loading. Checkout the paper introducing this method : https://arxiv.org/pdf/2402.17764 """ requires_parameters_quantization = False requires_calibration = True required_packages = ["accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) self.quantization_config = quantization_config def validate_environment(self, *args, **kwargs): if not is_accelerate_available(): raise ImportError("Loading a BitNet quantized model requires accelerate (`pip install accelerate`)") if kwargs.get("from_tf", False) or kwargs.get("from_flax", False): raise ValueError( "Loading ternary weights from tf/flax is currently not supported, please make" " sure the weights are in PyTorch format." ) if not torch.cuda.is_available(): logger.warning_once( "You don't have a GPU available to load the model, the inference will be slow because of weight unpacking" ) return device_map = kwargs.get("device_map", None) if device_map is None: logger.warning_once( "You have loaded a BitNet model on CPU and have a CUDA device available, make sure to set " "your model on a GPU device in order to run your model." ) elif device_map is not None: if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()): raise ValueError( "You are attempting to load a BitNet model with a device_map that contains a CPU or disk device." "This is not supported. Please remove the CPU or disk device from the device_map." 
) def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): return model def _process_model_before_weight_loading( self, model: "PreTrainedModel", device_map, keep_in_fp32_modules: List[str] = [], **kwargs, ): from ..integrations import get_keys_to_not_convert, replace_with_bitnet_linear self.modules_to_not_convert = get_keys_to_not_convert(model) if self.quantization_config.modules_to_not_convert is not None: self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert) model = replace_with_bitnet_linear( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config, pre_quantized=self.pre_quantized, ) def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: max_memory = {key: val * 0.90 for key, val in max_memory.items()} return max_memory def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": target_dtype = torch.int8 return target_dtype def is_serializable(self, safe_serialization=None): return True @property def is_trainable(self) -> bool: return False
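# Usage sketch (illustrative: the checkpoint name is an assumption; any repo whose config carries a
# BitNet-style `quantization_config` would exercise this quantizer). Because `requires_calibration`
# is True, the weights must already be quantized: loading only swaps nn.Linear layers for BitLinear.
#
#     >>> from transformers import AutoModelForCausalLM
#     >>> model = AutoModelForCausalLM.from_pretrained(
#     ...     "HF1BitLLM/Llama3-8B-1.58-100B-tokens", device_map="cuda"
#     ... )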
transformers/src/transformers/quantizers/quantizer_bitnet.py/0
{ "file_path": "transformers/src/transformers/quantizers/quantizer_bitnet.py", "repo_id": "transformers", "token_count": 1669 }
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging logger = logging.get_logger(__name__) # TODO: should be moved to `utils` after refactoring of SageMakerTrainer def is_sagemaker_model_parallel_available(): # Get the sagemaker specific mp parameters from smp_options variable. smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}") try: # Parse it and check the field "partitions" is included, it is required for model parallel. smp_options = json.loads(smp_options) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". mpi_options = json.loads(mpi_options) if not mpi_options.get("sagemaker_mpi_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed") is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class SageMakerTrainingArguments(TrainingArguments): mp_parameters: str = field( default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"}, ) def __post_init__(self): super().__post_init__() warnings.warn( "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use " "`TrainingArguments` instead.", FutureWarning, ) @cached_property def _setup_devices(self) -> "torch.device": logger.info("PyTorch: setting up devices") if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( "torch.distributed process group is initialized, but local_rank == -1. " "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" ) if self.no_cuda: device = torch.device("cpu") self._n_gpu = 0 elif is_sagemaker_model_parallel_available(): local_rank = smp.local_rank() device = torch.device("cuda", local_rank) self._n_gpu = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta) self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK")) device = torch.device("cuda", self.local_rank) self._n_gpu = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. 
Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. self._n_gpu = torch.cuda.device_count() else: # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta) device = torch.device("cuda", self.local_rank) self._n_gpu = 1 if device.type == "cuda": torch.cuda.set_device(device) return device @property def world_size(self): if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def place_model_on_device(self): return not is_sagemaker_model_parallel_available() @property def _no_sync_in_gradient_accumulation(self): return False
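# Worked example (values are illustrative): `is_sagemaker_model_parallel_available()` returns True
# only when both environment variables parse as JSON, SM_HP_MP_PARAMETERS contains a "partitions"
# key, SM_FRAMEWORK_PARAMS enables "sagemaker_mpi_enabled", and `smdistributed` is importable:
#
#     SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
#     SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'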
transformers/src/transformers/sagemaker/training_args_sm.py/0
{ "file_path": "transformers/src/transformers/sagemaker/training_args_sm.py", "repo_id": "transformers", "token_count": 2130 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Collection of utils to be used by backbones and their components.""" import enum import inspect from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union if TYPE_CHECKING: from ..configuration_utils import PretrainedConfig class BackboneType(enum.Enum): TIMM = "timm" TRANSFORMERS = "transformers" def verify_out_features_out_indices( out_features: Optional[Iterable[str]], out_indices: Optional[Iterable[int]], stage_names: Optional[Iterable[str]] ): """ Verify that out_indices and out_features are valid for the given stage_names. """ if stage_names is None: raise ValueError("Stage_names must be set for transformers backbones") if out_features is not None: if not isinstance(out_features, (list,)): raise ValueError(f"out_features must be a list got {type(out_features)}") if any(feat not in stage_names for feat in out_features): raise ValueError(f"out_features must be a subset of stage_names: {stage_names} got {out_features}") if len(out_features) != len(set(out_features)): raise ValueError(f"out_features must not contain any duplicates, got {out_features}") if out_features != (sorted_feats := [feat for feat in stage_names if feat in out_features]): raise ValueError( f"out_features must be in the same order as stage_names, expected {sorted_feats} got {out_features}" ) if out_indices is not None: if not isinstance(out_indices, list): raise ValueError(f"out_indices must be a list, got {type(out_indices)}") # Convert negative indices to their positive equivalent: [-1,] -> [len(stage_names) - 1,] positive_indices = tuple(idx % len(stage_names) if idx < 0 else idx for idx in out_indices) if any(idx for idx in positive_indices if idx not in range(len(stage_names))): raise ValueError(f"out_indices must be valid indices for stage_names {stage_names}, got {out_indices}") if len(positive_indices) != len(set(positive_indices)): msg = f"out_indices must not contain any duplicates, got {out_indices}" msg += f"(equivalent to {positive_indices}))" if positive_indices != out_indices else "" raise ValueError(msg) if positive_indices != tuple(sorted(positive_indices)): sorted_negative = [idx for _, idx in sorted(zip(positive_indices, out_indices), key=lambda x: x[0])] raise ValueError( f"out_indices must be in the same order as stage_names, expected {sorted_negative} got {out_indices}" ) if out_features is not None and out_indices is not None: if len(out_features) != len(out_indices): raise ValueError("out_features and out_indices should have the same length if both are set") if out_features != [stage_names[idx] for idx in out_indices]: raise ValueError("out_features and out_indices should correspond to the same stages if both are set") def _align_output_features_output_indices( out_features: Optional[List[str]], out_indices: Optional[Union[List[int], Tuple[int]]], stage_names: List[str], ): """ Finds the corresponding `out_features` and `out_indices` for the given `stage_names`. 
The logic is as follows:
        - `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the
          `out_indices`.
        - `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the
          `out_features`.
        - `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.
        - `out_indices` and `out_features` set: input `out_indices` and `out_features` are returned.

    Args:
        out_features (`List[str]`): The names of the features for the backbone to output.
        out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.
        stage_names (`List[str]`): The names of the stages of the backbone.
    """
    if out_indices is None and out_features is None:
        out_indices = [len(stage_names) - 1]
        out_features = [stage_names[-1]]
    elif out_indices is None and out_features is not None:
        out_indices = [stage_names.index(layer) for layer in out_features]
    elif out_features is None and out_indices is not None:
        out_features = [stage_names[idx] for idx in out_indices]
    return out_features, out_indices


def get_aligned_output_features_output_indices(
    out_features: Optional[List[str]],
    out_indices: Optional[Union[List[int], Tuple[int]]],
    stage_names: List[str],
) -> Tuple[List[str], List[int]]:
    """
    Get the `out_features` and `out_indices` so that they are aligned.

    The logic is as follows:
        - `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the
          `out_indices`.
        - `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the
          `out_features`.
        - `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.
        - `out_indices` and `out_features` set: they are verified to be aligned.

    Args:
        out_features (`List[str]`): The names of the features for the backbone to output.
        out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.
        stage_names (`List[str]`): The names of the stages of the backbone.
    """
    out_indices = list(out_indices) if out_indices is not None else None
    # First verify that the out_features and out_indices are valid
    verify_out_features_out_indices(out_features=out_features, out_indices=out_indices, stage_names=stage_names)
    output_features, output_indices = _align_output_features_output_indices(
        out_features=out_features, out_indices=out_indices, stage_names=stage_names
    )
    # Verify that the aligned out_features and out_indices are valid
    verify_out_features_out_indices(out_features=output_features, out_indices=output_indices, stage_names=stage_names)
    return output_features, output_indices


class BackboneMixin:
    backbone_type: Optional[BackboneType] = None

    def _init_timm_backbone(self, config) -> None:
        """
        Initialize the backbone model from timm.

        The backbone must already be loaded to `self._backbone`.
        """
        if getattr(self, "_backbone", None) is None:
            raise ValueError("self._backbone must be set before calling _init_timm_backbone")

        # These will disagree with the defaults for the transformers models e.g.
for resnet50 # the transformer model has out_features = ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] # the timm model has out_features = ['act', 'layer1', 'layer2', 'layer3', 'layer4'] self.stage_names = [stage["module"] for stage in self._backbone.feature_info.info] self.num_features = [stage["num_chs"] for stage in self._backbone.feature_info.info] # In some timm versions, out_indices reflects the input type of out_indices on the `create_model` call, # in later versions >= 1, it is always a tuple out_indices = list(self._backbone.feature_info.out_indices) out_features = self._backbone.feature_info.module_name() # We verify the out indices and out features are valid verify_out_features_out_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) self._out_features, self._out_indices = out_features, out_indices def _init_transformers_backbone(self, config) -> None: stage_names = getattr(config, "stage_names") out_features = getattr(config, "out_features", None) out_indices = getattr(config, "out_indices", None) self.stage_names = stage_names self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=stage_names ) # Number of channels for each stage. This is set in the transformer backbone model init self.num_features = None def _init_backbone(self, config) -> None: """ Method to initialize the backbone. This method is called by the constructor of the base class after the pretrained model weights have been loaded. """ self.config = config self.use_timm_backbone = getattr(config, "use_timm_backbone", False) self.backbone_type = BackboneType.TIMM if self.use_timm_backbone else BackboneType.TRANSFORMERS if self.backbone_type == BackboneType.TIMM: self._init_timm_backbone(config) elif self.backbone_type == BackboneType.TRANSFORMERS: self._init_transformers_backbone(config) else: raise ValueError(f"backbone_type {self.backbone_type} not supported.") @property def out_features(self): return self._out_features @out_features.setter def out_features(self, out_features: List[str]): """ Set the out_features attribute. This will also update the out_indices attribute to match the new out_features. """ self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=None, stage_names=self.stage_names ) @property def out_indices(self): return self._out_indices @out_indices.setter def out_indices(self, out_indices: Union[Tuple[int], List[int]]): """ Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices. """ self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=None, out_indices=out_indices, stage_names=self.stage_names ) @property def out_feature_channels(self): # the current backbones will output the number of channels for each stage # even if that stage is not in the out_features list. 
return {stage: self.num_features[i] for i, stage in enumerate(self.stage_names)} @property def channels(self): return [self.out_feature_channels[name] for name in self.out_features] def forward_with_filtered_kwargs(self, *args, **kwargs): signature = dict(inspect.signature(self.forward).parameters) filtered_kwargs = {k: v for k, v in kwargs.items() if k in signature} return self(*args, **filtered_kwargs) def forward( self, pixel_values, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ): raise NotImplementedError("This method should be implemented by the derived class.") def to_dict(self): """ Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to include the `out_features` and `out_indices` attributes. """ output = super().to_dict() output["out_features"] = output.pop("_out_features") output["out_indices"] = output.pop("_out_indices") return output class BackboneConfigMixin: """ A Mixin to support handling the `out_features` and `out_indices` attributes for the backbone configurations. """ @property def out_features(self): return self._out_features @out_features.setter def out_features(self, out_features: List[str]): """ Set the out_features attribute. This will also update the out_indices attribute to match the new out_features. """ self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=None, stage_names=self.stage_names ) @property def out_indices(self): return self._out_indices @out_indices.setter def out_indices(self, out_indices: Union[Tuple[int], List[int]]): """ Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices. """ self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=None, out_indices=out_indices, stage_names=self.stage_names ) def to_dict(self): """ Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to include the `out_features` and `out_indices` attributes. """ output = super().to_dict() output["out_features"] = output.pop("_out_features") output["out_indices"] = output.pop("_out_indices") return output def load_backbone(config): """ Loads the backbone model from a config object. If the config is from the backbone model itself, then we return a backbone model with randomly initialized weights. If the config is from the parent model of the backbone model itself, then we load the pretrained backbone weights if specified. """ from transformers import AutoBackbone, AutoConfig backbone_config = getattr(config, "backbone_config", None) use_timm_backbone = getattr(config, "use_timm_backbone", None) use_pretrained_backbone = getattr(config, "use_pretrained_backbone", None) backbone_checkpoint = getattr(config, "backbone", None) backbone_kwargs = getattr(config, "backbone_kwargs", None) backbone_kwargs = {} if backbone_kwargs is None else backbone_kwargs if backbone_kwargs and backbone_config is not None: raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.") # If there is a backbone_config and a backbone checkpoint, and use_pretrained_backbone=False then the desired # behaviour is ill-defined: do you want to load from the checkpoint's config or the backbone_config? 
    if backbone_config is not None and backbone_checkpoint is not None and use_pretrained_backbone is not None:
        raise ValueError("Cannot specify both config.backbone_config and config.backbone")

    # If none of the following are set, the config passed in is the backbone's own config, so we can build
    # from it directly; otherwise the config comes from a parent model which contains a backbone.
    if backbone_config is None and use_timm_backbone is None and backbone_checkpoint is None:
        return AutoBackbone.from_config(config=config, **backbone_kwargs)

    # config from the parent model that has a backbone
    if use_timm_backbone:
        if backbone_checkpoint is None:
            raise ValueError("config.backbone must be set if use_timm_backbone is True")
        # Because of how timm backbones were originally added to models, we need to pass in use_pretrained_backbone
        # to determine whether to load the pretrained weights.
        backbone = AutoBackbone.from_pretrained(
            backbone_checkpoint,
            use_timm_backbone=use_timm_backbone,
            use_pretrained_backbone=use_pretrained_backbone,
            **backbone_kwargs,
        )
    elif use_pretrained_backbone:
        if backbone_checkpoint is None:
            raise ValueError("config.backbone must be set if use_pretrained_backbone is True")
        backbone = AutoBackbone.from_pretrained(backbone_checkpoint, **backbone_kwargs)
    else:
        if backbone_config is None and backbone_checkpoint is None:
            raise ValueError("Either config.backbone_config or config.backbone must be set")
        if backbone_config is None:
            backbone_config = AutoConfig.from_pretrained(backbone_checkpoint, **backbone_kwargs)
        backbone = AutoBackbone.from_config(config=backbone_config)
    return backbone


def verify_backbone_config_arguments(
    use_timm_backbone: bool,
    use_pretrained_backbone: bool,
    backbone: Optional[str],
    backbone_config: Optional[Union[dict, "PretrainedConfig"]],
    backbone_kwargs: Optional[dict],
):
    """
    Verify that the config arguments to be passed to `load_backbone` are valid.
    """
    if backbone_config is not None and backbone is not None:
        raise ValueError("You can't specify both `backbone` and `backbone_config`.")

    if backbone_config is not None and use_timm_backbone:
        raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

    if backbone_kwargs and backbone_config is not None:
        raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
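# Worked example (stage names are illustrative): with
# stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"],
# `get_aligned_output_features_output_indices` resolves as follows:
#
#     out_features=None,        out_indices=[1, 3] -> (["stage1", "stage3"], [1, 3])
#     out_features=["stage4"],  out_indices=None   -> (["stage4"], [4])
#     out_features=None,        out_indices=None   -> (["stage4"], [4])   # defaults to the last stage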
transformers/src/transformers/utils/backbone_utils.py/0
{ "file_path": "transformers/src/transformers/utils/backbone_utils.py", "repo_id": "transformers", "token_count": 6473 }
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class TFForcedBOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFForcedEOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFForceTokensLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGenerationMixin(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLogitsProcessorList(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLogitsWarper(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMinLengthLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFNoBadWordsLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFNoRepeatNGramLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRepetitionPenaltyLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSuppressTokensLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTemperatureLogitsWarper(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTopKLogitsWarper(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTopPLogitsWarper(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class KerasMetricCallback(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class PushToHubCallback(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSequenceSummary(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSharedEmbeddings(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) def shape_list(*args, **kwargs): requires_backends(shape_list, ["tf"]) class TFAlbertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForPreTraining(metaclass=DummyObject): _backends = ["tf"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None TF_MODEL_FOR_CAUSAL_LM_MAPPING = None TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None TF_MODEL_FOR_MASK_GENERATION_MAPPING = None TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = None TF_MODEL_FOR_MASKED_LM_MAPPING = None TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None TF_MODEL_FOR_PRETRAINING_MAPPING = None TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = None TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None TF_MODEL_FOR_TEXT_ENCODING_MAPPING = None TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None TF_MODEL_FOR_VISION_2_SEQ_MAPPING = None TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = None TF_MODEL_MAPPING = None TF_MODEL_WITH_LM_HEAD_MAPPING = None class TFAutoModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForAudioClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForDocumentQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForMaskedImageModeling(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForMaskGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForNextSentencePrediction(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) 
class TFAutoModelForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForSemanticSegmentation(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForSeq2SeqLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForSpeechSeq2Seq(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForTableQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForTextEncoding(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForVision2Seq(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForZeroShotImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelWithLMHead(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBartForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBartForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBartModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBartPretrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertModel(metaclass=DummyObject): _backends = ["tf"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotSmallForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotSmallModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotSmallPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipForImageTextRetrieval(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipTextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipVisionModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCLIPModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class 
TFCLIPTextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCLIPVisionModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextV2ForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextV2Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextV2PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCTRLForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCTRLLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCTRLModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCTRLPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCvtForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCvtModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCvtPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFData2VecVisionForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFData2VecVisionForSemanticSegmentation(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFData2VecVisionModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["tf"]) class TFData2VecVisionPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2ForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2ForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2ForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2ForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2ForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDeiTForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDeiTForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDeiTForMaskedImageModeling(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDeiTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDeiTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEfficientFormerForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEfficientFormerForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEfficientFormerModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEfficientFormerPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAdaptiveEmbedding(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) 
class TFTransfoXLForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTransfoXLLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTransfoXLMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTransfoXLModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTransfoXLPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRContextEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRPretrainedContextEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRPretrainedQuestionEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRPretrainedReader(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRQuestionEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRReader(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEncoderDecoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEsmForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEsmForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEsmForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEsmModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEsmPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertWithLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelBaseModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2DoubleHeadsModel(metaclass=DummyObject): _backends = 
["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2ForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2LMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2MainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGroupViTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGroupViTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGroupViTTextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGroupViTVisionModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFHubertForCTC(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFHubertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFHubertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFIdeficsForVisionText2Text(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFIdeficsModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFIdeficsPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["tf"]) class TFLayoutLMModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMv3ForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMv3ForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMv3ForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMv3Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMv3PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLEDForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLEDModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLEDPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLxmertForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLxmertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLxmertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLxmertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLxmertVisualFeatureEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMarianModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMarianMTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMarianPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMBartForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMBartModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMBartPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMistralForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMistralForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMistralModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMistralPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileViTForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileViTForSemanticSegmentation(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileViTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileViTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class 
TFMPNetForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMT5EncoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMT5Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTDoubleHeadsModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOPTForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOPTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOPTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFPegasusForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFPegasusModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFPegasusPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRagModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRagPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRagSequenceForGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRagTokenForGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRegNetForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRegNetModel(metaclass=DummyObject): 
_backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRegNetPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFResNetForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFResNetModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFResNetPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class 
TFRobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSamModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSamPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSegformerDecodeHead(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSegformerForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSegformerForSemanticSegmentation(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSegformerModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSegformerPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSpeech2TextForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSpeech2TextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSpeech2TextPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class 
TFSwiftFormerForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSwiftFormerModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSwiftFormerPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSwinForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSwinForMaskedImageModeling(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSwinModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSwinPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFT5EncoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFT5Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFT5PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTapasForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTapasForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTapasForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTapasModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTapasPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFVisionEncoderDecoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFVisionTextDualEncoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTMAEForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTMAEModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTMAEPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWav2Vec2ForCTC(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class 
TFWav2Vec2ForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWav2Vec2Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWav2Vec2PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWhisperForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWhisperModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWhisperPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXGLMForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXGLMModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXGLMPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMWithLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, 
["tf"]) class TFXLNetForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class AdamWeightDecay(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class GradientAccumulator(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class WarmUp(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) def create_optimizer(*args, **kwargs): requires_backends(create_optimizer, ["tf"])
transformers/src/transformers/utils/dummy_tf_objects.py/0
{ "file_path": "transformers/src/transformers/utils/dummy_tf_objects.py", "repo_id": "transformers", "token_count": 27487 }
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: sentencepiece_model.proto # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name="sentencepiece_model.proto", package="sentencepiece", syntax="proto2", serialized_options=b"H\003", create_key=_descriptor._internal_create_key, serialized_pb=( b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\xa1\n\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01' b" \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02" b" \x01(\t\x12\x41\n\nmodel_type\x18\x03" b" \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04" b" \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12" b' \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n' b" \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b" b" \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12" b' \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r' b" \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e" b" \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f" b" \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12" b" \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10" b" \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11" b" \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14" b" \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15" b" \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17" b" \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16" b" \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18" b" \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19" b" \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e" b" \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$" b" \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18" b' \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18"' b" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18)" b" \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+" b" \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18." 
b" \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30" b" \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87" b" \x12+\n\x1ctrain_extremely_large_corpus\x18\x31" b' \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01' b" \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03" b" \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12" b" \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06" b' \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01' b' \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01' b" \x01(\t\x12\x10\n\x08\x65xpected\x18\x02" b' \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01' b" \x03(\x0b\x32'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02" b" \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03" b" \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04" b" \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05" b" \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01" b" \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03" b' \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ), ) _TRAINERSPEC_MODELTYPE = _descriptor.EnumDescriptor( name="ModelType", full_name="sentencepiece.TrainerSpec.ModelType", filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="UNIGRAM", index=0, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="BPE", index=1, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="WORD", index=2, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="CHAR", index=3, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, serialized_start=1294, serialized_end=1347, ) _sym_db.RegisterEnumDescriptor(_TRAINERSPEC_MODELTYPE) _MODELPROTO_SENTENCEPIECE_TYPE = _descriptor.EnumDescriptor( name="Type", full_name="sentencepiece.ModelProto.SentencePiece.Type", filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="NORMAL", index=0, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="UNKNOWN", index=1, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="CONTROL", index=2, number=3, 
serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="USER_DEFINED", index=3, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="BYTE", index=4, number=6, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="UNUSED", index=5, number=5, serialized_options=None, type=None, create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, serialized_start=2100, serialized_end=2184, ) _sym_db.RegisterEnumDescriptor(_MODELPROTO_SENTENCEPIECE_TYPE) _TRAINERSPEC = _descriptor.Descriptor( name="TrainerSpec", full_name="sentencepiece.TrainerSpec", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input", full_name="sentencepiece.TrainerSpec.input", index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_format", full_name="sentencepiece.TrainerSpec.input_format", index=1, number=7, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model_prefix", full_name="sentencepiece.TrainerSpec.model_prefix", index=2, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model_type", full_name="sentencepiece.TrainerSpec.model_type", index=3, number=3, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="vocab_size", full_name="sentencepiece.TrainerSpec.vocab_size", index=4, number=4, type=5, cpp_type=1, label=1, has_default_value=True, default_value=8000, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="accept_language", full_name="sentencepiece.TrainerSpec.accept_language", index=5, number=5, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="self_test_sample_size", full_name="sentencepiece.TrainerSpec.self_test_sample_size", index=6, number=6, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="character_coverage", full_name="sentencepiece.TrainerSpec.character_coverage", index=7, number=10, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.9995), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_sentence_size", full_name="sentencepiece.TrainerSpec.input_sentence_size", index=8, number=11, type=4, cpp_type=4, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shuffle_input_sentence", full_name="sentencepiece.TrainerSpec.shuffle_input_sentence", index=9, number=19, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mining_sentence_size", full_name="sentencepiece.TrainerSpec.mining_sentence_size", index=10, number=12, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b"\030\001", file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="training_sentence_size", full_name="sentencepiece.TrainerSpec.training_sentence_size", index=11, number=13, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b"\030\001", file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="seed_sentencepiece_size", full_name="sentencepiece.TrainerSpec.seed_sentencepiece_size", index=12, number=14, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1000000, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="shrinking_factor", full_name="sentencepiece.TrainerSpec.shrinking_factor", index=13, number=15, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.75), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="max_sentence_length", full_name="sentencepiece.TrainerSpec.max_sentence_length", index=14, number=18, type=5, cpp_type=1, label=1, has_default_value=True, default_value=4192, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="num_threads", full_name="sentencepiece.TrainerSpec.num_threads", index=15, number=16, type=5, cpp_type=1, label=1, has_default_value=True, default_value=16, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="num_sub_iterations", full_name="sentencepiece.TrainerSpec.num_sub_iterations", index=16, number=17, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="max_sentencepiece_length", full_name="sentencepiece.TrainerSpec.max_sentencepiece_length", index=17, number=20, type=5, cpp_type=1, label=1, has_default_value=True, default_value=16, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="split_by_unicode_script", full_name="sentencepiece.TrainerSpec.split_by_unicode_script", index=18, number=21, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="split_by_number", full_name="sentencepiece.TrainerSpec.split_by_number", index=19, number=23, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="split_by_whitespace", full_name="sentencepiece.TrainerSpec.split_by_whitespace", index=20, number=22, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="treat_whitespace_as_suffix", full_name="sentencepiece.TrainerSpec.treat_whitespace_as_suffix", index=21, number=24, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="split_digits", full_name="sentencepiece.TrainerSpec.split_digits", index=22, number=25, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="control_symbols", full_name="sentencepiece.TrainerSpec.control_symbols", index=23, number=30, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="user_defined_symbols", full_name="sentencepiece.TrainerSpec.user_defined_symbols", index=24, number=31, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="required_chars", full_name="sentencepiece.TrainerSpec.required_chars", index=25, number=36, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="byte_fallback", full_name="sentencepiece.TrainerSpec.byte_fallback", index=26, number=35, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="vocabulary_output_piece_score", full_name="sentencepiece.TrainerSpec.vocabulary_output_piece_score", index=27, number=32, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="hard_vocab_limit", full_name="sentencepiece.TrainerSpec.hard_vocab_limit", index=28, number=33, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="use_all_vocab", full_name="sentencepiece.TrainerSpec.use_all_vocab", index=29, number=34, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="unk_id", full_name="sentencepiece.TrainerSpec.unk_id", index=30, number=40, type=5, cpp_type=1, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bos_id", full_name="sentencepiece.TrainerSpec.bos_id", index=31, number=41, type=5, cpp_type=1, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="eos_id", full_name="sentencepiece.TrainerSpec.eos_id", index=32, number=42, type=5, cpp_type=1, label=1, has_default_value=True, default_value=2, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="pad_id", full_name="sentencepiece.TrainerSpec.pad_id", index=33, number=43, type=5, cpp_type=1, label=1, has_default_value=True, default_value=-1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="unk_piece", full_name="sentencepiece.TrainerSpec.unk_piece", index=34, number=45, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"<unk>".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bos_piece", full_name="sentencepiece.TrainerSpec.bos_piece", index=35, number=46, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"<s>".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="eos_piece", full_name="sentencepiece.TrainerSpec.eos_piece", index=36, number=47, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"</s>".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="pad_piece", full_name="sentencepiece.TrainerSpec.pad_piece", index=37, number=48, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b"<pad>".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="unk_surface", full_name="sentencepiece.TrainerSpec.unk_surface", index=38, number=44, type=9, cpp_type=9, label=1, has_default_value=True, default_value=b" \342\201\207 ".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="train_extremely_large_corpus", full_name="sentencepiece.TrainerSpec.train_extremely_large_corpus", index=39, number=49, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[ _TRAINERSPEC_MODELTYPE, ], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=45, serialized_end=1358, ) _NORMALIZERSPEC = _descriptor.Descriptor( name="NormalizerSpec", full_name="sentencepiece.NormalizerSpec", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", full_name="sentencepiece.NormalizerSpec.name", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="precompiled_charsmap", full_name="sentencepiece.NormalizerSpec.precompiled_charsmap", index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, 
default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="add_dummy_prefix", full_name="sentencepiece.NormalizerSpec.add_dummy_prefix", index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="remove_extra_whitespaces", full_name="sentencepiece.NormalizerSpec.remove_extra_whitespaces", index=3, number=4, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="escape_whitespaces", full_name="sentencepiece.NormalizerSpec.escape_whitespaces", index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="normalization_rule_tsv", full_name="sentencepiece.NormalizerSpec.normalization_rule_tsv", index=5, number=6, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=1361, serialized_end=1570, ) _SELFTESTDATA_SAMPLE = _descriptor.Descriptor( name="Sample", full_name="sentencepiece.SelfTestData.Sample", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input", full_name="sentencepiece.SelfTestData.Sample.input", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="expected", full_name="sentencepiece.SelfTestData.Sample.expected", index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto2", extension_ranges=[], oneofs=[], serialized_start=1641, serialized_end=1682, ) _SELFTESTDATA = _descriptor.Descriptor( name="SelfTestData", full_name="sentencepiece.SelfTestData", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="samples", full_name="sentencepiece.SelfTestData.samples", index=0, 
number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[ _SELFTESTDATA_SAMPLE, ], enum_types=[], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=1572, serialized_end=1693, ) _MODELPROTO_SENTENCEPIECE = _descriptor.Descriptor( name="SentencePiece", full_name="sentencepiece.ModelProto.SentencePiece", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="piece", full_name="sentencepiece.ModelProto.SentencePiece.piece", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="score", full_name="sentencepiece.ModelProto.SentencePiece.score", index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="type", full_name="sentencepiece.ModelProto.SentencePiece.type", index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[ _MODELPROTO_SENTENCEPIECE_TYPE, ], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=1985, serialized_end=2195, ) _MODELPROTO = _descriptor.Descriptor( name="ModelProto", full_name="sentencepiece.ModelProto", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="pieces", full_name="sentencepiece.ModelProto.pieces", index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="trainer_spec", full_name="sentencepiece.ModelProto.trainer_spec", index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="normalizer_spec", full_name="sentencepiece.ModelProto.normalizer_spec", index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="self_test_data", 
full_name="sentencepiece.ModelProto.self_test_data", index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="denormalizer_spec", full_name="sentencepiece.ModelProto.denormalizer_spec", index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[ _MODELPROTO_SENTENCEPIECE, ], enum_types=[], serialized_options=None, is_extendable=True, syntax="proto2", extension_ranges=[ (200, 536870912), ], oneofs=[], serialized_start=1696, serialized_end=2206, ) _TRAINERSPEC.fields_by_name["model_type"].enum_type = _TRAINERSPEC_MODELTYPE _TRAINERSPEC_MODELTYPE.containing_type = _TRAINERSPEC _SELFTESTDATA_SAMPLE.containing_type = _SELFTESTDATA _SELFTESTDATA.fields_by_name["samples"].message_type = _SELFTESTDATA_SAMPLE _MODELPROTO_SENTENCEPIECE.fields_by_name["type"].enum_type = _MODELPROTO_SENTENCEPIECE_TYPE _MODELPROTO_SENTENCEPIECE.containing_type = _MODELPROTO _MODELPROTO_SENTENCEPIECE_TYPE.containing_type = _MODELPROTO_SENTENCEPIECE _MODELPROTO.fields_by_name["pieces"].message_type = _MODELPROTO_SENTENCEPIECE _MODELPROTO.fields_by_name["trainer_spec"].message_type = _TRAINERSPEC _MODELPROTO.fields_by_name["normalizer_spec"].message_type = _NORMALIZERSPEC _MODELPROTO.fields_by_name["self_test_data"].message_type = _SELFTESTDATA _MODELPROTO.fields_by_name["denormalizer_spec"].message_type = _NORMALIZERSPEC DESCRIPTOR.message_types_by_name["TrainerSpec"] = _TRAINERSPEC DESCRIPTOR.message_types_by_name["NormalizerSpec"] = _NORMALIZERSPEC DESCRIPTOR.message_types_by_name["SelfTestData"] = _SELFTESTDATA DESCRIPTOR.message_types_by_name["ModelProto"] = _MODELPROTO _sym_db.RegisterFileDescriptor(DESCRIPTOR) TrainerSpec = _reflection.GeneratedProtocolMessageType( "TrainerSpec", (_message.Message,), { "DESCRIPTOR": _TRAINERSPEC, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.TrainerSpec) }, ) _sym_db.RegisterMessage(TrainerSpec) NormalizerSpec = _reflection.GeneratedProtocolMessageType( "NormalizerSpec", (_message.Message,), { "DESCRIPTOR": _NORMALIZERSPEC, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.NormalizerSpec) }, ) _sym_db.RegisterMessage(NormalizerSpec) SelfTestData = _reflection.GeneratedProtocolMessageType( "SelfTestData", (_message.Message,), { "Sample": _reflection.GeneratedProtocolMessageType( "Sample", (_message.Message,), { "DESCRIPTOR": _SELFTESTDATA_SAMPLE, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData.Sample) }, ), "DESCRIPTOR": _SELFTESTDATA, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData) }, ) _sym_db.RegisterMessage(SelfTestData) _sym_db.RegisterMessage(SelfTestData.Sample) ModelProto = _reflection.GeneratedProtocolMessageType( "ModelProto", (_message.Message,), { "SentencePiece": _reflection.GeneratedProtocolMessageType( "SentencePiece", (_message.Message,), { "DESCRIPTOR": _MODELPROTO_SENTENCEPIECE, "__module__": "sentencepiece_model_pb2", # 
@@protoc_insertion_point(class_scope:sentencepiece.ModelProto.SentencePiece) }, ), "DESCRIPTOR": _MODELPROTO, "__module__": "sentencepiece_model_pb2", # @@protoc_insertion_point(class_scope:sentencepiece.ModelProto) }, ) _sym_db.RegisterMessage(ModelProto) _sym_db.RegisterMessage(ModelProto.SentencePiece) DESCRIPTOR._options = None _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # @@protoc_insertion_point(module_scope)
transformers/src/transformers/utils/sentencepiece_model_pb2.py/0
{ "file_path": "transformers/src/transformers/utils/sentencepiece_model_pb2.py", "repo_id": "transformers", "token_count": 28261 }
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import uuid

import pytest

from transformers.agents.agent_types import AgentText
from transformers.agents.agents import (
    AgentMaxIterationsError,
    CodeAgent,
    ManagedAgent,
    ReactCodeAgent,
    ReactJsonAgent,
    Toolbox,
)
from transformers.agents.default_tools import PythonInterpreterTool
from transformers.testing_utils import require_torch


def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)


def fake_react_json_llm(messages, stop_sequences=None, grammar=None) -> str:
    prompt = str(messages)

    if "special_marker" not in prompt:
        return """
Thought: I should multiply 2 by 3.6452. special_marker
Action:
{
    "action": "python_interpreter",
    "action_input": {"code": "2*3.6452"}
}
"""
    else:  # We're at step 2
        return """
Thought: I can now answer the initial question
Action:
{
    "action": "final_answer",
    "action_input": {"answer": "7.2904"}
}
"""


def fake_react_code_llm(messages, stop_sequences=None, grammar=None) -> str:
    prompt = str(messages)
    if "special_marker" not in prompt:
        return """
Thought: I should multiply 2 by 3.6452. special_marker
Code:
```py
result = 2*3.6452
```<end_code>
"""
    else:  # We're at step 2
        return """
Thought: I can now answer the initial question
Code:
```py
final_answer(7.2904)
```<end_code>
"""


def fake_react_code_llm_error(messages, stop_sequences=None) -> str:
    prompt = str(messages)
    if "special_marker" not in prompt:
        return """
Thought: I should multiply 2 by 3.6452. special_marker
Code:
```py
print = 2
```<end_code>
"""
    else:  # We're at step 2
        return """
Thought: I can now answer the initial question
Code:
```py
final_answer("got an error")
```<end_code>
"""


def fake_react_code_functiondef(messages, stop_sequences=None) -> str:
    prompt = str(messages)
    if "special_marker" not in prompt:
        return """
Thought: Let's define the function. special_marker
Code:
```py
import numpy as np

def moving_average(x, w):
    return np.convolve(x, np.ones(w), 'valid') / w
```<end_code>
"""
    else:  # We're at step 2
        return """
Thought: I can now answer the initial question
Code:
```py
x, w = [0, 1, 2, 3, 4, 5], 2
res = moving_average(x, w)
final_answer(res)
```<end_code>
"""


def fake_code_llm_oneshot(messages, stop_sequences=None, grammar=None) -> str:
    return """
Thought: I should multiply 2 by 3.6452. special_marker
Code:
```py
result = python_interpreter(code="2*3.6452")
final_answer(result)
```
"""


def fake_code_llm_no_return(messages, stop_sequences=None, grammar=None) -> str:
    return """
Thought: I should multiply 2 by 3.6452.
special_marker Code: ```py result = python_interpreter(code="2*3.6452") print(result) ``` """ class AgentTests(unittest.TestCase): def test_fake_code_agent(self): agent = CodeAgent(tools=[PythonInterpreterTool()], llm_engine=fake_code_llm_oneshot) output = agent.run("What is 2 multiplied by 3.6452?") assert isinstance(output, str) assert output == "7.2904" def test_fake_react_json_agent(self): agent = ReactJsonAgent(tools=[PythonInterpreterTool()], llm_engine=fake_react_json_llm) output = agent.run("What is 2 multiplied by 3.6452?") assert isinstance(output, str) assert output == "7.2904" assert agent.logs[0]["task"] == "What is 2 multiplied by 3.6452?" assert agent.logs[1]["observation"] == "7.2904" assert agent.logs[1]["rationale"].strip() == "Thought: I should multiply 2 by 3.6452. special_marker" assert ( agent.logs[2]["llm_output"] == """ Thought: I can now answer the initial question Action: { "action": "final_answer", "action_input": {"answer": "7.2904"} } """ ) def test_fake_react_code_agent(self): agent = ReactCodeAgent(tools=[PythonInterpreterTool()], llm_engine=fake_react_code_llm) output = agent.run("What is 2 multiplied by 3.6452?") assert isinstance(output, float) assert output == 7.2904 assert agent.logs[0]["task"] == "What is 2 multiplied by 3.6452?" assert agent.logs[2]["tool_call"] == { "tool_arguments": "final_answer(7.2904)", "tool_name": "code interpreter", } def test_react_code_agent_code_errors_show_offending_lines(self): agent = ReactCodeAgent(tools=[PythonInterpreterTool()], llm_engine=fake_react_code_llm_error) output = agent.run("What is 2 multiplied by 3.6452?") assert isinstance(output, AgentText) assert output == "got an error" assert "Evaluation stopped at line 'print = 2' because of" in str(agent.logs) def test_setup_agent_with_empty_toolbox(self): ReactJsonAgent(llm_engine=fake_react_json_llm, tools=[]) def test_react_fails_max_iterations(self): agent = ReactCodeAgent( tools=[PythonInterpreterTool()], llm_engine=fake_code_llm_no_return, # use this callable because it never ends max_iterations=5, ) agent.run("What is 2 multiplied by 3.6452?") assert len(agent.logs) == 7 assert type(agent.logs[-1]["error"]) is AgentMaxIterationsError @require_torch def test_init_agent_with_different_toolsets(self): toolset_1 = [] agent = ReactCodeAgent(tools=toolset_1, llm_engine=fake_react_code_llm) assert ( len(agent.toolbox.tools) == 1 ) # when no tools are provided, only the final_answer tool is added by default toolset_2 = [PythonInterpreterTool(), PythonInterpreterTool()] agent = ReactCodeAgent(tools=toolset_2, llm_engine=fake_react_code_llm) assert ( len(agent.toolbox.tools) == 2 ) # deduplication of tools, so only one python_interpreter tool is added in addition to final_answer toolset_3 = Toolbox(toolset_2) agent = ReactCodeAgent(tools=toolset_3, llm_engine=fake_react_code_llm) assert ( len(agent.toolbox.tools) == 2 ) # same as previous one, where toolset_3 is an instantiation of previous one # check that add_base_tools will not interfere with existing tools with pytest.raises(KeyError) as e: agent = ReactJsonAgent(tools=toolset_3, llm_engine=fake_react_json_llm, add_base_tools=True) assert "already exists in the toolbox" in str(e) # check that python_interpreter base tool does not get added to code agents agent = ReactCodeAgent(tools=[], llm_engine=fake_react_code_llm, add_base_tools=True) assert len(agent.toolbox.tools) == 7 # added final_answer tool + 6 base tools (excluding interpreter) def test_function_persistence_across_steps(self): agent = ReactCodeAgent( 
tools=[], llm_engine=fake_react_code_functiondef, max_iterations=2, additional_authorized_imports=["numpy"] ) res = agent.run("ok") assert res[0] == 0.5 def test_init_managed_agent(self): agent = ReactCodeAgent(tools=[], llm_engine=fake_react_code_functiondef) managed_agent = ManagedAgent(agent, name="managed_agent", description="Empty") assert managed_agent.name == "managed_agent" assert managed_agent.description == "Empty" def test_agent_description_gets_correctly_inserted_in_system_prompt(self): agent = ReactCodeAgent(tools=[], llm_engine=fake_react_code_functiondef) managed_agent = ManagedAgent(agent, name="managed_agent", description="Empty") manager_agent = ReactCodeAgent( tools=[], llm_engine=fake_react_code_functiondef, managed_agents=[managed_agent] ) assert "You can also give requests to team members." not in agent.system_prompt assert "<<managed_agents_descriptions>>" not in agent.system_prompt assert "You can also give requests to team members." in manager_agent.system_prompt
transformers/tests/agents/test_agents.py/0
{ "file_path": "transformers/tests/agents/test_agents.py", "repo_id": "transformers", "token_count": 3397 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import subprocess from os.path import dirname from parameterized import parameterized from tests.trainer.test_trainer import TrainerIntegrationCommon # noqa from transformers import is_torch_available from transformers.testing_utils import ( TestCasePlus, backend_device_count, execute_subprocess_async, get_tests_dir, require_deepspeed, require_torch_accelerator, slow, torch_device, ) from transformers.trainer_utils import set_seed if is_torch_available(): from tests.trainer.test_trainer import ( # noqa RegressionModelConfig, RegressionPreTrainedModel, get_regression_trainer, ) set_seed(42) FIXTURE_DIRECTORY = get_tests_dir("fixtures") ROOT_DIRECTORY = os.path.join(dirname(get_tests_dir())) DS_TESTS_DIRECTORY = dirname(os.path.abspath(__file__)) # default torch.distributed port DEFAULT_MASTER_PORT = "10999" T5_SMALL = "google-t5/t5-small" # *** Working Models *** ALBERT_TINY = "hf-internal-testing/tiny-albert" BART_TINY = "sshleifer/bart-tiny-random" BERT_TINY = "hf-internal-testing/tiny-bert" BIGBIRD_PEGASUS_TINY = "hf-internal-testing/tiny-random-bigbird_pegasus" BIG_BIRD_TINY = "hf-internal-testing/tiny-random-big_bird" BLENDERBOT_TINY = "hf-internal-testing/tiny-random-blenderbot" BLOOM_TINY = "bigscience/bigscience-small-testing" DEBERTA_TINY = "hf-internal-testing/tiny-random-deberta" DEBERTA_V2_TINY = "hf-internal-testing/tiny-random-deberta-v2" DISTILBERT_TINY = "sshleifer/tiny-distilbert-base-cased" ELECTRA_TINY = "hf-internal-testing/tiny-electra" FLAUBERT_TINY = "hf-internal-testing/tiny-random-flaubert" FSMT_TINY = "stas/tiny-wmt19-en-de" FUNNEL_TINY = "hf-internal-testing/tiny-random-funnel" GPT2_TINY = "sshleifer/tiny-gpt2" GPTJ_TINY = "hf-internal-testing/tiny-random-gptj" GPT_NEO_TINY = "hf-internal-testing/tiny-random-gpt_neo" LAYOUTLM_TINY = "hf-internal-testing/tiny-layoutlm" LED_TINY = "hf-internal-testing/tiny-random-led" LONGFORMER_TINY = "hf-internal-testing/tiny-random-longformer" M2M_100_TINY = "stas/tiny-m2m_100" # hf tiny model is unsuitable MARIAN_TINY = "sshleifer/tiny-marian-en-de" MBART_TINY = "sshleifer/tiny-mbart" MOBILEBERT_TINY = "hf-internal-testing/tiny-random-mobilebert" MPNET_TINY = "hf-internal-testing/tiny-random-mpnet" PEGASUS_TINY = "stas/pegasus-cnn_dailymail-tiny-random" PROPHETNET_TINY = "hf-internal-testing/tiny-random-prophetnet" ROBERTA_TINY = "sshleifer/tiny-distilroberta-base" SQUEEZEBERT_TINY = "hf-internal-testing/tiny-random-squeezebert" T5_TINY = "patrickvonplaten/t5-tiny-random" T5_V1_TINY = "hf-internal-testing/tiny-random-t5-v1.1" VIT_TINY = "hf-internal-testing/tiny-random-vit" XLM_ROBERTA_TINY = "hf-internal-testing/tiny-xlm-roberta" XLNET_TINY = "sshleifer/tiny-xlnet-base-cased" # *** To Fix *** # *** tiny model issues *** # missing model files: MT5_TINY = "hf-internal-testing/tiny-random-mt5" CAMEMBERT_TINY = "hf-internal-testing/tiny-random-camembert" OPENAI_GPT_TINY = 
"hf-internal-testing/tiny-random-openai-gpt" # missing tokenizer files CONVBERT_TINY = "hf-internal-testing/tiny-random-convbert" LAYOUTLMV2_TINY = "hf-internal-testing/tiny-random-layoutlmv2" HUBERT_TINY = "hf-internal-testing/tiny-random-hubert" # issues with tokenizer CTRL_TINY = "hf-internal-testing/tiny-random-ctrl" TRANSFO_XL_TINY = "hf-internal-testing/tiny-random-transfo-xl" # same as Salesforce/ctrl # other issues with tiny models IBERT_TINY = "hf-internal-testing/tiny-random-ibert" # multiple issues with either mlm/qa/clas REFORMER_TINY = "hf-internal-testing/tiny-random-reformer" # multiple issues with either mlm/qa/clas # *** Lacking official examples to test with *** # or not working with examples DPR_TINY = "hf-internal-testing/tiny-random-dpr" # - "dpr" examples/research_projects/rag-end2end-retriever/ RAG_TINY = "hf-internal-testing/tiny-random-rag" # - "rag" research_projects LUKE_TINY = "" # - "luke" Entities classes - no plan to make such example LXMERT_TINY = "hf-internal-testing/tiny-random-lxmert" # - "lxmert" doesn't work with run_qa.py CLIP_TINY = "hf-internal-testing/tiny-random-clip" # - "clip" nothing under pytorch examples - XXX: Suraj is working on adding some - check by end of Sep SPEECH_TO_TEXT_TINY = "hf-internal-testing/tiny-random-speech_to_text" # - "speech_to_text", nothing under pytorch examples # *** Reactive mode *** # models with low usage, unstable API, things about to change - do nothing about the following until someone runs into a problem TAPAS_TINY = "hf-internal-testing/tiny-random-tapas" # additional notes on tapas # 1. "Table must be of type pd.DataFrame" failure # TODO: new models to add: # def get_launcher(distributed=False): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) num_gpus = min(2, backend_device_count(torch_device)) if distributed else 1 master_port = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT) return f"deepspeed --num_nodes 1 --num_gpus {num_gpus} --master_port {master_port}".split() def make_task_cmds(): data_dir_samples = f"{FIXTURE_DIRECTORY}/tests_samples" data_dir_wmt = f"{data_dir_samples}/wmt_en_ro" data_dir_xsum = f"{data_dir_samples}/xsum" args_main = """ --do_train --max_train_samples 4 --per_device_train_batch_size 2 --num_train_epochs 1 --fp16 --report_to none --overwrite_output_dir """.split() # try to cover as many models as possible once (it's enough to run on one task per model) # but need a tiny model for each # # should have "{model_type.upper()}_TINY" corresponding vars defined, e.g., T5_TINY, etc. 
tasks2models = { "trans": [ "bart", "fsmt", "m2m_100", "marian", "mbart", "t5", "t5_v1", # "mt5", missing model files ], "sum": [ "pegasus", ], "clm": [ "big_bird", "bigbird_pegasus", "blenderbot", "bloom", "gpt2", "gpt_neo", "gptj", "xlm-roberta", "prophetnet", # "camembert", missing model files ], "mlm": [ "albert", "deberta", "deberta-v2", "distilbert", "electra", "flaubert", "funnel", "layoutlm", # "reformer", # multiple issues with either mlm/qa/clas ], "qa": [ "led", "longformer", "mobilebert", "mpnet", "roberta", "squeezebert", # "convbert", # missing tokenizer files # "layoutlmv2", missing model files ], "clas": [ "bert", "xlnet", # "hubert", # missing tokenizer files # "ibert", # multiple issues with either mlm/qa/clas # "transfo-xl", # tokenizer issues as Salesforce/ctrl # "Salesforce/ctrl", # tokenizer issues # "openai-community/openai-gpt", missing model files # "tapas", multiple issues ], "img_clas": [ "vit", ], } scripts_dir = f"{ROOT_DIRECTORY}/examples/pytorch" tasks = { "trans": f""" {scripts_dir}/translation/run_translation.py --train_file {data_dir_wmt}/train.json --source_lang en --target_lang ro --max_source_length 12 --max_target_length 12 """, "sum": f""" {scripts_dir}/summarization/run_summarization.py --train_file {data_dir_xsum}/sample.json --max_source_length 12 --max_target_length 12 --lang en """, "clm": f""" {scripts_dir}/language-modeling/run_clm.py --train_file {FIXTURE_DIRECTORY}/sample_text.txt --block_size 8 """, "mlm": f""" {scripts_dir}/language-modeling/run_mlm.py --train_file {FIXTURE_DIRECTORY}/sample_text.txt """, "qa": f""" {scripts_dir}/question-answering/run_qa.py --train_file {data_dir_samples}/SQUAD/sample.json """, "clas": f""" {scripts_dir}/text-classification/run_glue.py --train_file {data_dir_samples}/MRPC/train.csv --max_seq_length 12 --task_name MRPC """, "img_clas": f""" {scripts_dir}/image-classification/run_image_classification.py --dataset_name hf-internal-testing/cats_vs_dogs_sample --trust_remote_code --remove_unused_columns False --max_steps 10 --image_processor_name {DS_TESTS_DIRECTORY}/vit_feature_extractor.json --label_column_name labels """, } launcher = get_launcher(distributed=True) cmds = {} for task, args in tasks.items(): args = args.split() for model in tasks2models[task]: model_name = globals()[f"{model.upper().replace('-', '_')}_TINY"] args_model = f"--model_name_or_path {model_name}".split() cmds[f"{task}_{model}"] = launcher + args + args_model + args_main # # generation special case # if task == "gen": # launcher = f"deepspeed --num_nodes 1 --num_gpus 1".split() # args_model += f"--model_type {model}".split() # cmds[f"{task}_{model}"] = launcher + args + args_model # else: return cmds task_cmds = make_task_cmds() ZERO2 = "zero2" ZERO3 = "zero3" stages = [ZERO2, ZERO3] # future preparation: # for now test just fp16, as these tests are quite slow # FP16 = "fp16" # BF16 = "bf16" # # dtypes = [FP16] # so just hardcoding --fp16 for now # if is_torch_bf16_gpu_available(): # dtypes += [BF16] def parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args)) return f"{func.__name__}_{param_based_name}" # Cartesian-product of zero stages with models to test params = list(itertools.product(stages, task_cmds.keys())) @slow @require_deepspeed @require_torch_accelerator class 
TestDeepSpeedModelZoo(TestCasePlus): """This class is for testing via an external script - can do multiple gpus""" def get_task_cmd(self, task, stage): # return a ready to run train cmd if task not in task_cmds: raise ValueError(f"don't know of task {task}, have {task_cmds.keys()}") cmd = task_cmds[task] args_ds = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split() output_dir = self.get_auto_remove_tmp_dir() args_out = f"--output_dir {output_dir}".split() cmd += args_ds + args_out return cmd, output_dir @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_zero_to_fp32(self, stage, task): # testing the ability to do a run followed by recovery of full fp32 weights cmd, output_dir = self.get_task_cmd(task, stage) # 1. generate the checkpoint cmd += "--save_steps 1".split() # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die execute_subprocess_async(cmd, env=self.get_env()) # 2. test that the fp32 weights get reconsolidated chkpt_dir = f"{output_dir}/checkpoint-1" recovered_model_path = f"{chkpt_dir}/out.bin" cmd = f"{chkpt_dir}/zero_to_fp32.py {chkpt_dir} {recovered_model_path}" # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die subprocess.check_call(cmd, shell=True) assert os.path.exists(recovered_model_path), f"{recovered_model_path} was not found" # possibly could also test that the resulting saved model is usable but given that we use # random models we won't know if it's any good
transformers/tests/deepspeed/test_model_zoo.py/0
{ "file_path": "transformers/tests/deepspeed/test_model_zoo.py", "repo_id": "transformers", "token_count": 5773 }
# coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import floats_tensor, ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        BeamHypotheses,
        BeamSearchScorer,
        ConstrainedBeamSearchScorer,
        DisjunctiveConstraint,
        PhrasalConstraint,
    )


class BeamSearchTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        sequence_length=10,
        vocab_size=99,
        pad_token_id=0,
        max_length=20,
        num_beams=4,
        length_penalty=2.0,
        do_early_stopping=True,
        num_beam_hyps_to_keep=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.num_beams = num_beams
        self.length_penalty = length_penalty
        self.do_early_stopping = do_early_stopping
        self.num_beam_hyps_to_keep = num_beam_hyps_to_keep

        # cannot be randomly generated
        self.eos_token_id = vocab_size + 1

    def prepare_beam_scorer(self, **kwargs):
        return BeamSearchScorer(
            batch_size=kwargs.get("batch_size", self.batch_size),
            num_beams=kwargs.get("num_beams", self.num_beams),
            device=torch_device,
            length_penalty=kwargs.get("length_penalty", self.length_penalty),
            do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping),
            num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep),
        )

    def prepare_inputs(self):
        input_ids = ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size)
        next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device)
        next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device)
        next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True)
        return (input_ids, next_tokens, next_indices, next_scores)

    def check_beam_hypotheses(self, input_ids, *args):
        # check that correct number of beam hypotheses is set in beam scorer
        beam_scorer = self.prepare_beam_scorer(do_early_stopping=True)
        beam_hyp = beam_scorer._beam_hyps[0]

        self.parent.assertEqual(len(beam_scorer._beam_hyps), self.batch_size)

        # check correct type
        self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses))

        # check that num_beams is correctly set
        self.parent.assertEqual(beam_hyp.num_beams, self.num_beams)

        # check for early stopping deactivated
        for beam_idx in range(self.num_beams):
            beam_hyp.add(input_ids[beam_idx], -10.0)

        # if early stopping True -> score does not matter
        self.parent.assertTrue(beam_hyp.is_done(-10.0, 5))

        # re-init
        beam_scorer = self.prepare_beam_scorer(do_early_stopping=False)
        beam_hyp = beam_scorer._beam_hyps[0]

        # add `num_beams + 1` beams to change `worst_score`
        for beam_idx in range(self.num_beams + 1):
            beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx))

        # -10.0 is removed => -9.0 is worst score
        self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty))

        # -5.0 is better than worst score => should not be finished
        self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length))

        # -20.0 is worse than worst score => should be finished
        self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length))

    def check_beam_scorer_update(self, input_ids, next_tokens, next_indices, next_scores):
        # check too many eos tokens
        beam_scorer = self.prepare_beam_scorer()

        tokens = next_tokens.clone()
        tokens[0, :] = self.eos_token_id

        with self.parent.assertRaises(ValueError):
            beam_scorer.process(input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id)

        # check all batches are done
        beam_scorer = self.prepare_beam_scorer()

        tokens = next_tokens.clone()
        tokens[:, : self.num_beams] = self.eos_token_id
        beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device)
        beam_indices = tuple(tuple(b) for b in beam_indices)
        beam_scorer.process(
            input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices
        )
        # beam scorer should be done
        self.parent.assertTrue(beam_scorer.is_done)

        # check
        beam_scorer = self.prepare_beam_scorer()

        tokens = next_tokens.clone()
        tokens[:, 1] = self.eos_token_id
        beam_outputs = beam_scorer.process(
            input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices
        )
        output_scores = beam_outputs["next_beam_scores"]
        output_tokens = beam_outputs["next_beam_tokens"]
        output_indices = beam_outputs["next_beam_indices"]

        def cut_expected_tensor(tensor):
            return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten()

        # check all outputs
        # cut out id of eos token and take best `num_beams` outputs
        expected_output_tokens = cut_expected_tensor(tokens)
        expected_output_scores = cut_expected_tensor(next_scores)

        # add num_beams * batch_idx
        offset = torch.div(
            torch.arange(self.num_beams * self.batch_size, device=torch_device), self.num_beams, rounding_mode="floor"
        )
        expected_output_indices = cut_expected_tensor(next_indices) + offset * self.num_beams

        self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist())
        self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist())
        self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3))

        # make sure ids of eos token are correctly saved in beam_hyps of beam scorer
        expected_beam_indices = list(range(10))
        for batch_idx in range(self.batch_size):
            correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1]
            self.parent.assertListEqual(
                input_ids[correct_idx].tolist(), beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist()
            )
            self.parent.assertListEqual(
                expected_beam_indices + [correct_idx],
                torch.tensor(beam_scorer._beam_hyps[batch_idx].beams[0][2]).tolist(),
            )

    def check_beam_scores_finalize(self, input_ids, next_tokens, next_indices, next_scores):
        # max_length should be only one more than current input_ids to check that eos is correctly appended
        max_length = self.sequence_length + 1
        beam_scorer = self.prepare_beam_scorer(num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False)

        # update beams and append to input_ids
        tokens = next_tokens.clone()
        # first batch, first output has to finish with eos token id since scores are correctly sorted
        tokens[0, 0] = self.eos_token_id
        # make sure corresponding score is as good as possible to surely be picked first
        next_scores[0, 0] = 0.0
        beam_outputs = beam_scorer.process(
            input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id
        )
        output_scores = beam_outputs["next_beam_scores"]
        output_tokens = beam_outputs["next_beam_tokens"]
        output_indices = beam_outputs["next_beam_indices"]

        input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1)

        # finalize
        beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device)
        beam_indices = tuple(tuple(b) for b in beam_indices)
        sequence_output = beam_scorer.finalize(
            input_ids,
            output_scores,
            output_tokens,
            output_indices,
            pad_token_id=self.pad_token_id,
            eos_token_id=self.eos_token_id,
            max_length=max_length,
            beam_indices=beam_indices,
        )

        sequences = sequence_output["sequences"]
        sequence_scores = sequence_output["sequence_scores"]

        # since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length`
        self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length])
        self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size])

        # check sequence_scores
        self.parent.assertFalse((sequence_scores > 0).any().item())

        # first batch has to finish with eos_token
        self.parent.assertEqual(sequences[0, -1].item(), self.eos_token_id)

        # other batches cannot finish with eos token
        self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id)
        self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id)

        # now test that if `num_beam_hyps_to_keep` is `num_beams` => all beams are returned
        beam_scorer.num_beam_hyps_to_keep = self.num_beams
        sequence_output = beam_scorer.finalize(
            input_ids,
            output_scores,
            output_tokens,
            output_indices,
            pad_token_id=self.pad_token_id,
            eos_token_id=self.eos_token_id,
            max_length=max_length,
            beam_indices=beam_indices,
        )
        sequences = sequence_output["sequences"]
        sequence_scores = sequence_output["sequence_scores"]

        self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length])
        self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size])


class ConstrainedBeamSearchTester:
    def __init__(
        self,
        parent,
        constraints=None,
        batch_size=3,
        sequence_length=10,
        vocab_size=99,
        pad_token_id=0,
        max_length=20,
        num_beams=4,
        length_penalty=2.0,
        do_early_stopping=True,
        num_beam_hyps_to_keep=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.num_beams = num_beams
        self.length_penalty = length_penalty
        self.do_early_stopping = do_early_stopping
        self.num_beam_hyps_to_keep = num_beam_hyps_to_keep

        if constraints is None:
            force_tokens = torch.randint(10, 50, (1, 2))[0].tolist()
            disjunctive_tokens = torch.randint(10, 50, (2, 2)).tolist()

            constraints = [PhrasalConstraint(force_tokens), DisjunctiveConstraint(disjunctive_tokens)]
        self.constraints = constraints
        # cannot be randomly generated
        self.eos_token_id = vocab_size + 1

    def prepare_constrained_beam_scorer(self, **kwargs):
        return ConstrainedBeamSearchScorer(
            constraints=kwargs.get("constraints", self.constraints),
            batch_size=kwargs.get("batch_size", self.batch_size),
            num_beams=kwargs.get("num_beams", self.num_beams),
            device=torch_device,
            length_penalty=kwargs.get("length_penalty", self.length_penalty),
            do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping),
            num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep),
        )

    def prepare_inputs(self):
        input_ids =
ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size) next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device) next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device) next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True) scores_for_all_vocab, _ = ( -floats_tensor((self.batch_size * self.num_beams, self.vocab_size)).to(torch_device) ).sort(descending=True) return (input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab) def check_beam_hypotheses(self, input_ids, *args): # check that correct number of beam hypotheses is set in beam scorer constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=True) beam_hyp = constrained_beam_scorer._beam_hyps[0] self.parent.assertEqual(len(constrained_beam_scorer._beam_hyps), self.batch_size) # check correct type self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses)) # check that num_beams is correctly set self.parent.assertEqual(beam_hyp.num_beams, self.num_beams) # check for early stopping deactivated for beam_idx in range(self.num_beams): beam_hyp.add(input_ids[beam_idx], -10.0) # if early stopping True -> score does not matter self.parent.assertTrue(beam_hyp.is_done(-10.0, 5)) # re-init constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=False) beam_hyp = constrained_beam_scorer._beam_hyps[0] # add `num_beams + 1` beams to change `worst_score` for beam_idx in range(self.num_beams + 1): beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx)) # -10.0 is removed => -9.0 is worst score self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty)) # -5.0 is better than worst score => should not be finished self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length)) # -20.0 is worse than worst score => should be finished self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length)) def check_constrained_beam_scorer_update( self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab ): # check too many eos tokens constrained_beam_scorer = self.prepare_constrained_beam_scorer() stacked_token_ids = [] for constraint in self.constraints: token_ids = constraint.token_ids token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids stacked_token_ids = stacked_token_ids + token_ids fulfilling_sequence = torch.LongTensor(stacked_token_ids) fulfill_len = fulfilling_sequence.size(0) input_ids[:, :fulfill_len] = fulfilling_sequence tokens = next_tokens.clone() tokens[0, :] = self.eos_token_id with self.parent.assertRaises(ValueError): constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) # check all batches are done constrained_beam_scorer = self.prepare_constrained_beam_scorer() tokens = next_tokens.clone() tokens[:, : self.num_beams] = self.eos_token_id constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) # beam scorer should be done self.parent.assertTrue(constrained_beam_scorer.is_done) # check constrained_beam_scorer = self.prepare_constrained_beam_scorer() tokens = next_tokens.clone() tokens[:, 1] = self.eos_token_id beam_outputs = constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) 
output_scores = beam_outputs["next_beam_scores"]
        output_tokens = beam_outputs["next_beam_tokens"]
        output_indices = beam_outputs["next_beam_indices"]

        def cut_expected_tensor(tensor):
            return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten()

        # check all outputs
        # cut out id of eos token and take best `num_beams` outputs
        expected_output_tokens = cut_expected_tensor(tokens)
        expected_output_scores = cut_expected_tensor(next_scores)

        # add num_beams * batch_idx
        offset = torch.div(
            torch.arange(self.num_beams * self.batch_size, device=torch_device), self.num_beams, rounding_mode="floor"
        )
        expected_output_indices = cut_expected_tensor(next_indices) + offset * self.num_beams

        self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist())
        self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist())
        self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3))

        # make sure ids of eos token are correctly saved in beam_hyps of beam scorer
        for batch_idx in range(self.batch_size):
            correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1]
            self.parent.assertListEqual(
                input_ids[correct_idx].tolist(), constrained_beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist()
            )

    def check_constrained_beam_scorer_finalize(
        self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab
    ):
        # max_length should be only one more than current input_ids to check that eos is correctly appended
        max_length = self.sequence_length + 1

        # for testing finalize, we do want to have fulfilled constraints
        stacked_token_ids = []
        for constraint in self.constraints:
            token_ids = constraint.token_ids
            token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids
            stacked_token_ids = stacked_token_ids + token_ids

        fulfilling_sequence = torch.LongTensor(stacked_token_ids)
        fulfill_len = fulfilling_sequence.size(0)
        input_ids[:, :fulfill_len] = fulfilling_sequence

        constrained_beam_scorer = self.prepare_constrained_beam_scorer(
            num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False
        )
        constraints = constrained_beam_scorer.constraints
        # update beams and append to input_ids
        tokens = next_tokens.clone()
        # first batch, first output has to finish with eos token id since scores are correctly sorted
        tokens[0, 0] = self.eos_token_id
        # make sure corresponding score is as good as possible to surely be picked first
        next_scores[0, 0] = 0.0

        beam_outputs = constrained_beam_scorer.process(
            input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id
        )
        output_scores = beam_outputs["next_beam_scores"]
        output_tokens = beam_outputs["next_beam_tokens"]
        output_indices = beam_outputs["next_beam_indices"]
        input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1)

        # finalize
        sequence_output = constrained_beam_scorer.finalize(
            input_ids,
            output_scores,
            output_tokens,
            output_indices,
            pad_token_id=self.pad_token_id,
            eos_token_id=self.eos_token_id,
            max_length=max_length,
        )

        sequences = sequence_output["sequences"]
        sequence_scores = sequence_output["sequence_scores"]

        # since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length`
        self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length])
        self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size])

        # check sequence_scores
        self.parent.assertFalse((sequence_scores > 0).any().item())

        # first batch has to finish with eos_token
        self.parent.assertEqual(sequences[0, 
-1].item(), self.eos_token_id) # other batches cannot finish with eos token self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id) # test that the constraint is indeed fulfilled for output, constraint in [(s, c) for s in sequences for c in constraints]: forced_token_ids = constraint.token_ids if isinstance(forced_token_ids[0], list): # disjunctive case flag = False for token_ids in forced_token_ids: if self._check_sequence_inside_sequence(output, token_ids): flag = True break self.parent.assertEqual(flag, True) else: self.parent.assertEqual(self._check_sequence_inside_sequence(output, forced_token_ids), True) # now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned # constrained_beam_scorer.num_beam_hyps_to_keep = self.num_beams constrained_beam_scorer = self.prepare_constrained_beam_scorer( num_beam_hyps_to_keep=self.num_beams, length_penalty=1.0, do_early_stopping=False ) sequence_output = constrained_beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size]) def _check_sequence_inside_sequence(self, tensor_1, tensor_2): # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1. # set to same device. we don't care what device. if not isinstance(tensor_1, list): tensor_1 = tensor_1.cpu().tolist() if not isinstance(tensor_2, list): tensor_2 = tensor_2.cpu().tolist() in_order = len(tensor_1) <= len(tensor_2) longer = tensor_2 if in_order else tensor_1 shorter = tensor_1 if in_order else tensor_2 flag = False chunk_size = len(shorter) for chunk_idx in range(len(longer) - chunk_size + 1): subseq = longer[chunk_idx : chunk_idx + chunk_size] if subseq == shorter: flag = True break return flag @require_torch class BeamSearchTest(unittest.TestCase): def setUp(self): self.beam_search_tester = BeamSearchTester(self) def test_beam_hypotheses(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_hypotheses(*inputs) def test_beam_scorer_update(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_scorer_update(*inputs) def test_beam_scorer_finalize(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_scores_finalize(*inputs) @require_torch class ConstrainedBeamSearchTest(unittest.TestCase): def setUp(self): self.constrained_beam_search_tester = ConstrainedBeamSearchTester(self) def test_constrained_beam_hypotheses(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_beam_hypotheses(*inputs) def test_constrained_beam_scorer_update(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_constrained_beam_scorer_update(*inputs) def test_constrained_beam_scorer_finalize(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_constrained_beam_scorer_finalize(*inputs)
transformers/tests/generation/test_beam_search.py/0
{ "file_path": "transformers/tests/generation/test_beam_search.py", "repo_id": "transformers", "token_count": 11152 }
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class BarkProcessorTest(unittest.TestCase): def setUp(self): self.checkpoint = "suno/bark-small" self.tmpdirname = tempfile.mkdtemp() self.voice_preset = "en_speaker_1" self.input_string = "This is a test string" self.speaker_embeddings_dict_path = "speaker_embeddings_path.json" self.speaker_embeddings_directory = "speaker_embeddings" def get_tokenizer(self, **kwargs): return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() processor = BarkProcessor(tokenizer=tokenizer) processor.save_pretrained(self.tmpdirname) processor = BarkProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) @slow def test_save_load_pretrained_additional_features(self): processor = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, ) processor.save_pretrained( self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory, ) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") processor = BarkProcessor.from_pretrained( self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)", ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) def test_speaker_embeddings(self): processor = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, ) seq_len = 35 nb_codebooks_coarse = 2 nb_codebooks_total = 8 voice_preset = { "semantic_prompt": np.ones(seq_len), "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)), "fine_prompt": np.ones((nb_codebooks_total, seq_len)), } # test providing already loaded voice_preset inputs = processor(text=self.input_string, voice_preset=voice_preset) processed_voice_preset = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()) # test loading voice preset from npz file tmpfilename = os.path.join(self.tmpdirname, "file.npz") np.savez(tmpfilename, **voice_preset) inputs = processor(text=self.input_string, voice_preset=tmpfilename) processed_voice_preset = inputs["history_prompt"] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()) # test loading voice preset from the hub inputs = processor(text=self.input_string, voice_preset=self.voice_preset) def test_tokenizer(self): tokenizer = 
self.get_tokenizer() processor = BarkProcessor(tokenizer=tokenizer) encoded_processor = processor(text=self.input_string) encoded_tok = tokenizer( self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
transformers/tests/models/bark/test_processor_bark.py/0
{ "file_path": "transformers/tests/models/bark/test_processor_bark.py", "repo_id": "transformers", "token_count": 1928 }
# coding=utf-8 # Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch BridgeTower model.""" import tempfile import unittest import numpy as np from transformers import ( BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, is_torch_available, is_vision_available, ) from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, ) if is_vision_available(): from PIL import Image from transformers import BridgeTowerProcessor class BridgeTowerTextModelTester: def __init__( self, parent, hidden_act="gelu", hidden_size=64, initializer_factor=1, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=2, intermediate_size=128, tie_word_embeddings=False, output_hidden_states=False, ): self.parent = parent self.hidden_act = hidden_act self.hidden_size = hidden_size self.initializer_factor = initializer_factor self.layer_norm_eps = layer_norm_eps self.num_attention_heads = num_attention_heads self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.tie_word_embeddings = tie_word_embeddings self.vocab_size = 99 self.seq_length = 4 self.batch_size = 1 self.is_training = False self.output_hidden_states = output_hidden_states def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_ids, attention_mask def get_config(self): return BridgeTowerTextConfig( hidden_act=self.hidden_act, hidden_size=self.hidden_size, initializer_factor=self.initializer_factor, layer_norm_eps=self.layer_norm_eps, num_attention_heads=self.num_attention_heads, num_hidden_layers=self.num_hidden_layers, intermediate_size=self.intermediate_size, tie_word_embeddings=self.tie_word_embeddings, output_hidden_states=self.output_hidden_states, vocab_size=self.vocab_size, ) class BridgeTowerImageModelTester: def __init__( self, parent, hidden_size=64, initializer_factor=1, layer_norm_eps=1e-05, num_hidden_layers=2, init_layernorm_from_vision_encoder=False, output_hidden_states=False, image_size=64, ): self.parent = parent self.hidden_size = hidden_size self.initializer_factor = initializer_factor self.layer_norm_eps = layer_norm_eps self.num_hidden_layers = num_hidden_layers self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder self.num_channels = 3 self.num_image_features = 
17 self.batch_size = 1 self.image_size = image_size self.is_training = False self.output_hidden_states = output_hidden_states def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) pixel_mask = random_attention_mask([self.batch_size, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values, pixel_mask def get_config(self): return BridgeTowerVisionConfig( hidden_size=self.hidden_size, initializer_factor=self.initializer_factor, layer_norm_eps=self.layer_norm_eps, num_hidden_layers=self.num_hidden_layers, init_layernorm_from_vision_encoder=self.init_layernorm_from_vision_encoder, num_channels=self.num_channels, num_image_features=self.num_image_features, batch_size=self.batch_size, image_size=self.image_size, is_training=self.is_training, output_hidden_states=self.output_hidden_states, ) class BridgeTowerModelTester: def __init__( self, parent, text_kwargs=None, vision_kwargs=None, share_cross_modal_transformer_layers=True, share_link_tower_layers=False, link_tower_type="add", init_layernorm_from_vision_encoder=False, contrastive_hidden_size=512, logit_scale_init_value=2.6592, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128, ): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = BridgeTowerTextModelTester(parent, **text_kwargs) self.vision_model_tester = BridgeTowerImageModelTester(parent, **vision_kwargs) self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers self.share_link_tower_layers = share_link_tower_layers self.link_tower_type = link_tower_type self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder self.contrastive_hidden_size = contrastive_hidden_size self.logit_scale_init_value = logit_scale_init_value self.batch_size = 1 self.expected_num_hidden_layers = 8 self.is_training = False self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values, pixel_mask = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return (config, input_ids, attention_mask, pixel_values, pixel_mask) def get_config(self): return BridgeTowerConfig.from_text_vision_configs( text_config=self.text_model_tester.get_config(), vision_config=self.vision_model_tester.get_config(), share_cross_modal_transformer_layers=self.share_cross_modal_transformer_layers, share_link_tower_layers=self.share_link_tower_layers, link_tower_type=self.link_tower_type, init_layernorm_from_vision_encoder=self.init_layernorm_from_vision_encoder, contrastive_hidden_size=self.contrastive_hidden_size, logit_scale_init_value=self.logit_scale_init_value, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, ) def create_and_check_model( self, config, input_ids, attention_mask, pixel_values, pixel_mask, ): model = BridgeTowerModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values) 
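        # BridgeTowerModel returns one sequence of hidden states per modality plus a
        # pooled cross-modal vector; the shape checks below pin down that contract
        # (note the pooled width is the sum of the text and vision hidden sizes).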
self.parent.assertEqual( result["text_features"].shape, (self.batch_size, self.text_model_tester.seq_length, self.text_model_tester.hidden_size), ) self.parent.assertEqual( result["image_features"].shape, (self.batch_size, self.vision_model_tester.num_image_features, self.vision_model_tester.hidden_size), ) self.parent.assertEqual( result["pooler_output"].shape, (self.batch_size, self.text_model_tester.hidden_size + self.vision_model_tester.hidden_size), ) def create_and_check_for_image_and_text_retrieval( self, config, input_ids, attention_mask, pixel_values, pixel_mask, ): bridgetower_itm_output_last_dimension = 2 model = BridgeTowerForImageAndTextRetrieval(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, bridgetower_itm_output_last_dimension)) def create_and_check_for_masked_language_modeling( self, config, input_ids, attention_mask, pixel_values, pixel_mask, ): model = BridgeTowerForMaskedLM(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(input_ids, attention_mask=attention_mask, pixel_values=pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.text_model_tester.seq_length, self.text_model_tester.vocab_size), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, attention_mask, pixel_values, pixel_mask) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "pixel_mask": pixel_mask, } return config, inputs_dict @require_torch class BridgeTowerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( BridgeTowerModel, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerForContrastiveLearning, ) if is_torch_available() else () ) pipeline_model_mapping = {"feature-extraction": BridgeTowerModel} if is_torch_available() else {} is_training = False test_headmasking = False test_pruning = False test_torchscript = False test_resize_embeddings = False has_attentions = False @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.") def test_cpu_offload(self): pass @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.") def test_disk_offload(self): pass @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.") def test_model_parallelism(self): pass # function to extract meaningful tensor from output per different model_class def extract_output(self, outputs, model_class): return outputs["pooler_output"] if model_class == "BridgeTowerModel" else outputs["logits"] def setUp(self): self.model_tester = BridgeTowerModelTester(self) self.config_tester = ConfigTester(self, config_class=BridgeTowerConfig, hidden_size=37, vocab_size=99) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_and_text_retrieval(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_and_text_retrieval(*config_and_inputs) 
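    # The task-specific checks below reuse the same tester inputs; only the head under
    # test and the expected output shape differ.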
def test_for_masked_language_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_language_modeling(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "BridgeTower/bridgetower-base" model = BridgeTowerModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow def test_save_load_fast_init_from_base(self): # Override as it is a slow test on this model super().test_save_load_fast_init_from_base() # Override as extracting meaningful tensor from output is different for BridgeTower def test_save_load(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**input_dict) out_2 = self.extract_output(outputs, model_class.__name__) out_2 = out_2.cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) with torch.no_grad(): after_outputs = model(**input_dict) # Make sure we don't have nans out_1 = self.extract_output(after_outputs, model_class.__name__) out_1 = out_1.cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) # Override this as `hidden states output` is different for BridgeTower def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states_text, hidden_states_vision, hidden_states_cross = ( outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states ) expected_num_layers = self.model_tester.expected_num_hidden_layers self.assertEqual( sum((len(hidden_states_text), len(hidden_states_vision), len(hidden_states_cross))), expected_num_layers, ) seq_length = self.model_tester.text_model_tester.seq_length num_image_features = self.model_tester.vision_model_tester.num_image_features self.assertListEqual( list(hidden_states_text[0].shape[-2:]), [seq_length, self.model_tester.text_model_tester.hidden_size], ) self.assertListEqual( list(hidden_states_vision[0].shape), [num_image_features, 1, self.model_tester.vision_model_tester.hidden_size], ) self.assertListEqual( list(hidden_states_cross[0][0].shape[-2:]), [seq_length, self.model_tester.text_model_tester.hidden_size], ) self.assertListEqual( list(hidden_states_cross[0][1].shape[-2:]), [num_image_features, self.model_tester.vision_model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # Override as `hidden states output` is different for BridgeTower def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same functionality model_class = 
self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        output = outputs[0]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0][0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0][0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    # override as the `logit_scale` parameter initialization is different for BridgeTower
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name == "logit_scale":
                        self.assertAlmostEqual(
                            param.data.item(),
                            config.logit_scale_init_value,
                            delta=1e-3,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    @unittest.skip(reason="""Bridge Tower does not have input/output embeddings. So this test is not applicable.""")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="""Bridge Tower does not have input/output embeddings. Thus this test is not applicable.""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bridge Tower does not use inputs_embeds")
    def test_inputs_embeds_matches_input_ids(self):
        pass


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BridgeTowerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_processor(self):
        return (
            BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
            if is_vision_available()
            else None
        )

    @slow
    def test_image_and_text_retrieval(self):
        model = BridgeTowerForImageAndTextRetrieval.from_pretrained("BridgeTower/bridgetower-base-itm-mlm").to(
            torch_device
        )
        model.eval()
        processor = self.default_processor
        image = prepare_img()
        text = "a bunch of cats laying on a tower."
        inputs = processor(image, text, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 2])
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(outputs.logits[0, 1].item() > outputs.logits[0, 0].item())

        # verify loss
        inputs["labels"] = torch.ones(1, dtype=torch.long, device=torch_device)
        inputs = inputs.to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertAlmostEqual(outputs.loss.item(), 0.5108, places=4)

    @slow
    def test_masked_language_modeling(self):
        model = BridgeTowerForMaskedLM.from_pretrained("BridgeTower/bridgetower-base-itm-mlm").to(torch_device)
        model.eval()
        processor = self.default_processor
        image = prepare_img()
        text = "a bunch of <mask> laying on a tower."
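        # The processor tokenizes the masked prompt and preprocesses the image in a
        # single call; the MLM head is then scored at the `<mask>` position below.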
        inputs = processor(image, text, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 11, 50265])
        self.assertEqual(outputs.logits.shape, expected_shape)

        # verify predicted word
        predicted_id = outputs.logits.argmax(dim=-1).squeeze(0).tolist()[4]
        self.assertTrue(processor.decode([predicted_id]) == " cats")

        # verify loss
        inputs["labels"] = inputs["input_ids"].clone()
        inputs = inputs.to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertAlmostEqual(outputs.loss.item(), 5.7373, places=4)

    @slow
    def test_contrastive_learning(self):
        model = BridgeTowerForContrastiveLearning.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc").to(
            torch_device
        )
        model.eval()
        processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc")
        image = prepare_img()
        text = "a bunch of cats laying on a tower."
        inputs = processor(image, text, padding=True, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs, output_hidden_states=True, return_loss=True)

        # verify the logits
        expected_shape = torch.Size([1, 3, 512])
        self.assertEqual(outputs.logits.shape, expected_shape)


@slow
@require_torch
class BridgeTowerModelTrainingTest(unittest.TestCase):
    all_training_supported_model_classes = (
        (BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerForContrastiveLearning)
        if is_torch_available()
        else ()
    )

    def setUp(self):
        self.model_tester = BridgeTowerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BridgeTowerConfig, hidden_size=37, vocab_size=99)

    def _prepare_inputs_for_training(self, model_class):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if model_class == BridgeTowerForMaskedLM:
            inputs_dict["labels"] = inputs_dict["input_ids"]
        elif model_class == BridgeTowerForImageAndTextRetrieval:
            inputs_dict["labels"] = ids_tensor([1], 2)
        elif model_class == BridgeTowerForContrastiveLearning:
            inputs_dict["return_loss"] = True
        return config, inputs_dict

    def _get_non_used_layer_names(self, model_class):
        non_used_layer_names = ["text_model.pooler"]
        if model_class == BridgeTowerForMaskedLM:
            non_used_layer_names = non_used_layer_names + [
                # This number `1` actually depends on the number of layers in `cross_modal_image_layers` (that number of layers minus 1)
                "cross_modal_image_layers.1",
                "cross_modal_image_pooler",
                "cross_modal_text_pooler",
            ]
        return non_used_layer_names

    def _is_layer_used(self, model_class, layer_name):
        non_used_layer_names = self._get_non_used_layer_names(model_class)
        for non_used_layer_name in non_used_layer_names:
            if non_used_layer_name in layer_name:
                return False
        return True

    def test_training(self):
        for model_class in self.all_training_supported_model_classes:
            config, inputs_dict = self._prepare_inputs_for_training(model_class)
            model = model_class(config)
            model.to(torch_device)
            model.train()

            loss = model(**inputs_dict).loss
            loss.backward()

            # verify the gradients of used layers' weights are not None
            for name, param in model.named_parameters():
                if self._is_layer_used(model_class, name):
                    self.assertIsNotNone(param.grad, f"Gradients should not be None - got {param.grad} for {name}")

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions.
        # The DINO model by Facebook AI leverages this to visualize self-attention on
        # higher resolution images.
        model_name = "BridgeTower/bridgetower-base"
        model = BridgeTowerModel.from_pretrained(model_name).to(torch_device)

        image_processor = BridgeTowerProcessor.from_pretrained(model_name, size={"shortest_edge": 180})

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(text="what's in the image", images=image, return_tensors="pt").to(torch_device)

        # passing interpolate_pos_encoding=False should raise a ValueError
        with self.assertRaises(ValueError, msg="doesn't match model"):
            with torch.no_grad():
                model(**inputs, interpolate_pos_encoding=False)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 122, 768))
        self.assertEqual(outputs.image_features.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.6518, 0.4978, -0.4544], [-2.6672, -0.0843, -0.4210], [-2.4510, -0.1002, -0.3458]]
        ).to(torch_device)
        torch.testing.assert_close(outputs.image_features[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/bridgetower/test_modeling_bridgetower.py/0
{ "file_path": "transformers/tests/models/bridgetower/test_modeling_bridgetower.py", "repo_id": "transformers", "token_count": 11946 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch chameleon model.""" import tempfile import unittest from transformers import ChameleonProcessor, LlamaTokenizer from transformers.testing_utils import get_tests_dir from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import ChameleonImageProcessor SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") class ChameleonProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = ChameleonProcessor def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = ChameleonImageProcessor() tokenizer = LlamaTokenizer(vocab_file=SAMPLE_VOCAB) tokenizer.pad_token_id = 0 tokenizer.sep_token_id = 1 processor = self.processor_class(image_processor=image_processor, tokenizer=tokenizer) processor.save_pretrained(self.tmpdirname)
transformers/tests/models/chameleon/test_processor_chameleon.py/0
{ "file_path": "transformers/tests/models/chameleon/test_processor_chameleon.py", "repo_id": "transformers", "token_count": 499 }
# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the FlauBERT tokenizer."""

import json
import os
import unittest

from transformers import FlaubertTokenizer
from transformers.models.flaubert.tokenization_flaubert import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class FlaubertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "flaubert/flaubert_base_cased"
    tokenizer_class = FlaubertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "i</w>", "lo", "low", "ne", "new", "er</w>", "low</w>", "lowest</w>", "new</w>", "newer</w>", "wider</w>", "<unk>"]  # fmt: skip
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["n e 300", "ne w 301", "e r</w> 302", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    # Copied from transformers.tests.models.xlm.test_tokenization_xlm.XLMTokenizationTest.test_full_tokenizer
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er</w>", "new", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 18, 17, 18, 24]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    # Copied from transformers.tests.models.xlm.test_tokenization_xlm.XLMTokenizationTest.test_sequence_builders
    def test_sequence_builders(self):
        tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
transformers/tests/models/flaubert/test_tokenization_flaubert.py/0
{ "file_path": "transformers/tests/models/flaubert/test_tokenization_flaubert.py", "repo_id": "transformers", "token_count": 1352 }
# coding=utf-8 # Copyright 2020 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "funnel-transformer/small" tokenizer_class = FunnelTokenizer rust_tokenizer_class = FunnelTokenizerFast test_rust_tokenizer = True space_between_special_tokens = True def setUp(self): super().setUp() vocab_tokens = [ "<unk>", "<cls>", "<sep>", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_tokenizer(self, **kwargs): return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "UNwant\u00e9d,running" output_text = "unwanted, running" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file) tokens = tokenizer.tokenize("UNwant\u00e9d,running") self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9]) def test_token_type_ids(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: inputs = tokenizer("UNwant\u00e9d,running") sentence_len = len(inputs["input_ids"]) - 1 self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len) inputs = tokenizer("UNwant\u00e9d,running", "UNwant\u00e9d,running") self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
transformers/tests/models/funnel/test_tokenization_funnel.py/0
{ "file_path": "transformers/tests/models/funnel/test_tokenization_funnel.py", "repo_id": "transformers", "token_count": 1277 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch GraniteMoe model.""" import unittest from parameterized import parameterized from transformers import AutoTokenizer, GraniteMoeConfig, is_torch_available, set_seed from transformers.testing_utils import ( require_read_token, require_torch, require_torch_gpu, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor if is_torch_available(): import torch from transformers import ( GraniteMoeForCausalLM, GraniteMoeModel, ) from transformers.models.granitemoe.modeling_granitemoe import ( GraniteMoeRotaryEmbedding, ) class GraniteMoeModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device)) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return GraniteMoeConfig( vocab_size=self.vocab_size, 
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = GraniteMoeModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = GraniteMoeModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = GraniteMoeForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = GraniteMoeForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and next attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = 
output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class GraniteMoeModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = ( ( GraniteMoeModel, GraniteMoeForCausalLM, ) if is_torch_available() else () ) all_generative_model_classes = (GraniteMoeForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": GraniteMoeModel, "text-generation": GraniteMoeForCausalLM, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False fx_compatible = False # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] def setUp(self): self.model_tester = GraniteMoeModelTester(self) self.config_tester = ConfigTester(self, config_class=GraniteMoeConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip("GraniteMoe buffers include complex numbers, which breaks this test") def test_save_load_fast_init_from_base(self): pass @parameterized.expand([("linear",), ("dynamic",)]) def test_model_rope_scaling_from_config(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = GraniteMoeModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = GraniteMoeModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
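        # (Linear scaling, by contrast, rescales positions from the first token on, so
        # even the short input is expected to diverge from the unscaled baseline.)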
        if scaling_type == "dynamic":
            torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))

    def test_model_rope_scaling(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        scaling_factor = 10
        short_input_length = 10
        long_input_length = int(config.max_position_embeddings * 1.5)

        # Inputs
        x = torch.randn(1, dtype=torch.float32, device=torch_device)  # used exclusively to get the dtype and the device
        position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device)
        position_ids_short = position_ids_short.unsqueeze(0)
        position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device)
        position_ids_long = position_ids_long.unsqueeze(0)

        # Sanity check original RoPE
        original_rope = GraniteMoeRotaryEmbedding(config=config).to(torch_device)
        original_cos_short, original_sin_short = original_rope(x, position_ids_short)
        original_cos_long, original_sin_long = original_rope(x, position_ids_long)
        torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :])
        torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :])

        # Sanity check linear RoPE scaling
        # New position "x" should match original position with index "x/scaling_factor"
        config.rope_scaling = {"type": "linear", "factor": scaling_factor}
        linear_scaling_rope = GraniteMoeRotaryEmbedding(config=config).to(torch_device)
        linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short)
        linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long)
        torch.testing.assert_close(linear_cos_short, linear_cos_long[:, :short_input_length, :])
        torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :])
        for new_position in range(0, long_input_length, scaling_factor):
            original_position = int(new_position // scaling_factor)
            torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :])
            torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, :])

        # Sanity check Dynamic NTK RoPE scaling
        # Scaling should only be observed after a long input is fed. 
We can observe that the frequencies increase # with scaling_factor (or that `inv_freq` decreases) config.rope_scaling = {"type": "dynamic", "factor": scaling_factor} ntk_scaling_rope = GraniteMoeRotaryEmbedding(config=config).to(torch_device) ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short) ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long) torch.testing.assert_close(ntk_cos_short, original_cos_short) torch.testing.assert_close(ntk_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_sin_long, original_sin_long) self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all()) # Sanity check Yarn RoPE scaling # Scaling should be over the entire input config.rope_scaling = {"type": "yarn", "factor": scaling_factor} yarn_scaling_rope = GraniteMoeRotaryEmbedding(config=config).to(torch_device) yarn_cos_short, yarn_sin_short = yarn_scaling_rope(x, position_ids_short) yarn_cos_long, yarn_sin_long = yarn_scaling_rope(x, position_ids_long) torch.testing.assert_close(yarn_cos_short, yarn_cos_long[:, :short_input_length, :]) torch.testing.assert_close(yarn_sin_short, yarn_sin_long[:, :short_input_length, :]) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_cos_short, original_cos_short) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_sin_long, original_sin_long) @require_torch_gpu class GraniteMoeIntegrationTest(unittest.TestCase): # This variable is used to determine which CUDA device are we using for our runners (A10 or T4) # Depending on the hardware we get different logits / generations cuda_compute_capability_major_version = None @classmethod def setUpClass(cls): if is_torch_available() and torch.cuda.is_available(): # 8 is for A100 / A10 and 7 for T4 cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0] @slow @require_read_token def test_model_3b_logits(self): input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338] model = GraniteMoeForCausalLM.from_pretrained("ibm/PowerMoE-3b", device_map="auto") with torch.no_grad(): out = model(torch.tensor([input_ids]).to(torch_device)) # fmt: off # Expected mean on dim = -1 EXPECTED_MEAN = torch.tensor([[-2.2122, -1.6632, -2.9269, -2.3344, -2.0143, -3.0146, -2.6839, -2.5610]]) torch.testing.assert_close(EXPECTED_MEAN.to(torch_device), out.logits.float().mean(-1), rtol=1e-2, atol=1e-2) # slicing logits[0, 0, 0:15] EXPECTED_SLICE = torch.tensor([[4.8785, -2.2890, -2.2892, -2.2885, -2.2890, -3.5007, -2.2897, -2.2892, -2.2895, -2.2891, -2.2887, -2.2882, -2.2889, -2.2898, -2.2892]]) # fmt: on self.assertTrue( torch.allclose( EXPECTED_SLICE.to(torch_device), out.logits[0, 0, :15].float(), atol=1e-3, rtol=1e-3, ) ) @slow def test_model_3b_generation(self): # ground truth text generated with dola_layers="low", repetition_penalty=1.2 EXPECTED_TEXT_COMPLETION = ( "Simply put, the theory of relativity states that \n$$\n\\frac{d^2x^\\mu}{d\\tau^2} = " "\\frac{1}{c^2}\\frac{d^2x^\\mu}{dt^2}\n$$\nwhere $x^\\mu$ is a four-vector, $\\tau$ is the proper time" ) prompt = "Simply put, the theory of relativity states that " tokenizer = AutoTokenizer.from_pretrained("ibm/PowerMoE-3b") model = 
GraniteMoeForCausalLM.from_pretrained("ibm/PowerMoE-3b", device_map="auto") model_inputs = tokenizer(prompt, return_tensors="pt").to(model.device) # greedy generation outputs generated_ids = model.generate(**model_inputs, max_new_tokens=64, top_p=None, temperature=1, do_sample=False) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
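
# Illustrative sketch (not part of the upstream test file): why the linear-scaling loop in
# `test_model_rope_scaling` above holds. Linear RoPE scaling divides position ids by `factor` before
# computing the rotation angles, so scaled position `n * factor` yields exactly the same cos/sin as
# unscaled position `n`. `dim` and `base` below are arbitrary example values, not GraniteMoe's config.
def _rope_linear_scaling_demo(dim=8, base=10000.0, factor=4.0):
    import torch

    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))  # per-pair rotation frequencies
    positions = torch.arange(16).float()
    angles_original = torch.outer(positions, inv_freq)          # (seq_len, dim // 2)
    angles_linear = torch.outer(positions / factor, inv_freq)   # positions shrunk by `factor`
    # scaled position 12 with factor 4 matches original position 3
    torch.testing.assert_close(angles_linear[12], angles_original[3])
# _rope_linear_scaling_demo()  # runs standalone; not collected by unittest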
transformers/tests/models/granitemoe/test_modeling_granitemoe.py/0
{ "file_path": "transformers/tests/models/granitemoe/test_modeling_granitemoe.py", "repo_id": "transformers", "token_count": 8881 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy import inspect import math import os import tempfile import unittest import numpy as np import pytest from transformers import is_tf_available from transformers.testing_utils import is_pt_tf_cross_test, require_soundfile, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import HubertConfig, TFHubertForCTC, TFHubertModel, Wav2Vec2Processor from transformers.models.hubert.modeling_tf_hubert import _compute_mask_indices @require_tf class TFHubertModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, # this is most likely not correctly set yet intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = tf.cast(ids_tensor([self.batch_size, self.seq_length], 32768), tf.float32) / 32768.0 attention_mask = tf.ones_like(input_values) config = HubertConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, 
conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, do_stable_layer_norm=self.do_stable_layer_norm, ) return config, input_values, attention_mask def create_and_check_model(self, config, input_values, attention_mask): model = TFHubertModel(config) result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 config.layerdrop = 0.0 model = TFHubertModel(config) input_values = input_values[:3] attention_mask = tf.ones_like(input_values) input_lengths = tf.constant([input_values.shape[-1] // i for i in [4, 2, 1]]) length_mask = tf.sequence_mask(input_lengths, dtype=tf.float32) # convert values that are over input_lengths to padding input_values = input_values * length_mask attention_mask = attention_mask * length_mask batch_outputs = model(input_values, attention_mask=attention_mask, training=False).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice, training=False).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(np.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = TFHubertForCTC(config) input_values = input_values[:3] attention_mask = tf.ones_like(input_values) input_lengths = tf.constant([input_values.shape[-1] // i for i in [4, 2, 1]]) max_length_labels = model.hubert._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) length_mask = tf.sequence_mask(input_lengths, dtype=tf.float32) # convert values that are over input_lengths to padding input_values = input_values * length_mask attention_mask = attention_mask * length_mask model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss self.parent.assertTrue(abs(labels.shape[0] * mean_loss - sum_loss) < 1e-2) def check_training(self, config, input_values, *args): model = TFHubertForCTC(config) # freeze feature encoder model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = tf.constant([input_values.shape[-1] // i for i in [4, 2, 1]]) max_length_labels = model.hubert._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) length_mask = tf.sequence_mask(input_lengths, dtype=tf.float32) input_values = input_values * length_mask pad_size = max(max_length_labels) - labels.shape[1] labels = tf.pad(labels, ((0, 0), (0, pad_size)), constant_values=-100) loss = model(input_values, labels=labels, training=True).loss 
self.parent.assertFalse(tf.math.is_inf(loss)) def check_labels_out_of_vocab(self, config, input_values, *args): model = TFHubertForCTC(config) input_lengths = tf.constant([input_values.shape[-1] // i for i in [4, 2, 1]]) max_length_labels = model.hubert._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size + 100) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_tf class TFHubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFHubertModel, TFHubertForCTC) if is_tf_available() else () pipeline_model_mapping = {"feature-extraction": TFHubertModel} if is_tf_available() else {} test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFHubertModelTester(self) self.config_tester = ConfigTester(self, config_class=HubertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() # overwrite because input_values != input_ids def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values"] self.assertListEqual(arg_names[:1], expected_arg_names) # overwrite because input_values != input_ids def test_keyword_and_dict_args(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) outputs_dict = model(inputs) inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_values = inputs_keywords.pop("input_values", None) outputs_keywords = model(input_values, **inputs_keywords) output_dict = outputs_dict[0].numpy() output_keywords = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_hidden_states_output(config, inputs_dict, model_class): model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) hidden_states = outputs.hidden_states self.assertEqual(config.output_attentions, False) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.output_seq_length, self.model_tester.hidden_size], ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(config, inputs_dict, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(config, inputs_dict, model_class) def test_ctc_loss_inference(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) @unittest.skip(reason="Hubert has no input embeddings") def test_inputs_embeds(self): pass @unittest.skip(reason="Hubert has no tokens embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Hubert has no input embeddings") def test_model_common_attributes(self): pass @slow def test_model_from_pretrained(self): model = TFHubertModel.from_pretrained("facebook/hubert-base-ls960") self.assertIsNotNone(model) @unittest.skip(reason="Fix me! Hubert hits OOM errors when loss is computed on full batch") def test_dataset_conversion(self): # TODO: (Amy) - check whether skipping CTC model resolves this issue and possible resolutions for CTC pass @unittest.skip(reason="Fix me! Hubert hits OOM errors when loss is computed on full batch") def test_keras_fit(self): # TODO: (Amy) - check whether skipping CTC model resolves this issue and possible resolutions for CTC pass @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self, allow_missing_keys=False): # We override the base test here to skip loss calculation for Hubert models because the loss is massive with # the default labels and frequently overflows to inf or exceeds numerical tolerances between TF/PT import torch import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`. # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it. 
self._make_attention_mask_non_null(inputs_dict) pt_model_class_name = model_class.__name__[2:] # Skip the "TF" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) tf_model = model_class(config) pt_model = pt_model_class(config) tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class) # Check we can load pt model in tf and vice-versa with model => model functions tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) # Check we can load pt model in tf and vice-versa with checkpoint => model functions with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) @require_tf class TFHubertRobustModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFHubertModel, TFHubertForCTC) if is_tf_available() else () test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFHubertModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True, scope="robust", ) self.config_tester = ConfigTester(self, config_class=HubertConfig, hidden_size=37) # overwrite because input_values != input_ids def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values"] self.assertListEqual(arg_names[:1], expected_arg_names) # overwrite because input_values != input_ids def test_keyword_and_dict_args(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) inputs = self._prepare_for_class(inputs_dict, model_class) outputs_dict = model(inputs) inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_values = inputs_keywords.pop("input_values", None) outputs_keywords = model(input_values, **inputs_keywords) output_dict = outputs_dict[0].numpy() output_keywords = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_hidden_states_output(config, inputs_dict, model_class): model = model_class(config) outputs = 
model(self._prepare_for_class(inputs_dict, model_class)) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) hidden_states = outputs.hidden_states self.assertEqual(config.output_attentions, False) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.output_seq_length, self.model_tester.hidden_size], ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(config, inputs_dict, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(config, inputs_dict, model_class) def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) @unittest.skip(reason="Hubert has no input embeddings") def test_inputs_embeds(self): pass @unittest.skip(reason="Hubert has no tokens embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Hubert has no input embeddings or get_input_embeddings method") def test_model_common_attributes(self): pass @slow def test_model_from_pretrained(self): model = TFHubertModel.from_pretrained("facebook/hubert-large-ls960-ft") self.assertIsNotNone(model) @unittest.skip(reason="Fix me! Hubert hits OOM errors when loss is computed on full batch") def test_dataset_conversion(self): # TODO: (Amy) - check whether skipping CTC model resolves this issue and possible resolutions for CTC pass @unittest.skip(reason="Fix me! Hubert hits OOM errors when loss is computed on full batch") def test_keras_fit(self): # TODO: (Amy) - check whether skipping CTC model resolves this issue and possible resolutions for CTC pass @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self, allow_missing_keys=False): # We override the base test here to skip loss calculation for Hubert models because the loss is massive with # the default labels and frequently overflows to inf or exceeds numerical tolerances between TF/PT import torch import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`. # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it. 
self._make_attention_mask_non_null(inputs_dict) pt_model_class_name = model_class.__name__[2:] # Skip the "TF" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) tf_model = model_class(config) pt_model = pt_model_class(config) tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class) # Check we can load pt model in tf and vice-versa with model => model functions tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) # Check we can load pt model in tf and vice-versa with checkpoint => model functions with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict) @require_tf class TFHubertUtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) self.assertListEqual( tf.reduce_sum(mask, -1).numpy().tolist(), [mask_prob * sequence_length for _ in range(batch_size)] ) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal for batch_sum in tf.reduce_sum(mask, -1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) @require_tf @slow @require_soundfile class TFHubertModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_inference_ctc_normal(self): model = TFHubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft") processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft", do_lower_case=True) input_speech = self._load_datasamples(1) input_values = processor(input_speech, return_tensors="tf", sampling_rate=16000).input_values logits = model(input_values).logits predicted_ids = tf.argmax(logits, axis=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_normal_batched(self): model = TFHubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft") processor = 
Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft", do_lower_case=True) input_speech = self._load_datasamples(2) input_values = processor(input_speech, return_tensors="tf", padding=True, sampling_rate=16000).input_values logits = model(input_values).logits predicted_ids = tf.argmax(logits, axis=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_robust_batched(self): model = TFHubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft") processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft", do_lower_case=True) input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="tf", padding=True, sampling_rate=16000) input_values = inputs.input_values attention_mask = inputs.attention_mask logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = tf.argmax(logits, axis=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around" " him with the thousands of spectators were trivialities not worth thinking about", "his instant of panic was followed by a small sharp blow high on his chest", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
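
# Illustrative sketch (not part of the upstream test file): the length arithmetic behind
# `_get_feat_extract_output_lengths` used in the CTC tests above. Each 1-d conv with no padding maps
# a length L to (L - kernel) // stride + 1; the kernel/stride values below mirror this tester's
# defaults (conv_kernel=(8, 8, 8), conv_stride=(4, 4, 4)), not the pretrained Hubert checkpoints.
def _conv_output_length_demo(input_length=1024, kernels=(8, 8, 8), strides=(4, 4, 4)):
    for kernel, stride in zip(kernels, strides):
        input_length = (input_length - kernel) // stride + 1
    return input_length

# 1024 raw samples -> 255 -> 62 -> 14 frames, the same value the tester's `output_seq_length` arrives at.
# Similar arithmetic drives `test_compute_mask_indices`: with mask_length=1 there is no overlap, so
# each row masks exactly mask_prob * sequence_length = 0.5 * 60 = 30 positions.
assert _conv_output_length_demo(1024) == 14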
transformers/tests/models/hubert/test_modeling_tf_hubert.py/0
{ "file_path": "transformers/tests/models/hubert/test_modeling_tf_hubert.py", "repo_id": "transformers", "token_count": 12352 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch LayoutLMv3 model.""" import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMv3Config, LayoutLMv3ForQuestionAnswering, LayoutLMv3ForSequenceClassification, LayoutLMv3ForTokenClassification, LayoutLMv3Model, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMv3ImageProcessor class LayoutLMv3ModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.text_seq_length = text_seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.coordinate_size = coordinate_size self.shape_size = shape_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.range_bbox = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) self.text_seq_length = text_seq_length self.image_seq_length = (image_size // patch_size) ** 2 + 1 self.seq_length = self.text_seq_length + self.image_seq_length def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, 
self.text_seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.text_seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size) sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels) config = LayoutLMv3Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def create_and_check_model( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): model = LayoutLMv3Model(config=config) model.to(torch_device) model.eval() # text + image result = model(input_ids, pixel_values=pixel_values) result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids ) result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox, pixel_values=pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # text only result = model(input_ids) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only result = model(pixel_values=pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) ) def create_and_check_for_sequence_classification( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): config.num_labels = self.num_labels model = LayoutLMv3ForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): config.num_labels = self.num_labels model = LayoutLMv3ForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, 
pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): model = LayoutLMv3ForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_torchscript = False test_mismatched_shapes = False all_model_classes = ( ( LayoutLMv3Model, LayoutLMv3ForSequenceClassification, LayoutLMv3ForTokenClassification, LayoutLMv3ForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model} if is_torch_available() else {} ) # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. 
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def setUp(self): self.model_tester = LayoutLMv3ModelTester(self) self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() if isinstance(v, torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device) elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class in [ *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "microsoft/layoutlmv3-base" model = LayoutLMv3Model.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class LayoutLMv3ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None @slow def test_inference_no_head(self): model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device) image_processor = self.default_image_processor image = prepare_img() pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device) input_ids = torch.tensor([[1, 2]]) bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0) # forward pass outputs = model( 
input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), ) # verify the logits expected_shape = torch.Size((1, 199, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device) torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
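
# Illustrative sketch (not part of the upstream test file): where the `(1, 199, 768)` hidden-state
# shape in `test_inference_no_head` comes from. LayoutLMv3 concatenates the text embeddings with the
# visual patch embeddings plus one CLS patch token. The 224/16 defaults below are the base
# checkpoint's image and patch sizes; 768 is simply the base model's hidden width.
def _layoutlmv3_seq_length(text_seq_length, image_size=224, patch_size=16):
    image_seq_length = (image_size // patch_size) ** 2 + 1  # 196 patches + 1 CLS token
    return text_seq_length + image_seq_length

assert _layoutlmv3_seq_length(2) == 199  # 2 text tokens in the integration test above
assert _layoutlmv3_seq_length(7, image_size=4, patch_size=2) == 12  # the tiny tester config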
transformers/tests/models/layoutlmv3/test_modeling_layoutlmv3.py/0
{ "file_path": "transformers/tests/models/layoutlmv3/test_modeling_layoutlmv3.py", "repo_id": "transformers", "token_count": 7608 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LlavaOnevisionImageProcessor if is_torchvision_available(): from transformers import LlavaOnevisionImageProcessorFast, LlavaOnevisionVideoProcessor class LlavaOnevisionImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=20, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD, do_convert_rgb=True, ): super().__init__() size = size if size is not None else {"height": 20, "width": 20} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTester.prepare_image_inputs def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) # Copied from tests.models.llava_next_video.test_image_processing_llava_next_video.LlavaNextVideoProcessingTester.prepare_video_inputs def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False): images = prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) # let's simply copy the frames to fake a long video-clip if numpify or torchify: videos = [] for image in images: if numpify: video = image[None, ...].repeat(8, 0) else: video = image[None, ...].repeat(8, 1, 1, 1) videos.append(video) else: videos = [] for pil_image in images: videos.append([pil_image] * 8) return videos @require_torch @require_vision class 
LlavaOnevisionImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = LlavaOnevisionImageProcessor if is_vision_available() else None fast_image_processing_class = LlavaOnevisionImageProcessorFast if is_torchvision_available() else None video_processing_class = LlavaOnevisionVideoProcessor if is_vision_available() else None # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.setUp with CLIP->LlavaOnevision def setUp(self): super().setUp() self.image_processor_tester = LlavaOnevisionImageProcessingTester(self) @property # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.image_processor_dict def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) self.assertTrue(hasattr(image_processing, "image_grid_pinpoints")) def test_video_processor_properties(self): image_processing = self.video_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 20, "width": 20}) image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"shortest_edge": 42}) def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values 
expected_output_image_shape = (1, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) @unittest.skip( reason="LlavaOnevisionImageProcessor doesn't treat 4 channel PIL and numpy consistently yet" ) # FIXME raushan def test_call_numpy_4_channels(self): pass def test_nested_input(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) # Test batched as a list of images encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched as a nested list of images, where each sublist is one batch image_inputs_nested = [image_inputs[:3], image_inputs[3:]] encoded_images_nested = image_processing(image_inputs_nested, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape) # Image processor should return same pixel values, independently of input format self.assertTrue((encoded_images_nested == encoded_images).all()) def test_call_pil_video(self): # Initialize image_processing video_processing = self.video_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=True) for video in video_inputs: self.assertIsInstance(video[0], Image.Image) encoded_videos = video_processing(video_inputs[0], return_tensors="pt").pixel_values_videos expected_output_video_shape = (1, 8, 3, 20, 20) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) # Test batched encoded_videos = video_processing(video_inputs, return_tensors="pt").pixel_values_videos expected_output_video_shape = (7, 8, 3, 20, 20) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) def test_call_numpy_video(self): # Initialize image_processing video_processing = self.video_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=True, numpify=True) for video in video_inputs: self.assertIsInstance(video, np.ndarray) encoded_videos = video_processing(video_inputs[0], 
return_tensors="pt").pixel_values_videos expected_output_video_shape = (1, 8, 3, 20, 20) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) # Test batched encoded_videos = video_processing(video_inputs, return_tensors="pt").pixel_values_videos expected_output_video_shape = (7, 8, 3, 20, 20) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) def test_call_pytorch_video(self): # Initialize image_processing video_processing = self.video_processing_class(**self.image_processor_dict) # create random PyTorch tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=True, torchify=True) for video in video_inputs: self.assertIsInstance(video, torch.Tensor) # Test not batched input encoded_videos = video_processing(video_inputs[0], return_tensors="pt").pixel_values_videos expected_output_video_shape = (1, 8, 3, 20, 20) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) # Test batched encoded_videos = video_processing(video_inputs, return_tensors="pt").pixel_values_videos expected_output_video_shape = (7, 8, 3, 20, 20) self.assertEqual(tuple(encoded_videos.shape), expected_output_video_shape) @unittest.skip( reason="LlavaOnevisionImageProcessorFast doesn't compile (infinitely) when using class transforms" ) # FIXME yoni def test_can_compile_fast_image_processor(self): pass
transformers/tests/models/llava_onevision/test_image_processing_llava_onevision.py/0
{ "file_path": "transformers/tests/models/llava_onevision/test_image_processing_llava_onevision.py", "repo_id": "transformers", "token_count": 5826 }
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "openai-community/openai-gpt" """Tests OpenAIGPTTokenizer that uses BERT BasicTokenizer.""" tokenizer_class = OpenAIGPTTokenizer rust_tokenizer_class = OpenAIGPTTokenizerFast test_rust_tokenizer = True test_seq2seq = False def setUp(self): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(self.merges_file, "w") as fp: fp.write("\n".join(merges)) def get_input_output_texts(self, tokenizer): return "lower newer", "lower newer" def test_full_tokenizer(self): tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file) text = "lower" bpe_tokens = ["low", "er</w>"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + ["<unk>"] input_bpe_tokens = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) def test_padding(self, max_length=15): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Simple input s = "This is a simple input" s2 = ["This is a simple input 1", "This is a simple input 2"] p = ("This is a simple input", "This is a pair") p2 = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length") # Simple input self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length") # Simple input self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", ) # Pair input self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length") # Pair input self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, 
padding="max_length") # Pair input self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", ) @unittest.skip(reason="tokenizer has no padding token") def test_padding_different_model_input_name(self): pass @require_ftfy @require_spacy @require_tokenizers class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest): """Tests OpenAIGPTTokenizer that uses SpaCy and ftfy.""" pass
transformers/tests/models/openai/test_tokenization_openai.py/0
{ "file_path": "transformers/tests/models/openai/test_tokenization_openai.py", "repo_id": "transformers", "token_count": 2438 }
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch PaliGemma model.""" import unittest from transformers import ( PaliGemmaConfig, PaliGemmaForConditionalGeneration, is_torch_available, is_vision_available, ) from transformers.testing_utils import ( require_torch, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch if is_vision_available(): pass class PaliGemma2VisionText2TextModelTester: def __init__( self, parent, ignore_index=-100, image_token_index=0, projector_hidden_act="gelu", seq_length=25, vision_feature_select_strategy="default", vision_feature_layer=-1, projection_dim=32, text_config={ "model_type": "gemma2", "seq_length": 128, "is_training": True, # "use_input_mask": True, "use_token_type_ids": False, "use_labels": True, "vocab_size": 99, "hidden_size": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "num_key_value_heads": 1, "head_dim": 8, "intermediate_size": 37, "hidden_activation": "gelu_pytorch_tanh", "hidden_dropout_prob": 0.1, "attention_probs_dropout_prob": 0.1, "max_position_embeddings": 512, "type_vocab_size": 16, "type_sequence_label_size": 2, "initializer_range": 0.02, "num_labels": 3, "num_choices": 4, "pad_token_id": 1, }, is_training=True, vision_config={ "use_labels": True, "image_size": 20, "patch_size": 5, "num_image_tokens": 4, "num_channels": 3, "is_training": True, "hidden_size": 32, "projection_dim": 32, "num_key_value_heads": 1, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "dropout": 0.1, "attention_dropout": 0.1, "initializer_range": 0.02, }, use_cache=False, ): self.parent = parent self.ignore_index = ignore_index # `image_token_index` is set to 0 to pass "resize_embeddings" test, do not modify self.image_token_index = image_token_index self.projector_hidden_act = projector_hidden_act self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer self.text_config = text_config self.vision_config = vision_config self.seq_length = seq_length self.projection_dim = projection_dim self.pad_token_id = text_config["pad_token_id"] self.num_hidden_layers = text_config["num_hidden_layers"] self.vocab_size = text_config["vocab_size"] self.hidden_size = text_config["hidden_size"] self.num_attention_heads = text_config["num_attention_heads"] self.is_training = is_training self.batch_size = 3 self.num_channels = vision_config["num_channels"] self.image_size = vision_config["image_size"] self.encoder_seq_length = seq_length self.use_cache = use_cache def get_config(self): return PaliGemmaConfig( text_config=self.text_config, vision_config=self.vision_config, ignore_index=self.ignore_index, image_token_index=self.image_token_index, projector_hidden_act=self.projector_hidden_act, 
            projection_dim=self.projection_dim,
            vision_feature_select_strategy=self.vision_feature_select_strategy,
            vision_feature_layer=self.vision_feature_layer,
        )

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.vision_config["num_channels"],
                self.vision_config["image_size"],
                self.vision_config["image_size"],
            ]
        )
        config = self.get_config()

        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1
        attention_mask = input_ids.ne(self.pad_token_id).to(torch_device)
        # set the first 16 tokens to be image tokens, and ensure that no other tokens are image tokens
        # do not change this unless you modified image size or patch size
        input_ids[input_ids == config.image_token_index] = self.pad_token_id
        input_ids[:, :16] = config.image_token_index
        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "labels": input_ids,
            "token_type_ids": torch.zeros_like(input_ids),
        }
        return config, inputs_dict


@require_torch
class PaliGemma2ForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """
    Model tester for `PaliGemmaForConditionalGeneration`.
    """

    all_model_classes = (PaliGemmaForConditionalGeneration,) if is_torch_available() else ()
    all_generative_model_classes = (PaliGemmaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-text-to-text": PaliGemmaForConditionalGeneration}
    fx_compatible = False
    test_pruning = False
    test_torchscript = False
    test_head_masking = False
    _is_composite = True

    def setUp(self):
        self.model_tester = PaliGemma2VisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PaliGemmaConfig, has_text_modality=False)

    # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = self._prepare_for_class(inputs_dict, model_class)

            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]

            wte = model.get_input_embeddings()
            inputs["inputs_embeds"] = wte(input_ids)

            with torch.no_grad():
                model(**inputs)

    # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
    # while some other models require pixel_values to be present
    def test_inputs_embeds_matches_input_ids(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = self._prepare_for_class(inputs_dict, model_class)
            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]

            inputs_embeds = model.get_input_embeddings()(input_ids)

            with torch.no_grad():
                out_ids = model(input_ids=input_ids, **inputs)[0]
                out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
            torch.testing.assert_close(out_embeds, out_ids)

    # Copied from tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_mismatching_num_image_tokens
    def test_mismatching_num_image_tokens(self):
        """
        Tests that VLMs throw an error with an explicit message saying what is wrong
        when the number of images doesn't match the number of image tokens in the
        text. Also we need to test multi-image cases when one prompt has multiple image tokens.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            _ = model(**input_dict)  # successful forward with no modifications

            # remove one image but leave the image token in text
            input_dict["pixel_values"] = input_dict["pixel_values"][-1:, ...]
            with self.assertRaises(ValueError):
                _ = model(**input_dict)

            # simulate multi-image case by concatenating inputs where each has exactly one image/image-token
            input_ids = input_dict["input_ids"][:1]
            pixel_values = input_dict["pixel_values"][:1]
            input_ids = torch.cat([input_ids, input_ids], dim=0)

            # one image and two image tokens raise an error
            with self.assertRaises(ValueError):
                _ = model(input_ids=input_ids, pixel_values=pixel_values)

            # two images and two image tokens don't raise an error
            pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
            _ = model(input_ids=input_ids, pixel_values=pixel_values)

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
    def test_cpu_offload(self):
        pass

    @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
    def test_disk_offload_bin(self):
        pass

    @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
    def test_disk_offload_safetensors(self):
        pass

    @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.")
    def test_model_parallelism(self):
        pass

    @unittest.skip(
        reason="PaliGemma's SigLip encoder uses the same initialization scheme as the Flax original implementation"
    )
    def test_initialization(self):
        pass

    # TODO extend valid outputs to include this test @Molbap
    @unittest.skip(reason="PaliGemma currently has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    # TODO fix the loss = nan in the testing configuration chosen @Molbap
    @unittest.skip(reason="Edge case giving loss nan values in testing configuration.")
    def test_determinism(self):
        pass

    @unittest.skip(reason="PaliGemma does not use feedforward chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip(reason="PaliGemma does not support low_cpu_mem_usage.")
    def test_save_load_low_cpu_mem_usage(self):
        pass

    @unittest.skip(reason="PaliGemma does not support low_cpu_mem_usage.")
    def test_save_load_low_cpu_mem_usage_checkpoints(self):
        pass

    @unittest.skip(reason="PaliGemma does not support low_cpu_mem_usage.")
    def test_save_load_low_cpu_mem_usage_no_safetensors(self):
        pass

    @unittest.skip(
        reason="VLMs don't accept inputs embeds and pixel values at the same time. 
So if the test passed for the backbone LM, it passes for the VLM as well."
    )
    def test_generate_from_inputs_embeds_with_static_cache(self):
        pass

    @unittest.skip("FlashAttention only supports fp16 and bf16 data types")
    def test_flash_attn_2_fp32_ln(self):
        pass

    @unittest.skip(
        "VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. Can be tested as part of LLM test"
    )
    def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
        pass

    # TODO (joao, raushan): fix me -- the problem is in `cache_position[0] == 0`, i.e. dynamic control flow
    @unittest.skip("PaliGemma is not compatible with end-to-end generation compilation")
    def test_generate_compile_model_forward(self):
        pass

    @unittest.skip("Low memory will be removed soon so no need to fix it")
    def test_beam_search_low_memory(self):
        pass
transformers/tests/models/paligemma2/test_modeling_paligemma2.py/0
{ "file_path": "transformers/tests/models/paligemma2/test_modeling_paligemma2.py", "repo_id": "transformers", "token_count": 5767 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Persimmon model.""" import gc import unittest from parameterized import parameterized from transformers import PersimmonConfig, is_torch_available, set_seed from transformers.testing_utils import ( backend_empty_cache, require_bitsandbytes, require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoTokenizer, PersimmonForCausalLM, PersimmonForSequenceClassification, PersimmonForTokenClassification, PersimmonModel, ) from transformers.models.persimmon.modeling_persimmon import PersimmonRotaryEmbedding # Copied from tests.models.llama.test_modeling_llama.LlamaModelTester with Llama->Persimmon class PersimmonModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device)) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) 
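            # choice_labels are kept for signature parity with the Llama tester this class is copied from;
            # Persimmon defines no multiple-choice head, so they are only threaded through unused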
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return PersimmonConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = PersimmonModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = PersimmonModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = PersimmonForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = PersimmonForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and next attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class PersimmonModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (PersimmonModel, PersimmonForCausalLM, PersimmonForSequenceClassification, PersimmonForTokenClassification) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": PersimmonModel, "text-classification": PersimmonForSequenceClassification, "token-classification": PersimmonForTokenClassification, # TODO (ydshieh): check why these two fail. Fix them or skip them in a better way. # "text-generation": PersimmonForCausalLM, # "zero-shot": PersimmonForSequenceClassification, } if is_torch_available() else {} ) all_generative_model_classes = (PersimmonForCausalLM,) if is_torch_available() else () test_headmasking = False test_pruning = False # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.setUp with Llama->Persimmon def setUp(self): self.model_tester = PersimmonModelTester(self) self.config_tester = ConfigTester(self, config_class=PersimmonConfig, hidden_size=37) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_config def test_config(self): self.config_tester.run_common_tests() # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model_various_embeddings def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model with Llama->Persimmon,llama->persimmon def test_persimmon_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = PersimmonForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from 
tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model_for_single_label with Llama->Persimmon,llama->persimmon def test_persimmon_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = PersimmonForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model_for_multi_label with Llama->Persimmon,llama->persimmon def test_persimmon_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = PersimmonForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_token_classification_model with Llama->Persimmon,llama->persimmon def test_persimmon_token_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) token_labels = ids_tensor([self.model_tester.batch_size, self.model_tester.seq_length], config.num_labels) model = PersimmonForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=token_labels) self.assertEqual( result.logits.shape, (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels), ) @unittest.skip(reason="Persimmon buffers include complex numbers, which breaks this test") # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_save_load_fast_init_from_base def test_save_load_fast_init_from_base(self): pass @parameterized.expand([("linear",), ("dynamic",)]) # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model_rope_scaling_from_config with Llama->Persimmon def test_model_rope_scaling_from_config(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = PersimmonModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at 
init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = PersimmonModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5)
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))

    # Copied from tests.models.gpt_neox.test_modeling_gpt_neox.GPTNeoXModelTest.test_model_rope_scaling with GPTNeoX->Persimmon
    def test_model_rope_scaling(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        scaling_factor = 10
        short_input_length = 10
        long_input_length = int(config.max_position_embeddings * 1.5)

        # Inputs
        x = torch.randn(1, dtype=torch.float32, device=torch_device)  # used exclusively to get the dtype and the device
        position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device)
        position_ids_short = position_ids_short.unsqueeze(0)
        position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device)
        position_ids_long = position_ids_long.unsqueeze(0)

        # Sanity check original RoPE
        original_rope = PersimmonRotaryEmbedding(config).to(torch_device)
        original_cos_short, original_sin_short = original_rope(x, position_ids_short)
        original_cos_long, original_sin_long = original_rope(x, position_ids_long)
        torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :])
        torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :])

        # Sanity check linear RoPE scaling
        # New position "x" should match original position with index "x/scaling_factor"
        config.rope_scaling = {"type": "linear", "factor": scaling_factor}
        linear_scaling_rope = PersimmonRotaryEmbedding(config).to(torch_device)
        linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short)
        linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long)
        torch.testing.assert_close(linear_cos_short, linear_cos_long[:, :short_input_length, :])
        torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :])
        for new_position in range(0, long_input_length, scaling_factor):
            original_position = int(new_position // scaling_factor)
            torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :])
            torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, :])

        # Sanity check Dynamic NTK RoPE scaling
        # Scaling should only be observed after a long input is fed. 
We can observe that the frequencies increase # with scaling_factor (or that `inv_freq` decreases) config.rope_scaling = {"type": "dynamic", "factor": scaling_factor} ntk_scaling_rope = PersimmonRotaryEmbedding(config).to(torch_device) ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short) ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long) torch.testing.assert_close(ntk_cos_short, original_cos_short) torch.testing.assert_close(ntk_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_sin_long, original_sin_long) self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all()) @require_torch class PersimmonIntegrationTest(unittest.TestCase): @slow @require_torch_accelerator @require_bitsandbytes def test_model_8b_chat_logits(self): input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338] model = PersimmonForCausalLM.from_pretrained( "adept/persimmon-8b-chat", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.float16 ) out = model(torch.tensor([input_ids], device=torch_device)).logits.float() EXPECTED_MEAN = torch.tensor( [[-11.4726, -11.1495, -11.2694, -11.2223, -10.9452, -11.0663, -11.0031, -11.1028]] ) # change dtype to `torch.float32` before calling `mean` to avoid `nan` values torch.testing.assert_close(out.cpu().to(torch.float32).mean(-1), EXPECTED_MEAN, rtol=1e-4, atol=1e-4) # fmt: off EXPECTED_SLICE = torch.tensor( [-16.9062, -16.9062, -16.9062, -16.9062, -16.8906, -16.9062, -16.9531, -16.9062, -16.9062, -16.9062, -16.9531, -16.9062, -16.9531, -16.9062, -16.9062, -16.9062, -16.9062, -16.9062, -16.9531, -16.9062, -16.9062, -16.9062, -16.9062, -16.9062, -16.9062, -16.9531, -16.9062, -16.9531, -16.9062, -16.9062], dtype=torch.float16 ) # fmt: on torch.testing.assert_close(out.cpu()[0, 0, :30], EXPECTED_SLICE, rtol=1e-5, atol=1e-5) backend_empty_cache(torch_device) del model gc.collect() @slow @require_torch_accelerator @require_torch_fp16 @require_bitsandbytes def test_model_8b_chat_greedy_generation(self): EXPECTED_TEXT_COMPLETION = """human: Simply put, the theory of relativity states that?\n\nadept: The theory of relativity states that the laws of physics are the same for all observers, regardless of their relative motion.""" prompt = "human: Simply put, the theory of relativity states that?\n\nadept:" tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-chat", use_fast=False) input_ids = tokenizer.encode(prompt, return_tensors="pt").to(torch_device) model = PersimmonForCausalLM.from_pretrained( "adept/persimmon-8b-chat", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.float16 ) # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=64) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) backend_empty_cache(torch_device) del model gc.collect()
transformers/tests/models/persimmon/test_modeling_persimmon.py/0
{ "file_path": "transformers/tests/models/persimmon/test_modeling_persimmon.py", "repo_id": "transformers", "token_count": 10307 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest import requests import torch from transformers.testing_utils import require_vision from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from PIL import Image from transformers import PixtralProcessor @require_vision class PixtralProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = PixtralProcessor @classmethod def setUpClass(cls): cls.url_0 = "https://www.ilankelman.org/stopsigns/australia.jpg" cls.image_0 = Image.open(requests.get(cls.url_0, stream=True).raw) cls.url_1 = "http://images.cocodataset.org/val2017/000000039769.jpg" cls.image_1 = Image.open(requests.get(cls.url_1, stream=True).raw) cls.url_2 = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg" cls.image_2 = Image.open(requests.get(cls.url_2, stream=True).raw) def setUp(self): self.tmpdirname = tempfile.mkdtemp() processor = PixtralProcessor.from_pretrained("mistral-community/pixtral-12b") processor.save_pretrained(self.tmpdirname) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_chat_template(self): processor = self.processor_class.from_pretrained(self.tmpdirname) expected_prompt = "<s>[INST][IMG]What is shown in this image?[/INST]" messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What is shown in this image?"}, ], }, ] formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True) self.assertEqual(expected_prompt, formatted_prompt) def test_image_token_filling(self): processor = self.processor_class.from_pretrained(self.tmpdirname) # Important to check with non square image image = torch.randint(0, 2, (3, 500, 316)) expected_image_tokens = 640 image_token_index = 10 messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What is shown in this image?"}, ], }, ] inputs = processor( text=[processor.apply_chat_template(messages)], images=[image], return_tensors="pt", ) image_tokens = (inputs["input_ids"] == image_token_index).sum().item() self.assertEqual(expected_image_tokens, image_tokens) def test_processor_with_single_image(self): processor = self.processor_class.from_pretrained(self.tmpdirname) prompt_string = "USER: [IMG]\nWhat's the content of the image? 
ASSISTANT:" # Make small for checking image token expansion processor.image_processor.size = {"longest_edge": 30} processor.image_processor.patch_size = {"height": 2, "width": 2} # Test passing in an image inputs_image = processor(text=prompt_string, images=self.image_0, return_tensors="pt") self.assertIn("input_ids", inputs_image) self.assertTrue(len(inputs_image["input_ids"]) == 1) self.assertIsInstance(inputs_image["input_ids"], torch.Tensor) self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor) self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([1, 3, 32, 32])) # fmt: off input_ids = inputs_image["input_ids"] self.assertEqual( input_ids[0].tolist(), # Equivalent to "USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the content of the image? ASSISTANT:" [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 4701, 1307, 1278, 3937, 1063, 1349, 4290, 16002, 41150, 1058] ) # fmt: on # Test passing in a url inputs_url = processor(text=prompt_string, images=self.url_0, return_tensors="pt") self.assertIn("input_ids", inputs_url) self.assertTrue(len(inputs_url["input_ids"]) == 1) self.assertIsInstance(inputs_url["input_ids"], torch.Tensor) self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor) self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([1, 3, 32, 32])) # fmt: off input_ids = inputs_url["input_ids"] self.assertEqual( input_ids[0].tolist(), # Equivalent to "USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the content of the image? ASSISTANT:" [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 4701, 1307, 1278, 3937, 1063, 1349, 4290, 16002, 41150, 1058] ) # fmt: on # Test passing inputs as a single list inputs_image = processor(text=prompt_string, images=[self.image_0], return_tensors="pt") self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([1, 3, 32, 32])) # fmt: off self.assertEqual( inputs_image["input_ids"][0].tolist(), [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 4701, 1307, 1278, 3937, 1063, 1349, 4290, 16002, 41150, 1058] ) # fmt: on # Test as nested single list inputs_image = processor(text=prompt_string, images=[[self.image_0]], return_tensors="pt") self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([1, 3, 32, 32])) # fmt: off self.assertEqual( inputs_image["input_ids"][0].tolist(), [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 4701, 1307, 1278, 3937, 1063, 1349, 4290, 16002, 41150, 1058] ) # fmt: on def test_processor_with_multiple_images_single_list(self): processor = self.processor_class.from_pretrained(self.tmpdirname) prompt_string = "USER: [IMG][IMG]\nWhat's the difference between these two images? 
ASSISTANT:" # Make small for checking image token expansion processor.image_processor.size = {"longest_edge": 30} processor.image_processor.patch_size = {"height": 2, "width": 2} # Test passing in an image inputs_image = processor(text=prompt_string, images=[self.image_0, self.image_1], return_tensors="pt") self.assertIn("input_ids", inputs_image) self.assertTrue(len(inputs_image["input_ids"]) == 1) self.assertIsInstance(inputs_image["input_ids"], torch.Tensor) self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor) self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([2, 3, 32, 32])) # fmt: off input_ids = inputs_image["input_ids"] self.assertEqual( input_ids[0].tolist(), # Equivalent to ["USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END][IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the difference between these two images? ASSISTANT:"] [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058] ) # fmt: on # Test passing in a url inputs_url = processor(text=prompt_string, images=[self.url_0, self.url_1], return_tensors="pt") self.assertIn("input_ids", inputs_url) self.assertTrue(len(inputs_url["input_ids"]) == 1) self.assertIsInstance(inputs_url["input_ids"], torch.Tensor) self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor) self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([2, 3, 32, 32])) # fmt: off input_ids = inputs_url["input_ids"] self.assertEqual( input_ids[0].tolist(), # Equivalent to ["USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END][IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the difference between these two images? ASSISTANT:"] [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058] ) # fmt: on # Test passing in as a nested list inputs_url = processor(text=prompt_string, images=[[self.image_0, self.image_1]], return_tensors="pt") self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([2, 3, 32, 32])) # fmt: off self.assertEqual( inputs_url["input_ids"][0].tolist(), [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058] ) # fmt: on def test_processor_with_multiple_images_multiple_lists(self): processor = self.processor_class.from_pretrained(self.tmpdirname) prompt_string = [ "USER: [IMG][IMG]\nWhat's the difference between these two images? ASSISTANT:", "USER: [IMG]\nWhat's the content of the image? 
ASSISTANT:", ] processor.tokenizer.pad_token = "</s>" image_inputs = [[self.image_0, self.image_1], [self.image_2]] # Make small for checking image token expansion processor.image_processor.size = {"longest_edge": 30} processor.image_processor.patch_size = {"height": 2, "width": 2} # Test passing in an image inputs_image = processor(text=prompt_string, images=image_inputs, return_tensors="pt", padding=True) self.assertIn("input_ids", inputs_image) self.assertTrue(len(inputs_image["input_ids"]) == 2) self.assertIsInstance(inputs_image["input_ids"], torch.Tensor) self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor) self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([3, 3, 32, 32])) # fmt: off input_ids = inputs_image["input_ids"] self.assertEqual( input_ids[0].tolist(), # Equivalent to ["USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END][IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the difference between these two images? ASSISTANT:"] [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058] ) # fmt: on # Test passing in a url inputs_url = processor(text=prompt_string, images=image_inputs, return_tensors="pt", padding=True) self.assertIn("input_ids", inputs_url) self.assertTrue(len(inputs_url["input_ids"]) == 2) self.assertIsInstance(inputs_url["input_ids"], torch.Tensor) self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor) self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([3, 3, 32, 32])) # fmt: off input_ids = inputs_url["input_ids"] self.assertEqual( input_ids[0].tolist(), # Equivalent to ["USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END][IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the difference between these two images? ASSISTANT:"] [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058] ) # fmt: on # Test passing as a single flat list inputs_image = processor( text=prompt_string, images=[self.image_0, self.image_1, self.image_2], return_tensors="pt", padding=True ) self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([3, 3, 32, 32])) # fmt: off self.assertEqual( inputs_image["input_ids"][0].tolist(), [21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058] ) # fmt: on def test_processor_returns_full_length_batches(self): # to avoid https://github.com/huggingface/transformers/issues/34204 processor = self.processor_class.from_pretrained(self.tmpdirname) prompt_string = [ "USER: [IMG]\nWhat's the content of the image? ASSISTANT:", ] * 5 processor.tokenizer.pad_token = "</s>" image_inputs = [[self.image_0]] * 5 # Make small for checking image token expansion processor.image_processor.size = {"longest_edge": 30} processor.image_processor.patch_size = {"height": 2, "width": 2} # Test passing in an image inputs_image = processor(text=prompt_string, images=image_inputs, return_tensors="pt", padding=True) self.assertIn("input_ids", inputs_image) self.assertTrue(len(inputs_image["input_ids"]) == 5) self.assertTrue(len(inputs_image["pixel_values"]) == 5)
transformers/tests/models/pixtral/test_processor_pixtral.py/0
{ "file_path": "transformers/tests/models/pixtral/test_processor_pixtral.py", "repo_id": "transformers", "token_count": 6179 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch SegFormer model.""" import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class SegformerConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class SegformerModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[1, 1, 1, 1], sr_ratios=[8, 4, 2, 1], hidden_sizes=[8, 8, 16, 16], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 1, 2, 2], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.num_attention_heads = num_attention_heads self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return SegformerConfig( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model 
= SegformerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def create_and_check_for_image_segmentation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = SegformerForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) result = model(pixel_values, labels=labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss, 0.0) def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels): config.num_labels = 1 model = SegformerForSemanticSegmentation(config=config) model.to(torch_device) model.eval() labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device) result = model(pixel_values, labels=labels) self.parent.assertGreater(result.loss, 0.0) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "image-feature-extraction": SegformerModel, "image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) fx_compatible = True test_head_masking = False test_pruning = False test_resize_embeddings = False def setUp(self): self.model_tester = SegformerModelTester(self) self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_binary_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs) def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) @unittest.skip(reason="SegFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="SegFormer does not have get_input_embeddings method and get_output_embeddings methods") def test_model_get_set_embeddings(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions 
= sum(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) # verify the first attentions (first block, first layer) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) # verify the last attentions (last block, last layer) expected_seq_len = (self.model_tester.image_size // 32) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) # verify the first attentions (first block, first layer) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_encoder_blocks self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class.__name__ in MODEL_MAPPING_NAMES.values(): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss 
= model(**inputs).loss loss.backward() @slow def test_model_from_pretrained(self): model_name = "nvidia/segformer-b0-finetuned-ade-512-512" model = SegformerModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class SegformerModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_segmentation_ade(self): # only resize + normalize image_processor = SegformerImageProcessor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to( torch_device ) image = prepare_img() encoded_inputs = image_processor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) expected_shape = torch.Size((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_image_segmentation_city(self): # only resize + normalize image_processor = SegformerImageProcessor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained( "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(torch_device) image = prepare_img() encoded_inputs = image_processor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) expected_shape = torch.Size((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3, :3, :3], expected_slice, rtol=1e-1, atol=1e-1) @slow def test_post_processing_semantic_segmentation(self): # only resize + normalize image_processor = SegformerImageProcessor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to( torch_device ) image = prepare_img() encoded_inputs = image_processor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) outputs.logits = outputs.logits.detach().cpu() segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)]) expected_shape = torch.Size((500, 300)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((128, 128)) 
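        # without target_sizes, the maps keep the logits' native resolution (512 input // 4 model stride = 128)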
self.assertEqual(segmentation[0].shape, expected_shape)
transformers/tests/models/segformer/test_modeling_segformer.py/0
{ "file_path": "transformers/tests/models/segformer/test_modeling_segformer.py", "repo_id": "transformers", "token_count": 7789 }
# coding=utf-8 # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from transformers import SqueezeBertTokenizer, SqueezeBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class SqueezeBertTokenizationTest(BertTokenizationTest): tokenizer_class = SqueezeBertTokenizer rust_tokenizer_class = SqueezeBertTokenizerFast test_rust_tokenizer = True from_pretrained_id = "squeezebert/squeezebert-uncased" def get_rust_tokenizer(self, **kwargs): return SqueezeBertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) @slow def test_sequence_builders(self): tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-mnli-headless") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [ tokenizer.sep_token_id ]
transformers/tests/models/squeezebert/test_tokenization_squeezebert.py/0
{ "file_path": "transformers/tests/models/squeezebert/test_tokenization_squeezebert.py", "repo_id": "transformers", "token_count": 669 }
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the TF 2.0 Swin model."""

from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import SwinConfig
from transformers.testing_utils import require_tf, require_vision, slow, to_2tuple
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.modeling_tf_utils import keras
    from transformers.models.swin.modeling_tf_swin import (
        TFSwinForImageClassification,
        TFSwinForMaskedImageModeling,
        TFSwinModel,
    )

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFSwinModel(config=config)
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFSwinForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFSwinForMaskedImageModeling(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFSwinForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFSwinForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFSwinModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFSwinModel,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": TFSwinModel, "image-classification": TFSwinForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinConfig, embed_dim=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), keras.layers.Layer)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = to_2tuple(config.patch_size)

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = tf.reshape(reshaped_hidden_states[0], (batch_size, num_channels, height * width))
        reshaped_hidden_states = tf.transpose(reshaped_hidden_states, (0, 2, 1))

        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = to_2tuple(self.model_tester.image_size)

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_inputs_requiring_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = to_2tuple(self.model_tester.image_size)
        patch_size = to_2tuple(config.patch_size)

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        model_name = "microsoft/swin-tiny-patch4-window7-224"
        model = TFSwinModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


@require_vision
@require_tf
class TFSwinModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFSwinForImageClassification.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.0948, -0.6454, -0.0921])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
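

# A minimal illustrative sketch of the shape arithmetic that
# TFSwinModelTester.create_and_check_model asserts above; the helper name is hypothetical
# and not part of the Swin API. Swin merges 2x2 neighbouring patches at every stage
# transition, so the sequence length shrinks by a factor of 4 while the channel
# dimension doubles.
def _swin_expected_output_shape(image_size: int, patch_size: int, embed_dim: int, num_stages: int):
    num_patches = (image_size // patch_size) ** 2  # e.g. (32 // 2) ** 2 = 256 patches after embedding
    seq_len = num_patches // 4 ** (num_stages - 1)  # 256 // 4**2 = 16 after two patch mergings
    hidden_dim = embed_dim * 2 ** (num_stages - 1)  # 16 * 2**2 = 64 channels in the last stage
    return seq_len, hidden_dim


# With the tester defaults (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]) this
# gives (16, 64), matching the asserted last_hidden_state shape (batch_size, 16, 64).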
transformers/tests/models/swin/test_modeling_tf_swin.py/0
{ "file_path": "transformers/tests/models/swin/test_modeling_tf_swin.py", "repo_id": "transformers", "token_count": 6978 }
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import unittest

import numpy as np
import pandas as pd

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
    MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
    MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
    TapasConfig,
    is_torch_available,
)
from transformers.models.auto import get_values
from transformers.testing_utils import require_tensorflow_probability, require_torch, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        TapasForMaskedLM,
        TapasForQuestionAnswering,
        TapasForSequenceClassification,
        TapasModel,
        TapasTokenizer,
    )
    from transformers.models.tapas.modeling_tapas import (
        IndexMap,
        ProductIndexMap,
        flatten,
        gather,
        range_index_map,
        reduce_max,
        reduce_mean,
        reduce_sum,
    )


class TapasModelTester:
    """You can also import this e.g from .test_modeling_tapas import TapasModelTester"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        max_position_embeddings=512,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        type_sequence_label_size=2,
        positive_weight=10.0,
        num_aggregation_labels=4,
        num_labels=2,
        aggregation_loss_importance=0.8,
        use_answer_as_supervision=True,
        answer_loss_importance=0.001,
        use_normalized_answer_loss=False,
        huber_loss_delta=25.0,
        temperature=1.0,
        agg_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_agg=False,
        average_approximation_function="ratio",
        cell_selection_preference=0.5,
        answer_loss_cutoff=100,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=True,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=True,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.type_sequence_label_size = type_sequence_label_size
        self.positive_weight = positive_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.num_labels = num_labels
        self.aggregation_loss_importance = aggregation_loss_importance
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.agg_temperature = agg_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_agg = use_gumbel_for_agg
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).to(torch_device)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length]).to(torch_device)

        token_type_ids = []
        for type_vocab_size in self.type_vocab_sizes:
            token_type_ids.append(ids_tensor(shape=[self.batch_size, self.seq_length], vocab_size=type_vocab_size))
        token_type_ids = torch.stack(token_type_ids, dim=2).to(torch_device)

        sequence_labels = None
        token_labels = None
        labels = None
        numeric_values = None
        numeric_values_scale = None
        float_answer = None
        aggregation_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size).to(torch_device)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels).to(torch_device)
            labels = ids_tensor([self.batch_size, self.seq_length], vocab_size=2).to(torch_device)
            numeric_values = floats_tensor([self.batch_size, self.seq_length]).to(torch_device)
            numeric_values_scale = floats_tensor([self.batch_size, self.seq_length]).to(torch_device)
            float_answer = floats_tensor([self.batch_size]).to(torch_device)
            aggregation_labels = ids_tensor([self.batch_size], self.num_aggregation_labels).to(torch_device)

        config = self.get_config()

        return (
            config,
            input_ids,
            input_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            labels,
            numeric_values,
            numeric_values_scale,
            float_answer,
            aggregation_labels,
        )

    def get_config(self):
        return TapasConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_sizes=self.type_vocab_sizes,
            initializer_range=self.initializer_range,
            positive_weight=self.positive_weight,
            num_aggregation_labels=self.num_aggregation_labels,
            num_labels=self.num_labels,
            aggregation_loss_importance=self.aggregation_loss_importance,
            use_answer_as_supervision=self.use_answer_as_supervision,
            answer_loss_importance=self.answer_loss_importance,
            use_normalized_answer_loss=self.use_normalized_answer_loss,
            huber_loss_delta=self.huber_loss_delta,
            temperature=self.temperature,
            agg_temperature=self.agg_temperature,
            use_gumbel_for_cells=self.use_gumbel_for_cells,
            use_gumbel_for_agg=self.use_gumbel_for_agg,
            average_approximation_function=self.average_approximation_function,
            cell_selection_preference=self.cell_selection_preference,
            answer_loss_cutoff=self.answer_loss_cutoff,
            max_num_rows=self.max_num_rows,
            max_num_columns=self.max_num_columns,
            average_logits_per_cell=self.average_logits_per_cell,
            select_one_column=self.select_one_column,
            allow_empty_column_selection=self.allow_empty_column_selection,
            init_cell_selection_weights_to_zero=self.init_cell_selection_weights_to_zero,
            reset_position_index_per_cell=self.reset_position_index_per_cell,
            disable_per_token_loss=self.disable_per_token_loss,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        input_mask,
        token_type_ids,
        sequence_labels,
        token_labels,
        labels,
        numeric_values,
        numeric_values_scale,
        float_answer,
        aggregation_labels,
    ):
        model = TapasModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self,
        config,
        input_ids,
        input_mask,
        token_type_ids,
        sequence_labels,
        token_labels,
        labels,
        numeric_values,
        numeric_values_scale,
        float_answer,
        aggregation_labels,
    ):
        model = TapasForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self,
        config,
        input_ids,
        input_mask,
        token_type_ids,
        sequence_labels,
        token_labels,
        labels,
        numeric_values,
        numeric_values_scale,
        float_answer,
        aggregation_labels,
    ):
        # inference: without aggregation head (SQA). Model only returns logits
        sqa_config = copy.copy(config)
        sqa_config.num_aggregation_labels = 0
        sqa_config.use_answer_as_supervision = False
        model = TapasForQuestionAnswering(config=sqa_config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

        # inference: with aggregation head (WTQ, WikiSQL-supervised). Model returns logits and aggregation logits
        model = TapasForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels))

        # training: can happen in 3 main ways
        # case 1: conversational (SQA)
        model = TapasForQuestionAnswering(config=sqa_config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=labels,
        )
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

        # case 2: weak supervision for aggregation (WTQ)
        model = TapasForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=labels,
            numeric_values=numeric_values,
            numeric_values_scale=numeric_values_scale,
            float_answer=float_answer,
        )
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels))

        # case 3: strong supervision for aggregation (WikiSQL-supervised)
        wikisql_config = copy.copy(config)
        wikisql_config.use_answer_as_supervision = False
        model = TapasForQuestionAnswering(config=wikisql_config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=labels,
            aggregation_labels=aggregation_labels,
        )
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels))

    def create_and_check_for_sequence_classification(
        self,
        config,
        input_ids,
        input_mask,
        token_type_ids,
        sequence_labels,
        token_labels,
        labels,
        numeric_values,
        numeric_values_scale,
        float_answer,
        aggregation_labels,
    ):
        config.num_labels = self.num_labels
        model = TapasForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            labels,
            numeric_values,
            numeric_values_scale,
            float_answer,
            aggregation_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class TapasModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TapasModel,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TapasModel,
            "fill-mask": TapasForMaskedLM,
            "table-question-answering": TapasForQuestionAnswering,
            "text-classification": TapasForSequenceClassification,
            "zero-shot": TapasForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["aggregation_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["numeric_values"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.float,
                    device=torch_device,
                )
                inputs_dict["numeric_values_scale"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.float,
                    device=torch_device,
                )
                inputs_dict["float_answer"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.float, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
                *get_values(MODEL_FOR_CAUSAL_LM_MAPPING),
                *get_values(MODEL_FOR_MASKED_LM_MAPPING),
                *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
        return inputs_dict

    # TODO: Fix the failed tests
    def is_pipeline_test_to_skip(
        self,
        pipeline_test_case_name,
        config_class,
        model_architecture,
        tokenizer_name,
        image_processor_name,
        feature_extractor_name,
        processor_name,
    ):
        return True

    def setUp(self):
        self.model_tester = TapasModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TapasConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    @require_tensorflow_probability
    @unittest.skip(reason="tfp is not defined even if installed. FIXME @Arthur in a followup PR!")
    def test_pt_tf_model_equivalence(self):
        pass

    @unittest.skip(reason="tfp is not defined even if installed. FIXME @Arthur in a followup PR!")
    def test_tf_from_pt_safetensors(self):
        pass


def prepare_tapas_single_inputs_for_inference():
    # Here we prepare a single table-question pair to test TAPAS inference on:
    data = {
        "Footballer": ["Lionel Messi", "Cristiano Ronaldo"],
        "Age": ["33", "35"],
    }
    queries = "Which footballer is 33 years old?"
    table = pd.DataFrame.from_dict(data)

    return table, queries


def prepare_tapas_batch_inputs_for_inference():
    # Here we prepare a batch of 2 table-question pairs to test TAPAS inference on:
    data = {
        "Footballer": ["Lionel Messi", "Cristiano Ronaldo"],
        "Age": ["33", "35"],
        "Number of goals": ["712", "750"],
    }
    queries = ["Which footballer is 33 years old?", "How many goals does Ronaldo have?"]
    table = pd.DataFrame.from_dict(data)

    return table, queries


def prepare_tapas_batch_inputs_for_training():
    # Here we prepare a DIFFERENT batch of 2 table-question pairs to test TAPAS training on:
    data = {
        "Footballer": ["Lionel Messi", "Cristiano Ronaldo"],
        "Age": ["33", "35"],
        "Number of goals": ["712", "750"],
    }
    queries = ["Which footballer is 33 years old?", "What's the total number of goals?"]
    table = pd.DataFrame.from_dict(data)

    answer_coordinates = [[(0, 0)], [(0, 2), (1, 2)]]
    answer_text = [["Lionel Messi"], ["1462"]]
    float_answer = [float("NaN"), float("1462")]

    return table, queries, answer_coordinates, answer_text, float_answer


@require_torch
class TapasModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")

    @slow
    def test_inference_no_head(self):
        # ideally we want to test this with the weights of tapas_inter_masklm_base_reset,
        # but since it's not straightforward to do this with the TF 1 implementation, we test it with
        # the weights of the WTQ base model (i.e. tapas_wtq_wikisql_sqa_inter_masklm_base_reset)
        model = TapasModel.from_pretrained("google/tapas-base-finetuned-wtq").to(torch_device)

        tokenizer = self.default_tokenizer
        table, queries = prepare_tapas_single_inputs_for_inference()
        inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
        inputs = {k: v.to(torch_device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = model(**inputs)
        # test the sequence output
        expected_slice = torch.tensor(
            [
                [
                    [-0.141581565, -0.599805772, 0.747186482],
                    [-0.143664181, -0.602008104, 0.749218345],
                    [-0.15169853, -0.603363097, 0.741370678],
                ]
            ],
            device=torch_device,
        )

        torch.testing.assert_close(outputs.last_hidden_state[:, :3, :3], expected_slice, rtol=0.0005, atol=0.0005)

        # test the pooled output
        expected_slice = torch.tensor([[0.987518311, -0.970520139, -0.994303405]], device=torch_device)

        torch.testing.assert_close(outputs.pooler_output[:, :3], expected_slice, rtol=0.0005, atol=0.0005)

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    # TapasForQuestionAnswering has 3 possible ways of being fine-tuned:
    # - conversational set-up (SQA)
    # - weak supervision for aggregation (WTQ, WikiSQL)
    # - strong supervision for aggregation (WikiSQL-supervised)
    # We test all of them:
    @slow
    def test_inference_question_answering_head_conversational(self):
        # note that google/tapas-base-finetuned-sqa should correspond to tapas_sqa_inter_masklm_base_reset
        model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-sqa").to(torch_device)

        tokenizer = self.default_tokenizer
        table, queries = prepare_tapas_single_inputs_for_inference()
        inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
        inputs = {k: v.to(torch_device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = model(**inputs)
        # test the logits
        logits = outputs.logits
        expected_shape = torch.Size((1, 21))
        self.assertEqual(logits.shape, expected_shape)
        expected_tensor = torch.tensor(
            [
                [
                    -9997.22461,
                    -9997.22461,
                    -9997.22461,
                    -9997.22461,
                    -9997.22461,
                    -9997.22461,
                    -9997.22461,
                    -9997.22461,
                    -9997.22461,
                    -16.2628059,
                    -10004.082,
                    15.4330549,
                    15.4330549,
                    15.4330549,
                    -9990.42,
                    -16.3270779,
                    -16.3270779,
                    -16.3270779,
                    -16.3270779,
                    -16.3270779,
                    -10004.8506,
                ]
            ],
            device=torch_device,
        )

        torch.testing.assert_close(logits, expected_tensor, rtol=0.015, atol=0.015)

    @slow
    def test_inference_question_answering_head_conversational_absolute_embeddings(self):
        # note that google/tapas-small-finetuned-sqa should correspond to tapas_sqa_inter_masklm_small_reset
        # however here we test the version with absolute position embeddings
        model = TapasForQuestionAnswering.from_pretrained("google/tapas-small-finetuned-sqa", revision="no_reset").to(
            torch_device
        )
        tokenizer = self.default_tokenizer
        table, queries = prepare_tapas_single_inputs_for_inference()
        inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
        inputs = {k: v.to(torch_device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = model(**inputs)
        # test the logits
        logits = outputs.logits
        expected_shape = torch.Size((1, 21))
        self.assertEqual(logits.shape, expected_shape)
        expected_tensor = torch.tensor(
            [
                [
                    -10014.7793,
                    -10014.7793,
                    -10014.7793,
                    -10014.7793,
                    -10014.7793,
                    -10014.7793,
                    -10014.7793,
                    -10014.7793,
                    -10014.7793,
                    -18.8419304,
                    -10018.0391,
                    17.7848816,
                    17.7848816,
                    17.7848816,
                    -9981.02832,
                    -16.4005489,
                    -16.4005489,
                    -16.4005489,
                    -16.4005489,
                    -16.4005489,
                    -10013.4736,
                ]
            ],
            device=torch_device,
        )

        torch.testing.assert_close(logits, expected_tensor, rtol=0.01, atol=0.01)

    @slow
    def test_inference_question_answering_head_weak_supervision(self):
        # note that google/tapas-base-finetuned-wtq should correspond to tapas_wtq_wikisql_sqa_inter_masklm_base_reset
        model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq").to(torch_device)

        tokenizer = self.default_tokenizer
        # let's test on a batch
        table, queries = prepare_tapas_batch_inputs_for_inference()
        inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="pt")
        inputs_on_device = {k: v.to(torch_device) for k, v in inputs.items()}

        with torch.no_grad():
            outputs = model(**inputs_on_device)
        # test the logits
        logits = outputs.logits
        expected_shape = torch.Size((2, 28))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [-160.375504, -160.375504, -160.375504, -10072.3965, -10070.9414, -10094.9736],
                [-9861.6123, -9861.6123, -9861.6123, -9861.6123, -9891.01172, 146.600677],
            ],
            device=torch_device,
        )

        torch.testing.assert_close(logits[:, -6:], expected_slice, rtol=0.4, atol=0.4)

        # test the aggregation logits
        logits_aggregation = outputs.logits_aggregation
        expected_shape = torch.Size((2, 4))
        self.assertEqual(logits_aggregation.shape, expected_shape)
        expected_tensor = torch.tensor(
            [[18.8545208, -9.76614857, -6.3128891, -2.93525243], [-4.05782509, 40.0351, -5.35329962, 23.3978653]],
            device=torch_device,
        )

        torch.testing.assert_close(logits_aggregation, expected_tensor, rtol=0.001, atol=0.001)

        # test the predicted answer coordinates and aggregation indices
        EXPECTED_PREDICTED_ANSWER_COORDINATES = [[(0, 0)], [(1, 2)]]
        EXPECTED_PREDICTED_AGGREGATION_INDICES = [0, 1]

        predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions(
            inputs, outputs.logits.detach().cpu(), outputs.logits_aggregation.detach().cpu()
        )
        self.assertEqual(EXPECTED_PREDICTED_ANSWER_COORDINATES, predicted_answer_coordinates)
        self.assertEqual(EXPECTED_PREDICTED_AGGREGATION_INDICES, predicted_aggregation_indices)

    @slow
    def test_training_question_answering_head_weak_supervision(self):
        # note that google/tapas-base-finetuned-wtq should correspond to tapas_wtq_wikisql_sqa_inter_masklm_base_reset
        model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq").to(torch_device)
        # normally we should put the model in training mode but it's a pain to do this with the TF 1 implementation

        tokenizer = self.default_tokenizer
        # let's test on a batch
        table, queries, answer_coordinates, answer_text, float_answer = prepare_tapas_batch_inputs_for_training()
        inputs = tokenizer(
            table=table,
            queries=queries,
            answer_coordinates=answer_coordinates,
            answer_text=answer_text,
            padding="longest",
            return_tensors="pt",
        )

        # prepare data (created by the tokenizer) and move to torch_device
        input_ids = inputs["input_ids"].to(torch_device)
        attention_mask = inputs["attention_mask"].to(torch_device)
        token_type_ids = inputs["token_type_ids"].to(torch_device)
        labels = inputs["labels"].to(torch_device)
        numeric_values = inputs["numeric_values"].to(torch_device)
        numeric_values_scale = inputs["numeric_values_scale"].to(torch_device)

        # the answer should be prepared by the user
        float_answer = torch.FloatTensor(float_answer).to(torch_device)

        # forward pass to get loss + logits:
        with torch.no_grad():
            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                labels=labels,
                numeric_values=numeric_values,
                numeric_values_scale=numeric_values_scale,
                float_answer=float_answer,
            )

        # test the loss
        loss = outputs.loss
        expected_loss = torch.tensor(3.3527612686157227e-08, device=torch_device)
        torch.testing.assert_close(loss, expected_loss, rtol=1e-6, atol=1e-6)

        # test the logits on the first example
        logits = outputs.logits
        expected_shape = torch.Size((2, 29))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                -160.0156,
                -160.0156,
                -160.0156,
                -160.0156,
                -160.0156,
                -10072.2266,
                -10070.8896,
                -10092.6006,
                -10092.6006,
            ],
            device=torch_device,
        )

        torch.testing.assert_close(logits[0, -9:], expected_slice, rtol=1e-6, atol=1e-6)

        # test the aggregation logits on the second example
        logits_aggregation = outputs.logits_aggregation
        expected_shape = torch.Size((2, 4))
        self.assertEqual(logits_aggregation.shape, expected_shape)
        expected_slice = torch.tensor([-4.0538, 40.0304, -5.3554, 23.3965], device=torch_device)

        torch.testing.assert_close(logits_aggregation[1, -4:], expected_slice, rtol=1e-4, atol=1e-4)

    @slow
    def test_inference_question_answering_head_strong_supervision(self):
        # note that google/tapas-base-finetuned-wikisql-supervised should correspond to tapas_wikisql_sqa_inter_masklm_base_reset
        model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wikisql-supervised").to(
            torch_device
        )
        tokenizer = self.default_tokenizer

        table, queries = prepare_tapas_single_inputs_for_inference()
        inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
        inputs = {k: v.to(torch_device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = model(**inputs)
        # test the logits
        logits = outputs.logits
        expected_shape = torch.Size((1, 21))
        self.assertEqual(logits.shape, expected_shape)
        expected_tensor = torch.tensor(
            [
                [
                    -10011.1084,
                    -10011.1084,
                    -10011.1084,
                    -10011.1084,
                    -10011.1084,
                    -10011.1084,
                    -10011.1084,
                    -10011.1084,
                    -10011.1084,
                    -18.6185989,
                    -10008.7969,
                    17.6355762,
                    17.6355762,
                    17.6355762,
                    -10002.4404,
                    -18.7111301,
                    -18.7111301,
                    -18.7111301,
                    -18.7111301,
                    -18.7111301,
                    -10007.0977,
                ]
            ],
            device=torch_device,
        )

        torch.testing.assert_close(logits, expected_tensor, rtol=0.02, atol=0.02)

        # test the aggregation logits
        logits_aggregation = outputs.logits_aggregation
        expected_shape = torch.Size((1, 4))
        self.assertEqual(logits_aggregation.shape, expected_shape)
        expected_tensor = torch.tensor(
            [[16.5659733, -3.06624889, -2.34152961, -0.970244825]], device=torch_device
        )  # PyTorch model outputs [[16.5679, -3.0668, -2.3442, -0.9674]]
        torch.testing.assert_close(logits_aggregation, expected_tensor, rtol=0.003, atol=0.003)

    @slow
    def test_inference_classification_head(self):
        # note that google/tapas-base-finetuned-tabfact should correspond to tapas_tabfact_inter_masklm_base_reset
        model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact").to(torch_device)

        tokenizer = self.default_tokenizer

        table, queries = prepare_tapas_single_inputs_for_inference()
        inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="pt")
        inputs = {k: v.to(torch_device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = model(**inputs)

        # test the classification logits
        logits = outputs.logits
        expected_shape = torch.Size((1, 2))
        self.assertEqual(logits.shape, expected_shape)
        expected_tensor = torch.tensor(
            [[0.795137286, 9.5572]], device=torch_device
        )  # Note that the PyTorch model outputs [[0.8057, 9.5281]]

        torch.testing.assert_close(outputs.logits, expected_tensor, rtol=0.05, atol=0.05)


@require_torch
class TapasUtilitiesTest(unittest.TestCase):
    def _prepare_tables(self):
        """Prepares two tables, both with three distinct rows.

        The first table has two columns:
        1.0, 2.0 | 3.0
        2.0, 0.0 | 1.0
        1.0, 3.0 | 4.0

        The second table has three columns:
        1.0 | 2.0 | 3.0
        2.0 | 0.0 | 1.0
        1.0 | 3.0 | 4.0

        Returns:
        SegmentedTensors with the tables.
        """
        values = torch.tensor(
            [
                [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]],
                [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]],
            ]
        )
        row_index = IndexMap(
            indices=torch.tensor(
                [
                    [[0, 0, 0], [1, 1, 1], [2, 2, 2]],
                    [[0, 0, 0], [1, 1, 1], [2, 2, 2]],
                ]
            ),
            num_segments=3,
            batch_dims=1,
        )
        col_index = IndexMap(
            indices=torch.tensor(
                [
                    [[0, 0, 1], [0, 0, 1], [0, 0, 1]],
                    [[0, 1, 2], [0, 1, 2], [0, 1, 2]],
                ]
            ),
            num_segments=3,
            batch_dims=1,
        )
        return values, row_index, col_index

    def test_product_index(self):
        _, row_index, col_index = self._prepare_tables()
        cell_index = ProductIndexMap(row_index, col_index)
        row_index_proj = cell_index.project_outer(cell_index)
        col_index_proj = cell_index.project_inner(cell_index)

        ind = cell_index.indices
        self.assertEqual(cell_index.num_segments, 9)

        # Projections should give back the original indices.
        # we use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
        np.testing.assert_array_equal(row_index.indices.numpy(), row_index_proj.indices.numpy())
        self.assertEqual(row_index.num_segments, row_index_proj.num_segments)
        self.assertEqual(row_index.batch_dims, row_index_proj.batch_dims)

        # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
        np.testing.assert_array_equal(col_index.indices.numpy(), col_index_proj.indices.numpy())
        self.assertEqual(col_index.batch_dims, col_index_proj.batch_dims)

        # The first and second "column" are identified in the first table.
        for i in range(3):
            self.assertEqual(ind[0, i, 0], ind[0, i, 1])
            self.assertNotEqual(ind[0, i, 0], ind[0, i, 2])

        # All rows are distinct in the first table.
        for i, i_2 in zip(range(3), range(3)):
            for j, j_2 in zip(range(3), range(3)):
                if i != i_2 and j != j_2:
                    self.assertNotEqual(ind[0, i, j], ind[0, i_2, j_2])

        # All cells are distinct in the second table.
        for i, i_2 in zip(range(3), range(3)):
            for j, j_2 in zip(range(3), range(3)):
                if i != i_2 or j != j_2:
                    self.assertNotEqual(ind[1, i, j], ind[1, i_2, j_2])

    def test_flatten(self):
        _, row_index, col_index = self._prepare_tables()
        row_index_flat = flatten(row_index)
        col_index_flat = flatten(col_index)

        shape = [3, 4, 5]
        batched_index = IndexMap(indices=torch.zeros(shape).type(torch.LongTensor), num_segments=1, batch_dims=3)
        batched_index_flat = flatten(batched_index)

        # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
        np.testing.assert_array_equal(
            row_index_flat.indices.numpy(), [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5]
        )
        np.testing.assert_array_equal(
            col_index_flat.indices.numpy(), [0, 0, 1, 0, 0, 1, 0, 0, 1, 3, 4, 5, 3, 4, 5, 3, 4, 5]
        )
        self.assertEqual(batched_index_flat.num_segments.numpy(), np.prod(shape))
        np.testing.assert_array_equal(batched_index_flat.indices.numpy(), range(np.prod(shape)))

    def test_range_index_map(self):
        batch_shape = [3, 4]
        num_segments = 5
        index = range_index_map(batch_shape, num_segments)

        self.assertEqual(num_segments, index.num_segments)
        self.assertEqual(2, index.batch_dims)
        indices = index.indices
        # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
        np.testing.assert_array_equal(list(indices.size()), [3, 4, 5])
        for i in range(batch_shape[0]):
            for j in range(batch_shape[1]):
                # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
                np.testing.assert_array_equal(indices[i, j, :].numpy(), range(num_segments))

    def test_reduce_sum(self):
        values, row_index, col_index = self._prepare_tables()
        cell_index = ProductIndexMap(row_index, col_index)
        row_sum, _ = reduce_sum(values, row_index)
        col_sum, _ = reduce_sum(values, col_index)
        cell_sum, _ = reduce_sum(values, cell_index)

        # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose
        np.testing.assert_allclose(row_sum.numpy(), [[6.0, 3.0, 8.0], [6.0, 3.0, 8.0]])
        np.testing.assert_allclose(col_sum.numpy(), [[9.0, 8.0, 0.0], [4.0, 5.0, 8.0]])
        np.testing.assert_allclose(
            cell_sum.numpy(),
            [[3.0, 3.0, 0.0, 2.0, 1.0, 0.0, 4.0, 4.0, 0.0], [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0]],
        )

    def test_reduce_mean(self):
        values, row_index, col_index = self._prepare_tables()
        cell_index = ProductIndexMap(row_index, col_index)
        row_mean, _ = reduce_mean(values, row_index)
        col_mean, _ = reduce_mean(values, col_index)
        cell_mean, _ = reduce_mean(values, cell_index)

        # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose
        np.testing.assert_allclose(
            row_mean.numpy(), [[6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0], [6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0]]
        )
        np.testing.assert_allclose(col_mean.numpy(), [[9.0 / 6.0, 8.0 / 3.0, 0.0], [4.0 / 3.0, 5.0 / 3.0, 8.0 / 3.0]])
        np.testing.assert_allclose(
            cell_mean.numpy(),
            [
                [3.0 / 2.0, 3.0, 0.0, 2.0 / 2.0, 1.0, 0.0, 4.0 / 2.0, 4.0, 0.0],
                [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0],
            ],
        )

    def test_reduce_max(self):
        values = torch.as_tensor([2.0, 1.0, 0.0, 3.0])
        index = IndexMap(indices=torch.as_tensor([0, 1, 0, 1]), num_segments=2)
        maximum, _ = reduce_max(values, index)
        # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
        np.testing.assert_array_equal(maximum.numpy(), [2, 3])

    def test_reduce_sum_vectorized(self):
        values = torch.as_tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]])
        index = IndexMap(indices=torch.as_tensor([[0, 0, 1]]), num_segments=2, batch_dims=0)
        sums, new_index = reduce_sum(values, index)
        # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose
        np.testing.assert_allclose(sums.numpy(), [3.0, 3.0])
        # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
        np.testing.assert_array_equal(new_index.indices.numpy(), [0, 1])
        np.testing.assert_array_equal(new_index.num_segments.numpy(), 2)
        np.testing.assert_array_equal(new_index.batch_dims, 0)

    def test_gather(self):
        values, row_index, col_index = self._prepare_tables()
        cell_index = ProductIndexMap(row_index, col_index)

        # Compute sums and then gather. The result should have the same shape as
        # the original table and each element should contain the sum of the values in
        # its cell.
        sums, _ = reduce_sum(values, cell_index)
        cell_sum = gather(sums, cell_index)
        assert cell_sum.size() == values.size()

        # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose
        np.testing.assert_allclose(
            cell_sum.numpy(),
            [[[3.0, 3.0, 3.0], [2.0, 2.0, 1.0], [4.0, 4.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]]],
        )

    def test_gather_vectorized(self):
        values = torch.as_tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
        index = IndexMap(indices=torch.as_tensor([[0, 1], [1, 0]]), num_segments=2, batch_dims=1)
        result = gather(values, index)

        # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual
        np.testing.assert_array_equal(result.numpy(), [[[1, 2], [3, 4]], [[7, 8], [5, 6]]])
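

# A minimal illustrative sketch (the helper name is hypothetical, not a transformers API)
# of the segmented reduction that IndexMap and reduce_sum implement above: every position
# is assigned a segment id, and values sharing an id are combined, as exercised by
# test_reduce_max and test_reduce_sum_vectorized. Plain torch.Tensor.scatter_add covers
# the flat 1-D sum case.
def _segment_sum(values, segment_ids, num_segments):
    out = torch.zeros(num_segments, dtype=values.dtype)
    return out.scatter_add(0, segment_ids, values)


# e.g. _segment_sum(torch.tensor([2.0, 1.0, 0.0, 3.0]), torch.tensor([0, 1, 0, 1]), 2)
# returns tensor([2., 4.]): segment 0 collects 2.0 + 0.0 and segment 1 collects 1.0 + 3.0.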
transformers/tests/models/tapas/test_modeling_tapas.py/0
{ "file_path": "transformers/tests/models/tapas/test_modeling_tapas.py", "repo_id": "transformers", "token_count": 22206 }