# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import tensorflow as tf

from .file_utils import ModelOutput
from .utils import logging


logger = logging.get_logger(__name__)


@dataclass
class TFGreedySearchDecoderOnlyOutput(ModelOutput):
""" | |
Base class for outputs of decoder-only generation models using greedy search. | |
Args: | |
sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`): | |
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or | |
shorter if all batches finished early due to the :obj:`eos_token_id`. | |
scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): | |
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) | |
at each generation step. :obj:`(max_length-input_ids.shape[-1],)`-shaped tuple of :obj:`tf.Tensor` with | |
each tensor of shape :obj:`(batch_size, config.vocab_size)`). | |
attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`. | |
hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size, generated_length, hidden_size)`. | |
""" | |
sequences: tf.Tensor = None | |
scores: Optional[Tuple[tf.Tensor]] = None | |
attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None | |
hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None | |


@dataclass
class TFGreedySearchEncoderDecoderOutput(ModelOutput):
    """
    Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention
    weights of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the
    decoder_hidden_states attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args: | |
sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`): | |
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or | |
shorter if all batches finished early due to the :obj:`eos_token_id`. | |
scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): | |
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) | |
at each generation step. :obj:`(max_length-1,)`-shaped tuple of :obj:`tf.Tensor` with each tensor of shape | |
:obj:`(batch_size, config.vocab_size)`). | |
encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
            Tuple of :obj:`tf.Tensor` (one for each layer of the encoder) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. | |
encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): | |
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of | |
shape :obj:`(batch_size, sequence_length, hidden_size)`. | |
decoder_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`. | |
cross_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`. | |
decoder_hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size, generated_length, hidden_size)`. | |
""" | |
sequences: tf.Tensor = None | |
scores: Optional[Tuple[tf.Tensor]] = None | |
encoder_attentions: Optional[Tuple[tf.Tensor]] = None | |
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None | |
decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None | |
cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None | |
decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None | |


@dataclass
class TFSampleDecoderOnlyOutput(ModelOutput):
""" | |
Base class for outputs of decoder-only generation models using sampling. | |
Args: | |
sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): | |
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or | |
shorter if all batches finished early due to the :obj:`eos_token_id`. | |
scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): | |
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) | |
at each generation step. :obj:`(max_length-input_ids.shape[-1],)`-shaped tuple of :obj:`tf.Tensor` with | |
each tensor of shape :obj:`(batch_size*num_return_sequences, config.vocab_size)`). | |
attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(num_return_sequences*batch_size, num_heads, generated_length, | |
sequence_length)`. | |
hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(num_return_sequences*batch_size, generated_length, hidden_size)`. | |
""" | |
sequences: tf.Tensor = None | |
scores: Optional[Tuple[tf.Tensor]] = None | |
attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None | |
hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None | |


@dataclass
class TFSampleEncoderDecoderOutput(ModelOutput):
    """
    Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights
    of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the
    decoder_hidden_states attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args: | |
sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): | |
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or | |
shorter if all batches finished early due to the :obj:`eos_token_id`. | |
scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): | |
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) | |
at each generation step. :obj:`(max_length-1,)`-shaped tuple of :obj:`tf.Tensor` with each tensor of shape | |
:obj:`(batch_size*num_return_sequences, config.vocab_size)`). | |
encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
            Tuple of :obj:`tf.Tensor` (one for each layer of the encoder) of shape
:obj:`(batch_size*num_return_sequences, num_heads, sequence_length, sequence_length)`. | |
encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): | |
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of | |
shape :obj:`(batch_size*num_return_sequences, sequence_length, hidden_size)`. | |
decoder_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences, num_heads, generated_length, | |
sequence_length)`. | |
cross_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`. | |
decoder_hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences, generated_length, hidden_size)`. | |
""" | |
sequences: tf.Tensor = None | |
scores: Optional[Tuple[tf.Tensor]] = None | |
encoder_attentions: Optional[Tuple[tf.Tensor]] = None | |
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None | |
decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None | |
cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None | |
decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None | |


@dataclass
class TFBeamSearchDecoderOnlyOutput(ModelOutput):
""" | |
Base class for outputs of decoder-only generation models using beam search. | |
Args: | |
sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): | |
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or | |
shorter if all batches finished early due to the :obj:`eos_token_id`. | |
sequences_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): | |
Final beam scores of the generated ``sequences``. | |
scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): | |
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log | |
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam | |
. :obj:`(max_length-input_ids.shape[-1],)`-shaped tuple of :obj:`tf.Tensor` with each tensor of shape | |
:obj:`(batch_size*num_beams*num_return_sequences, config.vocab_size)`). | |
attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length, sequence_length)`. | |
hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, generated_length, | |
hidden_size)`. | |
""" | |
sequences: tf.Tensor = None | |
sequences_scores: Optional[tf.Tensor] = None | |
scores: Optional[Tuple[tf.Tensor]] = None | |
attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None | |
hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None | |


@dataclass
class TFBeamSearchEncoderDecoderOutput(ModelOutput):
    """
    Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention
    weights of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the
    decoder_hidden_states attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args: | |
sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): | |
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or | |
shorter if all batches finished early due to the :obj:`eos_token_id`. | |
sequences_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): | |
Final beam scores of the generated ``sequences``. | |
scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): | |
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log | |
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam | |
. :obj:`(max_length-1,)`-shaped tuple of :obj:`tf.Tensor` with each tensor of shape | |
:obj:`(batch_size*num_beams, config.vocab_size)`). | |
        encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
            Tuple of :obj:`tf.Tensor` (one for each layer of the encoder) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. | |
encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): | |
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of | |
shape :obj:`(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`. | |
decoder_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, num_heads, generated_length, | |
sequence_length)`. | |
cross_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`. | |
decoder_hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, generated_length, | |
hidden_size)`. | |
""" | |
sequences: tf.Tensor = None | |
sequences_scores: Optional[tf.Tensor] = None | |
scores: Optional[Tuple[tf.Tensor]] = None | |
encoder_attentions: Optional[Tuple[tf.Tensor]] = None | |
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None | |
decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None | |
cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None | |
decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None | |


@dataclass
class TFBeamSampleDecoderOnlyOutput(ModelOutput):
    """
    Base class for outputs of decoder-only generation models using beam sampling.
Args: | |
sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): | |
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or | |
shorter if all batches finished early due to the :obj:`eos_token_id`. | |
        sequences_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size * num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Final beam scores of the generated ``sequences``. | |
scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): | |
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log | |
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam | |
. :obj:`(max_length-input_ids.shape[-1],)`-shaped tuple of :obj:`tf.Tensor` with each tensor of shape | |
:obj:`(batch_size*num_beams*num_return_sequences, config.vocab_size)`). | |
attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length, sequence_length)`. | |
hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams, generated_length, hidden_size)`. | |
""" | |
sequences: tf.Tensor = None | |
sequences_scores: Optional[tf.Tensor] = None | |
scores: Optional[Tuple[tf.Tensor]] = None | |
attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None | |
hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None | |


@dataclass
class TFBeamSampleEncoderDecoderOutput(ModelOutput):
    """
    Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention
    weights of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the
    decoder_hidden_states attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args: | |
sequences (:obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams, sequence_length)`): | |
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or | |
shorter if all batches finished early due to the :obj:`eos_token_id`. | |
        sequences_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size * num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Final beam scores of the generated ``sequences``. | |
scores (:obj:`tuple(tf.Tensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): | |
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log | |
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam | |
. :obj:`(max_length-1,)`-shaped tuple of :obj:`tf.Tensor` with each tensor of shape | |
:obj:`(batch_size*num_beams, config.vocab_size)`). | |
encoder_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
            Tuple of :obj:`tf.Tensor` (one for each layer of the encoder) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`. | |
encoder_hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): | |
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of | |
shape :obj:`(batch_size*num_beams, sequence_length, hidden_size)`. | |
decoder_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length, sequence_length)`. | |
cross_attentions (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`. | |
decoder_hidden_states (:obj:`tuple(tuple(tf.Tensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): | |
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of | |
:obj:`tf.Tensor` of shape :obj:`(batch_size*num_beams, generated_length, hidden_size)`. | |
""" | |
sequences: tf.Tensor = None | |
sequences_scores: Optional[tf.Tensor] = None | |
scores: Optional[Tuple[tf.Tensor]] = None | |
encoder_attentions: Optional[Tuple[tf.Tensor]] = None | |
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None | |
decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None | |
cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None | |
decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None | |
TFGreedySearchOutput = Union[TFGreedySearchEncoderDecoderOutput, TFGreedySearchDecoderOnlyOutput] | |
TFSampleOutput = Union[TFSampleEncoderDecoderOutput, TFSampleDecoderOnlyOutput] | |
TFBeamSearchOutput = Union[TFBeamSearchEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput] | |
TFBeamSampleOutput = Union[TFBeamSampleEncoderDecoderOutput, TFBeamSampleDecoderOnlyOutput] | |
class TFGenerationMixin: | |
""" | |
A class containing all of the functions supporting generation, to be used as a mixin in | |
:class:`~transformers.TFPreTrainedModel`. | |
""" | |
def prepare_inputs_for_generation(self, inputs, **kwargs): | |
""" | |
Implement in subclasses of :class:`~transformers.TFPreTrainedModel` for custom behavior to prepare inputs in | |
the generate method. | |
""" | |
return {"input_ids": inputs} | |
def _use_cache(self, outputs, use_cache): | |
"""During generation, decide whether to pass the `past` variable to the next forward pass.""" | |
use_cache = getattr(self.config, "use_cache", False) | |
if len(outputs) <= 1 or use_cache is False: | |
return False | |
if hasattr(self.config, "mem_len") and self.config.mem_len == 0: | |
return False | |
return True | |
def generate( | |
self, | |
input_ids=None, | |
max_length=None, | |
min_length=None, | |
do_sample=None, | |
early_stopping=None, | |
num_beams=None, | |
temperature=None, | |
top_k=None, | |
top_p=None, | |
repetition_penalty=None, | |
bad_words_ids=None, | |
bos_token_id=None, | |
pad_token_id=None, | |
eos_token_id=None, | |
length_penalty=None, | |
no_repeat_ngram_size=None, | |
num_return_sequences=None, | |
attention_mask=None, | |
decoder_start_token_id=None, | |
use_cache=None, | |
output_scores=None, | |
output_attentions=None, | |
output_hidden_states=None, | |
return_dict_in_generate=None, | |
forced_bos_token_id=None, | |
forced_eos_token_id=None, | |
**model_kwargs, | |
) -> Union[TFGreedySearchOutput, TFSampleOutput, TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]: | |
r""" | |
Generates sequences for models with a language modeling head. The method currently supports greedy decoding, | |
beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling. | |
Adapted in part from `Facebook's XLM beam search code | |
<https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529>`__. | |
Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the | |
attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values | |
        indicated are the default values of those config attributes.
Most of these parameters are explained in more detail in `this blog post | |
<https://huggingface.co/blog/how-to-generate>`__. | |
Parameters: | |
input_ids (:obj:`tf.Tensor` of :obj:`dtype=tf.int32` and shape :obj:`(batch_size, sequence_length)`, `optional`): | |
The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty | |
:obj:`tf.Tensor` of shape :obj:`(1,)`. | |
max_length (:obj:`int`, `optional`, defaults to 20): | |
The maximum length of the sequence to be generated. | |
min_length (:obj:`int`, `optional`, defaults to 10): | |
The minimum length of the sequence to be generated. | |
do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`): | |
                Whether or not to use sampling; use greedy decoding otherwise.
early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`): | |
Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not. | |
num_beams (:obj:`int`, `optional`, defaults to 1): | |
Number of beams for beam search. 1 means no beam search. | |
temperature (:obj:`float`, `optional`, defaults to 1.0): | |
The value used to module the next token probabilities. | |
top_k (:obj:`int`, `optional`, defaults to 50): | |
The number of highest probability vocabulary tokens to keep for top-k-filtering. | |
top_p (:obj:`float`, `optional`, defaults to 1.0): | |
If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or | |
higher are kept for generation. | |
repetition_penalty (:obj:`float`, `optional`, defaults to 1.0): | |
The parameter for repetition penalty. 1.0 means no penalty. See `this paper | |
<https://arxiv.org/pdf/1909.05858.pdf>`__ for more details. | |
pad_token_id (:obj:`int`, `optional`): | |
The id of the `padding` token. | |
bos_token_id (:obj:`int`, `optional`): | |
The id of the `beginning-of-sequence` token. | |
eos_token_id (:obj:`int`, `optional`): | |
The id of the `end-of-sequence` token. | |
length_penalty (:obj:`float`, `optional`, defaults to 1.0): | |
Exponential penalty to the length. 1.0 means no penalty. | |
Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in | |
order to encourage the model to produce longer sequences. | |
no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0): | |
If set to int > 0, all ngrams of that size can only occur once. | |
            bad_words_ids (:obj:`List[List[int]]`, `optional`):
                List of token ids that are not allowed to be generated. In order to get the tokens of the words that
                should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`.
            num_return_sequences (:obj:`int`, `optional`, defaults to 1):
The number of independently computed returned sequences for each element in the batch. | |
attention_mask (:obj:`tf.Tensor` of :obj:`dtype=tf.int32` and shape :obj:`(batch_size, sequence_length)`, `optional`): | |
Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for | |
tokens that are not masked, and 0 for masked tokens. | |
If not provided, will default to a tensor the same shape as :obj:`input_ids` that masks the pad token. | |
`What are attention masks? <../glossary.html#attention-mask>`__ | |
decoder_start_token_id (:obj:`int`, `optional`): | |
If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token. | |
            use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should use the past last key/values attentions (if applicable to the model) to | |
speed up decoding. | |
output_attentions (:obj:`bool`, `optional`, defaults to `False`): | |
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under | |
returned tensors for more details. | |
output_hidden_states (:obj:`bool`, `optional`, defaults to `False`): | |
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors | |
for more details. | |
output_scores (:obj:`bool`, `optional`, defaults to `False`): | |
Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details. | |
return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`): | |
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. | |
forced_bos_token_id (:obj:`int`, `optional`): | |
The id of the token to force as the first generated token after the :obj:`decoder_start_token_id`. | |
Useful for multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token | |
needs to be the target language token. | |
forced_eos_token_id (:obj:`int`, `optional`): | |
The id of the token to force as the last generated token when :obj:`max_length` is reached. | |
            model_kwargs:
                Additional model specific kwargs will be forwarded to the :obj:`call` function of the model.
Return: | |
:class:`~transformers.file_utils.ModelOutput` or :obj:`tf.Tensor`: A | |
:class:`~transformers.file_utils.ModelOutput` (if ``return_dict_in_generate=True`` or when | |
``config.return_dict_in_generate=True``) or a :obj:`tf.Tensor`. | |
If the model is `not` an encoder-decoder model (``model.config.is_encoder_decoder=False``), the | |
possible :class:`~transformers.file_utils.ModelOutput` types are: | |
- :class:`~transformers.generation_utils.TFGreedySearchDecoderOnlyOutput`, | |
- :class:`~transformers.generation_utils.TFSampleDecoderOnlyOutput`, | |
- :class:`~transformers.generation_utils.TFBeamSearchDecoderOnlyOutput`, | |
- :class:`~transformers.generation_utils.TFBeamSampleDecoderOnlyOutput` | |
If the model is an encoder-decoder model (``model.config.is_encoder_decoder=True``), the possible | |
:class:`~transformers.file_utils.ModelOutput` types are: | |
- :class:`~transformers.generation_utils.TFGreedySearchEncoderDecoderOutput`, | |
- :class:`~transformers.generation_utils.TFSampleEncoderDecoderOutput`, | |
- :class:`~transformers.generation_utils.TFBeamSearchEncoderDecoderOutput`, | |
- :class:`~transformers.generation_utils.TFBeamSampleEncoderDecoderOutput` | |
        Examples::

            from transformers import AutoTokenizer, TFAutoModelWithLMHead

            tokenizer = AutoTokenizer.from_pretrained('distilgpt2')   # Initialize tokenizer
model = TFAutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from huggingface.co and cache. | |
outputs = model.generate(max_length=40) # do greedy decoding | |
print(f'Generated: {tokenizer.decode(outputs[0], skip_special_tokens=True)}') | |
tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer | |
model = TFAutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from huggingface.co and cache. | |
input_context = 'The dog' | |
input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context | |
outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog' | |
for i in range(3): # 3 output sequences were generated | |
print(f'Generated {i}: {tokenizer.decode(outputs[i], skip_special_tokens=True)}') | |
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer | |
model = TFAutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from huggingface.co and cache. | |
input_context = 'The dog' | |
input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context | |
outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # generate 3 candidates using sampling | |
for i in range(3): # 3 output sequences were generated | |
print(f'Generated {i}: {tokenizer.decode(outputs[i], skip_special_tokens=True)}') | |
tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer | |
model = TFAutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from huggingface.co and cache. | |
input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl | |
input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context | |
outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences | |
print(f'Generated: {tokenizer.decode(outputs[0], skip_special_tokens=True)}') | |
tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer | |
model = TFAutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from huggingface.co and cache. | |
input_context = 'My cute dog' | |
bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']] | |
input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context | |
outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated | |
""" | |
# We cannot generate if the model does not have a LM head | |
if self.get_output_embeddings() is None: | |
raise AttributeError( | |
"You tried to generate sequences with a model that does not have a LM Head." | |
"Please use another model class (e.g. `TFOpenAIGPTLMHeadModel`, `TFXLNetLMHeadModel`, `TFGPT2LMHeadModel`, `TFCTRLLMHeadModel`, `TFT5ForConditionalGeneration`, `TFTransfoXLLMHeadModel`)" | |
) | |
max_length = max_length if max_length is not None else self.config.max_length | |
min_length = min_length if min_length is not None else self.config.min_length | |
do_sample = do_sample if do_sample is not None else self.config.do_sample | |
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping | |
num_beams = num_beams if num_beams is not None else self.config.num_beams | |
temperature = temperature if temperature is not None else self.config.temperature | |
top_k = top_k if top_k is not None else self.config.top_k | |
top_p = top_p if top_p is not None else self.config.top_p | |
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty | |
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id | |
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id | |
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id | |
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty | |
no_repeat_ngram_size = ( | |
no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size | |
) | |
bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids | |
num_return_sequences = ( | |
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences | |
) | |
decoder_start_token_id = ( | |
decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id | |
) | |
forced_bos_token_id = ( | |
forced_bos_token_id if forced_bos_token_id is not None else self.config.forced_bos_token_id | |
) | |
forced_eos_token_id = ( | |
forced_eos_token_id if forced_eos_token_id is not None else self.config.forced_eos_token_id | |
) | |
output_scores = output_scores if output_scores is not None else self.config.output_scores | |
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions | |
output_hidden_states = ( | |
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states | |
) | |
return_dict_in_generate = ( | |
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate | |
) | |
model_kwargs["output_scores"] = output_scores | |
model_kwargs["output_attentions"] = output_attentions | |
model_kwargs["output_hidden_states"] = output_hidden_states | |
if self.config.is_encoder_decoder: | |
model_kwargs["encoder_attentions"] = None | |
model_kwargs["encoder_hidden_states"] = None | |
if input_ids is not None: | |
batch_size = shape_list(input_ids)[0] # overridden by the input batch_size | |
else: | |
batch_size = 1 | |
assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer." | |
        assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a non-negative integer."
assert isinstance(do_sample, bool), "`do_sample` should be a boolean." | |
assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean." | |
assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer." | |
assert temperature > 0, "`temperature` should be strictly positive." | |
assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer." | |
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1." | |
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1." | |
assert input_ids is not None or ( | |
isinstance(bos_token_id, int) and bos_token_id >= 0 | |
), "If input_ids is not defined, `bos_token_id` should be a positive integer." | |
assert pad_token_id is None or ( | |
isinstance(pad_token_id, int) and (pad_token_id >= 0) | |
), "`pad_token_id` should be a positive integer." | |
assert (eos_token_id is None) or ( | |
isinstance(eos_token_id, int) and (eos_token_id >= 0) | |
), "`eos_token_id` should be a positive integer." | |
assert length_penalty > 0, "`length_penalty` should be strictly positive." | |
assert ( | |
isinstance(num_return_sequences, int) and num_return_sequences > 0 | |
), "`num_return_sequences` should be a strictly positive integer." | |
assert ( | |
bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list) | |
), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated" | |
if input_ids is None: | |
assert isinstance(bos_token_id, int) and bos_token_id >= 0, ( | |
"you should either supply a context to complete as `input_ids` input " | |
"or a `bos_token_id` (integer >= 0) as a first token to start the generation." | |
) | |
input_ids = tf.fill((batch_size, 1), bos_token_id) | |
else: | |
assert len(shape_list(input_ids)) == 2, "Input prompt should be of shape (batch_size, sequence length)." | |
        # avoid duplicating outputs when greedy decoding
if do_sample is False: | |
if num_beams == 1: | |
# no_beam_search greedy generation conditions | |
assert ( | |
num_return_sequences == 1 | |
), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1" | |
else: | |
# beam_search greedy generation conditions | |
assert ( | |
num_beams >= num_return_sequences | |
), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences" | |
# create attention mask if necessary | |
# TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140 | |
if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids.numpy()): | |
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), dtype=tf.int32) | |
elif attention_mask is None: | |
attention_mask = tf.ones_like(input_ids) | |
if pad_token_id is None and eos_token_id is not None: | |
logger.warning(f"Setting `pad_token_id` to {eos_token_id} (first `eos_token_id`) to generate sequence") | |
pad_token_id = eos_token_id | |
# current position and vocab size | |
cur_len = shape_list(input_ids)[1] # unused | |
vocab_size = self.config.vocab_size | |
# set effective batch size and effective batch multiplier according to do_sample | |
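        # e.g. batch_size=2, num_return_sequences=3 with do_sample=True gives effective_batch_size=6
        # and effective_batch_mult=3 (illustrative values, not defaults)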
if do_sample: | |
effective_batch_size = batch_size * num_return_sequences | |
effective_batch_mult = num_return_sequences | |
else: | |
effective_batch_size = batch_size | |
effective_batch_mult = 1 | |
if self.config.is_encoder_decoder: | |
if decoder_start_token_id is None: | |
decoder_start_token_id = bos_token_id | |
assert ( | |
decoder_start_token_id is not None | |
), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation" | |
assert hasattr(self, "get_encoder"), f"{self} should have a 'get_encoder' function defined" | |
assert callable(self.get_encoder), f"{self.get_encoder} should be a method" | |
# get encoder and store encoder outputs | |
encoder = self.get_encoder() | |
encoder_outputs = encoder( | |
input_ids, | |
attention_mask=attention_mask, | |
output_attentions=output_attentions, | |
output_hidden_states=output_hidden_states, | |
) | |
if return_dict_in_generate: | |
if output_attentions: | |
model_kwargs["encoder_attentions"] = encoder_outputs.attentions | |
if output_hidden_states: | |
model_kwargs["encoder_hidden_states"] = encoder_outputs.hidden_states | |
# Expand input ids if num_beams > 1 or num_return_sequences > 1 | |
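        # after this expansion the effective_batch_mult * num_beams copies of each example sit in
        # contiguous rows, which the encoder-output gather below relies on for encoder-decoder models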
if num_return_sequences > 1 or num_beams > 1: | |
input_ids_len = shape_list(input_ids)[-1] | |
input_ids = tf.broadcast_to( | |
tf.expand_dims(input_ids, 1), (batch_size, effective_batch_mult * num_beams, input_ids_len) | |
) | |
attention_mask = tf.broadcast_to( | |
tf.expand_dims(attention_mask, 1), (batch_size, effective_batch_mult * num_beams, input_ids_len) | |
) | |
input_ids = tf.reshape( | |
input_ids, (effective_batch_size * num_beams, input_ids_len) | |
) # shape: (batch_size * num_return_sequences * num_beams, cur_len) | |
attention_mask = tf.reshape( | |
attention_mask, (effective_batch_size * num_beams, input_ids_len) | |
) # shape: (batch_size * num_return_sequences * num_beams, cur_len) | |
if self.config.is_encoder_decoder: | |
# create empty decoder_input_ids | |
input_ids = ( | |
tf.ones( | |
(effective_batch_size * num_beams, 1), | |
dtype=tf.int32, | |
) | |
* decoder_start_token_id | |
) | |
cur_len = 1 | |
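            # generation for encoder-decoder models starts from a single decoder_start_token_id token,
            # so the running length is reset to 1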
assert ( | |
batch_size == encoder_outputs[0].shape[0] | |
), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} " | |
# expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1) | |
expanded_batch_idxs = tf.reshape( | |
tf.repeat(tf.expand_dims(tf.range(batch_size), -1), repeats=num_beams * effective_batch_mult, axis=1), | |
shape=(-1,), | |
) | |
# expand encoder_outputs | |
encoder_outputs = (tf.gather(encoder_outputs[0], expanded_batch_idxs, axis=0),) | |
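            # encoder_outputs[0] now has effective_batch_size * num_beams rows, aligned row-by-row
            # with the expanded decoder input_ids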
else: | |
encoder_outputs = None | |
cur_len = shape_list(input_ids)[-1] | |
assert ( | |
cur_len < max_length | |
), f"The context has {cur_len} number of tokens, but `max_length` is only {max_length}. Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`" | |
if num_beams > 1: | |
output = self._generate_beam_search( | |
input_ids, | |
cur_len=cur_len, | |
max_length=max_length, | |
min_length=min_length, | |
do_sample=do_sample, | |
early_stopping=early_stopping, | |
temperature=temperature, | |
top_k=top_k, | |
top_p=top_p, | |
repetition_penalty=repetition_penalty, | |
no_repeat_ngram_size=no_repeat_ngram_size, | |
bad_words_ids=bad_words_ids, | |
pad_token_id=pad_token_id, | |
eos_token_id=eos_token_id, | |
batch_size=effective_batch_size, | |
num_return_sequences=num_return_sequences, | |
length_penalty=length_penalty, | |
num_beams=num_beams, | |
vocab_size=vocab_size, | |
encoder_outputs=encoder_outputs, | |
attention_mask=attention_mask, | |
use_cache=use_cache, | |
forced_bos_token_id=forced_bos_token_id, | |
forced_eos_token_id=forced_eos_token_id, | |
return_dict_in_generate=return_dict_in_generate, | |
**model_kwargs, | |
) | |
else: | |
output = self._generate_no_beam_search( | |
input_ids, | |
cur_len=cur_len, | |
max_length=max_length, | |
min_length=min_length, | |
do_sample=do_sample, | |
temperature=temperature, | |
top_k=top_k, | |
top_p=top_p, | |
repetition_penalty=repetition_penalty, | |
no_repeat_ngram_size=no_repeat_ngram_size, | |
bad_words_ids=bad_words_ids, | |
pad_token_id=pad_token_id, | |
eos_token_id=eos_token_id, | |
batch_size=effective_batch_size, | |
vocab_size=vocab_size, | |
encoder_outputs=encoder_outputs, | |
attention_mask=attention_mask, | |
use_cache=use_cache, | |
return_dict_in_generate=return_dict_in_generate, | |
**model_kwargs, | |
) | |
return output | |
def _generate_no_beam_search( | |
self, | |
input_ids, | |
cur_len, | |
max_length, | |
min_length, | |
do_sample, | |
temperature, | |
top_k, | |
top_p, | |
repetition_penalty, | |
no_repeat_ngram_size, | |
bad_words_ids, | |
pad_token_id, | |
eos_token_id, | |
batch_size, | |
vocab_size, | |
encoder_outputs, | |
attention_mask, | |
use_cache, | |
return_dict_in_generate, | |
**kwargs | |
) -> Union[TFGreedySearchOutput, TFSampleOutput, tf.Tensor]: | |
""" | |
Generate sequences for each example without beam search (num_beams == 1). All returned sequences are generated | |
independently. | |
""" | |
# length of generated sentences / unfinished sentences | |
unfinished_sents = tf.ones_like(input_ids[:, 0]) | |
sent_lengths = tf.ones_like(input_ids[:, 0]) * max_length | |
past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models | |
# init attention / hidden states / scores tuples | |
scores = () if (return_dict_in_generate and kwargs["output_scores"]) else None | |
decoder_attentions = () if (return_dict_in_generate and kwargs["output_attentions"]) else None | |
cross_attentions = () if (return_dict_in_generate and kwargs["output_attentions"]) else None | |
decoder_hidden_states = () if (return_dict_in_generate and kwargs["output_hidden_states"]) else None | |
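        # one entry is appended to each of these tuples per generated token inside the loop below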
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states | |
if self.config.is_encoder_decoder: | |
encoder_attentions = ( | |
kwargs["encoder_attentions"] if (return_dict_in_generate and kwargs["encoder_attentions"]) else None | |
) | |
encoder_hidden_states = ( | |
kwargs["encoder_hidden_states"] | |
if (return_dict_in_generate and kwargs["encoder_hidden_states"]) | |
else None | |
) | |
while cur_len < max_length: | |
model_inputs = self.prepare_inputs_for_generation( | |
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **kwargs | |
) | |
outputs = self( | |
**model_inputs, | |
return_dict=True, | |
output_attentions=kwargs["output_attentions"], | |
output_hidden_states=kwargs["output_hidden_states"], | |
) | |
next_token_logits = outputs.logits[:, -1, :] # (batch_size * num_beams, vocab_size) | |
# Store scores, attentions and hidden_states when required | |
if return_dict_in_generate: | |
if kwargs["output_scores"]: | |
scores += (next_token_logits,) | |
if kwargs["output_attentions"]: | |
decoder_attentions += ( | |
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) | |
) | |
if self.config.is_encoder_decoder: | |
cross_attentions += (outputs.cross_attentions,) | |
if kwargs["output_hidden_states"]: | |
decoder_hidden_states += ( | |
(outputs.decoder_hidden_states,) | |
if self.config.is_encoder_decoder | |
else (outputs.hidden_states,) | |
) | |
# if model has past, then set the past variable to speed up decoding | |
if self._use_cache(outputs, use_cache): | |
past = outputs[1] | |
# repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858) | |
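            # tokens that already appear in input_ids get their logits scaled by a multiplicative factor
            # computed by the `_create_next_token_logits_penalties` helper; all other tokens are left unchanged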
if repetition_penalty != 1.0: | |
next_token_logits_penalties = _create_next_token_logits_penalties( | |
input_ids, next_token_logits, repetition_penalty | |
) | |
next_token_logits = tf.math.multiply(next_token_logits, next_token_logits_penalties) | |
if no_repeat_ngram_size > 0: | |
# calculate a list of banned tokens to prevent repetitively generating the same ngrams | |
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345 | |
banned_tokens = calc_banned_ngram_tokens(input_ids, batch_size, no_repeat_ngram_size, cur_len) | |
# create banned_tokens boolean mask | |
banned_tokens_indices_mask = [] | |
                for banned_tokens_slice in banned_tokens:
                    banned_tokens_indices_mask.append(
                        [token in banned_tokens_slice for token in range(vocab_size)]
                    )
next_token_logits = set_tensor_by_indices_to_value( | |
next_token_logits, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf") | |
) | |
if bad_words_ids is not None: | |
# calculate a list of banned tokens according to bad words | |
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids) | |
banned_tokens_indices_mask = [] | |
                for banned_tokens_slice in banned_tokens:
                    banned_tokens_indices_mask.append(
                        [token in banned_tokens_slice for token in range(vocab_size)]
                    )
next_token_logits = set_tensor_by_indices_to_value( | |
next_token_logits, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf") | |
) | |
# set eos token prob to zero if min_length is not reached | |
if eos_token_id is not None and cur_len < min_length: | |
# create eos_token_id boolean mask | |
                is_token_logit_eos_token = tf.convert_to_tensor(
                    [token == eos_token_id for token in range(vocab_size)], dtype=tf.bool
                )
eos_token_indices_mask = tf.broadcast_to(is_token_logit_eos_token, [batch_size, vocab_size]) | |
next_token_logits = set_tensor_by_indices_to_value( | |
next_token_logits, eos_token_indices_mask, -float("inf") | |
) | |
if do_sample: | |
# Temperature (higher temperature => more likely to sample low probability tokens) | |
if temperature != 1.0: | |
next_token_logits = next_token_logits / temperature | |
# Top-p/top-k filtering | |
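                # `tf_top_k_top_p_filtering` is expected to set logits outside the top-k / nucleus set to -inf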
next_token_logits = tf_top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p) | |
# Sample | |
next_token = tf.squeeze( | |
tf.random.categorical(next_token_logits, dtype=tf.int32, num_samples=1), axis=1 | |
) | |
else: | |
# Greedy decoding | |
next_token = tf.math.argmax(next_token_logits, axis=-1, output_type=tf.int32) | |
# update generations and finished sentences | |
if eos_token_id is not None: | |
# pad finished sentences if eos_token_id exist | |
tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents) | |
else: | |
tokens_to_add = next_token | |
# add token and increase length by one | |
input_ids = tf.concat([input_ids, tf.expand_dims(tokens_to_add, -1)], 1) | |
cur_len = cur_len + 1 | |
if eos_token_id is not None: | |
eos_in_sents = tokens_to_add == eos_token_id | |
# if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length | |
is_sents_unfinished_and_token_to_add_is_eos = tf.math.multiply( | |
unfinished_sents, tf.cast(eos_in_sents, tf.int32) | |
) | |
sent_lengths = ( | |
sent_lengths * (1 - is_sents_unfinished_and_token_to_add_is_eos) | |
+ cur_len * is_sents_unfinished_and_token_to_add_is_eos | |
) | |
# unfinished_sents is set to zero if eos in sentence | |
unfinished_sents -= is_sents_unfinished_and_token_to_add_is_eos | |
# stop when there is a </s> in each sentence, or if we exceed the maximum length | |
if tf.math.reduce_max(unfinished_sents) == 0: | |
break | |
# extend attention_mask for new generated input if only decoder | |
if self.config.is_encoder_decoder is False: | |
attention_mask = tf.concat( | |
[attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1 | |
) | |
# if there are different sentences lengths in the batch, some batches have to be padded | |
min_sent_length = tf.math.reduce_min(sent_lengths) | |
max_sent_length = tf.math.reduce_max(sent_lengths) | |
if min_sent_length != max_sent_length: | |
            assert pad_token_id is not None, "`pad_token_id` has to be defined if batches have different lengths"
# finished sents are filled with pad_token | |
padding = tf.ones([batch_size, max_sent_length.numpy()], dtype=tf.int32) * pad_token_id | |
# create length masks for tf.where operation | |
broad_casted_sent_lengths = tf.broadcast_to( | |
tf.expand_dims(sent_lengths, -1), [batch_size, max_sent_length] | |
) | |
broad_casted_range = tf.transpose( | |
tf.broadcast_to(tf.expand_dims(tf.range(max_sent_length), -1), [max_sent_length, batch_size]) | |
) | |
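            # broad_casted_range[i, j] == j, so positions at or beyond a finished sentence's length are
            # replaced with pad_token_id while still-unfinished (max_length) sentences are kept unchanged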
decoded = tf.where(broad_casted_range < broad_casted_sent_lengths, input_ids, padding) | |
else: | |
decoded = input_ids | |
if return_dict_in_generate: | |
if do_sample: | |
if self.config.is_encoder_decoder: | |
return TFSampleEncoderDecoderOutput( | |
sequences=decoded, | |
scores=scores, | |
encoder_attentions=encoder_attentions, | |
encoder_hidden_states=encoder_hidden_states, | |
decoder_attentions=decoder_attentions, | |
cross_attentions=cross_attentions, | |
decoder_hidden_states=decoder_hidden_states, | |
) | |
else: | |
return TFSampleDecoderOnlyOutput( | |
sequences=decoded, | |
scores=scores, | |
attentions=decoder_attentions, | |
hidden_states=decoder_hidden_states, | |
) | |
else: | |
if self.config.is_encoder_decoder: | |
return TFGreedySearchEncoderDecoderOutput( | |
sequences=decoded, | |
scores=scores, | |
encoder_attentions=encoder_attentions, | |
encoder_hidden_states=encoder_hidden_states, | |
decoder_attentions=decoder_attentions, | |
cross_attentions=cross_attentions, | |
decoder_hidden_states=decoder_hidden_states, | |
) | |
else: | |
return TFGreedySearchDecoderOnlyOutput( | |
sequences=decoded, | |
scores=scores, | |
attentions=decoder_attentions, | |
hidden_states=decoder_hidden_states, | |
) | |
else: | |
return decoded | |
def _generate_beam_search( | |
self, | |
input_ids, | |
cur_len, | |
max_length, | |
min_length, | |
do_sample, | |
early_stopping, | |
temperature, | |
top_k, | |
top_p, | |
repetition_penalty, | |
no_repeat_ngram_size, | |
bad_words_ids, | |
pad_token_id, | |
eos_token_id, | |
batch_size, | |
num_return_sequences, | |
length_penalty, | |
num_beams, | |
vocab_size, | |
encoder_outputs, | |
attention_mask, | |
use_cache, | |
forced_bos_token_id, | |
forced_eos_token_id, | |
return_dict_in_generate, | |
**kwargs, | |
) -> Union[TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]: | |
"""Generate sequences for each example with beam search.""" | |
# generated hypotheses | |
generated_hyps = [ | |
BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping) | |
for _ in range(batch_size) | |
] | |
# for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times | |
if do_sample is False: | |
beam_scores_begin = tf.zeros((batch_size, 1), dtype=tf.float32) | |
beam_scores_end = tf.ones((batch_size, num_beams - 1), dtype=tf.float32) * (-1e9) | |
beam_scores = tf.concat([beam_scores_begin, beam_scores_end], -1) | |
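            # e.g. with num_beams=3 every example starts with beam scores [0.0, -1e9, -1e9], so only the
            # first beam contributes candidates at the first step and duplicate hypotheses are avoided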
else: | |
beam_scores = tf.zeros((batch_size, num_beams), dtype=tf.float32) | |
beam_scores = tf.reshape(beam_scores, (batch_size * num_beams,)) | |
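        # flatten to shape (batch_size * num_beams,) to match the flattened hypothesis dimension used below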
# cache compute states | |
past = encoder_outputs | |
# to stay similar to torch : past = (encoder_outputs, None) if encoder_outputs is not None else None | |
# init attention / hidden states / scores tuples | |
scores = () if (return_dict_in_generate and kwargs["output_scores"]) else None | |
decoder_attentions = () if (return_dict_in_generate and kwargs["output_attentions"]) else None | |
cross_attentions = () if (return_dict_in_generate and kwargs["output_attentions"]) else None | |
decoder_hidden_states = () if (return_dict_in_generate and kwargs["output_hidden_states"]) else None | |
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states | |
if self.config.is_encoder_decoder: | |
encoder_attentions = ( | |
kwargs["encoder_attentions"] if (return_dict_in_generate and kwargs["encoder_attentions"]) else None | |
) | |
encoder_hidden_states = ( | |
kwargs["encoder_hidden_states"] | |
if (return_dict_in_generate and kwargs["encoder_hidden_states"]) | |
else None | |
) | |
# done sentences | |
done = [False for _ in range(batch_size)] | |
while cur_len < max_length: | |
model_inputs = self.prepare_inputs_for_generation( | |
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **kwargs | |
) | |
outputs = self( | |
**model_inputs, | |
return_dict=True, | |
output_attentions=kwargs["output_attentions"], | |
output_hidden_states=kwargs["output_hidden_states"], | |
) | |
next_token_logits = outputs.logits[:, -1, :] # (batch_size * num_beams, vocab_size) | |
# if model has past, then set the past variable to speed up decoding | |
if self._use_cache(outputs, use_cache): | |
past = outputs[1] | |
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858) | |
if repetition_penalty != 1.0: | |
next_token_logits_penalties = _create_next_token_logits_penalties( | |
input_ids, next_token_logits, repetition_penalty | |
) | |
next_token_logits = tf.math.multiply(next_token_logits, next_token_logits_penalties) | |
# Temperature (higher temperature => more likely to sample low probability tokens) | |
if temperature != 1.0: | |
next_token_logits = next_token_logits / temperature | |
if self.config.is_encoder_decoder and do_sample is False: | |
next_token_logits = self.adjust_logits_during_generation( | |
next_token_logits, | |
cur_len=cur_len, | |
max_length=max_length, | |
forced_bos_token_id=forced_bos_token_id, | |
forced_eos_token_id=forced_eos_token_id, | |
) | |
# calculate log softmax scores (kept in a separate variable so the `scores` tuple collected for `output_scores` below is not overwritten)
next_token_scores = tf.nn.log_softmax(next_token_logits, axis=-1)  # (batch_size * num_beams, vocab_size)
# set eos token prob to zero if min_length is not reached | |
if eos_token_id is not None and cur_len < min_length: | |
# create eos_token_id boolean mask | |
num_batch_hypotheses = batch_size * num_beams | |
is_token_logit_eos_token = tf.convert_to_tensor( | |
[token == eos_token_id for token in range(vocab_size)], dtype=tf.bool
) | |
eos_token_indices_mask = tf.broadcast_to(is_token_logit_eos_token, [num_batch_hypotheses, vocab_size]) | |
next_token_scores = set_tensor_by_indices_to_value(next_token_scores, eos_token_indices_mask, -float("inf"))
if no_repeat_ngram_size > 0: | |
# calculate a list of banned tokens to prevent repetitively generating the same ngrams | |
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345 | |
num_batch_hypotheses = batch_size * num_beams | |
banned_tokens = calc_banned_ngram_tokens( | |
input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len | |
) | |
# create banned_tokens boolean mask | |
banned_tokens_indices_mask = [] | |
for banned_tokens_slice in banned_tokens: | |
banned_tokens_indices_mask.append( | |
[token in banned_tokens_slice for token in range(vocab_size)]
) | |
next_token_scores = set_tensor_by_indices_to_value(
next_token_scores, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf")
) | |
if bad_words_ids is not None: | |
# calculate a list of banned tokens according to bad words | |
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids) | |
banned_tokens_indices_mask = [] | |
for banned_tokens_slice in banned_tokens: | |
banned_tokens_indices_mask.append( | |
[token in banned_tokens_slice for token in range(vocab_size)]
) | |
next_token_scores = set_tensor_by_indices_to_value(
next_token_scores, tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf")
) | |
assert shape_list(next_token_scores) == [batch_size * num_beams, vocab_size]
if do_sample: | |
_scores = next_token_scores + tf.broadcast_to(
beam_scores[:, None], (batch_size * num_beams, vocab_size) | |
) # (batch_size * num_beams, vocab_size) | |
# Top-p/top-k filtering | |
_scores = tf_top_k_top_p_filtering( | |
_scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2 | |
) # (batch_size * num_beams, vocab_size) | |
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search) | |
_scores = tf.reshape(_scores, (batch_size, num_beams * vocab_size)) | |
next_tokens = sample_without_replacement( | |
_scores, num_samples=2 * num_beams | |
) # (batch_size, 2 * num_beams) | |
# Compute next scores | |
next_scores = tf.gather(_scores, next_tokens, batch_dims=1) # (batch_size, 2 * num_beams) | |
# sort the sampled vector to make sure that the first num_beams samples are the best | |
next_scores_indices = tf.argsort(next_scores, direction="DESCENDING", axis=1) | |
next_scores = tf.gather(next_scores, next_scores_indices, batch_dims=1) # (batch_size, num_beams * 2) | |
next_tokens = tf.gather(next_tokens, next_scores_indices, batch_dims=1) # (batch_size, num_beams * 2) | |
else: | |
# Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product) | |
next_scores = next_token_scores + tf.broadcast_to(
beam_scores[:, None], (batch_size * num_beams, vocab_size) | |
) # (batch_size * num_beams, vocab_size) | |
# re-organize to group the beam together (we are keeping top hypothesis across beams) | |
next_scores = tf.reshape( | |
next_scores, (batch_size, num_beams * vocab_size) | |
) # (batch_size, num_beams * vocab_size) | |
next_scores, next_tokens = tf.math.top_k(next_scores, k=2 * num_beams, sorted=True) | |
assert shape_list(next_scores) == shape_list(next_tokens) == [batch_size, 2 * num_beams] | |
# Store scores, attentions and hidden_states when required | |
if return_dict_in_generate: | |
if kwargs["output_scores"]: | |
scores += (next_token_logits,) | |
if kwargs["output_attentions"]: | |
decoder_attentions += ( | |
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) | |
) | |
if self.config.is_encoder_decoder: | |
cross_attentions += (outputs.cross_attentions,) | |
if kwargs["output_hidden_states"]: | |
decoder_hidden_states += ( | |
(outputs.decoder_hidden_states,) | |
if self.config.is_encoder_decoder | |
else (outputs.hidden_states,) | |
) | |
# next batch beam content | |
next_batch_beam = [] | |
# for each sentence | |
for batch_idx in range(batch_size): | |
# if we are done with this sentence | |
if done[batch_idx]: | |
assert ( | |
len(generated_hyps[batch_idx]) >= num_beams | |
), f"Batch can only be done if at least {num_beams} beams have been generated." | |
assert ( | |
eos_token_id is not None and pad_token_id is not None | |
), "generated beams >= num_beams -> eos_token_id and pad_token_id have to be defined"
next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch | |
continue | |
# next sentence beam content | |
next_sent_beam = [] | |
# next tokens for this sentence | |
for beam_token_rank, (beam_token_id, beam_token_score) in enumerate( | |
zip(next_tokens[batch_idx], next_scores[batch_idx]) | |
): | |
# get beam and token IDs | |
beam_id = beam_token_id // vocab_size | |
token_id = beam_token_id % vocab_size | |
effective_beam_id = batch_idx * num_beams + beam_id | |
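# Worked example (hypothetical values): with vocab_size = 50_000 and num_beams = 4, a flattened
# candidate id beam_token_id = 100_003 decodes to beam_id = 2 and token_id = 3, i.e. token 3
# appended to beam 2 of this batch entry; for batch_idx = 1 the corresponding row in the flattened
# (batch_size * num_beams) layout is effective_beam_id = 1 * 4 + 2 = 6.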
# add to generated hypotheses if end of sentence or last iteration | |
if (eos_token_id is not None) and (token_id.numpy() == eos_token_id): | |
# if beam_token does not belong to top num_beams tokens, it should not be added | |
is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams | |
if is_beam_token_worse_than_top_num_beams: | |
continue | |
generated_hyps[batch_idx].add( | |
tf.identity(input_ids[effective_beam_id]), beam_token_score.numpy() | |
) | |
else: | |
# add next predicted token if it is not eos_token | |
next_sent_beam.append((beam_token_score, token_id, effective_beam_id)) | |
# the beam for next step is full | |
if len(next_sent_beam) == num_beams: | |
break | |
# Check if we are done so that we can save a pad step if all(done) | |
done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done( | |
tf.reduce_max(next_scores[batch_idx]).numpy(), cur_len | |
) | |
# update next beam content | |
assert len(next_sent_beam) == num_beams, "Beam should always be full" | |
next_batch_beam.extend(next_sent_beam) | |
assert len(next_batch_beam) == num_beams * (batch_idx + 1) | |
# stop when we are done with each sentence | |
if all(done): | |
break | |
# sanity check / prepare next batch | |
assert len(next_batch_beam) == batch_size * num_beams | |
beam_scores = tf.convert_to_tensor([x[0] for x in next_batch_beam], dtype=tf.float32) | |
beam_tokens = tf.convert_to_tensor([x[1] for x in next_batch_beam], dtype=tf.int32) | |
beam_idx = tf.convert_to_tensor([x[2] for x in next_batch_beam], dtype=tf.int32) | |
# re-order batch and update current length | |
input_ids = tf.stack([tf.identity(input_ids[x, :]) for x in beam_idx]) | |
input_ids = tf.concat([input_ids, tf.expand_dims(beam_tokens, 1)], axis=-1) | |
cur_len = cur_len + 1 | |
# re-order internal states | |
if past is not None: | |
past = self._reorder_cache(past, beam_idx) | |
# extend attention_mask for new generated input if only decoder | |
if self.config.is_encoder_decoder is False: | |
attention_mask = tf.concat( | |
[attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1 | |
) | |
# finalize all open beam hypotheses and add them to the generated hypotheses
for batch_idx in range(batch_size): | |
# Add all open beam hypothesis to generated_hyps | |
if done[batch_idx]: | |
continue | |
# test that beam scores match previously calculated scores if not eos and batch_idx not done | |
if eos_token_id is not None and all( | |
(token_id % vocab_size).numpy().item() != eos_token_id for token_id in next_tokens[batch_idx] | |
): | |
if not tf.reduce_all( | |
next_scores[batch_idx, :num_beams] == tf.reshape(beam_scores, (batch_size, num_beams))[batch_idx] | |
): | |
raise ValueError( | |
f"If batch_idx is not done, final next scores: {next_scores[:, :num_beams][batch_idx]} have " | |
"to equal to accumulated beam_scores: " | |
f"{tf.reshape(beam_scores, (batch_size, num_beams))[batch_idx]}" | |
) | |
# need to add best num_beams hypotheses to generated hyps | |
for beam_id in range(num_beams): | |
effective_beam_id = batch_idx * num_beams + beam_id | |
final_score = beam_scores[effective_beam_id].numpy().item() | |
final_tokens = input_ids[effective_beam_id] | |
generated_hyps[batch_idx].add(final_tokens, final_score) | |
# when sampling (beam sample), each batch entry keeps a single returned sequence; for greedy beam search, num_return_sequences hypotheses are returned per batch entry
output_batch_size = batch_size if do_sample else batch_size * num_return_sequences | |
output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences | |
# select the best hypotheses | |
sent_lengths_list = [] | |
best = [] | |
# retrieve best hypotheses | |
for i, hypotheses in enumerate(generated_hyps): | |
sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0]) | |
for j in range(output_num_return_sequences_per_batch): | |
best_hyp = sorted_hyps.pop()[1] | |
sent_lengths_list.append(len(best_hyp)) | |
best.append(best_hyp) | |
assert output_batch_size == len( | |
best | |
), f"Output batch size {output_batch_size} must match output beam hypotheses {len(best)}" | |
sent_lengths = tf.convert_to_tensor(sent_lengths_list, dtype=tf.int32) | |
# shorter batches are filled with pad_token | |
if tf.reduce_min(sent_lengths).numpy() != tf.reduce_max(sent_lengths).numpy(): | |
assert pad_token_id is not None, "`pad_token_id` has to be defined when hypotheses have different lengths"
sent_max_len = min(tf.reduce_max(sent_lengths).numpy() + 1, max_length) | |
decoded_list = [] | |
# fill with hypothesis and eos_token_id if necessary | |
for i, hypo in enumerate(best): | |
assert sent_lengths[i] == shape_list(hypo)[0] | |
# if sent_length is max_len do not pad | |
if sent_lengths[i] == sent_max_len: | |
decoded_slice = hypo | |
else: | |
# else pad to sent_max_len | |
num_pad_tokens = sent_max_len - sent_lengths[i] | |
padding = pad_token_id * tf.ones((num_pad_tokens,), dtype=tf.int32) | |
decoded_slice = tf.concat([hypo, padding], axis=-1) | |
# finish sentence with EOS token | |
if sent_lengths[i] < max_length: | |
decoded_slice = tf.where( | |
tf.range(sent_max_len, dtype=tf.int32) == sent_lengths[i], | |
eos_token_id * tf.ones((sent_max_len,), dtype=tf.int32), | |
decoded_slice, | |
) | |
# add to list | |
decoded_list.append(decoded_slice) | |
decoded = tf.stack(decoded_list) | |
else: | |
# none of the hypotheses have an eos_token | |
assert all(len(hypo) == max_length for hypo in best)
decoded = tf.stack(best) | |
if return_dict_in_generate: | |
if do_sample and self.config.is_encoder_decoder: | |
return TFBeamSampleEncoderDecoderOutput( | |
sequences=decoded, | |
scores=scores, | |
encoder_attentions=encoder_attentions, | |
encoder_hidden_states=encoder_hidden_states, | |
decoder_attentions=decoder_attentions, | |
cross_attentions=cross_attentions, | |
decoder_hidden_states=decoder_hidden_states, | |
) | |
elif do_sample and not self.config.is_encoder_decoder: | |
return TFBeamSampleDecoderOnlyOutput( | |
sequences=decoded, | |
scores=scores, | |
attentions=decoder_attentions, | |
hidden_states=decoder_hidden_states, | |
) | |
elif self.config.is_encoder_decoder: | |
return TFBeamSearchEncoderDecoderOutput( | |
sequences=decoded, | |
scores=scores, | |
encoder_attentions=encoder_attentions, | |
encoder_hidden_states=encoder_hidden_states, | |
decoder_attentions=decoder_attentions, | |
cross_attentions=cross_attentions, | |
decoder_hidden_states=decoder_hidden_states, | |
) | |
else: | |
return TFBeamSearchDecoderOnlyOutput( | |
sequences=decoded, | |
scores=scores, | |
attentions=decoder_attentions, | |
hidden_states=decoder_hidden_states, | |
) | |
else: | |
return decoded | |
@staticmethod
def _reorder_cache(past, beam_idx):
return tuple(tf.gather(layer_past, beam_idx, axis=1) for layer_past in past) | |
def adjust_logits_during_generation( | |
self, logits, cur_len, max_length, forced_bos_token_id, forced_eos_token_id, **kwargs | |
): | |
""" | |
Implement in subclasses of :class:`~transformers.PreTrainedModel` for custom behavior to adjust the logits in | |
the generate method. | |
""" | |
if cur_len == 1 and forced_bos_token_id is not None: | |
vocab_range = tf.constant(range(self.config.vocab_size)) | |
return tf.where(vocab_range != forced_bos_token_id, -1e8, logits) | |
elif cur_len == max_length - 1 and forced_eos_token_id is not None: | |
vocab_range = tf.constant(range(self.config.vocab_size)) | |
return tf.where(vocab_range != forced_eos_token_id, -1e8, logits) | |
else: | |
return logits | |
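# Illustrative note on adjust_logits_during_generation above: with forced_bos_token_id = 0, at
# cur_len == 1 every vocabulary position except index 0 is pushed down to -1e8, so both greedy and
# sampled decoding are effectively forced to emit token 0 first; the same masking is applied for
# forced_eos_token_id at the last position (cur_len == max_length - 1).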
def _create_next_token_logits_penalties(input_ids, logits, repetition_penalty): | |
# create logit penalties for already seen input_ids | |
token_penalties = np.ones(shape_list(logits)) | |
prev_input_ids = [np.unique(input_id) for input_id in input_ids.numpy()] | |
for i, prev_input_id in enumerate(prev_input_ids): | |
logit_penalized = logits[i].numpy()[prev_input_id] | |
logit_penalties = np.zeros(logit_penalized.shape) | |
# if the previous logit is < 0, multiply it by repetition_penalty; otherwise divide it by repetition_penalty
logit_penalties[logit_penalized < 0] = repetition_penalty | |
logit_penalties[logit_penalized > 0] = 1 / repetition_penalty | |
np.put(token_penalties[i], prev_input_id, logit_penalties) | |
return tf.convert_to_tensor(token_penalties, dtype=tf.float32) | |
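# Worked example for _create_next_token_logits_penalties above (hypothetical values): with
# repetition_penalty = 1.2, a previously generated token whose current logit is 2.0 is rescaled to
# 2.0 / 1.2 ~= 1.67, while a previously generated token with logit -2.0 is rescaled to
# -2.0 * 1.2 = -2.4; in both cases the token becomes less likely to be generated again, as described
# in the CTRL paper (https://arxiv.org/abs/1909.05858).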
def calc_banned_ngram_tokens(prev_input_ids, num_hypos, no_repeat_ngram_size, cur_len): | |
# Copied from fairseq for no_repeat_ngram in beam_search | |
if cur_len + 1 < no_repeat_ngram_size: | |
# return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet | |
return [[] for _ in range(num_hypos)] | |
generated_ngrams = [{} for _ in range(num_hypos)] | |
for idx in range(num_hypos): | |
gen_tokens = prev_input_ids[idx].numpy().tolist() | |
generated_ngram = generated_ngrams[idx] | |
for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]): | |
prev_ngram_tuple = tuple(ngram[:-1]) | |
generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] | |
def _get_generated_ngrams(hypo_idx): | |
# Before decoding the next token, prevent decoding of ngrams that have already appeared | |
start_idx = cur_len + 1 - no_repeat_ngram_size | |
ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].numpy().tolist()) | |
return generated_ngrams[hypo_idx].get(ngram_idx, []) | |
banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)] | |
return banned_tokens | |
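# Worked example for calc_banned_ngram_tokens above (hypothetical values): with
# no_repeat_ngram_size = 2 and a hypothesis whose tokens so far are [5, 7, 5], the recorded bigrams
# are (5,) -> [7] and (7,) -> [5]; the current (n-1)-gram ending at cur_len is (5,), so token 7 is
# banned for the next step and the bigram (5, 7) cannot be generated a second time.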
def calc_banned_bad_words_ids(prev_input_ids, bad_words_ids): | |
banned_tokens = [] | |
def _tokens_match(prev_tokens, tokens): | |
if len(tokens) == 0: | |
# if the banned sequence is a single token, its prefix is empty, so it is always banned
return True | |
if len(tokens) > len(prev_tokens): | |
# if bad word tokens are longer than prev tokens they can't be equal | |
return False | |
if prev_tokens[-len(tokens) :] == tokens: | |
# if tokens match | |
return True | |
else: | |
return False | |
for prev_input_ids_slice in prev_input_ids: | |
banned_tokens_slice = [] | |
for banned_token_seq in bad_words_ids: | |
assert ( | |
len(banned_token_seq) > 0 | |
), f"Banned words token sequences {bad_words_ids} cannot contain an empty list"
if _tokens_match(prev_input_ids_slice.numpy().tolist(), banned_token_seq[:-1]) is False: | |
# if tokens do not match continue | |
continue | |
banned_tokens_slice.append(banned_token_seq[-1]) | |
banned_tokens.append(banned_tokens_slice) | |
return banned_tokens | |
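# Illustrative example for calc_banned_bad_words_ids above (hypothetical values): with
# bad_words_ids = [[11, 22], [33]] and a hypothesis ending in token 11, the prefix [11] of the first
# sequence matches, so token 22 is banned for the next step; the single-token sequence [33] has an
# empty prefix and is therefore banned at every step.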
def tf_top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1): | |
""" | |
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering | |
Args: | |
logits: logits distribution shape (batch size, vocabulary size) | |
if top_k > 0: keep only top k tokens with highest probability (top-k filtering). | |
if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). | |
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) | |
Make sure we keep at least min_tokens_to_keep per batch example in the output | |
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 | |
""" | |
logits_shape = shape_list(logits) | |
if top_k > 0: | |
top_k = min(max(top_k, min_tokens_to_keep), logits_shape[-1]) # Safety check | |
# Remove all tokens with a probability less than the last token of the top-k | |
indices_to_remove = logits < tf.math.top_k(logits, k=top_k)[0][..., -1, None] | |
logits = set_tensor_by_indices_to_value(logits, indices_to_remove, filter_value) | |
if top_p < 1.0: | |
sorted_indices = tf.argsort(logits, direction="DESCENDING") | |
sorted_logits = tf.gather( | |
logits, sorted_indices, axis=-1, batch_dims=1 | |
) # expects logits to be of dim (batch_size, vocab_size) | |
cumulative_probs = tf.math.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1) | |
# Remove tokens with cumulative probability above the threshold (tokens with 0 probability are kept)
sorted_indices_to_remove = cumulative_probs > top_p | |
if min_tokens_to_keep > 1: | |
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) | |
sorted_indices_to_remove = tf.concat( | |
[ | |
tf.zeros_like(sorted_indices_to_remove[:, :min_tokens_to_keep]), | |
sorted_indices_to_remove[:, min_tokens_to_keep:], | |
], | |
-1, | |
) | |
# Shift the indices to the right to keep also the first token above the threshold | |
sorted_indices_to_remove = tf.concat( | |
[tf.zeros_like(sorted_indices_to_remove[:, :1]), sorted_indices_to_remove[:, :-1]], | |
-1, | |
) | |
# scatter sorted tensors to original indexing | |
indices_to_remove = scatter_values_on_batch_indices(sorted_indices_to_remove, sorted_indices) | |
logits = set_tensor_by_indices_to_value(logits, indices_to_remove, filter_value) | |
return logits | |
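# Illustrative example for tf_top_k_top_p_filtering above (hypothetical values): for logits
# [[1.0, 2.0, 3.0, 4.0]] with top_k=2, the two lowest-scoring tokens are set to filter_value,
# leaving only tokens 2 and 3 as candidates; with top_p=0.9 instead, tokens are sorted by
# probability and removed once the cumulative softmax mass exceeds 0.9, except that the first token
# crossing the threshold is kept because sorted_indices_to_remove is shifted right by one position.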
def scatter_values_on_batch_indices(values, batch_indices): | |
shape = shape_list(batch_indices) | |
# broadcast batch dim to shape | |
broad_casted_batch_dims = tf.reshape(tf.broadcast_to(tf.expand_dims(tf.range(shape[0]), axis=-1), shape), [1, -1]) | |
# transform batch_indices to pair_indices | |
pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0)) | |
# scatter values to pair indices | |
return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), shape) | |
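# Worked example for scatter_values_on_batch_indices above (hypothetical values):
# scatter_values_on_batch_indices(tf.constant([[10, 20, 30]]), tf.constant([[2, 0, 1]])) returns
# [[20, 30, 10]] -- each value is written back to the column named by batch_indices, which is
# exactly what is needed to undo the per-row argsort used in tf_top_k_top_p_filtering.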
def set_tensor_by_indices_to_value(tensor, indices, value): | |
# create value_tensor since tensor value assignment is not possible in TF | |
value_tensor = tf.zeros_like(tensor) + value | |
return tf.where(indices, value_tensor, tensor) | |
def sample_without_replacement(logits, num_samples): | |
""" | |
Categorical sampling without replacement is currently not implemented; the Gumbel-max trick will do for now. See
https://github.com/tensorflow/tensorflow/issues/9260 for more info.
""" | |
# Gumbel(0, 1) noise: g = -log(-log(u)) with u ~ Uniform(0, 1)
z = -tf.math.log(-tf.math.log(tf.random.uniform(shape_list(logits), 0, 1)))
_, indices = tf.nn.top_k(logits + z, num_samples) | |
return indices | |
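# Illustrative sketch of the Gumbel-max trick used above: if u ~ Uniform(0, 1), then
# g = -log(-log(u)) is Gumbel(0, 1) noise and argmax(logits + g) is distributed like a single draw
# from softmax(logits); taking the top num_samples entries of the perturbed logits therefore yields
# num_samples distinct indices, i.e. sampling without replacement.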
def shape_list(x): | |
"""Deal with dynamic shape in tensorflow cleanly.""" | |
static = x.shape.as_list() | |
dynamic = tf.shape(x) | |
return [dynamic[i] if s is None else s for i, s in enumerate(static)] | |
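# Illustrative example for shape_list above (hypothetical values): for a tensor whose static shape
# is (None, 128) -- e.g. a batch of hidden states with a dynamic batch dimension -- shape_list(x)
# returns [tf.shape(x)[0], 128], so static dimensions stay plain Python ints while the unknown batch
# size remains a dynamic tensor that can still be used in reshapes.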
class BeamHypotheses(object): | |
def __init__(self, num_beams, max_length, length_penalty, early_stopping): | |
""" | |
Initialize n-best list of hypotheses. | |
""" | |
self.max_length = max_length - 1 # ignoring bos_token | |
self.length_penalty = length_penalty | |
self.early_stopping = early_stopping | |
self.num_beams = num_beams | |
self.beams = [] | |
self.worst_score = 1e9 | |
def __len__(self): | |
""" | |
Number of hypotheses in the list. | |
""" | |
return len(self.beams) | |
def add(self, hyp, sum_logprobs): | |
""" | |
Add a new hypothesis to the list. | |
""" | |
score = sum_logprobs / len(hyp) ** self.length_penalty | |
if len(self) < self.num_beams or score > self.worst_score: | |
self.beams.append((score, hyp)) | |
if len(self) > self.num_beams: | |
sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)]) | |
del self.beams[sorted_scores[0][1]] | |
self.worst_score = sorted_scores[1][0] | |
else: | |
self.worst_score = min(score, self.worst_score) | |
def is_done(self, best_sum_logprobs, cur_len): | |
""" | |
If there are enough hypotheses and none of the hypotheses being generated can become better than the worst
one in the heap, then we are done with this sentence.
""" | |
if len(self) < self.num_beams: | |
return False | |
elif self.early_stopping: | |
return True | |
else: | |
cur_score = best_sum_logprobs / cur_len ** self.length_penalty | |
ret = self.worst_score >= cur_score | |
return ret | |
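# Illustrative sketch of the BeamHypotheses bookkeeping above (hypothetical values): with
# length_penalty = 1.0, a finished hypothesis of 5 tokens with sum_logprobs = -4.0 is stored with
# score -4.0 / 5 = -0.8; once num_beams hypotheses are stored, is_done compares the worst stored
# score against best_sum_logprobs / cur_len ** length_penalty (the best score a still-open beam
# could reach at the current length) and declares the sentence done when no improvement is possible,
# or immediately when early_stopping is True.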