code | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|
def decode_spans(
start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray
) -> Tuple:
"""
Take the output of any `ModelForQuestionAnswering` and generate probabilities for each span to be the actual
answer.
In addition, it filters out some unwanted/impossible cases, such as an answer longer than max_answer_len or an
answer whose end position comes before its start position. The method supports returning the k best answers
through the topk argument.
Args:
start (`np.ndarray`): Individual start probabilities for each token.
end (`np.ndarray`): Individual end probabilities for each token.
topk (`int`): Indicates how many possible answer span(s) to extract from the model output.
max_answer_len (`int`): Maximum size of the answer to extract from the model's output.
undesired_tokens (`np.ndarray`): Mask determining tokens that can be part of the answer
"""
# Ensure we have batch axis
if start.ndim == 1:
start = start[None]
if end.ndim == 1:
end = end[None]
# Compute the score of each tuple(start, end) to be the real answer
outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))
# Remove candidate with end < start and end - start > max_answer_len
candidates = np.tril(np.triu(outer), max_answer_len - 1)
# Inspired by Chen & al. (https://github.com/facebookresearch/DrQA)
scores_flat = candidates.flatten()
if topk == 1:
idx_sort = [np.argmax(scores_flat)]
elif len(scores_flat) < topk:
idx_sort = np.argsort(-scores_flat)
else:
idx = np.argpartition(-scores_flat, topk)[0:topk]
idx_sort = idx[np.argsort(-scores_flat[idx])]
starts, ends = np.unravel_index(idx_sort, candidates.shape)[1:]
desired_spans = np.isin(starts, undesired_tokens.nonzero()) & np.isin(ends, undesired_tokens.nonzero())
starts = starts[desired_spans]
ends = ends[desired_spans]
scores = candidates[0, starts, ends]
return starts, ends, scores
|
decode_spans
|
python
|
huggingface/transformers
|
src/transformers/pipelines/question_answering.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/question_answering.py
|
Apache-2.0
|
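The span-scoring trick in `decode_spans` is easier to see on a tiny example. The following is a minimal NumPy sketch, with made-up probabilities, of the same outer-product plus `triu`/`tril` filtering the function performs; it is an illustration, not part of the pipeline code.

```python
import numpy as np

# Illustrative start/end probabilities for a 4-token context (batch of 1).
start = np.array([[0.1, 0.6, 0.2, 0.1]])
end = np.array([[0.1, 0.1, 0.7, 0.1]])
max_answer_len = 2

# outer[0, i, j] = start[i] * end[j] scores every (start, end) pair.
outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))
# Keep only spans with end >= start (triu) and length <= max_answer_len (tril).
candidates = np.tril(np.triu(outer), max_answer_len - 1)

best = np.unravel_index(np.argmax(candidates), candidates.shape)
print(best[1], best[2], candidates[best])  # -> best span is tokens 1..2 with score ~0.42
```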
def select_starts_ends(
start,
end,
p_mask,
attention_mask,
min_null_score=1000000,
top_k=1,
handle_impossible_answer=False,
max_answer_len=15,
):
"""
Takes the raw output of any `ModelForQuestionAnswering` and first normalizes its outputs and then uses
`decode_spans()` to generate probabilities for each span to be the actual answer.
Args:
start (`np.ndarray`): Individual start logits for each token.
end (`np.ndarray`): Individual end logits for each token.
p_mask (`np.ndarray`): A mask with 1 for values that cannot be in the answer
attention_mask (`np.ndarray`): The attention mask generated by the tokenizer
min_null_score (`float`): The minimum null (empty) answer score seen so far.
top_k (`int`): Indicates how many possible answer span(s) to extract from the model output.
handle_impossible_answer (`bool`): Whether to allow null (empty) answers.
max_answer_len (`int`): Maximum size of the answer to extract from the model's output.
"""
# Ensure padded tokens & question tokens cannot belong to the set of candidate answers.
undesired_tokens = np.abs(np.array(p_mask) - 1)
if attention_mask is not None:
undesired_tokens = undesired_tokens & attention_mask
# Generate mask
undesired_tokens_mask = undesired_tokens == 0.0
# Make sure non-context indexes in the tensor cannot contribute to the softmax
start = np.where(undesired_tokens_mask, -10000.0, start)
end = np.where(undesired_tokens_mask, -10000.0, end)
# Normalize logits and spans to retrieve the answer
start = np.exp(start - start.max(axis=-1, keepdims=True))
start = start / start.sum()
end = np.exp(end - end.max(axis=-1, keepdims=True))
end = end / end.sum()
if handle_impossible_answer:
min_null_score = min(min_null_score, (start[0, 0] * end[0, 0]).item())
# Mask CLS
start[0, 0] = end[0, 0] = 0.0
starts, ends, scores = decode_spans(start, end, top_k, max_answer_len, undesired_tokens)
return starts, ends, scores, min_null_score
|
select_starts_ends
|
python
|
huggingface/transformers
|
src/transformers/pipelines/question_answering.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/question_answering.py
|
Apache-2.0
|
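Before spans are decoded, `select_starts_ends` masks out question and padding tokens and turns the logits into probabilities. Below is a minimal sketch of that masking and softmax step, using illustrative logits for a hypothetical 4-token sequence; it only mirrors the masking logic, it is not the pipeline itself.

```python
import numpy as np

start_logits = np.array([[2.0, 0.5, 3.0, 1.0]])  # illustrative start logits
p_mask = np.array([[1, 0, 0, 0]])                # 1 = token cannot be part of the answer

undesired_tokens = np.abs(p_mask - 1)            # 1 = token allowed in the answer
masked = np.where(undesired_tokens == 0, -10000.0, start_logits)

# Numerically stable softmax: the masked token ends up with ~0 probability.
probs = np.exp(masked - masked.max(axis=-1, keepdims=True))
probs = probs / probs.sum()
print(probs.round(3))  # -> approximately [[0. 0.067 0.821 0.111]]
```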
def create_sample(
question: Union[str, List[str]], context: Union[str, List[str]]
) -> Union[SquadExample, List[SquadExample]]:
"""
QuestionAnsweringPipeline leverages the [`SquadExample`] internally. This helper method encapsulates all the
logic for converting question(s) and context(s) to [`SquadExample`].
We currently support extractive question answering.
Arguments:
question (`str` or `List[str]`): The question(s) asked.
context (`str` or `List[str]`): The context(s) in which we will look for the answer.
Returns:
One or a list of [`SquadExample`]: The corresponding [`SquadExample`] grouping question and context.
"""
if isinstance(question, list):
return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]
else:
return SquadExample(None, question, context, None, None, None)
|
create_sample
|
python
|
huggingface/transformers
|
src/transformers/pipelines/question_answering.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/question_answering.py
|
Apache-2.0
|
def __call__(self, *args, **kwargs):
"""
Answer the question(s) given as inputs by using the context(s).
Args:
question (`str` or `List[str]`):
One or several question(s) (must be used in conjunction with the `context` argument).
context (`str` or `List[str]`):
One or several context(s) associated with the question(s) (must be used in conjunction with the
`question` argument).
top_k (`int`, *optional*, defaults to 1):
The number of answers to return (will be chosen by order of likelihood). Note that we return fewer than
top_k answers if there are not enough options available within the context.
doc_stride (`int`, *optional*, defaults to 128):
If the context is too long to fit with the question for the model, it will be split in several chunks
with some overlap. This argument controls the size of that overlap.
max_answer_len (`int`, *optional*, defaults to 15):
The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
max_seq_len (`int`, *optional*, defaults to 384):
The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
model. The context will be split in several chunks (using `doc_stride` as overlap) if needed.
max_question_len (`int`, *optional*, defaults to 64):
The maximum length of the question after tokenization. It will be truncated if needed.
handle_impossible_answer (`bool`, *optional*, defaults to `False`):
Whether or not we accept impossible as an answer.
align_to_words (`bool`, *optional*, defaults to `True`):
Attempts to align the answer to real words. Improves quality on space separated languages. Might hurt on
non-space-separated languages (like Japanese or Chinese)
Return:
A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:
- **score** (`float`) -- The probability associated to the answer.
- **start** (`int`) -- The character start index of the answer (in the tokenized version of the input).
- **end** (`int`) -- The character end index of the answer (in the tokenized version of the input).
- **answer** (`str`) -- The answer to the question.
"""
# Convert inputs to features
if args:
warnings.warn(
"Passing a list of SQuAD examples to the pipeline is deprecated and will be removed in v5. Inputs should be passed using the `question` and `context` keyword arguments instead.",
FutureWarning,
)
examples = self._args_parser(*args, **kwargs)
if isinstance(examples, (list, tuple)) and len(examples) == 1:
return super().__call__(examples[0], **kwargs)
return super().__call__(examples, **kwargs)
|
__call__
|
python
|
huggingface/transformers
|
src/transformers/pipelines/question_answering.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/question_answering.py
|
Apache-2.0
|
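For context, here is a hedged usage sketch of the question-answering pipeline `__call__` documented above. The checkpoint name is only an illustrative choice, and the printed output shows the expected shape rather than a guaranteed result.

```python
from transformers import pipeline

# Any extractive QA checkpoint works here; this one is just an example.
qa = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")

result = qa(
    question="Where do I live?",
    context="My name is Sarah and I live in London.",
)
print(result)
# e.g. {'score': 0.99..., 'start': 31, 'end': 37, 'answer': 'London'}
```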
def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]:
"""
When decoding from token probabilities, this method maps token indexes to the actual words in the initial context.
Args:
text (`str`): The actual context to extract the answer from.
start (`int`): The answer starting token index.
end (`int`): The answer end token index.
Returns:
Dictionary like `{'answer': str, 'start': int, 'end': int}`
"""
words = []
token_idx = char_start_idx = char_end_idx = chars_idx = 0
for i, word in enumerate(text.split(" ")):
token = self.tokenizer.tokenize(word)
# Append words if they are in the span
if start <= token_idx <= end:
if token_idx == start:
char_start_idx = chars_idx
if token_idx == end:
char_end_idx = chars_idx + len(word)
words += [word]
# Stop if we went over the end of the answer
if token_idx > end:
break
# Append the subtokenization length to the running index
token_idx += len(token)
chars_idx += len(word) + 1
# Join text with spaces
return {
"answer": " ".join(words),
"start": max(0, char_start_idx),
"end": min(len(text), char_end_idx),
}
|
span_to_answer
|
python
|
huggingface/transformers
|
src/transformers/pipelines/question_answering.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/question_answering.py
|
Apache-2.0
|
def sequential_inference(self, **inputs):
"""
Inference used for models that need to process sequences in a sequential fashion, like the SQA models which
handle conversational queries related to a table.
"""
if self.framework == "pt":
all_logits = []
all_aggregations = []
prev_answers = None
batch_size = inputs["input_ids"].shape[0]
input_ids = inputs["input_ids"].to(self.device)
attention_mask = inputs["attention_mask"].to(self.device)
token_type_ids = inputs["token_type_ids"].to(self.device)
token_type_ids_example = None
for index in range(batch_size):
# If sequences have already been processed, the token type IDs will be created according to the previous
# answer.
if prev_answers is not None:
prev_labels_example = token_type_ids_example[:, 3] # shape (seq_len,)
model_labels = np.zeros_like(prev_labels_example.cpu().numpy()) # shape (seq_len,)
token_type_ids_example = token_type_ids[index] # shape (seq_len, 7)
for i in range(model_labels.shape[0]):
segment_id = token_type_ids_example[:, 0].tolist()[i]
col_id = token_type_ids_example[:, 1].tolist()[i] - 1
row_id = token_type_ids_example[:, 2].tolist()[i] - 1
if row_id >= 0 and col_id >= 0 and segment_id == 1:
model_labels[i] = int(prev_answers[(col_id, row_id)])
token_type_ids_example[:, 3] = torch.from_numpy(model_labels).type(torch.long).to(self.device)
input_ids_example = input_ids[index]
attention_mask_example = attention_mask[index] # shape (seq_len,)
token_type_ids_example = token_type_ids[index] # shape (seq_len, 7)
outputs = self.model(
input_ids=input_ids_example.unsqueeze(0),
attention_mask=attention_mask_example.unsqueeze(0),
token_type_ids=token_type_ids_example.unsqueeze(0),
)
logits = outputs.logits
if self.aggregate:
all_aggregations.append(outputs.logits_aggregation)
all_logits.append(logits)
dist_per_token = torch.distributions.Bernoulli(logits=logits)
probabilities = dist_per_token.probs * attention_mask_example.type(torch.float32).to(
dist_per_token.probs.device
)
coords_to_probs = collections.defaultdict(list)
for i, p in enumerate(probabilities.squeeze().tolist()):
segment_id = token_type_ids_example[:, 0].tolist()[i]
col = token_type_ids_example[:, 1].tolist()[i] - 1
row = token_type_ids_example[:, 2].tolist()[i] - 1
if col >= 0 and row >= 0 and segment_id == 1:
coords_to_probs[(col, row)].append(p)
prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs}
logits_batch = torch.cat(tuple(all_logits), 0)
return (logits_batch,) if not self.aggregate else (logits_batch, torch.cat(tuple(all_aggregations), 0))
else:
all_logits = []
all_aggregations = []
prev_answers = None
batch_size = inputs["input_ids"].shape[0]
input_ids = inputs["input_ids"]
attention_mask = inputs["attention_mask"]
token_type_ids = inputs["token_type_ids"].numpy()
token_type_ids_example = None
for index in range(batch_size):
# If sequences have already been processed, the token type IDs will be created according to the previous
# answer.
if prev_answers is not None:
prev_labels_example = token_type_ids_example[:, 3] # shape (seq_len,)
model_labels = np.zeros_like(prev_labels_example, dtype=np.int32) # shape (seq_len,)
token_type_ids_example = token_type_ids[index] # shape (seq_len, 7)
for i in range(model_labels.shape[0]):
segment_id = token_type_ids_example[:, 0].tolist()[i]
col_id = token_type_ids_example[:, 1].tolist()[i] - 1
row_id = token_type_ids_example[:, 2].tolist()[i] - 1
if row_id >= 0 and col_id >= 0 and segment_id == 1:
model_labels[i] = int(prev_answers[(col_id, row_id)])
token_type_ids_example[:, 3] = model_labels
input_ids_example = input_ids[index]
attention_mask_example = attention_mask[index] # shape (seq_len,)
token_type_ids_example = token_type_ids[index] # shape (seq_len, 7)
outputs = self.model(
input_ids=np.expand_dims(input_ids_example, axis=0),
attention_mask=np.expand_dims(attention_mask_example, axis=0),
token_type_ids=np.expand_dims(token_type_ids_example, axis=0),
)
logits = outputs.logits
if self.aggregate:
all_aggregations.append(outputs.logits_aggregation)
all_logits.append(logits)
probabilities = tf.math.sigmoid(tf.cast(logits, tf.float32)) * tf.cast(
attention_mask_example, tf.float32
)
coords_to_probs = collections.defaultdict(list)
for i, p in enumerate(tf.squeeze(probabilities).numpy().tolist()):
segment_id = token_type_ids_example[:, 0].tolist()[i]
col = token_type_ids_example[:, 1].tolist()[i] - 1
row = token_type_ids_example[:, 2].tolist()[i] - 1
if col >= 0 and row >= 0 and segment_id == 1:
coords_to_probs[(col, row)].append(p)
prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs}
logits_batch = tf.concat(tuple(all_logits), 0)
return (logits_batch,) if not self.aggregate else (logits_batch, tf.concat(tuple(all_aggregations), 0))
|
sequential_inference
|
python
|
huggingface/transformers
|
src/transformers/pipelines/table_question_answering.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/table_question_answering.py
|
Apache-2.0
|
def __call__(self, *args, **kwargs):
r"""
Answers queries according to a table. The pipeline accepts several types of inputs which are detailed below:
- `pipeline(table, query)`
- `pipeline(table, [query])`
- `pipeline(table=table, query=query)`
- `pipeline(table=table, query=[query])`
- `pipeline({"table": table, "query": query})`
- `pipeline({"table": table, "query": [query]})`
- `pipeline([{"table": table, "query": query}, {"table": table, "query": query}])`
The `table` argument should be a dict or a DataFrame built from that dict, containing the whole table:
Example:
```python
data = {
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
}
```
This dictionary can be passed in as such, or can be converted to a pandas DataFrame:
Example:
```python
import pandas as pd
table = pd.DataFrame.from_dict(data)
```
Args:
table (`pd.DataFrame` or `Dict`):
Pandas DataFrame or dictionary that will be converted to a DataFrame containing all the table values.
See above for an example of dictionary.
query (`str` or `List[str]`):
Query or list of queries that will be sent to the model alongside the table.
sequential (`bool`, *optional*, defaults to `False`):
Whether to do inference sequentially or as a batch. Batching is faster, but models like SQA require the
inference to be done sequentially to extract relations within sequences, given their conversational
nature.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`TapasTruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length`
or to the maximum acceptable input length for the model if that argument is not provided. This will
truncate row by row, removing rows from the table.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
Return:
A dictionary or a list of dictionaries containing results: Each result is a dictionary with the following
keys:
- **answer** (`str`) -- The answer of the query given the table. If there is an aggregator, the answer will
be preceded by `AGGREGATOR >`.
- **coordinates** (`List[Tuple[int, int]]`) -- Coordinates of the cells of the answers.
- **cells** (`List[str]`) -- List of strings made up of the answer cell values.
- **aggregator** (`str`) -- If the model has an aggregator, this returns the aggregator.
"""
pipeline_inputs = self._args_parser(*args, **kwargs)
results = super().__call__(pipeline_inputs, **kwargs)
if len(results) == 1:
return results[0]
return results
|
__call__
|
python
|
huggingface/transformers
|
src/transformers/pipelines/table_question_answering.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/table_question_answering.py
|
Apache-2.0
|
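A hedged usage sketch of the table-question-answering `__call__` above, reusing the example table from the docstring. The TAPAS checkpoint is just an illustrative choice and the shown output is indicative only.

```python
import pandas as pd
from transformers import pipeline

data = {
    "actors": ["brad pitt", "leonardo di caprio", "george clooney"],
    "age": ["56", "45", "59"],
    "number of movies": ["87", "53", "69"],
}
table = pd.DataFrame.from_dict(data)

# Illustrative checkpoint; any TAPAS-style table QA model can be used.
tqa = pipeline("table-question-answering", model="google/tapas-base-finetuned-wtq")
print(tqa(table=table, query="how many movies does george clooney have?"))
# e.g. {'answer': 'AVERAGE > 69', 'coordinates': [(2, 2)], 'cells': ['69'], 'aggregator': 'AVERAGE'}
```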
def __call__(self, *args, **kwargs):
r"""
Generate the output text(s) using text(s) given as inputs.
Args:
args (`str` or `List[str]`):
Input text for the encoder.
return_tensors (`bool`, *optional*, defaults to `False`):
Whether or not to include the tensors of predictions (as token indices) in the outputs.
return_text (`bool`, *optional*, defaults to `True`):
Whether or not to include the decoded texts in the outputs.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the potential extra spaces in the text output.
truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`):
The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE`
(default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's
max_length instead of throwing an error down the line.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate method
corresponding to your framework [here](./text_generation)).
Return:
A list or a list of lists of `dict`: Each result comes as a dictionary with the following keys:
- **generated_text** (`str`, present when `return_text=True`) -- The generated text.
- **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
ids of the generated text.
"""
result = super().__call__(*args, **kwargs)
if (
isinstance(args[0], list)
and all(isinstance(el, str) for el in args[0])
and all(len(res) == 1 for res in result)
):
return [res[0] for res in result]
return result
|
__call__
|
python
|
huggingface/transformers
|
src/transformers/pipelines/text2text_generation.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/text2text_generation.py
|
Apache-2.0
|
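A hedged usage sketch of the text2text-generation `__call__` above; `t5-small` is only one example of an encoder-decoder checkpoint, and the translation shown is indicative.

```python
from transformers import pipeline

generator = pipeline("text2text-generation", model="t5-small")  # illustrative checkpoint
outputs = generator("translate English to German: The house is wonderful.", max_length=40)
print(outputs)
# e.g. [{'generated_text': 'Das Haus ist wunderbar.'}]
```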
def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
"""
Checks whether there might be something wrong with given input with regard to the model.
"""
if max_length < min_length:
logger.warning(f"Your min_length={min_length} must be less than your max_length={max_length}.")
if input_length < max_length:
logger.warning(
f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length // 2})"
)
|
check_inputs
|
python
|
huggingface/transformers
|
src/transformers/pipelines/text2text_generation.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/text2text_generation.py
|
Apache-2.0
|
def __call__(self, inputs, **kwargs):
"""
Classify the text(s) given as inputs.
Args:
inputs (`str` or `List[str]` or `Dict[str]`, or `List[Dict[str]]`):
One or several texts to classify. In order to use text pairs for your classification, you can send a
dictionary containing `{"text", "text_pair"}` keys, or a list of those.
top_k (`int`, *optional*, defaults to `1`):
How many results to return.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different
values: `"default"`, `"sigmoid"`, `"softmax"` and `"none"`.
If this argument is not specified (or set to `"default"`), the function is chosen according to the
problem type and the number of labels:
- If the problem type is regression, no function is applied to the output.
- If the model has a single label, the sigmoid function is applied to the output.
- If the model has several labels, the softmax function is applied to the output.
The other possible values are:
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
Return:
A list or a list of lists of `dict`: Each result comes as a list of dictionaries with the following keys:
- **label** (`str`) -- The label predicted.
- **score** (`float`) -- The corresponding probability.
If `top_k` is used, one such dictionary is returned per label.
"""
inputs = (inputs,)
result = super().__call__(*inputs, **kwargs)
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_legacy = "top_k" not in kwargs
if isinstance(inputs[0], str) and _legacy:
# This pipeline is odd, and returns a list when a single item is run
return [result]
else:
return result
|
__call__
|
python
|
huggingface/transformers
|
src/transformers/pipelines/text_classification.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/text_classification.py
|
Apache-2.0
|
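A hedged usage sketch of the text-classification `__call__` above; the SST-2 checkpoint is only an example, and the scores shown are indicative.

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification", model="distilbert-base-uncased-finetuned-sst-2-english"  # illustrative checkpoint
)
print(classifier("I love this movie!"))
# e.g. [{'label': 'POSITIVE', 'score': 0.999...}]
print(classifier("I love this movie!", top_k=2))
# e.g. [{'label': 'POSITIVE', 'score': 0.999...}, {'label': 'NEGATIVE', 'score': 0.000...}]
```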
def __call__(self, inputs: Union[str, List[str]], **kwargs):
"""
Classify each token of the text(s) given as inputs.
Args:
inputs (`str` or `List[str]`):
One or several texts (or one list of texts) for token classification.
Return:
A list or a list of lists of `dict`: Each result comes as a list of dictionaries (one for each token in the
corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy) with
the following keys:
- **word** (`str`) -- The token/word classified. This is obtained by decoding the selected tokens. If you
want to have the exact string in the original sentence, use `start` and `end`.
- **score** (`float`) -- The corresponding probability for `entity`.
- **entity** (`str`) -- The entity predicted for that token/word (it is named *entity_group* when
*aggregation_strategy* is not `"none"`).
- **index** (`int`, only present when `aggregation_strategy="none"`) -- The index of the corresponding
token in the sentence.
- **start** (`int`, *optional*) -- The index of the start of the corresponding entity in the sentence. Only
exists if the offsets are available within the tokenizer
- **end** (`int`, *optional*) -- The index of the end of the corresponding entity in the sentence. Only
exists if the offsets are available within the tokenizer
"""
_inputs, offset_mapping = self._args_parser(inputs, **kwargs)
if offset_mapping:
kwargs["offset_mapping"] = offset_mapping
return super().__call__(inputs, **kwargs)
|
__call__
|
python
|
huggingface/transformers
|
src/transformers/pipelines/token_classification.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/token_classification.py
|
Apache-2.0
|
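A hedged usage sketch of the token-classification `__call__` above with word aggregation enabled; the NER checkpoint is only an illustrative choice and the character offsets shown assume a fast tokenizer.

```python
from transformers import pipeline

ner = pipeline(
    "token-classification", model="dslim/bert-base-NER", aggregation_strategy="simple"  # illustrative checkpoint
)
print(ner("My name is Wolfgang and I live in Berlin."))
# e.g. [{'entity_group': 'PER', 'word': 'Wolfgang', 'score': ..., 'start': 11, 'end': 19},
#       {'entity_group': 'LOC', 'word': 'Berlin', 'score': ..., 'start': 34, 'end': 40}]
```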
def gather_pre_entities(
self,
sentence: str,
input_ids: np.ndarray,
scores: np.ndarray,
offset_mapping: Optional[List[Tuple[int, int]]],
special_tokens_mask: np.ndarray,
aggregation_strategy: AggregationStrategy,
) -> List[dict]:
"""Fuse various numpy arrays into dicts with all the information needed for aggregation"""
pre_entities = []
for idx, token_scores in enumerate(scores):
# Filter special_tokens
if special_tokens_mask[idx]:
continue
word = self.tokenizer.convert_ids_to_tokens(int(input_ids[idx]))
if offset_mapping is not None:
start_ind, end_ind = offset_mapping[idx]
if not isinstance(start_ind, int):
if self.framework == "pt":
start_ind = start_ind.item()
end_ind = end_ind.item()
word_ref = sentence[start_ind:end_ind]
if getattr(self.tokenizer, "_tokenizer", None) and getattr(
self.tokenizer._tokenizer.model, "continuing_subword_prefix", None
):
# This is a BPE, word aware tokenizer, there is a correct way
# to fuse tokens
is_subword = len(word) != len(word_ref)
else:
# This is a fallback heuristic. It will most likely fail on any kind of text + punctuation mixture
# that gets treated as a "word". Non-word-aware models cannot do better than this, unfortunately.
if aggregation_strategy in {
AggregationStrategy.FIRST,
AggregationStrategy.AVERAGE,
AggregationStrategy.MAX,
}:
warnings.warn(
"Tokenizer does not support real words, using fallback heuristic",
UserWarning,
)
is_subword = start_ind > 0 and " " not in sentence[start_ind - 1 : start_ind + 1]
if int(input_ids[idx]) == self.tokenizer.unk_token_id:
word = word_ref
is_subword = False
else:
start_ind = None
end_ind = None
is_subword = False
pre_entity = {
"word": word,
"scores": token_scores,
"start": start_ind,
"end": end_ind,
"index": idx,
"is_subword": is_subword,
}
pre_entities.append(pre_entity)
return pre_entities
|
gather_pre_entities
|
python
|
huggingface/transformers
|
src/transformers/pipelines/token_classification.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/token_classification.py
|
Apache-2.0
|
def aggregate_words(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]:
"""
Override tokens from a given word that disagree to force agreement on word boundaries.
Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft|
company| B-ENT I-ENT
"""
if aggregation_strategy in {
AggregationStrategy.NONE,
AggregationStrategy.SIMPLE,
}:
raise ValueError("NONE and SIMPLE strategies are invalid for word aggregation")
word_entities = []
word_group = None
for entity in entities:
if word_group is None:
word_group = [entity]
elif entity["is_subword"]:
word_group.append(entity)
else:
word_entities.append(self.aggregate_word(word_group, aggregation_strategy))
word_group = [entity]
# Last item
if word_group is not None:
word_entities.append(self.aggregate_word(word_group, aggregation_strategy))
return word_entities
|
aggregate_words
|
python
|
huggingface/transformers
|
src/transformers/pipelines/token_classification.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/token_classification.py
|
Apache-2.0
|
def group_sub_entities(self, entities: List[dict]) -> dict:
"""
Group together the adjacent tokens with the same entity predicted.
Args:
entities (`List[dict]`): The entities predicted by the pipeline.
"""
# Get the first entity in the entity group
entity = entities[0]["entity"].split("-", 1)[-1]
scores = np.nanmean([entity["score"] for entity in entities])
tokens = [entity["word"] for entity in entities]
entity_group = {
"entity_group": entity,
"score": np.mean(scores),
"word": self.tokenizer.convert_tokens_to_string(tokens),
"start": entities[0]["start"],
"end": entities[-1]["end"],
}
return entity_group
|
group_sub_entities
|
python
|
huggingface/transformers
|
src/transformers/pipelines/token_classification.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/token_classification.py
|
Apache-2.0
|
def group_entities(self, entities: List[dict]) -> List[dict]:
"""
Find and group together the adjacent tokens with the same entity predicted.
Args:
entities (`List[dict]`): The entities predicted by the pipeline.
"""
entity_groups = []
entity_group_disagg = []
for entity in entities:
if not entity_group_disagg:
entity_group_disagg.append(entity)
continue
# If the current entity is similar and adjacent to the previous entity,
# append it to the disaggregated entity group
# The split is meant to account for the "B" and "I" prefixes
# Shouldn't merge if both entities are B-type
bi, tag = self.get_tag(entity["entity"])
last_bi, last_tag = self.get_tag(entity_group_disagg[-1]["entity"])
if tag == last_tag and bi != "B":
# Modify subword type to be previous_type
entity_group_disagg.append(entity)
else:
# If the current entity is different from the previous entity
# aggregate the disaggregated entity group
entity_groups.append(self.group_sub_entities(entity_group_disagg))
entity_group_disagg = [entity]
if entity_group_disagg:
# it's the last entity, add it to the entity groups
entity_groups.append(self.group_sub_entities(entity_group_disagg))
return entity_groups
|
group_entities
|
python
|
huggingface/transformers
|
src/transformers/pipelines/token_classification.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/token_classification.py
|
Apache-2.0
|
def __call__(self, inputs: Optional[Union[str, List[str]]] = None, **kwargs):
"""
Assign labels to the video(s) passed as inputs.
Args:
inputs (`str`, `List[str]`):
The pipeline handles two types of videos:
- A string containing a http link pointing to a video
- A string containing a local path to a video
The pipeline accepts either a single video or a batch of videos, which must then all be passed as strings.
Videos in a batch must all be in the same format: all as http links or all as local paths.
top_k (`int`, *optional*, defaults to 5):
The number of top labels that will be returned by the pipeline. If the provided number is higher than
the number of labels available in the model configuration, it will default to the number of labels.
num_frames (`int`, *optional*, defaults to `self.model.config.num_frames`):
The number of frames sampled from the video to run the classification on. If not provided, will default
to the number of frames specified in the model configuration.
frame_sampling_rate (`int`, *optional*, defaults to 1):
The sampling rate used to select frames from the video. If not provided, will default to 1, i.e. every
frame will be used.
function_to_apply(`str`, *optional*, defaults to "softmax"):
The function to apply to the model output. By default, the pipeline will apply the softmax function to
the output of the model. Valid options: ["softmax", "sigmoid", "none"]. Note that passing Python's
built-in `None` will default to "softmax", so you need to pass the string "none" to disable any
post-processing.
Return:
A list of dictionaries or a list of lists of dictionaries containing the result. If the input is a single video,
will return a list of `top_k` dictionaries, if the input is a list of several videos, will return a list of lists of
`top_k` dictionaries corresponding to the videos.
The dictionaries contain the following keys:
- **label** (`str`) -- The label identified by the model.
- **score** (`float`) -- The score attributed by the model to that label.
"""
# Once this deprecation is complete, remove the default `None` value for `inputs`
if "videos" in kwargs:
warnings.warn(
"The `videos` argument has been renamed to `inputs`. In version 5 of Transformers, `videos` will no longer be accepted",
FutureWarning,
)
inputs = kwargs.pop("videos")
if inputs is None:
raise ValueError("Cannot call the video-classification pipeline without an inputs argument!")
return super().__call__(inputs, **kwargs)
|
__call__
|
python
|
huggingface/transformers
|
src/transformers/pipelines/video_classification.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/video_classification.py
|
Apache-2.0
|
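A hedged usage sketch of the video-classification `__call__` above; both the VideoMAE checkpoint and the video path are placeholders, not defaults.

```python
from transformers import pipeline

video_classifier = pipeline(
    "video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics"  # illustrative checkpoint
)
predictions = video_classifier("path/or/url/to/video.mp4", top_k=3)  # placeholder input
for prediction in predictions:
    print(prediction["label"], prediction["score"])
```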
def __call__(
self,
image: Union["Image.Image", str, List["Image.Image"], List[str], "KeyDataset"],
question: Optional[Union[str, List[str]]] = None,
**kwargs,
):
r"""
Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed
below:
- `pipeline(image=image, question=question)`
- `pipeline({"image": image, "question": question})`
- `pipeline([{"image": image, "question": question}])`
- `pipeline([{"image": image, "question": question}, {"image": image, "question": question}])`
Args:
image (`str`, `List[str]`, `PIL.Image`, `List[PIL.Image]` or `KeyDataset`):
The pipeline handles three types of images:
- A string containing a http link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images. If given a single image, it can be
broadcasted to multiple questions.
When a dataset is passed in, it must be of type `transformers.pipelines.pt_utils.KeyDataset`.
Example:
```python
>>> from transformers.pipelines.pt_utils import KeyDataset
>>> from datasets import load_dataset
>>> dataset = load_dataset("detection-datasets/coco")
>>> oracle(image=KeyDataset(dataset, "image"), question="What's in this image?")
```
question (`str`, `List[str]`):
The question(s) asked. If given a single question, it can be broadcasted to multiple images.
If multiple images and questions are given, each and every question will be broadcasted to all images
(same effect as a Cartesian product)
top_k (`int`, *optional*, defaults to 5):
The number of top labels that will be returned by the pipeline. If the provided number is higher than
the number of labels available in the model configuration, it will default to the number of labels.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A dictionary or a list of dictionaries containing the result. The dictionaries contain the following keys:
- **label** (`str`) -- The label identified by the model.
- **score** (`float`) -- The score attributed by the model to that label.
"""
is_dataset = isinstance(image, KeyDataset)
is_image_batch = isinstance(image, list) and all(isinstance(item, (Image.Image, str)) for item in image)
is_question_batch = isinstance(question, list) and all(isinstance(item, str) for item in question)
if isinstance(image, (Image.Image, str)) and isinstance(question, str):
inputs = {"image": image, "question": question}
elif (is_image_batch or is_dataset) and isinstance(question, str):
inputs = [{"image": im, "question": question} for im in image]
elif isinstance(image, (Image.Image, str)) and is_question_batch:
inputs = [{"image": image, "question": q} for q in question]
elif (is_image_batch or is_dataset) and is_question_batch:
question_image_pairs = []
for q in question:
for im in image:
question_image_pairs.append({"image": im, "question": q})
inputs = question_image_pairs
else:
"""
Supports the following format
- {"image": image, "question": question}
- [{"image": image, "question": question}]
- Generator and datasets
"""
inputs = image
results = super().__call__(inputs, **kwargs)
return results
|
__call__
|
python
|
huggingface/transformers
|
src/transformers/pipelines/visual_question_answering.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/visual_question_answering.py
|
Apache-2.0
|
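A hedged usage sketch of the visual-question-answering `__call__` above; the ViLT checkpoint and the image path are placeholders, and the output shape is indicative.

```python
from transformers import pipeline

oracle = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")  # illustrative checkpoint
print(oracle(image="path/or/url/to/image.jpg", question="What animal is in the picture?", top_k=2))
# Prints the top 2 predicted answers with their scores.
```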
def _parse_and_tokenize(
self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
):
"""
Parse arguments and tokenize with `only_first` truncation so that the hypothesis (label) is not truncated.
"""
return_tensors = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`"
)
self.tokenizer.pad_token = self.tokenizer.eos_token
try:
inputs = self.tokenizer(
sequence_pairs,
add_special_tokens=add_special_tokens,
return_tensors=return_tensors,
padding=padding,
truncation=truncation,
)
except Exception as e:
if "too short" in str(e):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
inputs = self.tokenizer(
sequence_pairs,
add_special_tokens=add_special_tokens,
return_tensors=return_tensors,
padding=padding,
truncation=TruncationStrategy.DO_NOT_TRUNCATE,
)
else:
raise e
return inputs
|
_parse_and_tokenize
|
python
|
huggingface/transformers
|
src/transformers/pipelines/zero_shot_classification.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/zero_shot_classification.py
|
Apache-2.0
|
def __call__(
self,
sequences: Union[str, List[str]],
*args,
**kwargs,
):
"""
Classify the sequence(s) given as inputs. See the [`ZeroShotClassificationPipeline`] documentation for more
information.
Args:
sequences (`str` or `List[str]`):
The sequence(s) to classify, will be truncated if the model input is too large.
candidate_labels (`str` or `List[str]`):
The set of possible class labels to classify each sequence into. Can be a single label, a string of
comma-separated labels, or a list of labels.
hypothesis_template (`str`, *optional*, defaults to `"This example is {}."`):
The template used to turn each label into an NLI-style hypothesis. This template must include a {} or
similar syntax for the candidate label to be inserted into the template. For example, the default
template is `"This example is {}."` With the candidate label `"sports"`, this would be fed into the
model like `"<cls> sequence to classify <sep> This example is sports . <sep>"`. The default template
works well in many cases, but it may be worthwhile to experiment with different templates depending on
the task setting.
multi_label (`bool`, *optional*, defaults to `False`):
Whether or not multiple candidate labels can be true. If `False`, the scores are normalized such that
the sum of the label likelihoods for each sequence is 1. If `True`, the labels are considered
independent and probabilities are normalized for each candidate by doing a softmax of the entailment
score vs. the contradiction score.
Return:
A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:
- **sequence** (`str`) -- The sequence for which this is the output.
- **labels** (`List[str]`) -- The labels sorted by order of likelihood.
- **scores** (`List[float]`) -- The probabilities for each of the labels.
"""
if len(args) == 0:
pass
elif len(args) == 1 and "candidate_labels" not in kwargs:
kwargs["candidate_labels"] = args[0]
else:
raise ValueError(f"Unable to understand extra arguments {args}")
return super().__call__(sequences, **kwargs)
|
__call__
|
python
|
huggingface/transformers
|
src/transformers/pipelines/zero_shot_classification.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/zero_shot_classification.py
|
Apache-2.0
|
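A minimal usage sketch for the `__call__` above; the checkpoint name is an illustrative assumption (any NLI-style model works):
```python
from transformers import pipeline

# Instantiate the zero-shot classification pipeline with an NLI checkpoint (assumed here).
classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")

result = classifier(
    "The new GPU doubled our training throughput.",
    candidate_labels=["technology", "cooking", "politics"],
    hypothesis_template="This example is about {}.",
    multi_label=False,
)
# `result` is a dict with "sequence", "labels" (sorted by likelihood) and "scores".
print(result["labels"][0], result["scores"][0])
```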
def __call__(self, image: Union[str, List[str], "Image", List["Image"]] = None, **kwargs):
"""
Assign labels to the image(s) passed as inputs.
Args:
image (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
The pipeline handles three types of images:
- A string containing a http link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
candidate_labels (`List[str]`):
The candidate labels for this image. They will be formatted using *hypothesis_template*.
hypothesis_template (`str`, *optional*, defaults to `"This is a photo of {}"`):
The format used in conjunction with *candidate_labels* to attempt the image classification by
replacing the placeholder with the candidate_labels. Pass "{}" if *candidate_labels* are
already formatted.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A list of dictionaries containing one entry per proposed label. Each dictionary contains the
following keys:
- **label** (`str`) -- One of the suggested *candidate_labels*.
- **score** (`float`) -- The score attributed by the model to that label. It is a value between
0 and 1, computed as the `softmax` of `logits_per_image`.
"""
# After deprecation of this is completed, remove the default `None` value for `image`
if "images" in kwargs:
image = kwargs.pop("images")
if image is None:
raise ValueError("Cannot call the zero-shot-image-classification pipeline without an images argument!")
return super().__call__(image, **kwargs)
|
Assign labels to the image(s) passed as inputs.
Args:
image (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
The pipeline handles three types of images:
- A string containing a http link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
candidate_labels (`List[str]`):
The candidate labels for this image. They will be formatted using *hypothesis_template*.
hypothesis_template (`str`, *optional*, defaults to `"This is a photo of {}"`):
The format used in conjunction with *candidate_labels* to attempt the image classification by
replacing the placeholder with the candidate_labels. Pass "{}" if *candidate_labels* are
already formatted.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A list of dictionaries containing one entry per proposed label. Each dictionary contains the
following keys:
- **label** (`str`) -- One of the suggested *candidate_labels*.
- **score** (`float`) -- The score attributed by the model to that label. It is a value between
0 and 1, computed as the `softmax` of `logits_per_image`.
|
__call__
|
python
|
huggingface/transformers
|
src/transformers/pipelines/zero_shot_image_classification.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/zero_shot_image_classification.py
|
Apache-2.0
|
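A minimal usage sketch for the zero-shot image classification `__call__` above; the CLIP checkpoint and image URL are illustrative assumptions:
```python
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")

preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["a photo of cats", "a photo of a dog"],
    hypothesis_template="{}",  # labels above are already fully formatted
)
# `preds` is a list of {"label": ..., "score": ...} dicts, one per candidate label.
print(preds)
```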
def __call__(
self,
image: Union[str, "Image.Image", List[Dict[str, Any]]],
candidate_labels: Optional[Union[str, List[str]]] = None,
**kwargs,
):
"""
Detect objects (bounding boxes & classes) in the image(s) passed as inputs.
Args:
image (`str`, `PIL.Image` or `List[Dict[str, Any]]`):
The pipeline handles three types of images:
- A string containing an http url pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
You can use this parameter to send directly a list of images, or a dataset or a generator like so:
```python
>>> from transformers import pipeline
>>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
>>> detector(
... [
... {
... "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
... "candidate_labels": ["cat", "couch"],
... },
... {
... "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
... "candidate_labels": ["cat", "couch"],
... },
... ]
... )
[[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.25, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}], [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]]
```
candidate_labels (`str` or `List[str]` or `List[List[str]]`):
What the model should recognize in the image.
threshold (`float`, *optional*, defaults to 0.1):
The probability necessary to make a prediction.
top_k (`int`, *optional*, defaults to None):
The number of top predictions that will be returned by the pipeline. If the provided number is `None`
or higher than the number of predictions available, it will default to the number of predictions.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A list of lists containing prediction results, one list per input image. Each list contains dictionaries
with the following keys:
- **label** (`str`) -- Text query corresponding to the found object.
- **score** (`float`) -- Score corresponding to the object (between 0 and 1).
- **box** (`Dict[str,int]`) -- Bounding box of the detected object in image's original size. It is a
dictionary with `x_min`, `x_max`, `y_min`, `y_max` keys.
"""
if "text_queries" in kwargs:
candidate_labels = kwargs.pop("text_queries")
if isinstance(image, (str, Image.Image)):
inputs = {"image": image, "candidate_labels": candidate_labels}
elif isinstance(image, (list, tuple)) and valid_images(image):
return list(
super().__call__(
({"image": img, "candidate_labels": labels} for img, labels in zip(image, candidate_labels)),
**kwargs,
)
)
else:
"""
Supports the following format
- {"image": image, "candidate_labels": candidate_labels}
- [{"image": image, "candidate_labels": candidate_labels}]
- Generator and datasets
This is a common pattern in other multimodal pipelines, so we support it here as well.
"""
inputs = image
results = super().__call__(inputs, **kwargs)
return results
|
Detect objects (bounding boxes & classes) in the image(s) passed as inputs.
Args:
image (`str`, `PIL.Image` or `List[Dict[str, Any]]`):
The pipeline handles three types of images:
- A string containing an http url pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
You can use this parameter to send directly a list of images, or a dataset or a generator like so:
```python
>>> from transformers import pipeline
>>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
>>> detector(
... [
... {
... "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
... "candidate_labels": ["cat", "couch"],
... },
... {
... "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
... "candidate_labels": ["cat", "couch"],
... },
... ]
... )
[[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.25, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}], [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]]
```
candidate_labels (`str` or `List[str]` or `List[List[str]]`):
What the model should recognize in the image.
threshold (`float`, *optional*, defaults to 0.1):
The probability necessary to make a prediction.
top_k (`int`, *optional*, defaults to None):
The number of top predictions that will be returned by the pipeline. If the provided number is `None`
or higher than the number of predictions available, it will default to the number of predictions.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A list of lists containing prediction results, one list per input image. Each list contains dictionaries
with the following keys:
- **label** (`str`) -- Text query corresponding to the found object.
- **score** (`float`) -- Score corresponding to the object (between 0 and 1).
- **box** (`Dict[str,int]`) -- Bounding box of the detected object in image's original size. It is a
dictionary with `x_min`, `x_max`, `y_min`, `y_max` keys.
|
__call__
|
python
|
huggingface/transformers
|
src/transformers/pipelines/zero_shot_object_detection.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/zero_shot_object_detection.py
|
Apache-2.0
|
def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
"""
        Turns list [xmin, ymin, xmax, ymax] into dict { "xmin": xmin, ... }
Args:
box (`torch.Tensor`): Tensor containing the coordinates in corners format.
Returns:
bbox (`Dict[str, int]`): Dict containing the coordinates in corners format.
"""
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
xmin, ymin, xmax, ymax = box.int().tolist()
bbox = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
|
Turns list [xmin, ymin, xmax, ymax] into dict { "xmin": xmin, ... }
Args:
box (`torch.Tensor`): Tensor containing the coordinates in corners format.
Returns:
bbox (`Dict[str, int]`): Dict containing the coordinates in corners format.
|
_get_bounding_box
|
python
|
huggingface/transformers
|
src/transformers/pipelines/zero_shot_object_detection.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/zero_shot_object_detection.py
|
Apache-2.0
|
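A tiny standalone sketch of the conversion performed by `_get_bounding_box`, using an assumed box tensor in corners format:
```python
import torch

box = torch.tensor([12.7, 30.2, 215.9, 340.1])  # [xmin, ymin, xmax, ymax]
xmin, ymin, xmax, ymax = box.int().tolist()
print({"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax})
# {'xmin': 12, 'ymin': 30, 'xmax': 215, 'ymax': 340}
```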
def merge_quantization_configs(
cls,
quantization_config: Union[dict, QuantizationConfigMixin],
quantization_config_from_args: Optional[QuantizationConfigMixin],
):
"""
handles situations where both quantization_config from args and quantization_config from model config are present.
"""
if quantization_config_from_args is not None:
warning_msg = (
"You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading"
" already has a `quantization_config` attribute. The `quantization_config` from the model will be used."
)
else:
warning_msg = ""
if isinstance(quantization_config, dict):
# Convert the config based on the type of quantization_config_from_args (e.g., AutoRoundConfig), which takes priority before automatic configuration dispatch.
if isinstance(quantization_config_from_args, AutoRoundConfig):
quantization_config = AutoRoundConfig.from_dict(quantization_config)
else:
quantization_config = AutoQuantizationConfig.from_dict(quantization_config)
if (
isinstance(
quantization_config, (GPTQConfig, AwqConfig, AutoRoundConfig, FbgemmFp8Config, CompressedTensorsConfig)
)
and quantization_config_from_args is not None
):
# special case for GPTQ / AWQ / FbgemmFp8 config collision
loading_attr_dict = quantization_config_from_args.get_loading_attributes()
for attr, val in loading_attr_dict.items():
setattr(quantization_config, attr, val)
warning_msg += f"However, loading attributes (e.g. {list(loading_attr_dict.keys())}) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored."
if warning_msg != "":
warnings.warn(warning_msg)
return quantization_config
|
handles situations where both quantization_config from args and quantization_config from model config are present.
|
merge_quantization_configs
|
python
|
huggingface/transformers
|
src/transformers/quantizers/auto.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/auto.py
|
Apache-2.0
|
def get_special_dtypes_update(self, model, torch_dtype: "torch.dtype") -> Dict[str, "torch.dtype"]:
"""
returns dtypes for modules that are not quantized - used for the computation of the device_map in case
one passes a str as a device_map. The method will use the `modules_to_not_convert` that is modified
in `_process_model_before_weight_loading`.
Args:
model (`~transformers.PreTrainedModel`):
The model to quantize
torch_dtype (`torch.dtype`):
The dtype passed in `from_pretrained` method.
"""
return {
name: torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in self.modules_to_not_convert)
}
|
returns dtypes for modules that are not quantized - used for the computation of the device_map in case
one passes a str as a device_map. The method will use the `modules_to_not_convert` that is modified
in `_process_model_before_weight_loading`.
Args:
model (`~transformers.PreTrainedModel`):
The model to quantize
torch_dtype (`torch.dtype`):
The dtype passed in `from_pretrained` method.
|
get_special_dtypes_update
|
python
|
huggingface/transformers
|
src/transformers/quantizers/base.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/base.py
|
Apache-2.0
|
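A standalone sketch of the filtering logic in `get_special_dtypes_update`: parameters whose names match `modules_to_not_convert` keep the requested dtype. The toy model and the module list are assumptions for illustration:
```python
import torch
from torch import nn

model = nn.ModuleDict({"lm_head": nn.Linear(8, 8), "layer0": nn.Linear(8, 8)})
modules_to_not_convert = ["lm_head"]

special_dtypes = {
    name: torch.float16
    for name, _ in model.named_parameters()
    if any(m in name for m in modules_to_not_convert)
}
print(special_dtypes)  # {'lm_head.weight': torch.float16, 'lm_head.bias': torch.float16}
```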
def check_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
state_dict: Dict[str, Any],
**kwargs,
) -> bool:
"""
        checks if a loaded state_dict component is part of a quantized param, plus some validation; only defined if
        requires_parameters_quantization == True, for quantization methods that require creating new parameters
        for quantization.
"""
return False
|
checks if a loaded state_dict component is part of a quantized param, plus some validation; only defined if
requires_parameters_quantization == True, for quantization methods that require creating new parameters
for quantization.
|
check_quantized_param
|
python
|
huggingface/transformers
|
src/transformers/quantizers/base.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/base.py
|
Apache-2.0
|
def create_quantized_param(self, *args, **kwargs) -> "torch.nn.Parameter":
"""
takes needed components from state_dict and creates quantized param; only applicable if
requires_parameters_quantization == True
"""
if not self.requires_parameters_quantization:
raise AttributeError(
f"`.create_quantized_param()` method is not supported by quantizer class {self.__class__.__name__}."
)
|
takes needed components from state_dict and creates quantized param; only applicable if
requires_parameters_quantization == True
|
create_quantized_param
|
python
|
huggingface/transformers
|
src/transformers/quantizers/base.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/base.py
|
Apache-2.0
|
def preprocess_model(self, model: "PreTrainedModel", **kwargs):
"""
Setting model attributes and/or converting model before weights loading. At this point
the model should be initialized on the meta device so you can freely manipulate the skeleton
of the model in order to replace modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`.
Args:
model (`~transformers.PreTrainedModel`):
The model to quantize
kwargs (`dict`, *optional*):
The keyword arguments that are passed along `_process_model_before_weight_loading`.
"""
model.is_quantized = True
model.quantization_method = self.quantization_config.quant_method
if self.pre_quantized:
self._convert_model_for_quantization(model)
return self._process_model_before_weight_loading(model, **kwargs)
|
Setting model attributes and/or converting model before weights loading. At this point
the model should be initialized on the meta device so you can freely manipulate the skeleton
of the model in order to replace modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`.
Args:
model (`~transformers.PreTrainedModel`):
The model to quantize
kwargs (`dict`, *optional*):
The keyword arguments that are passed along `_process_model_before_weight_loading`.
|
preprocess_model
|
python
|
huggingface/transformers
|
src/transformers/quantizers/base.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/base.py
|
Apache-2.0
|
def dequantize(self, model):
"""
Potentially dequantize the model to retrieve the original model, with some loss in accuracy / performance.
Note not all quantization schemes support this.
"""
model = self._dequantize(model)
# Delete quantizer and quantization config
del model.hf_quantizer
del model.config.quantization_config
del model.config._pre_quantization_dtype
model.is_quantized = False
return model
|
Potentially dequantize the model to retrieve the original model, with some loss in accuracy / performance.
Note not all quantization schemes support this.
|
dequantize
|
python
|
huggingface/transformers
|
src/transformers/quantizers/base.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/base.py
|
Apache-2.0
|
def get_cuda_warm_up_factor(self):
"""
The factor to be used in `caching_allocator_warmup` to get the number of bytes to pre-allocate to warm up cuda.
A factor of 2 means we allocate all bytes in the empty model (since we allocate in fp16), a factor of 4 means
we allocate half the memory of the weights residing in the empty model, etc...
"""
# By default we return 4, i.e. half the model size (this corresponds to the case where the model is not
# really pre-processed, i.e. we do not have the info that weights are going to be 8 bits before actual
# weight loading)
return 4
|
The factor to be used in `caching_allocator_warmup` to get the number of bytes to pre-allocate to warm up cuda.
A factor of 2 means we allocate all bytes in the empty model (since we allocate in fp16), a factor of 4 means
we allocate half the memory of the weights residing in the empty model, etc...
|
get_cuda_warm_up_factor
|
python
|
huggingface/transformers
|
src/transformers/quantizers/base.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/base.py
|
Apache-2.0
|
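A back-of-the-envelope sketch of what the factor means, assuming the relationship implied by the docstring (allocation = empty-model byte count * 2 / factor) and an assumed 7B-parameter fp16 model:
```python
byte_count = 7_000_000_000 * 2  # 7B parameters stored in fp16 -> ~14 GB of weights
for factor in (2, 4, 8):
    print(f"factor {factor}: ~{byte_count * 2 / factor / 1e9:.1f} GB pre-allocated")
# factor 2: ~14.0 GB, factor 4: ~7.0 GB, factor 8: ~3.5 GB
```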
def is_qat_trainable(self) -> bool:
"""Flag indicating whether the quantized model can carry out quantization aware training"""
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
|
Flag indicating whether the quantized model can carry out quantization aware training
|
is_qat_trainable
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_bitnet.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_bitnet.py
|
Apache-2.0
|
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
state_dict: Dict[str, Any],
unexpected_keys: Optional[List[str]] = None,
):
"""
combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device()
"""
import bitsandbytes as bnb
module, tensor_name = get_module_from_name(model, param_name)
if tensor_name not in module._parameters:
raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
old_value = getattr(module, tensor_name)
# `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
if isinstance(target_device, int) and is_torch_npu_available():
target_device = f"npu:{target_device}"
if tensor_name == "bias":
if param_value is None:
new_value = old_value.to(target_device)
else:
new_value = param_value.to(target_device)
new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad)
module._parameters[tensor_name] = new_value
return
if not isinstance(module._parameters[tensor_name], bnb.nn.Params4bit):
raise ValueError("this function only loads `Linear4bit components`")
if (
old_value.device == torch.device("meta")
and target_device not in ["meta", torch.device("meta")]
and param_value is None
):
raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.")
# construct `new_value` for the module._parameters[tensor_name]:
if self.pre_quantized:
# 4bit loading. Collecting components for restoring quantized weight
# This can be expanded to make a universal call for any quantized weight loading
if not self.is_serializable:
raise ValueError(
"Detected int4 weights but the version of bitsandbytes is not compatible with int4 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
)
if (param_name + ".quant_state.bitsandbytes__fp4" not in state_dict) and (
param_name + ".quant_state.bitsandbytes__nf4" not in state_dict
):
raise ValueError(
f"Supplied state dict for {param_name} does not contain `bitsandbytes__*` and possibly other `quantized_stats` components."
)
quantized_stats = {}
for k, v in state_dict.items():
if param_name + "." in k:
quantized_stats[k] = v
if unexpected_keys is not None and k in unexpected_keys:
unexpected_keys.remove(k)
param_kwargs = {}
if self.is_bnb_supports_quant_storage_module:
param_kwargs["module"] = module
new_value = bnb.nn.Params4bit.from_prequantized(
data=param_value,
quantized_stats=quantized_stats,
requires_grad=False,
device=target_device,
**param_kwargs,
)
else:
new_value = param_value.to("cpu")
# Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls, Conv1D):
new_value = new_value.T
kwargs = old_value.__dict__
new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(target_device)
module._parameters[tensor_name] = new_value
|
combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device()
|
create_quantized_param
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_bnb_4bit.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_bnb_4bit.py
|
Apache-2.0
|
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
state_dict: Dict[str, Any],
unexpected_keys: Optional[List[str]] = None,
):
"""
combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device()
        needs aux items from the state dict; if found, removes them from unexpected_keys
"""
import bitsandbytes as bnb
fp16_statistics_key = param_name.replace("weight", "SCB")
fp16_weights_format_key = param_name.replace("weight", "weight_format")
fp16_statistics = state_dict.get(fp16_statistics_key, None)
fp16_weights_format = state_dict.get(fp16_weights_format_key, None)
module, tensor_name = get_module_from_name(model, param_name)
if tensor_name not in module._parameters:
raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
old_value = getattr(module, tensor_name)
if not isinstance(module._parameters[tensor_name], bnb.nn.Int8Params):
raise ValueError(f"Parameter `{tensor_name}` should only be a `bnb.nn.Int8Params` instance.")
if (
old_value.device == torch.device("meta")
and target_device not in ["meta", torch.device("meta")]
and param_value is None
):
raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.")
new_value = param_value.to("cpu")
if self.pre_quantized and not self.is_serializable():
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
)
# Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls, Conv1D):
if fp16_statistics is None:
new_value = new_value.T
kwargs = old_value.__dict__
new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(target_device)
module._parameters[tensor_name] = new_value
if fp16_statistics is not None:
setattr(module.weight, "SCB", fp16_statistics.to(target_device))
if unexpected_keys is not None:
unexpected_keys.remove(fp16_statistics_key)
# We just need to pop the `weight_format` keys from the state dict to remove unneeded
# messages. The correct format is correctly retrieved during the first forward pass.
if fp16_weights_format is not None and unexpected_keys is not None:
unexpected_keys.remove(fp16_weights_format_key)
|
combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device()
needs aux items from the state dict; if found, removes them from unexpected_keys
|
create_quantized_param
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_bnb_8bit.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_bnb_8bit.py
|
Apache-2.0
|
def update_missing_keys_after_loading(self, model, missing_keys: List[str], prefix: str) -> List[str]:
"""
Update missing keys after loading the model. This is necessary for compressed tensors
to load the model correctly. We expect weights to be present in missing keys.
        The weights are reconstructed by ModelCompressor in _process_model_after_weight_loading.
        This function cleans up the expected missing keys and returns the remaining missing keys.
"""
if self.run_compressed:
return missing_keys
# We expect some keys to be missing for
# compressed models
# This is fine as the weights are reconstructed by ModelCompressor
# in _process_model_after_weight_loading
expected_missing_keys = self.compressor.get_missing_module_keys(model)
return [
key for key in missing_keys if not any(re.match(f".*{pattern}", key) for pattern in expected_missing_keys)
]
|
Update missing keys after loading the model. This is necessary for compressed tensors
to load the model correctly. We expect weights to be present in missing keys.
The weights are reconstructed by ModelCompressor in _process_model_after_weight_loading.
This function cleans up the expected missing keys and returns the remaining missing keys.
|
update_missing_keys_after_loading
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_compressed_tensors.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_compressed_tensors.py
|
Apache-2.0
|
def update_unexpected_keys(self, model, unexpected_keys: List[str], prefix: str) -> List[str]:
"""
Override this method if you want to adjust the `unexpected_keys`.
Args:
unexpected_keys (`List[str]`, *optional*):
The list of unexpected keys in the checkpoint compared to the state dict of the model
"""
if self.run_compressed:
return unexpected_keys
# We expect some unexpected keys in model
# safetensors file for compressed models
keys_to_ignore = self.compressor.get_unexpected_file_keys(model)
return [key for key in unexpected_keys if not any(re.match(f".*{pattern}", key) for pattern in keys_to_ignore)]
|
Override this method if you want to adjust the `unexpected_keys`.
Args:
unexpected_keys (`List[str]`, *optional*):
The list of unexpected keys in the checkpoint compared to the state dict of the model
|
update_unexpected_keys
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_compressed_tensors.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_compressed_tensors.py
|
Apache-2.0
|
def _process_model_after_weight_loading(self, model, **kwargs):
"""Decompress loaded model if necessary - need for qat"""
if (
self.quantization_config.is_quantization_compressed and not self.run_compressed
) or self.quantization_config.is_sparsification_compressed:
config = kwargs.get("config", None)
cache_path = config._name_or_path
if not os.path.exists(cache_path):
from transformers.utils import cached_file
config_file_path = cached_file(cache_path, "config.json")
cache_path = os.path.sep.join(config_file_path.split(os.path.sep)[:-1])
if self.quantization_config.is_quantization_compressed and not self.run_compressed:
from compressed_tensors.quantization import QuantizationStatus
self.compressor.quantization_config.quantization_status = QuantizationStatus.FROZEN
self.compressor.decompress(model_path=cache_path, model=model)
|
Decompress the loaded model if necessary - needed for QAT
|
_process_model_after_weight_loading
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_compressed_tensors.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_compressed_tensors.py
|
Apache-2.0
|
def is_qat_trainable(self) -> bool:
"""Loaded Models can carry out quantization aware training"""
# models need to be decompressed carry out qat
return not self.run_compressed or not self.quantization_config.is_quantization_compressed
|
Loaded models can carry out quantization aware training
|
is_qat_trainable
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_compressed_tensors.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_compressed_tensors.py
|
Apache-2.0
|
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
state_dict: Dict[str, Any],
unexpected_keys: Optional[List[str]] = None,
):
"""
Quantizes weights to FP8 format using Block-wise quantization
"""
from ..modeling_utils import _load_parameter_into_model
param_value = param_value.to(target_device)
# Get FP8 min/max values
fp8_min = torch.finfo(torch.float8_e4m3fn).min
fp8_max = torch.finfo(torch.float8_e4m3fn).max
block_size_m, block_size_n = self.quantization_config.weight_block_size
rows, cols = param_value.shape[-2:]
if rows % block_size_m != 0 or cols % block_size_n != 0:
raise ValueError(
f"Matrix dimensions ({rows}, {cols}) must be divisible by block sizes ({block_size_m}, {block_size_n})"
)
param_value_orig_shape = param_value.shape
param_value = param_value.reshape(
-1, rows // block_size_m, block_size_m, cols // block_size_n, block_size_n
).permute(0, 1, 3, 2, 4)
# Calculate scaling factor for each block
max_abs = torch.amax(torch.abs(param_value), dim=(-1, -2))
scale = fp8_max / max_abs
scale_orig_shape = scale.shape
scale = scale.unsqueeze(-1).unsqueeze(-1)
# Quantize the weights
quantized_param = torch.clamp(param_value * scale, min=fp8_min, max=fp8_max).to(torch.float8_e4m3fn)
quantized_param = quantized_param.permute(0, 1, 3, 2, 4)
# Reshape back to matrix shape
quantized_param = quantized_param.reshape(param_value_orig_shape)
# Reshape scale to match the number of blocks
scale = scale.reshape(scale_orig_shape).squeeze().reciprocal()
# Load into the model
_load_parameter_into_model(model, param_name, quantized_param)
_load_parameter_into_model(model, param_name.rsplit(".", 1)[0] + ".weight_scale_inv", scale)
|
Quantizes weights to FP8 format using Block-wise quantization
|
create_quantized_param
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_finegrained_fp8.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_finegrained_fp8.py
|
Apache-2.0
|
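A minimal standalone sketch of the block-wise FP8 scheme above on a small 2D weight, assuming a PyTorch build with `torch.float8_e4m3fn` (>= 2.1); block sizes and shapes are illustrative:
```python
import torch

weight = torch.randn(128, 256)
block_m, block_n = 64, 128
fp8_max = torch.finfo(torch.float8_e4m3fn).max

rows, cols = weight.shape
# Split the matrix into (block_m, block_n) tiles and compute one scale per tile.
blocks = weight.reshape(rows // block_m, block_m, cols // block_n, block_n).permute(0, 2, 1, 3)
scale = fp8_max / blocks.abs().amax(dim=(-1, -2), keepdim=True)
# Quantize each tile, then restore the original matrix layout.
q = (blocks * scale).clamp(-fp8_max, fp8_max).to(torch.float8_e4m3fn)
q = q.permute(0, 2, 1, 3).reshape(rows, cols)
# The inverse scales are what the quantizer stores as `weight_scale_inv`.
scale_inv = scale.squeeze(-1).squeeze(-1).reciprocal()
print(q.shape, q.dtype, scale_inv.shape)  # torch.Size([128, 256]) torch.float8_e4m3fn torch.Size([2, 2])
```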
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
state_dict: Dict[str, Any],
unexpected_keys: List[str],
):
"""
Each nn.Linear layer is processed here.
        We first check if the corresponding module state_dict already contains HQQ-quantized parameters.
        If not, we create a temporary linear layer with the module state_dict params and use it for quantization.
"""
if is_hqq_available():
from hqq.core.quantize import HQQLinear
module, tensor_name = get_module_from_name(model, param_name)
layer_name = ".".join(param_name.split(".")[:-1])
parent_module = find_parent(model, layer_name)
node = layer_name.split(".")[-1]
if tensor_name == "bias":
# this should already be set
return
# set module state_dict
module_state_dict = {}
for k, v in state_dict.items():
if layer_name + "." in k:
module_state_dict[k.split(".")[-1]] = v
if unexpected_keys is not None and k in unexpected_keys:
unexpected_keys.remove(k)
if self.pre_quantized:
if isinstance(module, HQQLinear):
return
else:
hqq_layer = HQQLinear(
linear_layer=None,
quant_config=None,
compute_dtype=self.torch_dtype,
device=target_device,
del_orig=False,
)
hqq_layer.load_state_dict(module_state_dict)
if hqq_layer.bias is not None and isinstance(hqq_layer.bias, torch.Tensor):
hqq_layer.bias = torch.nn.Parameter(hqq_layer.bias)
if self.using_multi_gpu:
hqq_layer = self._patch_layer_for_multigpu(hqq_layer)
setattr(parent_module, node, hqq_layer)
# cleanup
del module.__dict__, module
torch.cuda.empty_cache()
return
# Step 1: populate module with weight/bias from module state dict
for key in module_state_dict:
setattr(module, key, torch.nn.Parameter(module_state_dict[key]))
        # Step 2: Replace module with either HQQLinear or move it to device. We do this via setattr on the parent,
        # as doing it on the module directly doesn't work.
quant_config = model.config.quantization_config["quant_config"]
skip_modules = model.config.quantization_config["skip_modules"]
module_tag = ".".join(module.name.split(".")[-2:])
module_quant_config = None
if "weight_quant_params" in quant_config:
module_quant_config = quant_config
elif module_tag in quant_config:
module_quant_config = quant_config[module_tag]
for skip_module in skip_modules:
if skip_module in module.name:
module_quant_config = None
break
if module_quant_config is not None:
hqq_layer = HQQLinear(
module,
quant_config=module_quant_config,
compute_dtype=self.torch_dtype,
device=target_device,
del_orig=True,
)
if hqq_layer.bias is not None and isinstance(hqq_layer.bias, torch.Tensor):
hqq_layer.bias = torch.nn.Parameter(hqq_layer.bias)
if self.using_multi_gpu:
hqq_layer = self._patch_layer_for_multigpu(hqq_layer)
setattr(parent_module, node, hqq_layer)
else:
module = module.to(dtype=self.torch_dtype, device=target_device)
setattr(parent_module, node, module)
torch.cuda.empty_cache()
|
Each nn.Linear layer is processed here.
We first check if the corresponding module state_dict already contains HQQ-quantized parameters.
If not, we create a temporary linear layer with the module state_dict params and use it for quantization.
|
create_quantized_param
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_hqq.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_hqq.py
|
Apache-2.0
|
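A hedged end-to-end sketch that triggers the HQQ path above for every `nn.Linear` during loading; the checkpoint name is an assumption and `hqq` must be installed:
```python
from transformers import AutoModelForCausalLM, HqqConfig

quant_config = HqqConfig(nbits=4, group_size=64)
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.2-1B",  # illustrative checkpoint
    quantization_config=quant_config,
    torch_dtype="auto",
    device_map="cuda:0",
)
```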
def check_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
state_dict: Dict[str, Any],
**kwargs,
) -> bool:
"""
Check if a parameter needs to be quantized.
"""
if is_optimum_quanto_available():
from optimum.quanto import QModuleMixin
device_map = kwargs.get("device_map", None)
param_device = kwargs.get("param_device", None)
# we don't quantize the model if the module is going to be offloaded to the cpu
if device_map is not None and param_device is not None:
device_map_values = set(device_map.values())
if param_device == "cpu" and len(device_map_values) > 1:
if not (device_map_values == {"cpu"} or device_map_values == {"cpu", "disk"}):
return False
module, tensor_name = get_module_from_name(model, param_name)
# We only quantize the weights and the bias is not quantized.
if isinstance(module, QModuleMixin) and "weight" in tensor_name:
# if the weights are quantized, don't need to recreate it again with `create_quantized_param`
return not module.frozen
else:
return False
|
Check if a parameter needs to be quantized.
|
check_quantized_param
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_quanto.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_quanto.py
|
Apache-2.0
|
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
*args,
**kwargs,
):
"""
Create the quantized parameter by calling .freeze() after setting it to the module.
"""
from accelerate.utils import set_module_tensor_to_device
set_module_tensor_to_device(model, param_name, target_device, param_value)
module, _ = get_module_from_name(model, param_name)
module.freeze()
module.weight.requires_grad = False
|
Create the quantized parameter by calling .freeze() after setting it to the module.
|
create_quantized_param
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_quanto.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_quanto.py
|
Apache-2.0
|
def fuzzy_match_size(config_name: str) -> Optional[str]:
"""
Extract the size digit from strings like "4weight", "8weight".
    Returns the digit as a string if found, otherwise None.
"""
config_name = config_name.lower()
str_match = re.search(r"(\d)weight", config_name)
if str_match:
return str_match.group(1)
return None
|
Extract the size digit from strings like "4weight", "8weight".
Returns the digit as a string if found, otherwise None.
|
fuzzy_match_size
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_torchao.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_torchao.py
|
Apache-2.0
|
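A quick illustration, assuming `fuzzy_match_size` is importable from `transformers.quantizers.quantizer_torchao`; note the match comes back as a string, not an int:
```python
from transformers.quantizers.quantizer_torchao import fuzzy_match_size

print(fuzzy_match_size("Int4WeightOnlyConfig"))  # '4'
print(fuzzy_match_size("Int8WeightOnlyConfig"))  # '8'
print(fuzzy_match_size("autoquant"))             # None
```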
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
state_dict: Dict[str, Any],
unexpected_keys: List[str],
):
"""
Each nn.Linear layer that needs to be quantized is processed here.
        First, we set the value of the weight tensor, then we move it to the target device. Finally, we quantize the module.
"""
if self.quantization_config.quant_type == "autoquant":
return
from torchao.quantization import quantize_
module, tensor_name = get_module_from_name(model, param_name)
if self.pre_quantized:
module._parameters[tensor_name] = torch.nn.Parameter(
param_value.to(device=target_device), requires_grad=param_value.requires_grad
)
if isinstance(module, nn.Linear):
module.extra_repr = types.MethodType(_linear_extra_repr, module)
else:
assert isinstance(self.quantization_config, TorchAoConfig)
module._parameters[tensor_name] = torch.nn.Parameter(
param_value, requires_grad=param_value.requires_grad
).to(device=target_device)
# if we are quantizing tied parameters, to avoid tying the quantized weights
# the correct order to do it is
# 1. load the weight to model
# 2. run tie_weights to populate the weights
# 3. quantize
input_embed = model.get_input_embeddings()
if self.quantization_config.untie_embedding_weights and id(module) == id(input_embed):
model.tie_weights()
setattr(model.config.get_text_config(decoder=True), "tie_word_embeddings", False)
# handle ModuleFqnToConfig, introduced in torchao 0.12.0+
if self.quantization_config._get_ao_version() >= version.Version("0.12.0"):
from torchao.quantization import ModuleFqnToConfig
config = self.quantization_config.get_apply_tensor_subclass()
if isinstance(config, ModuleFqnToConfig):
module_fqn, _ = param_name.rsplit(".", 1)
c = None
if module_fqn in config.module_fqn_to_config:
c = config.module_fqn_to_config[module_fqn]
else:
c = config.module_fqn_to_config.get("_default", None)
if c is not None:
# filter_fn: not filtering out any modules
quantize_(module, c, filter_fn=lambda x, fqn: True)
return
quantize_(module, self.quantization_config.get_apply_tensor_subclass())
|
Each nn.Linear layer that needs to be quantized is processed here.
First, we set the value of the weight tensor, then we move it to the target device. Finally, we quantize the module.
|
create_quantized_param
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_torchao.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_torchao.py
|
Apache-2.0
|
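A hedged usage sketch that exercises this torchao path during `from_pretrained`; the checkpoint name is an assumption and `torchao` must be installed:
```python
from transformers import AutoModelForCausalLM, TorchAoConfig

quant_config = TorchAoConfig("int8_weight_only")
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.2-1B",  # illustrative checkpoint
    quantization_config=quant_config,
    torch_dtype="auto",
    device_map="auto",
)
```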
def _process_model_after_weight_loading(self, model, **kwargs):
"""No process required for torchao quantized model"""
if self.quantization_config.quant_type == "autoquant":
from torchao import autoquant
from torchao.quantization import ALL_AUTOQUANT_CLASS_LIST
model = torch.compile(model, mode="max-autotune")
model = autoquant(
model,
qtensor_class_list=ALL_AUTOQUANT_CLASS_LIST,
set_inductor_config=False,
**self.quantization_config.quant_type_kwargs,
)
return model
return
|
No process required for torchao quantized model
|
_process_model_after_weight_loading
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_torchao.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_torchao.py
|
Apache-2.0
|
def get_cuda_warm_up_factor(self):
"""
This factor is used in caching_allocator_warmup to determine how many bytes to pre-allocate for CUDA warmup.
- A factor of 2 means we pre-allocate the full memory footprint of the model.
- A factor of 4 means we pre-allocate half of that, and so on
However, when using TorchAO, calculating memory usage with param.numel() * param.element_size() doesn't give the correct size for quantized weights (like int4 or int8)
That's because TorchAO internally represents quantized tensors using subtensors and metadata, and the reported element_size() still corresponds to the torch_dtype
not the actual bit-width of the quantized data.
To correct for this:
- Use a division factor of 8 for int4 weights
- Use a division factor of 4 for int8 weights
"""
if self.quantization_config._get_ao_version() > version.Version("0.9.0"):
from torchao.core.config import AOBaseConfig
quant_type = self.quantization_config.quant_type
# For autoquant case, it will be treated in the string implementation below in map_to_target_dtype
if isinstance(quant_type, AOBaseConfig):
# Extract size digit using fuzzy match on the class name
config_name = quant_type.__class__.__name__
size_digit = fuzzy_match_size(config_name)
if size_digit == "4":
return 8
else:
return 4
# Original mapping for non-AOBaseConfig types
map_to_target_dtype = {
"int4_weight_only": 8,
"int8_weight_only": 4,
"int8_dynamic_activation_int8_weight": 4,
"autoquant": 4,
}
return map_to_target_dtype[self.quantization_config.quant_type]
|
This factor is used in caching_allocator_warmup to determine how many bytes to pre-allocate for CUDA warmup.
- A factor of 2 means we pre-allocate the full memory footprint of the model.
- A factor of 4 means we pre-allocate half of that, and so on
However, when using TorchAO, calculating memory usage with param.numel() * param.element_size() doesn't give the correct size for quantized weights (like int4 or int8)
That's because TorchAO internally represents quantized tensors using subtensors and metadata, and the reported element_size() still corresponds to the torch_dtype
not the actual bit-width of the quantized data.
To correct for this:
- Use a division factor of 8 for int4 weights
- Use a division factor of 4 for int8 weights
|
get_cuda_warm_up_factor
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_torchao.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_torchao.py
|
Apache-2.0
|
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: Optional[List[str]] = None,
**kwargs,
):
"""
        we don't have a param like modules_to_not_convert to indicate which layers should not be quantized,
        because `quantization_config` includes the layers that should be quantized
"""
from ..integrations import replace_with_vptq_linear
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
replace_with_vptq_linear(
model,
quantization_config=self.quantization_config,
modules_to_not_convert=self.modules_to_not_convert,
)
model.config.quantization_config = self.quantization_config
|
we don't have a param like modules_to_not_convert to indicate which layers should not be quantized,
because `quantization_config` includes the layers that should be quantized
|
_process_model_before_weight_loading
|
python
|
huggingface/transformers
|
src/transformers/quantizers/quantizer_vptq.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/quantizers/quantizer_vptq.py
|
Apache-2.0
|
def equalize_indent(docstring, indent_level):
"""
Adjust the indentation of a docstring to match the specified indent level.
"""
# fully dedent the docstring
docstring = "\n".join([line.lstrip() for line in docstring.splitlines()])
return textwrap.indent(docstring, " " * indent_level)
|
Adjust the indentation of a docstring to match the specified indent level.
|
equalize_indent
|
python
|
huggingface/transformers
|
src/transformers/utils/args_doc.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/args_doc.py
|
Apache-2.0
|
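A minimal check of `equalize_indent`'s behavior (re-declared here so the snippet runs on its own): every line is fully dedented, then re-indented uniformly to the target level:
```python
import textwrap

def equalize_indent(docstring, indent_level):
    docstring = "\n".join(line.lstrip() for line in docstring.splitlines())
    return textwrap.indent(docstring, " " * indent_level)

doc = "    Args:\n            x (int): a value."
print(equalize_indent(doc, 8))
#         Args:
#         x (int): a value.
```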
def parse_docstring(docstring, max_indent_level=0):
"""
Parse the docstring to extract the Args section and return it as a dictionary.
The docstring is expected to be in the format:
Args:
arg1 (type): Description of arg1.
arg2 (type): Description of arg2.
# This function will also return the remaining part of the docstring after the Args section.
Returns:/Example:
...
"""
match = re.search(r"(?m)^([ \t]*)(?=Example|Return)", docstring)
if match:
remainder_docstring = docstring[match.start() :]
docstring = docstring[: match.start()]
else:
remainder_docstring = ""
args_pattern = re.compile(r"(?:Args:)(\n.*)?(\n)?$", re.DOTALL)
args_match = args_pattern.search(docstring)
# still try to find args description in the docstring, if args are not preceded by "Args:"
args_section = args_match.group(1).lstrip("\n") if args_match else docstring
if args_section.split("\n")[-1].strip() == '"""':
args_section = "\n".join(args_section.split("\n")[:-1])
if args_section.split("\n")[0].strip() == 'r"""' or args_section.split("\n")[0].strip() == '"""':
args_section = "\n".join(args_section.split("\n")[1:])
args_section = set_min_indent(args_section, 0)
params = {}
if args_section:
param_pattern = re.compile(
# |--- Group 1 ---|| Group 2 ||- Group 3 -||---------- Group 4 ----------|
rf"^\s{{0,{max_indent_level}}}(\w+)\s*\(\s*([^, \)]*)(\s*.*?)\s*\)\s*:\s*((?:(?!\n^\s{{0,{max_indent_level}}}\w+\s*\().)*)",
re.DOTALL | re.MULTILINE,
)
for match in param_pattern.finditer(args_section):
param_name = match.group(1)
param_type = match.group(2)
# param_type = match.group(2).replace("`", "")
additional_info = match.group(3)
optional = "optional" in additional_info
shape = parse_shape(additional_info)
default = parse_default(additional_info)
param_description = match.group(4).strip()
# set first line of param_description to 4 spaces:
param_description = re.sub(r"^", " " * 4, param_description, 1)
param_description = f"\n{param_description}"
params[param_name] = {
"type": param_type,
"description": param_description,
"optional": optional,
"shape": shape,
"default": default,
"additional_info": additional_info,
}
if params and remainder_docstring:
remainder_docstring = "\n" + remainder_docstring
remainder_docstring = set_min_indent(remainder_docstring, 0)
return params, remainder_docstring
|
Parse the docstring to extract the Args section and return it as a dictionary.
The docstring is expected to be in the format:
Args:
arg1 (type): Description of arg1.
arg2 (type): Description of arg2.
# This function will also return the remaining part of the docstring after the Args section.
Returns:/Example:
...
|
parse_docstring
|
python
|
huggingface/transformers
|
src/transformers/utils/args_doc.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/args_doc.py
|
Apache-2.0
|
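A usage sketch, assuming `parse_docstring` is importable from `transformers.utils.args_doc`:
```python
from transformers.utils.args_doc import parse_docstring

doc = (
    "Args:\n"
    "    input_ids (`torch.LongTensor`, *optional*):\n"
    "        Indices of input sequence tokens.\n"
    "Example:\n"
    "    >>> pass\n"
)
params, remainder = parse_docstring(doc)
print(list(params))                     # expected: ['input_ids']
print(params["input_ids"]["optional"])  # expected: True
print(remainder)                        # the Example section, returned separately
```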
def contains_type(type_hint, target_type) -> Tuple[bool, Optional[object]]:
"""
Check if a "nested" type hint contains a specific target type,
return the first-level type containing the target_type if found.
"""
args = get_args(type_hint)
if args == ():
try:
return issubclass(type_hint, target_type), type_hint
except Exception as _:
return issubclass(type(type_hint), target_type), type_hint
found_type_tuple = [contains_type(arg, target_type)[0] for arg in args]
found_type = any(found_type_tuple)
if found_type:
type_hint = args[found_type_tuple.index(True)]
return found_type, type_hint
|
Check if a "nested" type hint contains a specific target type,
return the first-level type containing the target_type if found.
|
contains_type
|
python
|
huggingface/transformers
|
src/transformers/utils/args_doc.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/args_doc.py
|
Apache-2.0
|
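A usage sketch for `contains_type` (import path assumed): checking whether a nested return annotation contains a `ModelOutput` subclass, as done when building the Returns section:
```python
from typing import Optional, Tuple

from transformers.modeling_outputs import BaseModelOutput
from transformers.utils import ModelOutput
from transformers.utils.args_doc import contains_type

found, hint = contains_type(Optional[Tuple[int, BaseModelOutput]], ModelOutput)
print(found)  # True
print(hint)   # the first-level hint containing ModelOutput, here Tuple[int, BaseModelOutput]
```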
def get_placeholders_dict(placeholders: List, model_name: str) -> dict:
"""
Get the dictionary of placeholders for the given model name.
"""
# import here to avoid circular import
from transformers.models import auto as auto_module
placeholders_dict = {}
for placeholder in placeholders:
# Infer placeholders from the model name and the auto modules
if placeholder in PLACEHOLDER_TO_AUTO_MODULE:
place_holder_value = getattr(
getattr(auto_module, PLACEHOLDER_TO_AUTO_MODULE[placeholder][0]),
PLACEHOLDER_TO_AUTO_MODULE[placeholder][1],
)[model_name]
if isinstance(place_holder_value, (list, tuple)):
place_holder_value = place_holder_value[0]
placeholders_dict[placeholder] = place_holder_value
return placeholders_dict
|
Get the dictionary of placeholders for the given model name.
|
get_placeholders_dict
|
python
|
huggingface/transformers
|
src/transformers/utils/args_doc.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/args_doc.py
|
Apache-2.0
|
def format_args_docstring(args, model_name):
"""
Replaces placeholders such as {image_processor_class} in the docstring with the actual values,
    deduced from the model name and the auto modules.
"""
# first check if there are any placeholders in the args, if not return them as is
placeholders = set(re.findall(r"{(.*?)}", "".join((args[arg]["description"] for arg in args))))
if not placeholders:
return args
# get the placeholders dictionary for the given model name
placeholders_dict = get_placeholders_dict(placeholders, model_name)
# replace the placeholders in the args with the values from the placeholders_dict
for arg in args:
new_arg = args[arg]["description"]
placeholders = re.findall(r"{(.*?)}", new_arg)
placeholders = [placeholder for placeholder in placeholders if placeholder in placeholders_dict]
if placeholders:
new_arg = new_arg.format(**{placeholder: placeholders_dict[placeholder] for placeholder in placeholders})
args[arg]["description"] = new_arg
return args
|
Replaces placeholders such as {image_processor_class} in the docstring with the actual values,
deduced from the model name and the auto modules.
|
format_args_docstring
|
python
|
huggingface/transformers
|
src/transformers/utils/args_doc.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/args_doc.py
|
Apache-2.0
|
def _get_parameter_info(param_name, documented_params, source_args_dict, param_type, optional):
"""
Get parameter documentation details from the appropriate source.
    Tensor shape, optional status and description are taken preferentially from the custom docstring when available.
    The type is taken from the function signature first, then from the custom docstring if it is missing from the signature.
Args:
param_name (`str`): Name of the parameter
documented_params (`dict`): Dictionary of documented parameters (manually specified in the docstring)
source_args_dict (`dict`): Default source args dictionary to use if not in documented_params
param_type (`str`): Current parameter type (may be updated)
optional (`bool`): Whether the parameter is optional (may be updated)
"""
description = None
shape = None
shape_string = ""
is_documented = True
additional_info = None
if param_name in documented_params:
# Parameter is documented in the function's docstring
if param_type == "" and documented_params[param_name].get("type", None) is not None:
param_type = documented_params[param_name]["type"]
optional = documented_params[param_name]["optional"]
shape = documented_params[param_name]["shape"]
shape_string = shape if shape else ""
additional_info = documented_params[param_name]["additional_info"] or ""
description = f"{documented_params[param_name]['description']}\n"
elif param_name in source_args_dict:
# Parameter is documented in ModelArgs or ImageProcessorArgs
shape = source_args_dict[param_name]["shape"]
shape_string = " " + shape if shape else ""
description = source_args_dict[param_name]["description"]
additional_info = None
else:
# Parameter is not documented
is_documented = False
optional_string = r", *optional*" if optional else ""
return param_type, optional_string, shape_string, additional_info, description, is_documented
|
Get parameter documentation details from the appropriate source.
Tensor shape, optional status and description are taken preferentially from the custom docstring when available.
The type is taken from the function signature first, then from the custom docstring if it is missing from the signature.
Args:
param_name (`str`): Name of the parameter
documented_params (`dict`): Dictionary of documented parameters (manually specified in the docstring)
source_args_dict (`dict`): Default source args dictionary to use if not in documented_params
param_type (`str`): Current parameter type (may be updated)
optional (`bool`): Whether the parameter is optional (may be updated)
|
_get_parameter_info
|
python
|
huggingface/transformers
|
src/transformers/utils/args_doc.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/args_doc.py
|
Apache-2.0
|
def _process_parameters_section(
func_documentation, sig, func, class_name, model_name_lowercase, parent_class, indent_level
):
"""
Process the parameters section of the docstring.
Args:
func_documentation (`str`): Existing function documentation (manually specified in the docstring)
sig (`inspect.Signature`): Function signature
func (`function`): Function the parameters belong to
class_name (`str`): Name of the class the function belongs to
model_name_lowercase (`str`): Lowercase model name
parent_class (`class`): Parent class of the function (if any)
indent_level (`int`): Indentation level
"""
# Start Args section
docstring = set_min_indent("Args:\n", indent_level + 4)
undocumented_parameters = []
documented_params = {}
documented_kwargs = {}
# Parse existing docstring if available
if func_documentation is not None:
documented_params, func_documentation = parse_docstring(func_documentation)
if model_name_lowercase is not None:
documented_params = format_args_docstring(documented_params, model_name_lowercase)
# Process regular parameters
param_docstring, missing_args = _process_regular_parameters(
sig, func, class_name, documented_params, indent_level, undocumented_parameters
)
docstring += param_docstring
# Process **kwargs parameters if needed
kwargs_docstring = _process_kwargs_parameters(
sig, func, parent_class, model_name_lowercase, documented_kwargs, indent_level, undocumented_parameters
)
docstring += kwargs_docstring
# Report undocumented parameters
if len(undocumented_parameters) > 0:
print("\n".join(undocumented_parameters))
return docstring
|
Process the parameters section of the docstring.
Args:
func_documentation (`str`): Existing function documentation (manually specified in the docstring)
sig (`inspect.Signature`): Function signature
func (`function`): Function the parameters belong to
class_name (`str`): Name of the class the function belongs to
model_name_lowercase (`str`): Lowercase model name
parent_class (`class`): Parent class of the function (if any)
indent_level (`int`): Indentation level
|
_process_parameters_section
|
python
|
huggingface/transformers
|
src/transformers/utils/args_doc.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/args_doc.py
|
Apache-2.0
|
def _process_returns_section(func_documentation, sig, config_class, indent_level):
"""
Process the returns section of the docstring.
Args:
func_documentation (`str`): Existing function documentation (manually specified in the docstring)
sig (`inspect.Signature`): Function signature
config_class (`str`): Config class for the model
indent_level (`int`): Indentation level
"""
return_docstring = ""
# Extract returns section from existing docstring if available
if (
func_documentation is not None
and (match_start := re.search(r"(?m)^([ \t]*)(?=Return)", func_documentation)) is not None
):
match_end = re.search(r"(?m)^([ \t]*)(?=Example)", func_documentation)
if match_end:
return_docstring = func_documentation[match_start.start() : match_end.start()]
func_documentation = func_documentation[match_end.start() :]
else:
return_docstring = func_documentation[match_start.start() :]
func_documentation = ""
return_docstring = set_min_indent(return_docstring, indent_level + 4)
# Otherwise, generate return docstring from return annotation if available
elif sig.return_annotation is not None and sig.return_annotation != inspect._empty:
add_intro, return_annotation = contains_type(sig.return_annotation, ModelOutput)
return_docstring = _prepare_output_docstrings(return_annotation, config_class, add_intro=add_intro)
return_docstring = return_docstring.replace("typing.", "")
return_docstring = set_min_indent(return_docstring, indent_level + 4)
return return_docstring, func_documentation
|
Process the returns section of the docstring.
Args:
func_documentation (`str`): Existing function documentation (manually specified in the docstring)
sig (`inspect.Signature`): Function signature
config_class (`str`): Config class for the model
indent_level (`int`): Indentation level
|
_process_returns_section
|
python
|
huggingface/transformers
|
src/transformers/utils/args_doc.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/args_doc.py
|
Apache-2.0
|
def auto_class_docstring(cls, custom_intro=None, custom_args=None, checkpoint=None):
"""
Wrapper that automatically generates a docstring for classes based on their attributes and methods.
"""
# import here to avoid circular import
from transformers.models import auto as auto_module
docstring_init = auto_method_docstring(cls.__init__, parent_class=cls, custom_args=custom_args).__doc__.replace(
"Args:", "Parameters:"
)
indent_level = get_indent_level(cls)
model_name_lowercase = get_model_name(cls)
model_name_title = " ".join([k.title() for k in model_name_lowercase.split("_")]) if model_name_lowercase else None
if model_name_lowercase and model_name_lowercase not in getattr(
getattr(auto_module, PLACEHOLDER_TO_AUTO_MODULE["config_class"][0]),
PLACEHOLDER_TO_AUTO_MODULE["config_class"][1],
):
model_name_lowercase = model_name_lowercase.replace("_", "-")
name = re.findall(rf"({'|'.join(ClassDocstring.__dict__.keys())})$", cls.__name__)
if name == [] and cls.__doc__ is None and custom_intro is None:
raise ValueError(
f"`{cls.__name__}` is not part of the auto doc. Here are the available classes: {ClassDocstring.__dict__.keys()}"
)
if name != [] or custom_intro is not None:
name = name[0] if name else None
if custom_intro is not None:
pre_block = equalize_indent(custom_intro, indent_level)
if not pre_block.endswith("\n"):
pre_block += "\n"
elif model_name_title is None:
pre_block = ""
else:
pre_block = getattr(ClassDocstring, name).format(model_name=model_name_title)
# Start building the docstring
docstring = set_min_indent(f"{pre_block}", indent_level) if len(pre_block) else ""
if name != "PreTrainedModel" and "PreTrainedModel" in (x.__name__ for x in cls.__mro__):
docstring += set_min_indent(f"{ClassDocstring.PreTrainedModel}", indent_level)
# Add the __init__ docstring
docstring += set_min_indent(f"\n{docstring_init}", indent_level)
attr_docs = ""
# Get all attributes and methods of the class
for attr_name, attr_value in cls.__dict__.items():
if not callable(attr_value) and not attr_name.startswith("__"):
if attr_value.__class__.__name__ == "property":
attr_type = "property"
else:
attr_type = type(attr_value).__name__
if name and "Config" in name:
raise ValueError("Config should have explicit docstring")
indented_doc = getattr(ClassAttrs, attr_name, None)
if indented_doc is not None:
attr_docs += set_min_indent(f"{attr_name} (`{attr_type}`): {indented_doc}", 0)
# TODO: Add support for Attributes section in docs
# if len(attr_docs.replace(" ", "")):
# docstring += set_min_indent("\nAttributes:\n", indent_level)
# docstring += set_min_indent(attr_docs, indent_level + 4)
else:
print(
f"You used `@auto_class_docstring` decorator on `{cls.__name__}` but this class is not part of the AutoMappings. Remove the decorator"
)
# Assign the dynamically generated docstring to the wrapper class
cls.__doc__ = docstring
return cls
|
Wrapper that automatically generates a docstring for classes based on their attributes and methods.
|
auto_class_docstring
|
python
|
huggingface/transformers
|
src/transformers/utils/args_doc.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/args_doc.py
|
Apache-2.0
|
def auto_docstring(obj=None, *, custom_intro=None, custom_args=None, checkpoint=None):
"""
Automatically generates docstrings for classes and methods in the Transformers library.
This decorator can be used in the following forms:
@auto_docstring
def my_function(...):
...
or
@auto_docstring()
def my_function(...):
...
or
@auto_docstring(custom_intro="Custom intro", ...)
def my_function(...):
...
Args:
custom_intro (str, optional): Custom introduction text to add to the docstring. This will replace the default
introduction text generated by the decorator before the Args section.
checkpoint (str, optional): Checkpoint name to use in the docstring. This should be automatically inferred from the
model configuration class, but can be overridden if needed.
"""
def auto_docstring_decorator(obj):
if len(obj.__qualname__.split(".")) > 1:
return auto_method_docstring(
obj, custom_args=custom_args, custom_intro=custom_intro, checkpoint=checkpoint
)
else:
return auto_class_docstring(obj, custom_args=custom_args, custom_intro=custom_intro, checkpoint=checkpoint)
if obj:
return auto_docstring_decorator(obj)
return auto_docstring_decorator
|
Automatically generates docstrings for classes and methods in the Transformers library.
This decorator can be used in the following forms:
@auto_docstring
def my_function(...):
...
or
@auto_docstring()
def my_function(...):
...
or
@auto_docstring(custom_intro="Custom intro", ...)
def my_function(...):
...
Args:
custom_intro (str, optional): Custom introduction text to add to the docstring. This will replace the default
introduction text generated by the decorator before the Args section.
checkpoint (str, optional): Checkpoint name to use in the docstring. This should be automatically inferred from the
model configuration class, but can be overridden if needed.
|
auto_docstring
|
python
|
huggingface/transformers
|
src/transformers/utils/args_doc.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/args_doc.py
|
Apache-2.0
|
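The dual call forms shown above (`@auto_docstring` and `@auto_docstring(...)`) follow the standard optional-argument decorator pattern. The sketch below is a minimal, generic illustration of that pattern only; the names are made up for the example and it does not reproduce the transformers docstring machinery.

```python
from functools import wraps


def my_decorator(obj=None, *, custom_intro=None):
    """Minimal sketch of a decorator usable with or without arguments (illustrative only)."""

    def decorate(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        # Prepend the custom intro to the existing docstring, mimicking the idea above.
        wrapper.__doc__ = (custom_intro or "") + (func.__doc__ or "")
        return wrapper

    # Called as @my_decorator (obj is the function) or @my_decorator(...) (obj is None).
    if obj is not None:
        return decorate(obj)
    return decorate


@my_decorator
def f(x):
    """Adds one."""
    return x + 1


@my_decorator(custom_intro="Intro. ")
def g(x):
    """Adds two."""
    return x + 2


print(f.__doc__)  # Adds one.
print(g.__doc__)  # Intro. Adds two.
```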
def generate_attention_matrix_from_mask(
words, mask, img_token="<img>", sliding_window=None, token_type_ids=None, image_seq_length=None
):
"""
Generates an attention matrix from a given attention mask.
Optionally applies a sliding window mask (e.g., for Gemma2/3) and
marks regions where image tokens occur based on the specified `img_token`.
"""
mask = mask.int()
if mask.ndim == 3:
mask = mask[0, :, :]
if mask.ndim == 4:
mask = mask[0, 0, :, :]
n = len(words)
max_word_length = max(len(repr(word)) for word in words)
first_img_idx = 0
output = []
for i, k in enumerate(words):
if k == img_token and not first_img_idx:
first_img_idx = i
mask[i, i] = 2 # Mark yellow regions
if first_img_idx > 0 and (k != img_token or i == n - 1):
if i == n - 1:
i += 1
mask[first_img_idx:i, first_img_idx:i] = 2 # Mark yellow regions
first_img_idx = 0
# Generate sliding window mask (size = 4), excluding img_token
sliding_window_mask = None
if sliding_window is not None:
sliding_window_mask = [[1 if (0 <= i - j < sliding_window) else 0 for j in range(n)] for i in range(n)]
row_dummy = " ".join(
f"{YELLOW}{BLACK_SQUARE}{RESET}"
if mask[0, j]
else f"{GREEN}{BLACK_SQUARE}{RESET}"
if 0 == j
else BLACK_SQUARE
if mask[0, j]
else WHITE_SQUARE
for j in range(n)
)
if token_type_ids is not None:
is_special = token_type_ids == 1
token_type_buckets = torch.where(
(token_type_ids.cumsum(-1) % 5 + is_special).bool(), token_type_ids.cumsum(-1), 0
)
boundaries = torch.arange(0, image_seq_length + 1, image_seq_length)
token_type_buckets = torch.bucketize(token_type_buckets, boundaries=boundaries)
# Print headers
legend = f"{GREEN}{BLACK_SQUARE}{RESET}: i == j (diagonal) {YELLOW}{BLACK_SQUARE}{RESET}: token_type_ids"
output.append(" " + legend)
f_string = " " * (max_word_length + 5) + "Attention Matrix".ljust(len(row_dummy) // 2)
if sliding_window is not None:
f_string += "Sliding Window Mask"
output.append(f_string)
vertical_header = []
for idx, word in enumerate(words):
if mask[idx, idx] == 2:
vertical_header.append([f"{YELLOW}{k}{RESET}" for k in list(str(idx).rjust(len(str(n))))])
else:
vertical_header.append(list(str(idx).rjust(len(str(n)))))
vertical_header = list(map(list, zip(*vertical_header))) # Transpose
for row in vertical_header:
output.append(
(max_word_length + 5) * " " + " ".join(row) + " | " + " ".join(row)
if sliding_window is not None
else ""
)
for i, word in enumerate(words):
word_repr = repr(word).ljust(max_word_length)
colored_word = f"{YELLOW}{word_repr}{RESET}" if img_token in word else word_repr
row_display = " ".join(
f"{YELLOW}{BLACK_SQUARE}{RESET}"
if img_token in words[j] and mask[i, j] and img_token in words[i]
else f"{GREEN}{BLACK_SQUARE}{RESET}"
if i == j
else BLACK_SQUARE
if mask[i, j]
else WHITE_SQUARE
for j in range(n)
)
sliding_window_row = ""
if sliding_window is not None:
sliding_window_row = " ".join(
f"{YELLOW}{BLACK_SQUARE}{RESET}"
if img_token in words[j]
and img_token in words[i]
and token_type_buckets[0, i] == token_type_buckets[0, j]
else f"{GREEN}{BLACK_SQUARE}{RESET}"
if i == j
else BLACK_SQUARE
if sliding_window_mask[i][j]
else WHITE_SQUARE
for j in range(n)
)
output.append(f"{colored_word}: {str(i).rjust(2)} {row_display} | {sliding_window_row}")
return "\n".join(output)
|
Generates an attention matrix from a given attention mask.
Optionally applies a sliding window mask (e.g., for Gemma2/3) and
marks regions where image tokens occur based on the specified `img_token`.
|
generate_attention_matrix_from_mask
|
python
|
huggingface/transformers
|
src/transformers/utils/attention_visualizer.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/attention_visualizer.py
|
Apache-2.0
|
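As a rough, self-contained illustration of the kind of grid the helper above produces, the sketch below renders a plain causal mask as text, using literal squares instead of the module's color constants and skipping the image-token and sliding-window handling. It only assumes `torch` is installed.

```python
import torch

# Minimal sketch in the spirit of the visualizer above: render a causal mask
# as a grid of filled/empty squares (no ANSI colors or image-token handling).
words = ["<s>", "The", "cat", "sat"]
n = len(words)
mask = torch.tril(torch.ones(n, n, dtype=torch.int))  # causal: attend to self and past

width = max(len(w) for w in words)
for i, word in enumerate(words):
    row = " ".join("■" if mask[i, j] else "□" for j in range(n))
    print(f"{word.ljust(width)} {row}")
```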
def verify_out_features_out_indices(
out_features: Optional[Iterable[str]], out_indices: Optional[Iterable[int]], stage_names: Optional[Iterable[str]]
):
"""
Verify that out_indices and out_features are valid for the given stage_names.
"""
if stage_names is None:
raise ValueError("Stage_names must be set for transformers backbones")
if out_features is not None:
if not isinstance(out_features, (list,)):
raise ValueError(f"out_features must be a list got {type(out_features)}")
if any(feat not in stage_names for feat in out_features):
raise ValueError(f"out_features must be a subset of stage_names: {stage_names} got {out_features}")
if len(out_features) != len(set(out_features)):
raise ValueError(f"out_features must not contain any duplicates, got {out_features}")
if out_features != (sorted_feats := [feat for feat in stage_names if feat in out_features]):
raise ValueError(
f"out_features must be in the same order as stage_names, expected {sorted_feats} got {out_features}"
)
if out_indices is not None:
if not isinstance(out_indices, list):
raise ValueError(f"out_indices must be a list, got {type(out_indices)}")
# Convert negative indices to their positive equivalent: [-1,] -> [len(stage_names) - 1,]
positive_indices = tuple(idx % len(stage_names) if idx < 0 else idx for idx in out_indices)
if any(idx for idx in positive_indices if idx not in range(len(stage_names))):
raise ValueError(f"out_indices must be valid indices for stage_names {stage_names}, got {out_indices}")
if len(positive_indices) != len(set(positive_indices)):
msg = f"out_indices must not contain any duplicates, got {out_indices}"
msg += f"(equivalent to {positive_indices}))" if positive_indices != out_indices else ""
raise ValueError(msg)
if positive_indices != tuple(sorted(positive_indices)):
sorted_negative = [idx for _, idx in sorted(zip(positive_indices, out_indices), key=lambda x: x[0])]
raise ValueError(
f"out_indices must be in the same order as stage_names, expected {sorted_negative} got {out_indices}"
)
if out_features is not None and out_indices is not None:
if len(out_features) != len(out_indices):
raise ValueError("out_features and out_indices should have the same length if both are set")
if out_features != [stage_names[idx] for idx in out_indices]:
raise ValueError("out_features and out_indices should correspond to the same stages if both are set")
|
Verify that out_indices and out_features are valid for the given stage_names.
|
verify_out_features_out_indices
|
python
|
huggingface/transformers
|
src/transformers/utils/backbone_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/backbone_utils.py
|
Apache-2.0
|
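A small usage sketch for the validator above, assuming it can be imported from `transformers.utils.backbone_utils` (the path given for this record); the second call deliberately passes features out of order to show the error.

```python
# Assumes the helper is importable from the module shown in this record's path.
from transformers.utils.backbone_utils import verify_out_features_out_indices

stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]

# Valid: features are a subset of stage_names, in order, and the indices match them.
verify_out_features_out_indices(
    out_features=["stage2", "stage4"], out_indices=[2, 4], stage_names=stage_names
)

# Invalid: out-of-order features raise a ValueError.
try:
    verify_out_features_out_indices(
        out_features=["stage4", "stage2"], out_indices=[4, 2], stage_names=stage_names
    )
except ValueError as err:
    print(err)
```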
def _align_output_features_output_indices(
out_features: Optional[list[str]],
out_indices: Optional[Union[list[int], tuple[int]]],
stage_names: list[str],
):
"""
Finds the corresponding `out_features` and `out_indices` for the given `stage_names`.
The logic is as follows:
- `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the
`out_indices`.
- `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the
`out_features`.
- `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.
- `out_indices` and `out_features` set: input `out_indices` and `out_features` are returned.
Args:
out_features (`List[str]`): The names of the features for the backbone to output.
out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.
stage_names (`List[str]`): The names of the stages of the backbone.
"""
if out_indices is None and out_features is None:
out_indices = [len(stage_names) - 1]
out_features = [stage_names[-1]]
elif out_indices is None and out_features is not None:
out_indices = [stage_names.index(layer) for layer in out_features]
elif out_features is None and out_indices is not None:
out_features = [stage_names[idx] for idx in out_indices]
return out_features, out_indices
|
Finds the corresponding `out_features` and `out_indices` for the given `stage_names`.
The logic is as follows:
- `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the
`out_indices`.
- `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the
`out_features`.
- `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.
- `out_indices` and `out_features` set: input `out_indices` and `out_features` are returned.
Args:
out_features (`List[str]`): The names of the features for the backbone to output.
out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.
stage_names (`List[str]`): The names of the stages of the backbone.
|
_align_output_features_output_indices
|
python
|
huggingface/transformers
|
src/transformers/utils/backbone_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/backbone_utils.py
|
Apache-2.0
|
def get_aligned_output_features_output_indices(
out_features: Optional[list[str]],
out_indices: Optional[Union[list[int], tuple[int]]],
stage_names: list[str],
) -> tuple[list[str], list[int]]:
"""
Get the `out_features` and `out_indices` so that they are aligned.
The logic is as follows:
- `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the
`out_indices`.
- `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the
`out_features`.
- `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.
- `out_indices` and `out_features` set: they are verified to be aligned.
Args:
out_features (`List[str]`): The names of the features for the backbone to output.
out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.
stage_names (`List[str]`): The names of the stages of the backbone.
"""
out_indices = list(out_indices) if out_indices is not None else None
# First verify that the out_features and out_indices are valid
verify_out_features_out_indices(out_features=out_features, out_indices=out_indices, stage_names=stage_names)
output_features, output_indices = _align_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=stage_names
)
# Verify that the aligned out_features and out_indices are valid
verify_out_features_out_indices(out_features=output_features, out_indices=output_indices, stage_names=stage_names)
return output_features, output_indices
|
Get the `out_features` and `out_indices` so that they are aligned.
The logic is as follows:
- `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the
`out_indices`.
- `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the
`out_features`.
- `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.
- `out_indices` and `out_features` set: they are verified to be aligned.
Args:
out_features (`List[str]`): The names of the features for the backbone to output.
out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.
stage_names (`List[str]`): The names of the stages of the backbone.
|
get_aligned_output_features_output_indices
|
python
|
huggingface/transformers
|
src/transformers/utils/backbone_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/backbone_utils.py
|
Apache-2.0
|
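A short sketch of the alignment behaviour described above, again assuming the function is importable from `transformers.utils.backbone_utils`; the printed results reflect the three fill-in rules listed in the docstring.

```python
# Assumes the function is importable from transformers.utils.backbone_utils.
from transformers.utils.backbone_utils import get_aligned_output_features_output_indices

stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]

# Only indices given: the matching feature names are filled in (negative indices allowed).
print(get_aligned_output_features_output_indices(None, [-1], stage_names))
# (['stage4'], [-1])

# Only features given: the matching indices are filled in.
print(get_aligned_output_features_output_indices(["stage1", "stage3"], None, stage_names))
# (['stage1', 'stage3'], [1, 3])

# Neither given: defaults to the last stage.
print(get_aligned_output_features_output_indices(None, None, stage_names))
# (['stage4'], [4])
```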
def _init_timm_backbone(self, config) -> None:
"""
        Initialize the backbone model from timm. The backbone must already be loaded to self._backbone.
"""
if getattr(self, "_backbone", None) is None:
raise ValueError("self._backbone must be set before calling _init_timm_backbone")
        # These will disagree with the defaults for the transformers models, e.g. for resnet50
# the transformer model has out_features = ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
# the timm model has out_features = ['act', 'layer1', 'layer2', 'layer3', 'layer4']
self.stage_names = [stage["module"] for stage in self._backbone.feature_info.info]
self.num_features = [stage["num_chs"] for stage in self._backbone.feature_info.info]
# In some timm versions, out_indices reflects the input type of out_indices on the `create_model` call,
# in later versions >= 1, it is always a tuple
out_indices = list(self._backbone.feature_info.out_indices)
out_features = self._backbone.feature_info.module_name()
# We verify the out indices and out features are valid
verify_out_features_out_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
self._out_features, self._out_indices = out_features, out_indices
|
Initialize the backbone model from timm. The backbone must already be loaded to self._backbone.
|
_init_timm_backbone
|
python
|
huggingface/transformers
|
src/transformers/utils/backbone_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/backbone_utils.py
|
Apache-2.0
|
def _init_backbone(self, config) -> None:
"""
Method to initialize the backbone. This method is called by the constructor of the base class after the
pretrained model weights have been loaded.
"""
self.config = config
self.use_timm_backbone = getattr(config, "use_timm_backbone", False)
self.backbone_type = BackboneType.TIMM if self.use_timm_backbone else BackboneType.TRANSFORMERS
if self.backbone_type == BackboneType.TIMM:
self._init_timm_backbone(config)
elif self.backbone_type == BackboneType.TRANSFORMERS:
self._init_transformers_backbone(config)
else:
raise ValueError(f"backbone_type {self.backbone_type} not supported.")
|
Method to initialize the backbone. This method is called by the constructor of the base class after the
pretrained model weights have been loaded.
|
_init_backbone
|
python
|
huggingface/transformers
|
src/transformers/utils/backbone_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/backbone_utils.py
|
Apache-2.0
|
def out_features(self, out_features: list[str]):
"""
Set the out_features attribute. This will also update the out_indices attribute to match the new out_features.
"""
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=None, stage_names=self.stage_names
)
|
Set the out_features attribute. This will also update the out_indices attribute to match the new out_features.
|
out_features
|
python
|
huggingface/transformers
|
src/transformers/utils/backbone_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/backbone_utils.py
|
Apache-2.0
|
def out_indices(self, out_indices: Union[tuple[int], list[int]]):
"""
Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices.
"""
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=None, out_indices=out_indices, stage_names=self.stage_names
)
|
Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices.
|
out_indices
|
python
|
huggingface/transformers
|
src/transformers/utils/backbone_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/backbone_utils.py
|
Apache-2.0
|
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to
include the `out_features` and `out_indices` attributes.
"""
output = super().to_dict()
output["out_features"] = output.pop("_out_features")
output["out_indices"] = output.pop("_out_indices")
return output
|
Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to
include the `out_features` and `out_indices` attributes.
|
to_dict
|
python
|
huggingface/transformers
|
src/transformers/utils/backbone_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/backbone_utils.py
|
Apache-2.0
|
def out_features(self, out_features: list[str]):
"""
Set the out_features attribute. This will also update the out_indices attribute to match the new out_features.
"""
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=None, stage_names=self.stage_names
)
|
Set the out_features attribute. This will also update the out_indices attribute to match the new out_features.
|
out_features
|
python
|
huggingface/transformers
|
src/transformers/utils/backbone_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/backbone_utils.py
|
Apache-2.0
|
def out_indices(self, out_indices: Union[tuple[int], list[int]]):
"""
Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices.
"""
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=None, out_indices=out_indices, stage_names=self.stage_names
)
|
Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices.
|
out_indices
|
python
|
huggingface/transformers
|
src/transformers/utils/backbone_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/backbone_utils.py
|
Apache-2.0
|
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to
include the `out_features` and `out_indices` attributes.
"""
output = super().to_dict()
output["out_features"] = output.pop("_out_features")
output["out_indices"] = output.pop("_out_indices")
return output
|
Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to
include the `out_features` and `out_indices` attributes.
|
to_dict
|
python
|
huggingface/transformers
|
src/transformers/utils/backbone_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/backbone_utils.py
|
Apache-2.0
|
def load_backbone(config):
"""
Loads the backbone model from a config object.
If the config is from the backbone model itself, then we return a backbone model with randomly initialized
weights.
If the config is from the parent model of the backbone model itself, then we load the pretrained backbone weights
if specified.
"""
from transformers import AutoBackbone, AutoConfig
backbone_config = getattr(config, "backbone_config", None)
use_timm_backbone = getattr(config, "use_timm_backbone", None)
use_pretrained_backbone = getattr(config, "use_pretrained_backbone", None)
backbone_checkpoint = getattr(config, "backbone", None)
backbone_kwargs = getattr(config, "backbone_kwargs", None)
backbone_kwargs = {} if backbone_kwargs is None else backbone_kwargs
if backbone_kwargs and backbone_config is not None:
raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
# If there is a backbone_config and a backbone checkpoint, and use_pretrained_backbone=False then the desired
# behaviour is ill-defined: do you want to load from the checkpoint's config or the backbone_config?
if backbone_config is not None and backbone_checkpoint is not None and use_pretrained_backbone is not None:
raise ValueError("Cannot specify both config.backbone_config and config.backbone")
    # If none of the following are set, the config passed in is the backbone's own config, so we build the
    # backbone directly from it. Otherwise, the config comes from a parent model which contains a backbone.
if (
backbone_config is None
and use_timm_backbone is None
        and backbone_checkpoint is None
):
return AutoBackbone.from_config(config=config, **backbone_kwargs)
# config from the parent model that has a backbone
if use_timm_backbone:
if backbone_checkpoint is None:
raise ValueError("config.backbone must be set if use_timm_backbone is True")
# Because of how timm backbones were originally added to models, we need to pass in use_pretrained_backbone
# to determine whether to load the pretrained weights.
backbone = AutoBackbone.from_pretrained(
backbone_checkpoint,
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
**backbone_kwargs,
)
elif use_pretrained_backbone:
if backbone_checkpoint is None:
raise ValueError("config.backbone must be set if use_pretrained_backbone is True")
backbone = AutoBackbone.from_pretrained(backbone_checkpoint, **backbone_kwargs)
else:
if backbone_config is None and backbone_checkpoint is None:
raise ValueError("Either config.backbone_config or config.backbone must be set")
if backbone_config is None:
backbone_config = AutoConfig.from_pretrained(backbone_checkpoint, **backbone_kwargs)
backbone = AutoBackbone.from_config(config=backbone_config)
return backbone
|
Loads the backbone model from a config object.
If the config is from the backbone model itself, then we return a backbone model with randomly initialized
weights.
If the config is from the parent model of the backbone model itself, then we load the pretrained backbone weights
if specified.
|
load_backbone
|
python
|
huggingface/transformers
|
src/transformers/utils/backbone_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/backbone_utils.py
|
Apache-2.0
|
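A minimal sketch of the first branch described above (the config is the backbone's own config), assuming a recent transformers install with `ResNetConfig` available; it builds a randomly initialized backbone and downloads nothing.

```python
# Assumes a recent transformers install; no weights are downloaded in this branch.
from transformers import ResNetConfig
from transformers.utils.backbone_utils import load_backbone

# The config is the backbone's own config (no backbone_config/backbone/use_timm_backbone
# attributes set), so load_backbone falls through to AutoBackbone.from_config and returns
# a randomly initialized backbone.
config = ResNetConfig(out_features=["stage2", "stage4"])
backbone = load_backbone(config)
print(backbone.out_features)  # ['stage2', 'stage4']
```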
def verify_backbone_config_arguments(
use_timm_backbone: bool,
use_pretrained_backbone: bool,
backbone: Optional[str],
backbone_config: Optional[Union[dict, "PretrainedConfig"]],
backbone_kwargs: Optional[dict],
):
"""
Verify that the config arguments to be passed to load_backbone are valid
"""
if backbone_config is not None and backbone is not None:
raise ValueError("You can't specify both `backbone` and `backbone_config`.")
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
|
Verify that the config arguments to be passed to load_backbone are valid
|
verify_backbone_config_arguments
|
python
|
huggingface/transformers
|
src/transformers/utils/backbone_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/backbone_utils.py
|
Apache-2.0
|
def parse_google_format_docstring(docstring: str) -> tuple[Optional[str], Optional[dict], Optional[str]]:
"""
Parses a Google-style docstring to extract the function description,
argument descriptions, and return description.
Args:
docstring (str): The docstring to parse.
Returns:
The function description, arguments, and return description.
"""
# Extract the sections
description_match = description_re.search(docstring)
args_match = args_re.search(docstring)
returns_match = returns_re.search(docstring)
# Clean and store the sections
description = description_match.group(1).strip() if description_match else None
docstring_args = args_match.group(1).strip() if args_match else None
returns = returns_match.group(1).strip() if returns_match else None
# Parsing the arguments into a dictionary
if docstring_args is not None:
docstring_args = "\n".join([line for line in docstring_args.split("\n") if line.strip()]) # Remove blank lines
matches = args_split_re.findall(docstring_args)
args_dict = {match[0]: re.sub(r"\s*\n+\s*", " ", match[1].strip()) for match in matches}
else:
args_dict = {}
return description, args_dict, returns
|
Parses a Google-style docstring to extract the function description,
argument descriptions, and return description.
Args:
docstring (str): The docstring to parse.
Returns:
The function description, arguments, and return description.
|
parse_google_format_docstring
|
python
|
huggingface/transformers
|
src/transformers/utils/chat_template_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/chat_template_utils.py
|
Apache-2.0
|
def get_json_schema(func: Callable) -> dict:
"""
This function generates a JSON schema for a given function, based on its docstring and type hints. This is
mostly used for passing lists of tools to a chat template. The JSON schema contains the name and description of
the function, as well as the names, types and descriptions for each of its arguments. `get_json_schema()` requires
that the function has a docstring, and that each argument has a description in the docstring, in the standard
Google docstring format shown below. It also requires that all the function arguments have a valid Python type hint.
Although it is not required, a `Returns` block can also be added, which will be included in the schema. This is
optional because most chat templates ignore the return value of the function.
Args:
func: The function to generate a JSON schema for.
Returns:
A dictionary containing the JSON schema for the function.
Examples:
```python
>>> def multiply(x: float, y: float):
>>> '''
>>> A function that multiplies two numbers
>>>
>>> Args:
>>> x: The first number to multiply
>>> y: The second number to multiply
>>> '''
>>> return x * y
>>>
>>> print(get_json_schema(multiply))
{
"name": "multiply",
"description": "A function that multiplies two numbers",
"parameters": {
"type": "object",
"properties": {
"x": {"type": "number", "description": "The first number to multiply"},
"y": {"type": "number", "description": "The second number to multiply"}
},
"required": ["x", "y"]
}
}
```
The general use for these schemas is that they are used to generate tool descriptions for chat templates that
support them, like so:
```python
>>> from transformers import AutoTokenizer
>>> from transformers.utils import get_json_schema
>>>
>>> def multiply(x: float, y: float):
>>> '''
>>> A function that multiplies two numbers
>>>
>>> Args:
>>> x: The first number to multiply
>>> y: The second number to multiply
    >>>     '''
    >>>     return x * y
>>>
>>> multiply_schema = get_json_schema(multiply)
>>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
>>> messages = [{"role": "user", "content": "What is 179 x 4571?"}]
>>> formatted_chat = tokenizer.apply_chat_template(
>>> messages,
>>> tools=[multiply_schema],
>>> chat_template="tool_use",
>>> return_dict=True,
>>> return_tensors="pt",
>>> add_generation_prompt=True
>>> )
>>> # The formatted chat can now be passed to model.generate()
```
Each argument description can also have an optional `(choices: ...)` block at the end, such as
`(choices: ["tea", "coffee"])`, which will be parsed into an `enum` field in the schema. Note that this will
only be parsed correctly if it is at the end of the line:
```python
>>> def drink_beverage(beverage: str):
>>> '''
>>> A function that drinks a beverage
>>>
>>> Args:
>>> beverage: The beverage to drink (choices: ["tea", "coffee"])
>>> '''
>>> pass
>>>
>>> print(get_json_schema(drink_beverage))
```
{
'name': 'drink_beverage',
'description': 'A function that drinks a beverage',
'parameters': {
'type': 'object',
'properties': {
'beverage': {
'type': 'string',
'enum': ['tea', 'coffee'],
'description': 'The beverage to drink'
}
},
'required': ['beverage']
}
}
"""
doc = inspect.getdoc(func)
if not doc:
raise DocstringParsingException(
f"Cannot generate JSON schema for {func.__name__} because it has no docstring!"
)
doc = doc.strip()
main_doc, param_descriptions, return_doc = parse_google_format_docstring(doc)
json_schema = _convert_type_hints_to_json_schema(func)
if (return_dict := json_schema["properties"].pop("return", None)) is not None:
if return_doc is not None: # We allow a missing return docstring since most templates ignore it
return_dict["description"] = return_doc
for arg, schema in json_schema["properties"].items():
if arg not in param_descriptions:
raise DocstringParsingException(
f"Cannot generate JSON schema for {func.__name__} because the docstring has no description for the argument '{arg}'"
)
desc = param_descriptions[arg]
enum_choices = re.search(r"\(choices:\s*(.*?)\)\s*$", desc, flags=re.IGNORECASE)
if enum_choices:
schema["enum"] = [c.strip() for c in json.loads(enum_choices.group(1))]
desc = enum_choices.string[: enum_choices.start()].strip()
schema["description"] = desc
output = {"name": func.__name__, "description": main_doc, "parameters": json_schema}
if return_dict is not None:
output["return"] = return_dict
return {"type": "function", "function": output}
|
This function generates a JSON schema for a given function, based on its docstring and type hints. This is
mostly used for passing lists of tools to a chat template. The JSON schema contains the name and description of
the function, as well as the names, types and descriptions for each of its arguments. `get_json_schema()` requires
that the function has a docstring, and that each argument has a description in the docstring, in the standard
Google docstring format shown below. It also requires that all the function arguments have a valid Python type hint.
Although it is not required, a `Returns` block can also be added, which will be included in the schema. This is
optional because most chat templates ignore the return value of the function.
Args:
func: The function to generate a JSON schema for.
Returns:
A dictionary containing the JSON schema for the function.
Examples:
```python
>>> def multiply(x: float, y: float):
>>> '''
>>> A function that multiplies two numbers
>>>
>>> Args:
>>> x: The first number to multiply
>>> y: The second number to multiply
>>> '''
>>> return x * y
>>>
>>> print(get_json_schema(multiply))
{
"name": "multiply",
"description": "A function that multiplies two numbers",
"parameters": {
"type": "object",
"properties": {
"x": {"type": "number", "description": "The first number to multiply"},
"y": {"type": "number", "description": "The second number to multiply"}
},
"required": ["x", "y"]
}
}
```
The general use for these schemas is that they are used to generate tool descriptions for chat templates that
support them, like so:
```python
>>> from transformers import AutoTokenizer
>>> from transformers.utils import get_json_schema
>>>
>>> def multiply(x: float, y: float):
>>> '''
>>> A function that multiplies two numbers
>>>
>>> Args:
>>> x: The first number to multiply
>>> y: The second number to multiply
>>>     '''
>>>     return x * y
>>>
>>> multiply_schema = get_json_schema(multiply)
>>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
>>> messages = [{"role": "user", "content": "What is 179 x 4571?"}]
>>> formatted_chat = tokenizer.apply_chat_template(
>>> messages,
>>> tools=[multiply_schema],
>>> chat_template="tool_use",
>>> return_dict=True,
>>> return_tensors="pt",
>>> add_generation_prompt=True
>>> )
>>> # The formatted chat can now be passed to model.generate()
```
Each argument description can also have an optional `(choices: ...)` block at the end, such as
`(choices: ["tea", "coffee"])`, which will be parsed into an `enum` field in the schema. Note that this will
only be parsed correctly if it is at the end of the line:
```python
>>> def drink_beverage(beverage: str):
>>> '''
>>> A function that drinks a beverage
>>>
>>> Args:
>>> beverage: The beverage to drink (choices: ["tea", "coffee"])
>>> '''
>>> pass
>>>
>>> print(get_json_schema(drink_beverage))
```
{
'name': 'drink_beverage',
'description': 'A function that drinks a beverage',
'parameters': {
'type': 'object',
'properties': {
'beverage': {
'type': 'string',
'enum': ['tea', 'coffee'],
'description': 'The beverage to drink'
}
},
'required': ['beverage']
}
}
|
get_json_schema
|
python
|
huggingface/transformers
|
src/transformers/utils/chat_template_utils.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/chat_template_utils.py
|
Apache-2.0
|
def deprecate_kwarg(
old_name: str,
version: str,
new_name: Optional[str] = None,
warn_if_greater_or_equal_version: bool = False,
raise_if_greater_or_equal_version: bool = False,
raise_if_both_names: bool = False,
additional_message: Optional[str] = None,
):
"""
Function or method decorator to notify users about deprecated keyword arguments, replacing them with a new name if specified.
    Note that this decorator is `torch.compile`-safe, i.e. it will not cause graph breaks (but no warning will be displayed if compiling).
This decorator allows you to:
- Notify users when a keyword argument is deprecated.
- Automatically replace deprecated keyword arguments with new ones.
- Raise an error if deprecated arguments are used, depending on the specified conditions.
By default, the decorator notifies the user about the deprecated argument while the `transformers.__version__` < specified `version`
in the decorator. To keep notifications with any version `warn_if_greater_or_equal_version=True` can be set.
Parameters:
old_name (`str`):
Name of the deprecated keyword argument.
version (`str`):
The version in which the keyword argument was (or will be) deprecated.
new_name (`Optional[str]`, *optional*):
The new name for the deprecated keyword argument. If specified, the deprecated keyword argument will be replaced with this new name.
warn_if_greater_or_equal_version (`bool`, *optional*, defaults to `False`):
Whether to show warning if current `transformers` version is greater or equal to the deprecated version.
raise_if_greater_or_equal_version (`bool`, *optional*, defaults to `False`):
Whether to raise `ValueError` if current `transformers` version is greater or equal to the deprecated version.
raise_if_both_names (`bool`, *optional*, defaults to `False`):
Whether to raise `ValueError` if both deprecated and new keyword arguments are set.
additional_message (`Optional[str]`, *optional*):
An additional message to append to the default deprecation message.
Raises:
ValueError:
If raise_if_greater_or_equal_version is True and the current version is greater than or equal to the deprecated version, or if raise_if_both_names is True and both old and new keyword arguments are provided.
Returns:
Callable:
A wrapped function that handles the deprecated keyword arguments according to the specified parameters.
Example usage with renaming argument:
```python
@deprecate_kwarg("reduce_labels", new_name="do_reduce_labels", version="6.0.0")
def my_function(do_reduce_labels):
print(do_reduce_labels)
my_function(reduce_labels=True) # Will show a deprecation warning and use do_reduce_labels=True
```
Example usage without renaming argument:
```python
@deprecate_kwarg("max_size", version="6.0.0")
def my_function(max_size):
print(max_size)
my_function(max_size=1333) # Will show a deprecation warning
```
"""
deprecated_version = packaging.version.parse(version)
current_version = packaging.version.parse(__version__)
is_greater_or_equal_version = current_version >= deprecated_version
if is_greater_or_equal_version:
version_message = f"and removed starting from version {version}"
else:
version_message = f"and will be removed in version {version}"
def wrapper(func):
# Required for better warning message
sig = inspect.signature(func)
function_named_args = set(sig.parameters.keys())
is_instance_method = "self" in function_named_args
is_class_method = "cls" in function_named_args
@wraps(func)
def wrapped_func(*args, **kwargs):
# Get class + function name (just for better warning message)
func_name = func.__name__
if is_instance_method:
func_name = f"{args[0].__class__.__name__}.{func_name}"
elif is_class_method:
func_name = f"{args[0].__name__}.{func_name}"
minimum_action = Action.NONE
message = None
# deprecated kwarg and its new version are set for function call -> replace it with new name
if old_name in kwargs and new_name in kwargs:
minimum_action = Action.RAISE if raise_if_both_names else Action.NOTIFY_ALWAYS
message = f"Both `{old_name}` and `{new_name}` are set for `{func_name}`. Using `{new_name}={kwargs[new_name]}` and ignoring deprecated `{old_name}={kwargs[old_name]}`."
kwargs.pop(old_name)
# only deprecated kwarg is set for function call -> replace it with new name
elif old_name in kwargs and new_name is not None and new_name not in kwargs:
minimum_action = Action.NOTIFY
message = f"`{old_name}` is deprecated {version_message} for `{func_name}`. Use `{new_name}` instead."
kwargs[new_name] = kwargs.pop(old_name)
# deprecated kwarg is not set for function call and new name is not specified -> just notify
elif old_name in kwargs:
minimum_action = Action.NOTIFY
message = f"`{old_name}` is deprecated {version_message} for `{func_name}`."
if message is not None and additional_message is not None:
message = f"{message} {additional_message}"
# update minimum_action if argument is ALREADY deprecated (current version >= deprecated version)
if is_greater_or_equal_version:
# change to (NOTIFY, NOTIFY_ALWAYS) -> RAISE if specified
# in case we want to raise error for already deprecated arguments
if raise_if_greater_or_equal_version and minimum_action != Action.NONE:
minimum_action = Action.RAISE
# change to NOTIFY -> NONE if specified (NOTIFY_ALWAYS can't be changed to NONE)
# in case we want to ignore notifications for already deprecated arguments
elif not warn_if_greater_or_equal_version and minimum_action == Action.NOTIFY:
minimum_action = Action.NONE
# raise error or notify user
if minimum_action == Action.RAISE:
raise ValueError(message)
# If we are compiling, we do not raise the warning as it would break compilation
elif minimum_action in (Action.NOTIFY, Action.NOTIFY_ALWAYS) and not is_torchdynamo_compiling():
# DeprecationWarning is ignored by default, so we use FutureWarning instead
warnings.warn(message, FutureWarning, stacklevel=2)
return func(*args, **kwargs)
return wrapped_func
return wrapper
|
Function or method decorator to notify users about deprecated keyword arguments, replacing them with a new name if specified.
Note that this decorator is `torch.compile`-safe, i.e. it will not cause graph breaks (but no warning will be displayed if compiling).
This decorator allows you to:
- Notify users when a keyword argument is deprecated.
- Automatically replace deprecated keyword arguments with new ones.
- Raise an error if deprecated arguments are used, depending on the specified conditions.
By default, the decorator notifies the user about the deprecated argument while the `transformers.__version__` < specified `version`
in the decorator. To keep notifications with any version `warn_if_greater_or_equal_version=True` can be set.
Parameters:
old_name (`str`):
Name of the deprecated keyword argument.
version (`str`):
The version in which the keyword argument was (or will be) deprecated.
new_name (`Optional[str]`, *optional*):
The new name for the deprecated keyword argument. If specified, the deprecated keyword argument will be replaced with this new name.
warn_if_greater_or_equal_version (`bool`, *optional*, defaults to `False`):
Whether to show warning if current `transformers` version is greater or equal to the deprecated version.
raise_if_greater_or_equal_version (`bool`, *optional*, defaults to `False`):
Whether to raise `ValueError` if current `transformers` version is greater or equal to the deprecated version.
raise_if_both_names (`bool`, *optional*, defaults to `False`):
Whether to raise `ValueError` if both deprecated and new keyword arguments are set.
additional_message (`Optional[str]`, *optional*):
An additional message to append to the default deprecation message.
Raises:
ValueError:
If raise_if_greater_or_equal_version is True and the current version is greater than or equal to the deprecated version, or if raise_if_both_names is True and both old and new keyword arguments are provided.
Returns:
Callable:
A wrapped function that handles the deprecated keyword arguments according to the specified parameters.
Example usage with renaming argument:
```python
@deprecate_kwarg("reduce_labels", new_name="do_reduce_labels", version="6.0.0")
def my_function(do_reduce_labels):
print(do_reduce_labels)
my_function(reduce_labels=True) # Will show a deprecation warning and use do_reduce_labels=True
```
Example usage without renaming argument:
```python
@deprecate_kwarg("max_size", version="6.0.0")
def my_function(max_size):
print(max_size)
my_function(max_size=1333) # Will show a deprecation warning
```
|
deprecate_kwarg
|
python
|
huggingface/transformers
|
src/transformers/utils/deprecation.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/deprecation.py
|
Apache-2.0
|
def get_docstring_indentation_level(func):
"""Return the indentation level of the start of the docstring of a class or function (or method)."""
# We assume classes are always defined in the global scope
if inspect.isclass(func):
return 4
source = inspect.getsource(func)
first_line = source.splitlines()[0]
function_def_level = len(first_line) - len(first_line.lstrip())
return 4 + function_def_level
|
Return the indentation level of the start of the docstring of a class or function (or method).
|
get_docstring_indentation_level
|
python
|
huggingface/transformers
|
src/transformers/utils/doc.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/doc.py
|
Apache-2.0
|
def _get_indent(t):
"""Returns the indentation in the first line of t"""
search = re.search(r"^(\s*)\S", t)
return "" if search is None else search.groups()[0]
|
Returns the indentation in the first line of t
|
_get_indent
|
python
|
huggingface/transformers
|
src/transformers/utils/doc.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/doc.py
|
Apache-2.0
|
def _prepare_output_docstrings(output_type, config_class, min_indent=None, add_intro=True):
"""
Prepares the return part of the docstring using `output_type`.
"""
output_docstring = output_type.__doc__
params_docstring = None
if output_docstring is not None:
# Remove the head of the docstring to keep the list of args only
lines = output_docstring.split("\n")
i = 0
while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None:
i += 1
if i < len(lines):
params_docstring = "\n".join(lines[(i + 1) :])
params_docstring = _convert_output_args_doc(params_docstring)
elif add_intro:
raise ValueError(
f"No `Args` or `Parameters` section is found in the docstring of `{output_type.__name__}`. Make sure it has "
"docstring and contain either `Args` or `Parameters`."
)
# Add the return introduction
if add_intro:
full_output_type = f"{output_type.__module__}.{output_type.__name__}"
intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION
intro = intro.format(full_output_type=full_output_type, config_class=config_class)
else:
full_output_type = str(output_type)
intro = f"\nReturns:\n `{full_output_type}`"
if params_docstring is not None:
intro += ":\n"
result = intro
if params_docstring is not None:
result += params_docstring
# Apply minimum indent if necessary
if min_indent is not None:
lines = result.split("\n")
# Find the indent of the first nonempty line
i = 0
while len(lines[i]) == 0:
i += 1
indent = len(_get_indent(lines[i]))
# If too small, add indentation to all nonempty lines
if indent < min_indent:
to_add = " " * (min_indent - indent)
lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines]
result = "\n".join(lines)
return result
|
Prepares the return part of the docstring using `output_type`.
|
_prepare_output_docstrings
|
python
|
huggingface/transformers
|
src/transformers/utils/doc.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/doc.py
|
Apache-2.0
|
def filter_outputs_from_example(docstring, **kwargs):
"""
Removes the lines testing an output with the doctest syntax in a code sample when it's set to `None`.
"""
for key, value in kwargs.items():
if value is not None:
continue
doc_key = "{" + key + "}"
docstring = re.sub(rf"\n([^\n]+)\n\s+{doc_key}\n", "\n", docstring)
return docstring
|
Removes the lines testing an output with the doctest syntax in a code sample when it's set to `None`.
|
filter_outputs_from_example
|
python
|
huggingface/transformers
|
src/transformers/utils/doc.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/doc.py
|
Apache-2.0
|
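A self-contained sketch of the behaviour described above: the helper is restated verbatim, then applied to a made-up doctest snippet in which one placeholder is `None` and is therefore stripped together with its doctest line.

```python
import re


# Re-stating the helper above so the example runs standalone.
def filter_outputs_from_example(docstring, **kwargs):
    for key, value in kwargs.items():
        if value is not None:
            continue
        doc_key = "{" + key + "}"
        docstring = re.sub(rf"\n([^\n]+)\n\s+{doc_key}\n", "\n", docstring)
    return docstring


sample = """
    >>> outputs = model(**inputs)
    >>> round(outputs.loss.item(), 2)
    {expected_loss}
    >>> logits.shape
    {expected_shape}
"""

# expected_loss is None, so its doctest call line and placeholder are removed;
# expected_shape is kept and can be filled in later with str.format.
print(filter_outputs_from_example(sample, expected_loss=None, expected_shape="[1, 2]"))
```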
def gen_constructor_wrapper(target: Callable) -> tuple[Callable, Callable]:
"""
Wraps `target` to be proxyable. Used for tensor creators like `torch.ones`, `torch.arange` and so on.
"""
wrapper = create_wrapper(target, "call_function")
return wrapper, target
|
Wraps `target` to be proxyable. Used for tensor creators like `torch.ones`, `torch.arange` and so on.
|
gen_constructor_wrapper
|
python
|
huggingface/transformers
|
src/transformers/utils/fx.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/fx.py
|
Apache-2.0
|
def _proxies_to_metas(v):
"""Returns the underlying metadata for HFProxies, and behaves like the identity for the others."""
if isinstance(v, MetaDeviceAttribute):
return "meta"
if isinstance(v, torch.fx.Proxy):
if not (isinstance(v, HFProxy) and hasattr(v, "_metadata")):
raise RuntimeError(f"No metadata was found for {v}")
return v._metadata
return v
|
Returns the underlying metadata for HFProxies, and behaves like the identity for the others.
|
_proxies_to_metas
|
python
|
huggingface/transformers
|
src/transformers/utils/fx.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/fx.py
|
Apache-2.0
|
def _generate_dummy_input(
self, model: "PreTrainedModel", input_name: str, shape: list[int], input_names: list[str]
) -> dict[str, torch.Tensor]:
"""Generates dummy input for model inference recording."""
# Retrieving the model class, either from the "class_for_deserialization" attribute if the model was restored
# from pickle, or from the "__class__" attribute in the general case.
model_class_name = getattr(model, "class_for_deserialization", model.__class__).__name__
device = model.device
inputs_dict = {}
        # when tracing a model with KV cache, we simply need to ensure that the KV cache length is larger than one to
# rightfully pass certain controlflows (Example: https://github.com/huggingface/transformers/blob/5c8d941d66734811d2ef6f57f15b44f7fb7a98c4/src/transformers/modeling_attn_mask_utils.py#L162).
# After tracing, the model can then still be used with arbitrary lengths different than the one used during tracing.
kv_cache_length = 5
if input_name in ["labels", "start_positions", "end_positions"]:
batch_size = shape[0]
if model_class_name in [
*get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES),
*get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES),
*get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES),
*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
*get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES),
]:
inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device)
elif model_class_name in [
*get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES),
*get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES),
"XLNetForQuestionAnswering",
]:
inputs_dict["start_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device)
inputs_dict["end_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device)
elif model_class_name in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES):
if not hasattr(model.config, "problem_type") or model.config.problem_type is None:
raise ValueError(
"Could not retrieve the problem type for the sequence classification task, please set "
'model.config.problem_type to one of the following values: "regression", '
'"single_label_classification", or "multi_label_classification".'
)
if model.config.problem_type == "regression":
labels_shape = (batch_size, model.config.num_labels)
labels_dtype = torch.float32
elif model.config.problem_type == "single_label_classification":
labels_shape = (batch_size,)
labels_dtype = torch.long
elif model.config.problem_type == "multi_label_classification":
labels_shape = (batch_size, model.config.num_labels)
labels_dtype = torch.float32
else:
raise ValueError(
'Expected model.config.problem_type to be either: "regression", "single_label_classification"'
f', or "multi_label_classification", but "{model.config.problem_type}" was provided.'
)
inputs_dict["labels"] = torch.zeros(*labels_shape, dtype=labels_dtype, device=device)
elif model_class_name in [
*get_values(MODEL_FOR_PRETRAINING_MAPPING_NAMES),
*get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES),
*get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES),
*get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES),
*get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES),
*get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES),
"GPT2DoubleHeadsModel",
"PeftModelForCausalLM",
"PeftModelForSeq2SeqLM",
]:
inputs_dict["labels"] = torch.zeros(shape, dtype=torch.long, device=device)
elif model_class_name in [*get_values(MODEL_FOR_CTC_MAPPING_NAMES)]:
inputs_dict["labels"] = torch.zeros(shape, dtype=torch.float32, device=device)
else:
raise NotImplementedError(
f"Generating the dummy input named {input_name} for {model_class_name} is not supported yet."
)
elif "pixel_values" in input_name:
batch_size = shape[0]
image_size = getattr(model.config, "image_size", None)
if image_size is None:
if hasattr(model.config, "vision_config"):
image_size = model.config.vision_config.image_size
elif hasattr(model.config, "encoder"):
image_size = model.config.encoder.image_size
else:
image_size = (_generate_random_int(), _generate_random_int())
# If no num_channels is in the config, use some arbitrary value.
num_channels = getattr(model.config, "num_channels", 3)
if not isinstance(image_size, collections.abc.Iterable):
image_size = (image_size, image_size)
height, width = image_size
inputs_dict[input_name] = torch.zeros(
batch_size, num_channels, height, width, dtype=torch.float32, device=device
)
elif "bbox" in input_name:
inputs_dict[input_name] = torch.zeros(*shape, 4, dtype=torch.float, device=device)
elif "input_features" in input_name:
inputs_dict[input_name] = torch.zeros(
*shape, model.config.input_feat_per_channel, dtype=torch.float, device=device
)
elif "inputs_embeds" in input_name:
batch_size = shape[0]
if (
getattr(model.config, "embedding_size", None) is not None
and model.config.model_type != "megatron-bert"
):
embedding_size = model.config.embedding_size
else:
embedding_size = model.config.hidden_size
if len(shape) == 3:
# (batch_size, num_choices, sequence_length, embedding_size)
embedding_shape = (batch_size, shape[1], shape[2], embedding_size)
else:
# (batch_size, sequence_length, embedding_size)
embedding_shape = (batch_size, shape[1], embedding_size)
inputs_dict[input_name] = torch.zeros(embedding_shape, dtype=torch.float, device=device)
elif "visual_feats" in input_name:
inputs_dict[input_name] = torch.zeros(
shape
+ [
model.config.visual_feat_dim,
],
dtype=torch.float,
device=device,
)
elif "visual_pos" in input_name:
inputs_dict[input_name] = torch.zeros(
shape
+ [
model.config.visual_pos_dim,
],
dtype=torch.float,
device=device,
)
elif "inputs" in input_name:
inputs_dict[input_name] = torch.zeros(*shape, dtype=torch.float, device=device)
elif "input_values" in input_name:
batch_size, _ = shape
# Generating big sequence length for audio inputs.
seq_length = _generate_random_int(low=10000, high=20000)
inputs_dict[input_name] = torch.zeros(batch_size, seq_length, dtype=torch.float, device=device)
elif "mask" in input_name:
if "past_key_values" in input_names:
mask_shape = [shape[0], shape[1] + kv_cache_length]
else:
mask_shape = shape
inputs_dict[input_name] = torch.zeros(mask_shape, dtype=torch.long, device=device)
elif "ids" in input_name:
inputs_dict[input_name] = torch.zeros(shape, dtype=torch.long, device=device)
elif "past_key_values" in input_name:
if model.config.model_type not in _FX_SUPPORTED_MODELS_WITH_KV_CACHE:
raise NotImplementedError(
f"Symbolic trace with past_key_values input is not supported yet for the model {model.config.model_type}. Please open an issue or a PR in Transformers repository if you would like to see the support added."
)
num_heads = model.config.num_attention_heads
head_dim = model.config.hidden_size // model.config.num_attention_heads
cache_shape = (shape[0], num_heads, kv_cache_length, head_dim)
pkv = tuple(
(
torch.rand(cache_shape, dtype=torch.float, device=device),
torch.rand(cache_shape, dtype=torch.float, device=device),
)
for i in range(model.config.num_hidden_layers)
)
inputs_dict[input_name] = pkv
else:
shape_with_hidden_size = shape + [model.config.hidden_size]
inputs_dict[input_name] = torch.zeros(shape_with_hidden_size, dtype=torch.float, device=device)
return inputs_dict
|
Generates dummy input for model inference recording.
|
_generate_dummy_input
|
python
|
huggingface/transformers
|
src/transformers/utils/fx.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/fx.py
|
Apache-2.0
|
def trace(
self,
root: Union[torch.nn.Module, Callable[..., Any]],
concrete_args: Optional[dict[str, Any]] = None,
dummy_inputs: Optional[dict[str, Any]] = None,
complete_concrete_args_with_inputs_not_in_dummy_inputs: bool = True,
) -> Graph:
"""
Traces `root` and returns the corresponding FX `torch.fx.Graph` representation. `root` can either be a
`torch.nn.Module` instance or a Python callable. Note that after this call, `self.root` may be different from
the `root` passed in here. For example, when a free function is passed to `trace()`, we will create a
`torch.nn.Module` instance to use as the root and add embedded constants to.
Args:
root (`torch.nn.Module` or `Callable`):
Either a `torch.nn.Module` or a function to be traced through. If root is not a
[`~transformers.PreTrainedModel`], then `dummy_inputs` must be passed, otherwise tracing will fail.
concrete_args (`Dict[str, Any]`, *optional*):
Concrete arguments that should not be treated as Proxies
dummy_inputs (`Dict[str, Any]`, *optional*):
The dummy inputs needed to handle data-dependent control-flow if `root` is not a
[`~transformers.PreTrainedModel`]. It can also be used when `root` is a
[`~transformers.PreTrainedModel`] to specify custom dummy inputs for a subset or all the model inputs.
complete_concrete_args_with_inputs_not_in_dummy_inputs (`bool`, *optional*, defaults to `True`):
If `True`, and `dummy_inputs` is specified, every argument that `root` can take that is not in
`dummy_inputs` and not in `concrete_args` will be added to `concrete_args`, otherwise does nothing.
Returns:
`torch.fx.Graph`:
A FX `torch.fx.Graph` representing the semantics of the passed-in `root`.
"""
sig = inspect.signature(root.forward if isinstance(root, torch.nn.Module) else root)
if concrete_args is None:
concrete_args = {}
if dummy_inputs is not None and complete_concrete_args_with_inputs_not_in_dummy_inputs:
for param in sig.parameters.values():
if param.name in dummy_inputs:
continue
if param.default is inspect.Parameter.empty:
raise ValueError(f"You need to specify a default value for the parameter {param.name}.")
concrete_args.update(
{
p.name: p.default
for p in sig.parameters.values()
if (p.name not in dummy_inputs and p.name not in concrete_args)
}
)
input_names = sig.parameters.keys() - concrete_args.keys()
# Creating a random input shape to generate dummy inputs.
batch_size = _generate_random_int()
sequence_length = _generate_random_int()
shape = [batch_size, sequence_length]
if root.__class__.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES):
num_choices = _generate_random_int(low=2, high=5)
shape.insert(1, num_choices)
inputs = dict(dummy_inputs) if dummy_inputs is not None else {}
for input_name in input_names:
if input_name in inputs:
continue
# We enforce that root must either be a PreTrainedModel or deserialized from a serialized traced model to
# be able to use HFTracer._generate_dummy_input.
if isinstance(root, self.supported_archs) or type(root).__qualname__.startswith(
("_deserialize_graph_module", "_CodeOnlyModule")
):
inputs.update(self._generate_dummy_input(root, input_name, shape, input_names=input_names))
else:
raise RuntimeError(
f"Could not generate input named {input_name} for because root is not a"
" transformers.PreTrainedModel."
)
def to_meta(value):
if isinstance(value, torch.Tensor):
return value.to("meta")
return value
concrete_metas = pytree.tree_map(to_meta, inputs)
for param in sig.parameters.values():
if param.kind == inspect.Parameter.VAR_KEYWORD and param.name not in input_names:
concrete_metas[f"**{param.name}"] = {}
self.meta_args = concrete_metas
global _CURRENT_TRACER
_CURRENT_TRACER = self
with self.patch_for_tracing(root):
try:
self.graph = super().trace(root, concrete_args=concrete_args)
finally:
_CURRENT_TRACER = None
# This is necessary because concrete args are added as input to the traced module since
# https://github.com/pytorch/pytorch/pull/55888.
for node in self.graph.nodes:
if node.op == "placeholder":
# Removing default values for inputs as the forward pass will fail with them.
if node.target in input_names:
node.args = ()
# Without this, torch.jit.script fails because the inputs type is Optional[torch.Tensor].
# It cannot infer on the attributes and methods the input should have, and fails.
node.type = torch.Tensor
# It is a concrete arg so it is not used and should be removed.
else:
to_visit = [node]
to_delete = collections.OrderedDict()
while to_visit:
n = to_visit.pop(0)
to_delete[n] = None
to_visit += list(n.users.keys())
for user in reversed(to_delete.keys()):
self.graph.erase_node(user)
# TODO: solves GraphModule creation.
# Without this, return type annotation "Tuple" is causing code execution failure.
if node.op == "output":
node.type = None
return self.graph
|
Traces `root` and returns the corresponding FX `torch.fx.Graph` representation. `root` can either be a
`torch.nn.Module` instance or a Python callable. Note that after this call, `self.root` may be different from
the `root` passed in here. For example, when a free function is passed to `trace()`, we will create a
`torch.nn.Module` instance to use as the root and add embedded constants to.
Args:
root (`torch.nn.Module` or `Callable`):
Either a `torch.nn.Module` or a function to be traced through. If root is not a
[`~transformers.PreTrainedModel`], then `dummy_inputs` must be passed, otherwise tracing will fail.
concrete_args (`Dict[str, Any]`, *optional*):
Concrete arguments that should not be treated as Proxies
dummy_inputs (`Dict[str, Any]`, *optional*):
The dummy inputs needed to handle data-dependent control-flow if `root` is not a
[`~transformers.PreTrainedModel`]. It can also be used when `root` is a
[`~transformers.PreTrainedModel`] to specify custom dummy inputs for a subset or all the model inputs.
complete_concrete_args_with_inputs_not_in_dummy_inputs (`bool`, *optional*, defaults to `True`):
If `True`, and `dummy_inputs` is specified, every argument that `root` can take that is not in
`dummy_inputs` and not in `concrete_args` will be added to `concrete_args`, otherwise does nothing.
Returns:
`torch.fx.Graph`:
A FX `torch.fx.Graph` representing the semantics of the passed-in `root`.
|
trace
|
python
|
huggingface/transformers
|
src/transformers/utils/fx.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/fx.py
|
Apache-2.0
|
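A minimal sketch of driving `HFTracer.trace` directly, assuming `HFTracer` and `get_concrete_args` (the helper referenced inside `symbolic_trace` below) are importable from `transformers.utils.fx` and that a config-initialized BERT model is a reasonable stand-in; `symbolic_trace` further down is the intended high-level entry point.

```python
import torch
from transformers import BertConfig, BertModel
from transformers.utils.fx import HFTracer, get_concrete_args  # module path taken from the record above

# Build a small model from a config so no weights need to be downloaded.
model = BertModel(BertConfig(num_hidden_layers=2))

# Every forward() argument not listed here becomes a concrete (non-Proxy) argument.
input_names = ["input_ids", "attention_mask"]
concrete_args = get_concrete_args(model, input_names)

tracer = HFTracer()
graph = tracer.trace(model, concrete_args=concrete_args)
graph_module = torch.fx.GraphModule(model, graph)
print(graph_module.graph)  # the recorded FX graph
```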
def _insert_module_as_submodule(self, mod: nn.Module) -> str:
"""
Helper method which tries to insert a module that was not declared as submodule.
"""
# If one of the module attributes is a Proxy, it means that its instantiation is input-dependent.
# It is not possible to insert such modules, those should be traced through.
if self._stateless_mod_instanciation_depends_on_proxies(mod):
return ""
idx = 0
mod_name = mod.__class__.__name__.lower()
path = f"{mod_name}_{idx}"
already_inserted = False
while hasattr(self.root, path):
if getattr(self.root, path) is mod:
already_inserted = True
break
path = f"{mod_name}_{idx}"
idx += 1
# No need to add multiple instances of the same module.
if not already_inserted:
self.root.add_module(path, mod)
return path
|
Helper method which tries to insert a module that was not declared as submodule.
|
_insert_module_as_submodule
|
python
|
huggingface/transformers
|
src/transformers/utils/fx.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/fx.py
|
Apache-2.0
|
def path_of_module(self, mod: nn.Module) -> str:
"""
Helper method to find the qualified name of `mod` in the Module hierarchy of `root`. For example, if `root` has
a submodule named `foo`, which has a submodule named `bar`, passing `bar` into this function will return the
string "foo.bar".
Args:
mod (str): The `Module` to retrieve the qualified name for.
"""
try:
return super().path_of_module(mod)
except NameError as e:
if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and len(list(mod.buffers())) == 0:
path = self._insert_module_as_submodule(mod)
return path
raise e
|
Helper method to find the qualified name of `mod` in the Module hierarchy of `root`. For example, if `root` has
a submodule named `foo`, which has a submodule named `bar`, passing `bar` into this function will return the
string "foo.bar".
Args:
mod (str): The `Module` to retrieve the qualified name for.
|
path_of_module
|
python
|
huggingface/transformers
|
src/transformers/utils/fx.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/fx.py
|
Apache-2.0
|
def keys(self, obj: "Proxy") -> Any:
"""Called when a proxy object is has the keys() method called.
This is what happens when ** is called on a proxy. This should return an iterator if ** is supposed to work in
your custom tracer.
"""
attribute = HFAttribute(obj, "keys")()
if obj.node.target.startswith("**"):
return attribute._metadata
return attribute
|
Called when the keys() method is called on a proxy object.
This is what happens when ** is called on a proxy. This should return an iterator if ** is supposed to work in
your custom tracer.
|
keys
|
python
|
huggingface/transformers
|
src/transformers/utils/fx.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/fx.py
|
Apache-2.0
|
def symbolic_trace(
model: "PreTrainedModel",
input_names: Optional[list[str]] = None,
disable_check: bool = False,
tracer_cls: type[HFTracer] = HFTracer,
) -> GraphModule:
"""
Performs symbolic tracing on the model.
Args:
model ([`PreTrainedModel`]):
The model to trace.
input_names (`List[str]`, *optional*):
The names of the inputs of the traced model. If unset, model.dummy_inputs.keys() are used instead.
disable_check (`bool`, *optional*, defaults to `False`):
If `True`, no check is done before trying to trace the model; this is mostly useful for debugging purposes.
tracer_cls (`Type[HFTracer]`, *optional*, defaults to `HFTracer`):
The tracer class to use for instantiating the tracer. If unset, `HFTracer` is used instead.
Returns:
`torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model.
Example:
```python
from transformers.utils.fx import symbolic_trace
traced_model = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"])
```
"""
if input_names is None:
input_names = model.dummy_inputs.keys()
input_names = list(input_names)
concrete_args = get_concrete_args(model, input_names)
if not disable_check:
check_if_model_is_supported(model)
if "past_key_values" in input_names and not getattr(model.config, "use_cache", False):
logger.warning(
"`past_key_values` were specified as input names, but model.config.use_cache = False, this might lead to "
"unexpected behavior."
)
if "past_key_values" not in input_names and getattr(model.config, "use_cache", False):
logger.warning(
"`past_key_values` were not specified as input names, but model.config.use_cache = True. Setting "
"model.config.use_cache = False."
)
model.config.use_cache = False
# Tracing.
tracer = tracer_cls()
traced_graph = tracer.trace(model, concrete_args=concrete_args)
traced = torch.fx.GraphModule(model, traced_graph)
traced.config = model.config
# The model class must be stored as an attribute to allow model deserialization, which uses trace, and thus
# _generate_dummy_input, where the model class is needed.
traced.class_for_deserialization = model.__class__
traced.device = model.device
return traced
|
Performs symbolic tracing on the model.
Args:
model ([`PreTrainedModel`]):
The model to trace.
input_names (`List[str]`, *optional*):
The names of the inputs of the traced model. If unset, model.dummy_inputs.keys() are used instead.
disable_check (`bool`, *optional*, defaults to `False`):
If `True`, no check is done before trying to trace the model; this is mostly useful for debugging purposes.
tracer_cls (`Type[HFTracer]`, *optional*, defaults to `HFTracer`):
The tracer class to use for instantiating the tracer. If unset, `HFTracer` is used instead.
Returns:
`torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model.
Example:
```python
from transformers.utils.fx import symbolic_trace
traced_model = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"])
```
|
symbolic_trace
|
python
|
huggingface/transformers
|
src/transformers/utils/fx.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/fx.py
|
Apache-2.0
|
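Expanding the docstring's snippet into a hypothetical end-to-end run (the BERT configuration, sequence length, and dummy tensors below are illustrative choices, not part of the source):

```python
import torch
from transformers import BertConfig, BertModel
from transformers.utils.fx import symbolic_trace

model = BertModel(BertConfig(num_hidden_layers=2))
traced = symbolic_trace(model, input_names=["input_ids", "attention_mask"])

# The resulting GraphModule is called like the original model.
input_ids = torch.zeros(1, 8, dtype=torch.long)
attention_mask = torch.ones(1, 8, dtype=torch.long)
outputs = traced(input_ids=input_ids, attention_mask=attention_mask)
print(type(outputs))  # mirrors the outputs of the eager model
```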
def strtobool(val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'.
Raises ValueError if 'val' is anything else.
"""
val = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"invalid truth value {val!r}")
|
Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'.
Raises ValueError if 'val' is anything else.
|
strtobool
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
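A quick check of the accepted values, assuming the helper is importable from `transformers.utils.generic` (the module implied by the record's path):

```python
from transformers.utils.generic import strtobool  # import path inferred from the record above

assert strtobool("YES") == 1   # matching is case-insensitive
assert strtobool("off") == 0
try:
    strtobool("maybe")         # anything outside the two sets raises
except ValueError as err:
    print(err)                 # invalid truth value 'maybe'
```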
def infer_framework_from_repr(x):
"""
Tries to guess the framework of an object `x` from its repr (brittle, but helps `is_tensor` try the frameworks in a
smart order without needing to import them).
"""
representation = str(type(x))
if representation.startswith("<class 'torch."):
return "pt"
elif representation.startswith("<class 'tensorflow."):
return "tf"
elif representation.startswith("<class 'jax"):
return "jax"
elif representation.startswith("<class 'numpy."):
return "np"
elif representation.startswith("<class 'mlx."):
return "mlx"
|
Tries to guess the framework of an object `x` from its repr (brittle, but helps `is_tensor` try the frameworks in a
smart order without needing to import them).
|
infer_framework_from_repr
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
def _get_frameworks_and_test_func(x):
"""
Returns an ordered dictionary mapping framework to test function (dicts preserve insertion order since Python 3.7+),
which places the framework we can guess from the repr first, then NumPy, then the others.
"""
framework_to_test = {
"pt": is_torch_tensor,
"tf": is_tf_tensor,
"jax": is_jax_tensor,
"np": is_numpy_array,
"mlx": is_mlx_array,
}
preferred_framework = infer_framework_from_repr(x)
# We will test this one first, then numpy, then the others.
frameworks = [] if preferred_framework is None else [preferred_framework]
if preferred_framework != "np":
frameworks.append("np")
frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, "np"]])
return {f: framework_to_test[f] for f in frameworks}
|
Returns an ordered dictionary mapping framework to test function (dicts preserve insertion order since Python 3.7+),
which places the framework we can guess from the repr first, then NumPy, then the others.
|
_get_frameworks_and_test_func
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
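To make the ordering concrete, a small illustration (this is a private helper, so the call below is purely explanatory; import path inferred from the record):

```python
import numpy as np
from transformers.utils.generic import _get_frameworks_and_test_func

# For a NumPy array the guessed framework is "np", so it is tested first,
# followed by the remaining frameworks in their declaration order.
order = list(_get_frameworks_and_test_func(np.zeros(1)))
print(order)  # ['np', 'pt', 'tf', 'jax', 'mlx']
```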
def is_tensor(x):
"""
Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray`, `np.ndarray` or `mlx.array`
in the order defined by `infer_framework_from_repr`
"""
# This gives us a smart order to test the frameworks with the corresponding tests.
framework_to_test_func = _get_frameworks_and_test_func(x)
for test_func in framework_to_test_func.values():
if test_func(x):
return True
# Tracers
if is_torch_fx_proxy(x):
return True
if is_flax_available():
from jax.core import Tracer
if isinstance(x, Tracer):
return True
return False
|
Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray`, `np.ndarray` or `mlx.array`
in the order defined by `infer_framework_from_repr`
|
is_tensor
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
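A short, NumPy-only demonstration (runs without torch/TF/JAX installed; import path inferred from the record):

```python
import numpy as np
from transformers.utils.generic import infer_framework_from_repr, is_tensor

arr = np.ones((2, 3))
print(infer_framework_from_repr(arr))  # 'np' -- guessed from the type's repr
print(is_tensor(arr))                  # True: recognized as a NumPy array
print(is_tensor([1.0, 2.0]))           # False: plain Python lists are not tensors
```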
def to_py_obj(obj):
"""
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
"""
if isinstance(obj, (int, float)):
return obj
elif isinstance(obj, (dict, UserDict)):
return {k: to_py_obj(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple)):
try:
arr = np.array(obj)
if np.issubdtype(arr.dtype, np.integer) or np.issubdtype(arr.dtype, np.floating):
return arr.tolist()
except Exception:
pass
return [to_py_obj(o) for o in obj]
framework_to_py_obj = {
"pt": lambda obj: obj.tolist(),
"tf": lambda obj: obj.numpy().tolist(),
"jax": lambda obj: np.asarray(obj).tolist(),
"np": lambda obj: obj.tolist(),
}
# This gives us a smart order to test the frameworks with the corresponding tests.
framework_to_test_func = _get_frameworks_and_test_func(obj)
for framework, test_func in framework_to_test_func.items():
if test_func(obj):
return framework_to_py_obj[framework](obj)
# tolist also works on 0d np arrays
if isinstance(obj, np.number):
return obj.tolist()
else:
return obj
|
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
|
to_py_obj
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
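A few representative conversions, again assuming the helper can be imported from `transformers.utils.generic`:

```python
import numpy as np
from transformers.utils.generic import to_py_obj

print(to_py_obj(np.array([[1, 2], [3, 4]])))  # [[1, 2], [3, 4]]
print(to_py_obj({"score": np.float32(0.5)}))  # {'score': 0.5} -- dicts are converted value by value
print(to_py_obj((1, 2.5)))                    # [1.0, 2.5] -- numeric tuples become plain lists
```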
def to_numpy(obj):
"""
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
"""
framework_to_numpy = {
"pt": lambda obj: obj.detach().cpu().numpy(),
"tf": lambda obj: obj.numpy(),
"jax": lambda obj: np.asarray(obj),
"np": lambda obj: obj,
}
if isinstance(obj, (dict, UserDict)):
return {k: to_numpy(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple)):
return np.array(obj)
# This gives us a smart order to test the frameworks with the corresponding tests.
framework_to_test_func = _get_frameworks_and_test_func(obj)
for framework, test_func in framework_to_test_func.items():
if test_func(obj):
return framework_to_numpy[framework](obj)
return obj
|
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
|
to_numpy
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
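And its NumPy counterpart, with the same import-path assumption:

```python
import numpy as np
from transformers.utils.generic import to_numpy

batch = {"input_ids": [[101, 102, 103]], "attention_mask": [[1, 1, 1]]}
arrays = to_numpy(batch)             # nested lists inside the dict become NumPy arrays
print(arrays["input_ids"].shape)     # (1, 3)
print(to_numpy(np.arange(4)))        # [0 1 2 3] -- NumPy arrays are returned as-is
```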
def __init_subclass__(cls) -> None:
"""Register subclasses as pytree nodes.
This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with
`static_graph=True` with modules that output `ModelOutput` subclasses.
"""
if is_torch_available():
if version.parse(get_torch_version()) >= version.parse("2.2"):
from torch.utils._pytree import register_pytree_node
register_pytree_node(
cls,
_model_output_flatten,
partial(_model_output_unflatten, output_type=cls),
serialized_type_name=f"{cls.__module__}.{cls.__name__}",
)
else:
from torch.utils._pytree import _register_pytree_node
_register_pytree_node(
cls,
_model_output_flatten,
partial(_model_output_unflatten, output_type=cls),
)
|
Register subclasses as pytree nodes.
This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with
`static_graph=True` with modules that output `ModelOutput` subclasses.
|
__init_subclass__
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
def __post_init__(self):
"""Check the ModelOutput dataclass.
Only occurs if @dataclass decorator has been used.
"""
class_fields = fields(self)
# Safety and consistency checks
if not len(class_fields):
raise ValueError(f"{self.__class__.__name__} has no fields.")
if not all(field.default is None for field in class_fields[1:]):
raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
first_field = getattr(self, class_fields[0].name)
other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
if other_fields_are_none and not is_tensor(first_field):
if isinstance(first_field, dict):
iterator = first_field.items()
first_field_iterator = True
else:
try:
iterator = iter(first_field)
first_field_iterator = True
except TypeError:
first_field_iterator = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(iterator):
if (
not isinstance(element, (list, tuple))
or not len(element) == 2
or not isinstance(element[0], str)
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
self[class_fields[0].name] = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
)
break
setattr(self, element[0], element[1])
if element[1] is not None:
self[element[0]] = element[1]
elif first_field is not None:
self[class_fields[0].name] = first_field
else:
for field in class_fields:
v = getattr(self, field.name)
if v is not None:
self[field.name] = v
|
Check the ModelOutput dataclass.
Only occurs if @dataclass decorator has been used.
|
__post_init__
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
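The net effect is that only non-`None` fields become dictionary keys while every field stays reachable as an attribute. A hypothetical subclass for illustration (the class name and fields are made up; `ModelOutput` itself is importable from `transformers.utils`):

```python
from dataclasses import dataclass
from typing import Optional

import torch
from transformers.utils import ModelOutput


@dataclass
class ToyOutput(ModelOutput):
    logits: Optional[torch.Tensor] = None
    hidden_states: Optional[torch.Tensor] = None


out = ToyOutput(logits=torch.ones(2, 3))
print(list(out.keys()))     # ['logits'] -- hidden_states is None, so it is not a key
print(out["logits"].shape)  # torch.Size([2, 3]) -- dict-style access
print(out.hidden_states)    # None -- attribute access still works for unset fields
```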
def can_return_loss(model_class):
"""
Check if a given model can return loss.
Args:
model_class (`type`): The class of the model.
"""
framework = infer_framework(model_class)
if framework == "tf":
signature = inspect.signature(model_class.call) # TensorFlow models
elif framework == "pt":
signature = inspect.signature(model_class.forward) # PyTorch models
else:
signature = inspect.signature(model_class.__call__) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
|
Check if a given model can return loss.
Args:
model_class (`type`): The class of the model.
|
can_return_loss
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
def find_labels(model_class):
"""
Find the labels used by a given model.
Args:
model_class (`type`): The class of the model.
"""
model_name = model_class.__name__
framework = infer_framework(model_class)
if framework == "tf":
signature = inspect.signature(model_class.call) # TensorFlow models
elif framework == "pt":
signature = inspect.signature(model_class.forward) # PyTorch models
else:
signature = inspect.signature(model_class.__call__) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
|
Find the labels used by a given model.
Args:
model_class (`type`): The class of the model.
|
find_labels
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
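Both helpers above inspect the model class's `forward`/`call` signature; a quick illustration with two stock PyTorch classes (the model choices are illustrative, not taken from the source):

```python
from transformers import BertForQuestionAnswering, BertForSequenceClassification
from transformers.utils.generic import can_return_loss, find_labels

print(find_labels(BertForSequenceClassification))      # ['labels']
print(find_labels(BertForQuestionAnswering))           # ['start_positions', 'end_positions']
print(can_return_loss(BertForSequenceClassification))  # False: forward() has no `return_loss=True` parameter
```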
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
"""Flatten a nested dict into a single level dict."""
def _flatten_dict(d, parent_key="", delimiter="."):
for k, v in d.items():
key = str(parent_key) + delimiter + str(k) if parent_key else k
if v and isinstance(v, MutableMapping):
yield from flatten_dict(v, key, delimiter=delimiter).items()
else:
yield key, v
return dict(_flatten_dict(d, parent_key, delimiter))
|
Flatten a nested dict into a single level dict.
|
flatten_dict
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
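For example (import path inferred from the record):

```python
from transformers.utils.generic import flatten_dict

nested = {"optimizer": {"lr": 3e-4, "betas": {"beta1": 0.9}}, "epochs": 3}
print(flatten_dict(nested))
# {'optimizer.lr': 0.0003, 'optimizer.betas.beta1': 0.9, 'epochs': 3}
print(flatten_dict(nested, delimiter="/"))
# {'optimizer/lr': 0.0003, 'optimizer/betas/beta1': 0.9, 'epochs': 3}
```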
def transpose(array, axes=None):
"""
Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy
arrays.
"""
if is_numpy_array(array):
return np.transpose(array, axes=axes)
elif is_torch_tensor(array):
return array.T if axes is None else array.permute(*axes)
elif is_tf_tensor(array):
import tensorflow as tf
return tf.transpose(array, perm=axes)
elif is_jax_tensor(array):
import jax.numpy as jnp
return jnp.transpose(array, axes=axes)
else:
raise ValueError(f"Type not supported for transpose: {type(array)}.")
|
Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy
arrays.
|
transpose
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
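A NumPy-only sketch of the dispatch above; the same calls would work unchanged on torch/TF/JAX tensors:

```python
import numpy as np
from transformers.utils.generic import transpose

x = np.arange(6).reshape(2, 3)
print(transpose(x).shape)                                    # (3, 2)
print(transpose(np.zeros((2, 3, 4)), axes=(2, 0, 1)).shape)  # (4, 2, 3)
```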
def reshape(array, newshape):
"""
Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy
arrays.
"""
if is_numpy_array(array):
return np.reshape(array, newshape)
elif is_torch_tensor(array):
return array.reshape(*newshape)
elif is_tf_tensor(array):
import tensorflow as tf
return tf.reshape(array, newshape)
elif is_jax_tensor(array):
import jax.numpy as jnp
return jnp.reshape(array, newshape)
else:
raise ValueError(f"Type not supported for reshape: {type(array)}.")
|
Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy
arrays.
|
reshape
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
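Same pattern for `reshape` (NumPy inputs shown for brevity):

```python
import numpy as np
from transformers.utils.generic import reshape

logits = np.arange(12, dtype=np.float32)
print(reshape(logits, (3, 4)).shape)   # (3, 4)
print(reshape(logits, (2, -1)).shape)  # (2, 6) -- -1 lets the backend infer the remaining axis
```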
def squeeze(array, axis=None):
"""
Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy
arrays.
"""
if is_numpy_array(array):
return np.squeeze(array, axis=axis)
elif is_torch_tensor(array):
return array.squeeze() if axis is None else array.squeeze(dim=axis)
elif is_tf_tensor(array):
import tensorflow as tf
return tf.squeeze(array, axis=axis)
elif is_jax_tensor(array):
import jax.numpy as jnp
return jnp.squeeze(array, axis=axis)
else:
raise ValueError(f"Type not supported for squeeze: {type(array)}.")
|
Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy
arrays.
|
squeeze
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
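And for `squeeze`:

```python
import numpy as np
from transformers.utils.generic import squeeze

x = np.zeros((1, 5, 1))
print(squeeze(x).shape)          # (5,)   -- all singleton axes removed
print(squeeze(x, axis=0).shape)  # (5, 1) -- only the requested axis removed
```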
def expand_dims(array, axis):
"""
Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy
arrays.
"""
if is_numpy_array(array):
return np.expand_dims(array, axis)
elif is_torch_tensor(array):
return array.unsqueeze(dim=axis)
elif is_tf_tensor(array):
import tensorflow as tf
return tf.expand_dims(array, axis=axis)
elif is_jax_tensor(array):
import jax.numpy as jnp
return jnp.expand_dims(array, axis=axis)
else:
raise ValueError(f"Type not supported for expand_dims: {type(array)}.")
|
Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy
arrays.
|
expand_dims
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
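And for `expand_dims`:

```python
import numpy as np
from transformers.utils.generic import expand_dims

scores = np.array([0.1, 0.7, 0.2])
print(expand_dims(scores, 0).shape)   # (1, 3) -- prepend a batch axis
print(expand_dims(scores, -1).shape)  # (3, 1) -- append a trailing axis
```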
def tensor_size(array):
"""
Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
"""
if is_numpy_array(array):
return np.size(array)
elif is_torch_tensor(array):
return array.numel()
elif is_tf_tensor(array):
import tensorflow as tf
return tf.size(array)
elif is_jax_tensor(array):
return array.size
else:
raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
|
Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
|
tensor_size
|
python
|
huggingface/transformers
|
src/transformers/utils/generic.py
|
https://github.com/huggingface/transformers/blob/master/src/transformers/utils/generic.py
|
Apache-2.0
|
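Finally, `tensor_size` (NumPy input shown; torch tensors go through `numel()`):

```python
import numpy as np
from transformers.utils.generic import tensor_size

print(tensor_size(np.zeros((4, 8))))  # 32 -- total element count, like numpy.size
```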