<jupyter_start><jupyter_text>Using VeRA for sequence classification In this example, we fine-tune Roberta on a sequence classification task using VeRA. Imports<jupyter_code>import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from peft import (
get_peft_model,
VeraConfig,
PeftType,
)
import evaluate
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed, AutoConfig
from tqdm import tqdm<jupyter_output><empty_output><jupyter_text>Parameters<jupyter_code>batch_size = 128
model_name_or_path = "roberta-base"
task = "mrpc"
peft_type = PeftType.VERA
device = "cuda"
num_epochs = 5 # for best results, increase this number
rank = 8 # for best results, increase this number
max_length = 128
torch.manual_seed(0)
peft_config = VeraConfig(
task_type="SEQ_CLS",
r=rank,
d_initial=0.1,
target_modules=["query", "value", "intermediate.dense"],
save_projection=True,
)
head_lr = 1e-2
vera_lr = 2e-2<jupyter_output><empty_output><jupyter_text>Loading data<jupyter_code>if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")):
padding_side = "left"
else:
padding_side = "right"
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side)
if tokenizer.pad_token_id is None:
tokenizer.pad_token_id = tokenizer.eos_token_id
datasets = load_dataset("glue", task)
metric = evaluate.load("glue", task)
def tokenize_function(examples):
    # truncate each sentence pair to max_length tokens (128 here)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=max_length)
return outputs
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
# Instantiate dataloaders.
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
)<jupyter_output><empty_output><jupyter_text>Preparing the VeRA model<jupyter_code>model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True, max_length=None)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
optimizer = AdamW(
[
{"params": [p for n, p in model.named_parameters() if "vera_lambda_" in n], "lr": vera_lr},
{"params": [p for n, p in model.named_parameters() if "classifier" in n], "lr": head_lr},
]
)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=0.06 * (len(train_dataloader) * num_epochs),
num_training_steps=(len(train_dataloader) * num_epochs),
)<jupyter_output><empty_output><jupyter_text>Training<jupyter_code>model.to(device)
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(tqdm(train_dataloader)):
batch.to(device)
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(tqdm(eval_dataloader)):
batch.to(device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
print(f"epoch {epoch}:", eval_metric)<jupyter_output>0%| | 0/29 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.
100%|██████████| 29/29 [00:18<00:00, 1.58it/s]
100%|██████████| 4/4 [00:01<00:00, 3.52it/s]<jupyter_text>Share adapters on the 🤗 Hub<jupyter_code>account_id = ... # your Hugging Face Hub account ID
model.push_to_hub(f"{account_id}/roberta-large-peft-vera")<jupyter_output><empty_output><jupyter_text>Load adapters from the HubYou can also directly load adapters from the Hub using the commands below:<jupyter_code>import torch
from peft import PeftModel, PeftConfig
from transformers import AutoTokenizer
peft_model_id = f"{account_id}/roberta-large-peft-vera"
config = PeftConfig.from_pretrained(peft_model_id)
inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
# Load the Vera model
inference_model = PeftModel.from_pretrained(inference_model, peft_model_id)
inference_model.to(device)
inference_model.eval()
for step, batch in enumerate(tqdm(eval_dataloader)):
batch.to(device)
with torch.no_grad():
outputs = inference_model(**batch)
predictions = outputs.logits.argmax(dim=-1)
    references = batch["labels"]
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
print(eval_metric)<jupyter_output>0%| | 0/4 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:01<00:00, 3.14it/s] | peft/examples/sequence_classification/VeRA.ipynb/0 | {
"file_path": "peft/examples/sequence_classification/VeRA.ipynb",
"repo_id": "peft",
"token_count": 2545
} |
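A minimal sketch of single-pair inference with the VeRA adapter loaded above. It assumes the notebook's `inference_model`, `tokenizer`, and `device` are still in scope; the sentence pair is invented for illustration.

import torch

sentence1 = "The company reported strong quarterly earnings."
sentence2 = "Quarterly profits at the firm were high."
inputs = tokenizer(sentence1, sentence2, truncation=True, max_length=128, return_tensors="pt").to(device)
with torch.no_grad():
    logits = inference_model(**inputs).logits
prediction = logits.argmax(dim=-1).item()
print("equivalent" if prediction == 1 else "not equivalent")  # in MRPC, label 1 means paraphrase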
# Copyright 2023 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to close stale issues. Adapted in part from the AllenNLP repository:
https://github.com/allenai/allennlp.
"""
import os
from datetime import datetime as dt
from datetime import timezone
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
"PRs welcome to address this",
]
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("huggingface/peft")
open_issues = repo.get_issues(state="open")
for issue in open_issues:
comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
last_comment = comments[0] if len(comments) > 0 else None
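        # Close issues whose last comment came from the stale bot and that have been inactive for more than a week;
        # otherwise, leave a stale warning on issues that have been inactive for more than 23 days. Issues younger
        # than 30 days or carrying an exempt label are skipped in both cases.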
if (
(last_comment is not None and last_comment.user.login == "github-actions[bot]")
and (dt.now(timezone.utc) - issue.updated_at).days > 7
and (dt.now(timezone.utc) - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
issue.edit(state="closed")
elif (
(dt.now(timezone.utc) - issue.updated_at).days > 23
and (dt.now(timezone.utc) - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\n"
)
if __name__ == "__main__":
main()
| peft/scripts/stale.py/0 | {
"file_path": "peft/scripts/stale.py",
"repo_id": "peft",
"token_count": 890
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.utils import register_peft_method
from .config import AdaLoraConfig
from .gptq import SVDQuantLinear
from .layer import AdaLoraLayer, RankAllocator, SVDLinear
from .model import AdaLoraModel
__all__ = ["AdaLoraConfig", "AdaLoraLayer", "AdaLoraModel", "RankAllocator", "SVDLinear", "SVDQuantLinear"]
register_peft_method(
name="adalora", config_cls=AdaLoraConfig, model_cls=AdaLoraModel, prefix="lora_", is_mixed_compatible=True
)
def __getattr__(name):
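    # PEP 562 module-level __getattr__: the bitsandbytes-backed layers are imported lazily, only when they are
    # actually accessed, so that bitsandbytes is not required just to import this module.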
if (name == "SVDLinear8bitLt") and is_bnb_available():
from .bnb import SVDLinear8bitLt
return SVDLinear8bitLt
if (name == "SVDLinear4bit") and is_bnb_4bit_available():
from .bnb import SVDLinear4bit
return SVDLinear4bit
raise AttributeError(f"module {__name__} has no attribute {name}")
| peft/src/peft/tuners/adalora/__init__.py/0 | {
"file_path": "peft/src/peft/tuners/adalora/__init__.py",
"repo_id": "peft",
"token_count": 498
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The implementation is based on "Parameter-Efficient Orthogonal Finetuning
# via Butterfly Factorization" (https://arxiv.org/abs/2311.06243) in ICLR 2024.
from __future__ import annotations
import math
import os
import warnings
from contextlib import contextmanager
from typing import Any, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
_FBD_CUDA = None
# this function is a 1:1 copy from accelerate
@contextmanager
def patch_environment(**kwargs):
"""
A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.
Will convert the values in `kwargs` to strings and upper-case all the keys.
Example:
```python
>>> import os
>>> from accelerate.utils import patch_environment
>>> with patch_environment(FOO="bar"):
... print(os.environ["FOO"]) # prints "bar"
>>> print(os.environ["FOO"]) # raises KeyError
```
"""
existing_vars = {}
for key, value in kwargs.items():
key = key.upper()
if key in os.environ:
existing_vars[key] = os.environ[key]
os.environ[key] = str(value)
yield
for key in kwargs:
key = key.upper()
if key in existing_vars:
# restore previous value
os.environ[key] = existing_vars[key]
else:
os.environ.pop(key, None)
def get_fbd_cuda():
global _FBD_CUDA
if _FBD_CUDA is not None:
return _FBD_CUDA
# This import initializes cuda context and should thus be local, see issue 1877
from torch.utils.cpp_extension import load
curr_dir = os.path.dirname(__file__)
# need ninja to build the extension
try:
with patch_environment(CC="gcc", CXX="gcc"):
fbd_cuda = load(
name="fbd_cuda",
sources=[f"{curr_dir}/fbd/fbd_cuda.cpp", f"{curr_dir}/fbd/fbd_cuda_kernel.cu"],
verbose=True,
# build_directory='/tmp/' # for debugging
)
# extra_cuda_cflags = ['-std=c++14', '-ccbin=$$(which gcc-7)']) # cuda10.2 is not compatible with gcc9. Specify gcc 7
except Exception as e:
warnings.warn(f"Failed to load the CUDA extension: {e}, check if ninja is available.")
warnings.warn("Setting boft_n_butterfly_factor to 1 to speed up the finetuning process.")
fbd_cuda = None
_FBD_CUDA = fbd_cuda
return _FBD_CUDA
class FastBlockDiag(Function):
"""
Implements a custom autograd Function for a fast block diagonal operation using CUDA.
This function is optimized for 4D tensors where the last two dimensions are equal, representing block diagonal
matrices for efficient computation on CUDA devices.
"""
@staticmethod
def forward(ctx, input):
"""
The forward method for FastBlockDiag.
Computes the block diagonal operation on the input tensor using a CUDA-optimized function. This method assumes
that the input is a 4D tensor where the last two dimensions are equal, which represent the blocks to be
diagonalized.
Parameters:
ctx: A context object that can be used to stash information for backward computation.
input (Tensor): The input tensor of shape (N, D, H, H), where `N` is the batch size,
`D` represents one additional dimension (In BOFT, the number of BOFT blocks), and `H` is the
size of the square blocks along the last two dimensions (In BOFT, the block size).
Returns:
Tensor: The resulting tensor after applying the block diagonal operation,
will have the shape (N, DxH, DxH).
"""
output = get_fbd_cuda().forward(input)[0]
ctx.save_for_backward(input)
return output
@staticmethod
def backward(ctx, grad_output):
(input,) = ctx.saved_tensors
grad_input = get_fbd_cuda().backward(grad_output, input)[0]
return grad_input
class MultiplicativeDropoutLayer(nn.Module):
"""
Implements the multiplicative dropout layer for BOFT.
"""
def __init__(self, p=0.0):
"""
Initializes the multiplicative dropout layer.
Parameters:
p (float): The probability of dropping out a block. Defaults to 0.0.
"""
super().__init__()
self.p = p
def forward(self, x):
"""
Applies multiplicative dropout to the input tensor.
Parameters:
x (Tensor): The input tensor of shape (N, D, H, H), where `N` is the batch size, `D` represents
one additional dimension (In BOFT, the number of BOFT blocks), and `H` is the size of the square
blocks along the last two dimensions (In BOFT, the block size).
"""
if self.training:
# Ensure the last two dimensions are the same
if x.shape[-1] != x.shape[-2]:
raise ValueError("The last two dimensions of input should be the same!")
N, D, H, _ = x.shape
# Randomly select one from N
n_random = torch.randint(0, N, (1,)).item()
# Create a mask with 1s for matrices to be replaced with identity and 0s otherwise
num_to_replace = int(self.p * D)
num_zeros = D - num_to_replace
# Generate a flat tensor with desired number of 1s and 0s
mask = torch.cat([torch.ones(num_to_replace, device=x.device), torch.zeros(num_zeros, device=x.device)])
# Shuffle and reshape the mask
mask = mask[torch.randperm(D)].view(1, D, 1, 1)
full_mask = torch.zeros(N, D, 1, 1, device=x.device)
full_mask[n_random] = mask
# Use the mask to combine original matrices and identity matrices
eye_matrix = torch.eye(H, device=x.device).repeat(N, D, 1, 1)
x = (1 - full_mask) * x + full_mask * eye_matrix
return x
class BOFTLayer(BaseTunerLayer):
"""
Implements the BOFT layer.
"""
# All names of layers that may contain (trainable) adapter weights
adapter_layer_names = ("boft_R", "boft_s")
# All names of other parameters that may contain adapter-related parameters
other_param_names = ("boft_block_size", "boft_block_num", "boft_dropout")
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
"""
Initializes the BOFT layer.
        Note: currently only linear and convolutional (Conv2d) layers are supported; support for other layer types
        may be added later.
Parameters:
base_layer: the pretrained model layer
"""
self.base_layer = base_layer
self.boft_block_size = {}
self.boft_block_num = {}
self.boft_dropout = nn.ModuleDict({})
self.boft_R = nn.ParameterDict({})
self.boft_s = nn.ParameterDict({})
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
in_features, out_features = base_layer.in_features, base_layer.out_features
elif isinstance(base_layer, nn.Conv2d):
in_features, out_features = base_layer.in_channels, base_layer.out_channels
else:
raise ValueError(f"Unsupported layer type {type(base_layer)}")
self.in_features = in_features
self.out_features = out_features
def set_scale(self, adapter, scale):
if adapter not in self.scaling:
# Ignore the case where the adapter is not in the layer
return
warnings.warn("Scaling operation for BOFT not supported! Automatically set scale to 1.")
def scale_layer(self, scale: float) -> None:
if scale == 1:
return
for active_adapter in self.active_adapters:
if active_adapter not in self.boft_R.keys():
continue
warnings.warn("Scaling operation for BOFT not supported! Automatically set scale to 1.")
def unscale_layer(self, scale=None) -> None:
for active_adapter in self.active_adapters:
if active_adapter not in self.boft_R.keys():
continue
warnings.warn("Unscaling operation for BOFT not supported! Keeping scale to 1.")
def update_layer(
self, adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights
):
"""
Update the linear layer with trainable BOFT weights. Override for other layer types.
"""
# Attempt to load the CUDA extension during model initialization
if not get_fbd_cuda():
self.fbd_cuda_available = False
# If the CUDA extension is not available, set the butterfly factor to 1 to speed up the finetuning process
boft_n_butterfly_factor = 1
else:
self.fbd_cuda_available = True
# to be consistent with the paper notation
boft_n_butterfly_factor = boft_n_butterfly_factor - 1
if boft_n_butterfly_factor < 0:
raise ValueError(
f"You can only specify boft_n_butterfly_factor {boft_n_butterfly_factor + 1} to be a positive integer number."
)
# Initialize the MultiplicativeDropoutLayer for boft_dropout > 0.0.
if boft_dropout > 0.0:
boft_dropout_layer = MultiplicativeDropoutLayer(p=boft_dropout)
else:
boft_dropout_layer = nn.Identity()
self.boft_dropout.update(nn.ModuleDict({adapter_name: boft_dropout_layer}))
if boft_block_size == 0 and boft_block_num != 0:
if self.in_features % boft_block_num != 0:
raise ValueError(
f"in_features ({self.in_features}) must be divisible by boft_block_num ({boft_block_num})!"
)
if boft_n_butterfly_factor != 0:
if boft_n_butterfly_factor > int(math.log2(boft_block_num)):
raise ValueError(
f"Invalid combination of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_num ({boft_block_num})!"
)
if boft_block_num % (2**boft_n_butterfly_factor) != 0:
raise ValueError(
f"boft_block_num ({boft_block_num}) must be a multiple of 2 raised to the power of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1})!"
)
boft_block_size = int(self.in_features // boft_block_num)
elif boft_block_size != 0 and boft_block_num == 0:
if self.in_features % boft_block_size != 0:
raise ValueError(
f"in_features ({self.in_features}) must be divisible by boft_block_size ({boft_block_size})!"
)
if boft_n_butterfly_factor != 0:
if self.in_features < (boft_block_size * (2**boft_n_butterfly_factor)):
raise ValueError(
f"Invalid combination of in_features ({self.in_features}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!"
)
if self.in_features % (boft_block_size * (2**boft_n_butterfly_factor)) != 0:
raise ValueError(
f"Invalid combination of in_features ({self.in_features}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!"
)
boft_block_num = int(self.in_features // boft_block_size)
else:
raise ValueError(
"Something went wrong, please report this error: https://github.com/huggingface/peft/issues"
)
# In OFT you can specify the number of blocks to be 1
if boft_n_butterfly_factor != 0:
if boft_block_num % 2 != 0:
raise ValueError(f"boft_block_num ({boft_block_num}) must be an even number!")
if boft_block_size % 2 != 0:
raise ValueError(f"boft_block_size ({boft_block_size}) must be an even number!")
# If there is no butterfly factor, then permutation matrix P will be an identity matrix.
P = torch.empty((boft_n_butterfly_factor + 1, self.in_features, self.in_features))
for i in range(boft_n_butterfly_factor + 1):
perm = self.block_butterfly_perm(
self.in_features, int(boft_block_num / (2 ** (i))), int(boft_block_size / 2), boft_n_butterfly_factor
)
perm_mat = self.perm2mat(perm)
P[i] = perm_mat
self.register_buffer("boft_P", P, persistent=False)
self.boft_R[adapter_name] = nn.Parameter(
torch.zeros(boft_n_butterfly_factor + 1, boft_block_num, boft_block_size, boft_block_size)
)
self.boft_s[adapter_name] = nn.Parameter(torch.ones(int(self.out_features), 1))
self.reset_boft_parameters(adapter_name, init_weights)
# set the boft block size and number
self.boft_block_size[adapter_name] = boft_block_size
self.boft_block_num[adapter_name] = boft_block_num
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_boft_parameters(self, adapter_name, init_weights):
"""
Reset the BOFT parameters.
"""
if init_weights is False:
nn.init.normal_(self.boft_R[adapter_name], mean=0.0, std=0.1)
nn.init.normal_(self.boft_s[adapter_name], mean=1.0, std=0.1)
return
if adapter_name in self.boft_R.keys():
if init_weights is True:
# initialize R to zero
nn.init.zeros_(self.boft_R[adapter_name])
nn.init.ones_(self.boft_s[adapter_name])
else:
raise ValueError(f"Unknown initialization {init_weights=}")
def perm2mat(self, indices):
"""
Convert permutation indices to permutation matrix.
Args:
indices: A list of indices representing the permutation.
"""
# Number of indices determines the size of the square matrix
n = len(indices)
# Initialize a matrix of zeros
perm_mat = torch.zeros((n, n))
# Set the 1s according to the indices
for i, idx in enumerate(indices):
perm_mat[i, idx] = 1
return perm_mat
def block_butterfly_perm(self, n, b, r=3, n_butterfly_factor=1):
"""
Define the permutation matrix for the block butterfly permutation.
Args:
n: size of the permutation matrix
b: desired number of blocks after multiplying with the permutation matrix
r: base block size of the block diagonal matrix, e.g. 2x2, 3x3, 5x5 etc.
"""
if n_butterfly_factor == 0:
return torch.arange(n)
if b * r * 2 > n:
raise ValueError("Invalid number of blocks!")
block_size = int(n // b)
indices = torch.arange(n)
def sort_block(b, r):
step = b / r
initial_order = torch.arange(b)
sorted_order = torch.empty(b, dtype=torch.long)
evens = torch.arange(0, step, 2)
odds = torch.arange(1, step, 2)
sorted_seq = torch.cat((evens, odds), dim=0)
for i, pos in enumerate(sorted_seq):
sorted_order[int(i * r) : int(i * r + r)] = initial_order[int(pos * r) : int(pos * r + r)]
return sorted_order
sorted_order = sort_block(block_size, r)
for i in range(0, n, block_size):
block_end = i + block_size
tmp_indices = indices[i:block_end]
indices[i:block_end] = tmp_indices[sorted_order]
return indices
def cayley_batch(self, data):
"""
Perform the Cayley parametrization on a batch of skew-symmetric matrices.
Args:
data: A batch of skew-symmetric matrices of shape (b, r, c).
"""
b, r, c = data.shape
# Ensure the input matrix is skew-symmetric
skew_mat = 0.5 * (data - data.transpose(1, 2))
id_mat = torch.eye(r, device=data.device).unsqueeze(0).expand(b, r, c)
# Perform the Cayley parametrization
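        # Q = (I - S)(I + S)^{-1}; because S is skew-symmetric, Q is guaranteed to be orthogonal (Cayley transform).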
Q = torch.linalg.solve(id_mat + skew_mat, id_mat - skew_mat, left=False)
return Q
class Linear(nn.Module, BOFTLayer):
"""
BOFT implemented in a dense layer.
"""
def __init__(
self,
base_layer,
adapter_name: str,
boft_block_size: int = 8,
boft_block_num: int = 0,
boft_n_butterfly_factor: int = 0,
boft_dropout: float = 0.1,
fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
init_weights: Union[bool, str] = True,
is_target_conv_1d_layer: bool = False,
**kwargs,
) -> None:
super().__init__()
BOFTLayer.__init__(self, base_layer, **kwargs)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(
adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights
)
self.is_target_conv_1d_layer = is_target_conv_1d_layer
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.boft_R.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weight = base_layer.weight.data.clone()
butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = torch.mm(butterfly_oft_mat, orig_weight)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = orig_weight * boft_s
if not torch.isfinite(orig_weight).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
self.base_layer.weight.data = orig_weight.contiguous()
else:
butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
orig_weight = base_layer.weight.data.clone()
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = torch.mm(butterfly_oft_mat, orig_weight)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = orig_weight * boft_s
self.base_layer.weight.data = orig_weight.contiguous()
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.boft_R.keys():
butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
orig_weight = self.get_base_layer().weight.data.clone()
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = torch.mm(butterfly_oft_mat.t(), orig_weight)
orig_weight = torch.transpose(orig_weight, 0, 1)
self.get_base_layer().weight.data = orig_weight * (1 / boft_s)
def get_delta_weight(self, adapter) -> tuple[torch.Tensor, torch.Tensor]:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str):
The name of the adapter for which the delta weight should be computed.
"""
boft_R = self.boft_R[adapter]
boft_s = self.boft_s[adapter]
N, D, H, _ = boft_R.shape
boft_R = boft_R.view(N * D, H, H)
orth_rotate_butterfly = self.cayley_batch(boft_R)
orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H)
if self.fbd_cuda_available:
block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly)
else:
orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0)
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
boft_P = self.boft_P.to(block_diagonal_butterfly.device)
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
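        # Chain the butterfly factors: the overall rotation is R_{m-1} @ ... @ R_1 @ R_0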
butterfly_oft_mat = butterfly_oft_mat_batch[0]
for i in range(1, butterfly_oft_mat_batch.shape[0]):
butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat
return butterfly_oft_mat, boft_s
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
boft_rotation = torch.eye(self.in_features, device=x.device, dtype=previous_dtype)
boft_scale = torch.ones((int(self.out_features), 1), device=x.device, dtype=previous_dtype)
for active_adapter in self.active_adapters:
if active_adapter not in self.boft_R.keys():
continue
boft_R = self.boft_R[active_adapter]
boft_s = self.boft_s[active_adapter]
dropout = self.boft_dropout[active_adapter]
N, D, H, _ = boft_R.shape
boft_R = boft_R.view(N * D, H, H)
orth_rotate_butterfly = self.cayley_batch(boft_R)
orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H)
orth_rotate_butterfly = dropout(orth_rotate_butterfly)
if self.fbd_cuda_available:
block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly)
else:
orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0)
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
# The BOFT author's cayley_batch, dropout and FastBlockDiag ONLY return fp32 outputs.
boft_P = self.boft_P.to(x)
block_diagonal_butterfly = block_diagonal_butterfly.to(x)
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
butterfly_oft_mat = butterfly_oft_mat_batch[0]
for i in range(1, butterfly_oft_mat_batch.shape[0]):
butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat
boft_rotation = butterfly_oft_mat @ boft_rotation
boft_scale = boft_s * boft_scale
x = x.to(self.get_base_layer().weight.data.dtype)
orig_weight = self.get_base_layer().weight.data
orig_weight = torch.transpose(orig_weight, 0, 1)
boft_rotation = boft_rotation.to(previous_dtype)
orig_weight = orig_weight.to(previous_dtype)
rotated_weight = torch.mm(boft_rotation, orig_weight)
rotated_weight = torch.transpose(rotated_weight, 0, 1)
scaled_rotated_weight = rotated_weight * boft_scale
scaled_rotated_weight = scaled_rotated_weight.to(previous_dtype)
if self.base_layer.bias is not None:
self.base_layer.bias = self.base_layer.bias.to(previous_dtype)
result = F.linear(input=x, weight=scaled_rotated_weight, bias=self.base_layer.bias)
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "boft." + rep
class Conv2d(nn.Module, BOFTLayer):
"""
BOFT implemented in a Conv2d layer.
"""
def __init__(
self,
base_layer: nn.Module,
adapter_name: str,
boft_block_size: int = 8,
boft_block_num: int = 0,
boft_n_butterfly_factor: int = 0,
boft_dropout: float = 0.1,
init_weights: Union[bool, str] = True,
**kwargs,
) -> None:
super().__init__()
BOFTLayer.__init__(self, base_layer)
self._active_adapter = adapter_name
self.update_layer(
adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights
)
def update_layer(
self, adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights
):
"""
Update the conv2d layer with trainable BOFT weights.
"""
# Attempt to load the CUDA extension during model initialization
if not get_fbd_cuda():
self.fbd_cuda_available = False
# If the CUDA extension is not available, set the butterfly factor to 1 to speed up the finetuning process
boft_n_butterfly_factor = 1
else:
self.fbd_cuda_available = True
# to be consistent with the paper notation
boft_n_butterfly_factor = boft_n_butterfly_factor - 1
if boft_n_butterfly_factor < 0:
raise ValueError(
f"You can only specify boft_n_butterfly_factor {boft_n_butterfly_factor + 1} to be a positive integer number."
)
# Initialize the MultiplicativeDropoutLayer for boft_dropout > 0.0.
if boft_dropout > 0.0:
boft_dropout_layer = MultiplicativeDropoutLayer(p=boft_dropout)
else:
boft_dropout_layer = nn.Identity()
self.boft_dropout.update(nn.ModuleDict({adapter_name: boft_dropout_layer}))
# layer information from the base layer
base_layer = self.get_base_layer()
conv_filter_dim = self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0]
# Initialize the BOFT parameters.
if boft_block_size == 0 and boft_block_num != 0:
if conv_filter_dim % boft_block_num != 0:
raise ValueError(
f"Convolutional kernel dimension ({conv_filter_dim}) must be divisible by boft_block_num ({boft_block_num})!"
)
if boft_n_butterfly_factor != 0:
if boft_n_butterfly_factor > int(math.log2(boft_block_num)):
raise ValueError(
f"Invalid combination of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_num ({boft_block_num})!"
)
if boft_block_num % (2**boft_n_butterfly_factor) != 0:
raise ValueError(
f"boft_block_num ({boft_block_num}) must be a multiple of 2 raised to the power of boft_n_butterfly_factor ({boft_n_butterfly_factor + 1})!"
)
boft_block_size = int(conv_filter_dim // boft_block_num)
elif boft_block_size != 0 and boft_block_num == 0:
if conv_filter_dim % boft_block_size != 0:
raise ValueError(
f"Convolutional kernel dimension ({conv_filter_dim}) must be divisible by boft_block_size ({boft_block_size})!"
)
if boft_n_butterfly_factor != 0:
if conv_filter_dim < (boft_block_size * (2**boft_n_butterfly_factor)):
raise ValueError(
f"Invalid combination of convolutional kernel dimension ({conv_filter_dim}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!"
)
if conv_filter_dim % (boft_block_size * (2**boft_n_butterfly_factor)) != 0:
raise ValueError(
f"Invalid combination of convolutional kernel dimension ({conv_filter_dim}), boft_n_butterfly_factor ({boft_n_butterfly_factor + 1}) and boft_block_size ({boft_block_size})!"
)
boft_block_num = int(conv_filter_dim // boft_block_size)
else:
raise ValueError(
"Something went wrong, please report this error: https://github.com/huggingface/peft/issues"
)
# In OFT you can specify the number of blocks to be 1
if boft_n_butterfly_factor != 0:
if boft_block_num % 2 != 0:
raise ValueError(f"boft_block_num ({boft_block_num}) must be an even number!")
if boft_block_size % 2 != 0:
raise ValueError(f"boft_block_size ({boft_block_size}) must be an even number!")
# If there is no butterfly factor, then permutation matrix P will be an identity matrix.
P = torch.empty((boft_n_butterfly_factor + 1, conv_filter_dim, conv_filter_dim))
for i in range(boft_n_butterfly_factor + 1):
perm = self.block_butterfly_perm(
conv_filter_dim, int(boft_block_num / (2 ** (i))), int(boft_block_size / 2), boft_n_butterfly_factor
)
perm_mat = self.perm2mat(perm)
P[i] = perm_mat
self.register_buffer("boft_P", P, persistent=False)
self.boft_R[adapter_name] = nn.Parameter(
torch.zeros(boft_n_butterfly_factor + 1, boft_block_num, boft_block_size, boft_block_size)
)
self.boft_s[adapter_name] = nn.Parameter(torch.ones(1, int(self.out_features)))
self.reset_boft_parameters(adapter_name, init_weights)
# set the boft block size and number
self.boft_block_size[adapter_name] = boft_block_size
self.boft_block_num[adapter_name] = boft_block_num
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.boft_R.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weight = base_layer.weight.data.clone()
butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
orig_weight = orig_weight.view(
self.out_features, self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0]
)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = torch.mm(butterfly_oft_mat, orig_weight)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = orig_weight * boft_s
orig_weight = orig_weight.view(
self.out_features, self.in_features, base_layer.kernel_size[0], base_layer.kernel_size[0]
)
self.base_layer.weight.data = orig_weight.contiguous()
else:
butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
orig_weight = base_layer.weight.data.clone()
orig_weight = orig_weight.view(
self.out_features, self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0]
)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = torch.mm(butterfly_oft_mat, orig_weight)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = orig_weight * boft_s
orig_weight = orig_weight.view(
self.out_features, self.in_features, base_layer.kernel_size[0], base_layer.kernel_size[0]
)
self.base_layer.weight.data = orig_weight.contiguous()
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.boft_R.keys():
butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
orig_weight = self.get_base_layer().weight.data.clone()
orig_weight = orig_weight.view(
self.out_features,
self.in_features * self.get_base_layer().kernel_size[0] * self.get_base_layer().kernel_size[0],
)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = torch.mm(butterfly_oft_mat.t(), orig_weight)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = orig_weight * (1 / boft_s)
orig_weight = orig_weight.view(
self.out_features,
self.in_features,
self.get_base_layer().kernel_size[0],
self.get_base_layer().kernel_size[0],
)
self.get_base_layer().weight.data = orig_weight
def get_delta_weight(self, adapter) -> tuple[torch.Tensor, torch.Tensor]:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str):
The name of the adapter for which the delta weight should be computed.
"""
boft_R = self.boft_R[adapter]
boft_s = self.boft_s[adapter].transpose(0, 1)
N, D, H, _ = boft_R.shape
boft_R = boft_R.view(N * D, H, H)
orth_rotate_butterfly = self.cayley_batch(boft_R)
orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H)
if self.fbd_cuda_available:
block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly)
else:
orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0)
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
boft_P = self.boft_P.to(block_diagonal_butterfly.device)
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
butterfly_oft_mat = butterfly_oft_mat_batch[0]
for i in range(1, butterfly_oft_mat_batch.shape[0]):
butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat
return butterfly_oft_mat, boft_s
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
boft_rotation = torch.eye(
self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0],
device=x.device,
dtype=x.dtype,
)
boft_scale = torch.ones((int(self.out_features), 1), device=x.device, dtype=x.dtype)
for active_adapter in self.active_adapters:
if active_adapter not in self.boft_R.keys():
continue
boft_R = self.boft_R[active_adapter]
boft_s = self.boft_s[active_adapter].transpose(0, 1)
dropout = self.boft_dropout[active_adapter]
N, D, H, _ = boft_R.shape
boft_R = boft_R.view(N * D, H, H)
orth_rotate_butterfly = self.cayley_batch(boft_R)
orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H)
orth_rotate_butterfly = dropout(orth_rotate_butterfly)
if self.fbd_cuda_available:
block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly)
else:
orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0)
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
boft_P = self.boft_P.to(x)
block_diagonal_butterfly = block_diagonal_butterfly.to(x)
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
butterfly_oft_mat = butterfly_oft_mat_batch[0]
for i in range(1, butterfly_oft_mat_batch.shape[0]):
butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat
boft_rotation = butterfly_oft_mat @ boft_rotation
boft_scale = boft_s * boft_scale
x = x.to(self.base_layer.weight.data.dtype)
orig_weight = self.base_layer.weight.data
orig_weight = orig_weight.view(
self.out_features,
self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0],
)
orig_weight = torch.transpose(orig_weight, 0, 1)
rotated_weight = torch.mm(boft_rotation, orig_weight)
rotated_weight = torch.transpose(rotated_weight, 0, 1)
scaled_rotated_weight = rotated_weight * boft_scale
scaled_rotated_weight = scaled_rotated_weight.view(
self.out_features, self.in_features, self.base_layer.kernel_size[0], self.base_layer.kernel_size[0]
)
result = F.conv2d(
input=x,
weight=scaled_rotated_weight,
bias=self.base_layer.bias,
padding=self.base_layer.padding[0],
stride=self.base_layer.stride[0],
)
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "boft." + rep
| peft/src/peft/tuners/boft/layer.py/0 | {
"file_path": "peft/src/peft/tuners/boft/layer.py",
"repo_id": "peft",
"token_count": 19780
} |
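As a sanity check on the building blocks above, here is a small self-contained sketch (not part of the library) of the batched Cayley transform and the block-diagonal composition used by `BOFTLayer`; the shapes are toy-sized and chosen arbitrarily.

import torch

def cayley_batch(data):
    # Same construction as BOFTLayer.cayley_batch: Q = (I - S)(I + S)^{-1} with S skew-symmetric
    b, r, c = data.shape
    skew = 0.5 * (data - data.transpose(1, 2))
    eye = torch.eye(r).unsqueeze(0).expand(b, r, c)
    return torch.linalg.solve(eye + skew, eye - skew, left=False)

blocks = cayley_batch(torch.randn(4, 8, 8))          # four orthogonal 8x8 blocks
rotation = torch.block_diag(*torch.unbind(blocks))   # one block-diagonal 32x32 rotation
print(torch.allclose(rotation @ rotation.T, torch.eye(32), atol=1e-4))  # True: the rotation is orthogonal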
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Any, Optional, Set, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from peft.tuners.lycoris_utils import LycorisLayer
class LoKrLayer(nn.Module, LycorisLayer):
# All names of layers that may contain adapter weights
adapter_layer_names = (
"lokr_w1",
"lokr_w1_a",
"lokr_w1_b",
"lokr_w2",
"lokr_w2_a",
"lokr_w2_b",
"lokr_t2",
)
# other_param_names is defined on parent class
def __init__(self, base_layer: nn.Module) -> None:
super().__init__()
LycorisLayer.__init__(self, base_layer)
# LoKr info
self.lokr_w1 = nn.ParameterDict({})
self.lokr_w1_a = nn.ParameterDict({})
self.lokr_w1_b = nn.ParameterDict({})
self.lokr_w2 = nn.ParameterDict({})
self.lokr_w2_a = nn.ParameterDict({})
self.lokr_w2_b = nn.ParameterDict({})
self.lokr_t2 = nn.ParameterDict({})
@property
def _available_adapters(self) -> Set[str]:
return {
*self.lokr_w1,
*self.lokr_w1_a,
*self.lokr_w1_b,
*self.lokr_w2,
*self.lokr_w2_a,
*self.lokr_w2_b,
*self.lokr_t2,
}
def create_adapter_parameters(
self,
adapter_name: str,
r: int,
shape,
use_w1: bool,
use_w2: bool,
use_effective_conv2d: bool,
):
if use_w1:
self.lokr_w1[adapter_name] = nn.Parameter(torch.empty(shape[0][0], shape[1][0]))
else:
self.lokr_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0][0], r))
self.lokr_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][0]))
if len(shape) == 4:
# Conv2d
if use_w2:
self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1], *shape[2:]))
elif use_effective_conv2d:
self.lokr_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0][1])) # b, 1-mode
self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1])) # d, 2-mode
else:
self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r))
self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1] * shape[2] * shape[3]))
else:
# Linear
if use_w2:
self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1]))
else:
self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r))
self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1]))
def reset_adapter_parameters(self, adapter_name: str):
if adapter_name in self.lokr_w1:
nn.init.zeros_(self.lokr_w1[adapter_name])
else:
nn.init.zeros_(self.lokr_w1_a[adapter_name])
nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_w2:
nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5))
else:
nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_t2:
nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))
def reset_adapter_parameters_random(self, adapter_name: str):
if adapter_name in self.lokr_w1:
nn.init.kaiming_uniform_(self.lokr_w1[adapter_name], a=math.sqrt(5))
else:
nn.init.kaiming_uniform_(self.lokr_w1_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_w2:
nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5))
else:
nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_t2:
nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))
    # Initializes the weight matrices in the same way as the LyCORIS repository.
def reset_adapter_parameters_lycoris_way(self, adapter_name):
if adapter_name in self.lokr_w1:
nn.init.kaiming_uniform_(self.lokr_w1[adapter_name], a=math.sqrt(5))
else:
nn.init.kaiming_uniform_(self.lokr_w1_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_w2:
nn.init.zeros_(self.lokr_w2[adapter_name])
else:
nn.init.zeros_(self.lokr_w2_b[adapter_name])
nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_t2:
nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))
def update_layer(
self,
adapter_name: str,
r: int,
alpha: float,
rank_dropout: float,
module_dropout: float,
init_weights: bool,
use_effective_conv2d: bool,
decompose_both: bool,
decompose_factor: int,
**kwargs,
) -> None:
"""Internal function to create lokr adapter
Args:
adapter_name (`str`): Name for the adapter to add.
r (`int`): Rank for the added adapter.
alpha (`float`): Alpha for the added adapter.
rank_dropout (`float`): The dropout probability for rank dimension during training
module_dropout (`float`): The dropout probability for disabling adapter during training.
init_weights (`bool`): Whether to initialize adapter weights.
use_effective_conv2d (`bool`): Use parameter effective decomposition for Conv2d with ksize > 1.
decompose_both (`bool`): Perform rank decomposition of left kronecker product matrix.
decompose_factor (`int`): Kronecker product decomposition factor.
"""
if r <= 0:
raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
self.r[adapter_name] = r
self.alpha[adapter_name] = alpha
self.scaling[adapter_name] = alpha / r
self.rank_dropout[adapter_name] = rank_dropout
self.module_dropout[adapter_name] = module_dropout
self.rank_dropout_scale[adapter_name] = kwargs["rank_dropout_scale"]
base_layer = self.get_base_layer()
# Determine shape of LoKr weights
if isinstance(base_layer, nn.Linear):
in_dim, out_dim = base_layer.in_features, base_layer.out_features
in_m, in_n = factorization(in_dim, decompose_factor)
out_l, out_k = factorization(out_dim, decompose_factor)
shape = ((out_l, out_k), (in_m, in_n)) # ((a, b), (c, d)), out_dim = a*c, in_dim = b*d
use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2)
use_w2 = not (r < max(shape[0][1], shape[1][1]) / 2)
use_effective_conv2d = False
elif isinstance(base_layer, nn.Conv2d):
in_dim, out_dim = base_layer.in_channels, base_layer.out_channels
k_size = base_layer.kernel_size
in_m, in_n = factorization(in_dim, decompose_factor)
out_l, out_k = factorization(out_dim, decompose_factor)
shape = ((out_l, out_k), (in_m, in_n), *k_size) # ((a, b), (c, d), *k_size)
use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2)
use_w2 = r >= max(shape[0][1], shape[1][1]) / 2
use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1)
else:
raise TypeError(f"LoKr is not implemented for base layers of type {type(base_layer).__name__}")
# Create weights with provided shape
self.create_adapter_parameters(adapter_name, r, shape, use_w1, use_w2, use_effective_conv2d)
# Initialize weights
if init_weights:
if init_weights == "lycoris":
self.reset_adapter_parameters_lycoris_way(adapter_name)
else:
self.reset_adapter_parameters(adapter_name)
else:
self.reset_adapter_parameters_random(adapter_name)
# Move new weights to device
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
# https://github.com/KohakuBlueleaf/LyCORIS/blob/e4259b870d3354a9615a96be61cb5d07455c58ea/lycoris/modules/lokr.py#L224
if adapter_name in self.lokr_w1:
w1 = self.lokr_w1[adapter_name]
else:
w1 = self.lokr_w1_a[adapter_name] @ self.lokr_w1_b[adapter_name]
if adapter_name in self.lokr_w2:
w2 = self.lokr_w2[adapter_name]
elif adapter_name in self.lokr_t2:
w2 = make_weight_cp(self.lokr_t2[adapter_name], self.lokr_w2_a[adapter_name], self.lokr_w2_b[adapter_name])
else:
w2 = self.lokr_w2_a[adapter_name] @ self.lokr_w2_b[adapter_name]
# Make weights with Kronecker product
weight = make_kron(w1, w2, self.scaling[adapter_name])
weight = weight.reshape(self.get_base_layer().weight.shape)
# Perform rank dropout during training - drop rows of addition weights
rank_dropout = self.rank_dropout[adapter_name]
if self.training and rank_dropout:
drop = (torch.rand(weight.size(0)) > rank_dropout).float()
drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device)
if self.rank_dropout_scale[adapter_name]:
drop /= drop.mean()
weight *= drop
return weight
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
# Execute all the adapters
for active_adapter in self.active_adapters:
if active_adapter not in self._available_adapters:
continue
module_dropout = self.module_dropout[active_adapter]
# Modify current execution weights
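                # During training, this adapter's contribution is skipped entirely with probability `module_dropout`.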
if (not self.training) or (self.training and torch.rand(1) > module_dropout):
result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs)
result = result.to(previous_dtype)
return result
class Linear(LoKrLayer):
"""LoKr implemented in Linear layer"""
def __init__(
self,
base_layer: nn.Module,
device: Optional[Union[str, torch.device]] = None,
dtype: Optional[torch.dtype] = None,
adapter_name: str = "default",
r: int = 0,
alpha: float = 0.0,
rank_dropout: float = 0.0,
module_dropout: float = 0.0,
init_weights: bool = True,
**kwargs,
):
super().__init__(base_layer)
# Create adapter and set it active
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs)
def _get_delta_activations(
self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
) -> torch.Tensor:
delta_weight = self.get_delta_weight(adapter_name)
# don't add bias here, because the bias is already included in the output of the base_layer
return F.linear(input, delta_weight)
def __repr__(self) -> str:
rep = super().__repr__()
return "lokr." + rep
class Conv2d(LoKrLayer):
"""LoKr implemented in Conv2d layer"""
def __init__(
self,
base_layer: nn.Module,
device: Optional[Union[str, torch.device]] = None,
dtype: Optional[torch.dtype] = None,
adapter_name: str = "default",
r: int = 0,
alpha: float = 0.0,
rank_dropout: float = 0.0,
module_dropout: float = 0.0,
use_effective_conv2d: bool = False,
init_weights: bool = True,
**kwargs,
):
super().__init__(base_layer)
# Create adapter and set it active
self._active_adapter = adapter_name
self.update_layer(
adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs
)
def _get_delta_activations(
self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
) -> torch.Tensor:
delta_weight = self.get_delta_weight(adapter_name)
# don't add bias here, because the bias is already included in the output of the base_layer
base_layer = self.get_base_layer()
return F.conv2d(
input,
delta_weight,
stride=base_layer.stride,
padding=base_layer.padding,
dilation=base_layer.dilation,
groups=base_layer.groups,
)
def __repr__(self) -> str:
rep = super().__repr__()
return "lokr." + rep
# Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/lokr.py#L11
def factorization(dimension: int, factor: int = -1) -> Tuple[int, int]:
"""Factorizes the provided number into the product of two numbers
Args:
dimension (`int`): The number that needs to be factorized.
factor (`int`, optional):
Factorization divider. The algorithm will try to output two numbers, one of each will be as close to the
factor as possible. If -1 is provided, the decomposition algorithm would try to search dividers near the
square root of the dimension. Defaults to -1.
Returns:
Tuple[`int`, `int`]: A tuple of two numbers, whose product is equal to the provided number. The first number is
always less than or equal to the second.
Example:
```py
>>> factorization(256, factor=-1)
(16, 16)
>>> factorization(128, factor=-1)
(8, 16)
>>> factorization(127, factor=-1)
(1, 127)
>>> factorization(128, factor=4)
(4, 32)
```
"""
if factor > 0 and (dimension % factor) == 0:
m = factor
n = dimension // factor
return m, n
if factor == -1:
factor = dimension
m, n = 1, dimension
length = m + n
while m < n:
new_m = m + 1
while dimension % new_m != 0:
new_m += 1
new_n = dimension // new_m
if new_m + new_n > length or new_m > factor:
break
else:
m, n = new_m, new_n
if m > n:
n, m = m, n
return m, n
def make_weight_cp(t, wa, wb):
rebuild2 = torch.einsum("i j k l, i p, j r -> p r k l", t, wa, wb) # [c, d, k1, k2]
return rebuild2
def make_kron(w1, w2, scale=1.0):
if len(w2.shape) == 4:
w1 = w1.unsqueeze(2).unsqueeze(2)
w2 = w2.contiguous()
rebuild = torch.kron(w1, w2)
return rebuild * scale
| peft/src/peft/tuners/lokr/layer.py/0 | {
"file_path": "peft/src/peft/tuners/lokr/layer.py",
"repo_id": "peft",
"token_count": 7994
} |
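To make the Kronecker structure above concrete, here is a toy sketch (outside the library) of how a LoKr delta weight for a 768x768 linear layer is assembled from the factorized dimensions; the rank and dimensions are arbitrary example values, and the real layer additionally scales the result by alpha / r.

import torch

out_l, out_k = 24, 32   # factorization(768) -> (24, 32)
in_m, in_n = 24, 32
r = 8
w1 = torch.randn(out_l, in_m)                        # small left Kronecker factor, kept full-rank
w2 = torch.randn(out_k, r) @ torch.randn(r, in_n)    # large right factor, built from a rank-r product
delta = torch.kron(w1, w2)                           # shape (out_l * out_k, in_m * in_n) == (768, 768)
print(delta.shape)  # torch.Size([768, 768])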
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import importlib
import math
import warnings
from typing import Any, Optional, Union
import torch
import torch.nn as nn
import torch.nn.init as init
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils import transpose
from peft.utils.integrations import gather_params_ctx
from .layer import LoraLayer
class LoraParallelLinear(nn.Module, LoraLayer):
"""
    When the target parallel_linear layer is a RowParallelLinear, the lora_A matrix is split into rows to keep the
    input and output shapes consistent, while lora_B remains a complete linear layer. Conversely, when the target
    layer is a ColumnParallelLinear, lora_B is split by columns, while lora_A remains a complete linear layer.
"""
def __init__(
self,
base_layer,
adapter_name: str,
backend,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
fan_in_fan_out: bool = False,
is_target_conv_1d_layer: bool = False,
init_lora_weights: Union[bool, str] = True,
use_rslora: bool = False,
use_dora: bool = False,
lora_bias: bool = False,
**kwargs,
):
if lora_bias:
raise ValueError(f"{self.__class__.__name__} does not support lora_bias yet, set it to False")
super().__init__()
LoraLayer.__init__(self, base_layer=base_layer, **kwargs)
if use_dora:
raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False")
self.backend = backend
self.is_parallel_a = isinstance(base_layer, backend.RowParallelLinear)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
megatron_config = kwargs["megatron_config"]
parallel_linear_kwargs = {"megatron_config": megatron_config}
init_method = init.xavier_normal_
if hasattr(megatron_config, "init_method"):
init_method = megatron_config.init_method
input_is_parallel = True
gather_output = False
if isinstance(base_layer, self.backend.RowParallelLinear):
input_is_parallel = base_layer.input_is_parallel
else:
gather_output = base_layer.gather_output
self.update_layer(
adapter_name,
r,
lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
init_lora_weights=init_lora_weights,
use_rslora=use_rslora,
use_dora=use_dora,
init_method=init_method,
input_is_parallel=input_is_parallel,
gather_output=gather_output,
**parallel_linear_kwargs,
)
if is_target_conv_1d_layer:
raise ValueError(
f"{self.__class__.__name__} does not support target_conv_1d_layer yet, please set it to False"
)
self.is_target_conv_1d_layer = False
def update_layer(
self,
adapter_name,
r,
lora_alpha,
lora_dropout,
init_lora_weights,
use_rslora,
use_dora=False,
init_method=init.xavier_normal_,
input_is_parallel=True,
gather_output=False,
**parallel_linear_kwargs,
):
if r <= 0:
raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
self.r[adapter_name] = r
self.lora_alpha[adapter_name] = lora_alpha
if lora_dropout > 0.0:
lora_dropout_layer = nn.Dropout(p=lora_dropout)
else:
lora_dropout_layer = nn.Identity()
self.lora_dropout[adapter_name] = lora_dropout_layer
megatron_config = parallel_linear_kwargs["megatron_config"]
        # lora needs to be forced to 32-bit precision, otherwise it will overflow
megatron_config.params_dtype = torch.float32
if self.is_parallel_a:
lora_a = self.backend.RowParallelLinear(
input_size=self.in_features,
output_size=r,
bias=False,
input_is_parallel=input_is_parallel,
skip_bias_add=True,
init_method=init_method,
config=megatron_config,
)
lora_b = nn.Linear(in_features=r, out_features=self.out_features, bias=False, dtype=torch.float32)
else:
lora_a = nn.Linear(in_features=self.in_features, out_features=r, bias=False, dtype=torch.float32)
lora_b = self.backend.ColumnParallelLinear(
input_size=r,
output_size=self.out_features,
bias=False,
gather_output=gather_output,
init_method=init_method,
config=megatron_config,
)
self.lora_A[adapter_name] = lora_a
self.lora_B[adapter_name] = lora_b
if use_rslora:
self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
else:
self.scaling[adapter_name] = lora_alpha / r
        # for inits that require access to the base weight, use gather_params_ctx so that the weight is gathered when using DeepSpeed
if isinstance(init_lora_weights, str) and init_lora_weights.startswith("pissa"):
with gather_params_ctx(self.get_base_layer().weight):
self.pissa_init(adapter_name, init_lora_weights)
elif isinstance(init_lora_weights, str) and init_lora_weights.startswith("corda"):
with gather_params_ctx(self.get_base_layer().weight):
self.corda_init(adapter_name, init_lora_weights)
elif isinstance(init_lora_weights, str) and init_lora_weights.lower() == "olora":
with gather_params_ctx(self.get_base_layer().weight):
self.olora_init(adapter_name)
elif init_lora_weights == "loftq":
with gather_params_ctx(self.get_base_layer().weight):
self.loftq_init(adapter_name)
elif init_lora_weights:
self.reset_lora_parameters(adapter_name, init_lora_weights)
# call this before dora_init
self._move_adapter_to_device_of_base_layer(adapter_name)
if use_dora:
self.dora_init(adapter_name)
self.use_dora[adapter_name] = True
else:
self.use_dora[adapter_name] = False
self.set_adapter(self.active_adapters)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any):
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop("adapter_names", None)
# If weight is used for matrix multiplication here, the final aggregation operation of the original
# parallel_linear layer will be missing, so we need to directly call its forward function to obtain the
# output of the original parallel_linear layer.
if self.disable_adapters:
if self.merged:
self.unmerge()
result, bias = self.base_layer(x, *args, **kwargs)
elif adapter_names is not None:
raise ValueError(f"{self.__class__.__name__} does not support mixed_batch_forward yet.")
elif self.merged:
result, bias = self.base_layer(x, *args, **kwargs)
else:
result, bias = self.base_layer(x, *args, **kwargs)
torch_result_dtype = result.dtype
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
x = self._cast_input_dtype(x, lora_A.weight.dtype)
if not self.use_dora[active_adapter]:
result = result + lora_B(lora_A(dropout(x))) * scaling
else:
if isinstance(dropout, torch.nn.Identity) or not self.training:
base_result = result
else:
x = dropout(x)
base_result = None
result = result + self.lora_magnitude_vector[active_adapter](
x,
lora_A=lora_A,
lora_B=lora_B,
scaling=scaling,
base_layer=self.get_base_layer(),
base_result=base_result,
)
result = result.to(torch_result_dtype)
return result, bias
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.lora_A.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weights = base_layer.weight.data.clone()
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
orig_weights = orig_weights + delta_weight
else:
# handle dora
# since delta_weight already includes scaling, set it to 1 here
weight_norm = (
self.lora_magnitude_vector[active_adapter]
.get_weight_norm(orig_weights, transpose(delta_weight, self.fan_in_fan_out), scaling=1)
.detach()
)
                        # We need to cache weight_norm because it has to be based on the original weights. We
                        # cannot calculate it on the fly based on the merged weights when unmerging because it's a
                        # different value
self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
dora_factor = transpose(dora_factor.view(-1, 1), self.fan_in_fan_out)
orig_weights = dora_factor * (orig_weights + delta_weight)
if not torch.isfinite(orig_weights).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weights
else:
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
base_layer.weight.data = base_layer.weight.data + delta_weight
else:
# handle dora
# since delta_weight already includes scaling, set it to 1 here
weight_norm = (
self.lora_magnitude_vector[active_adapter]
.get_weight_norm(
base_layer.weight, transpose(delta_weight, self.fan_in_fan_out), scaling=1
)
.detach()
)
                        # We need to cache weight_norm because it has to be based on the original weights. We
                        # cannot calculate it on the fly based on the merged weights when unmerging because it's a
                        # different value
self._cache_store(f"{active_adapter}-weight_norm", weight_norm)
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
dora_factor = transpose(dora_factor.view(-1, 1), self.fan_in_fan_out)
new_weight = dora_factor * (base_layer.weight.data + delta_weight)
base_layer.weight.data = new_weight
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.lora_A.keys():
weight = self.get_base_layer().weight
delta_weight = self.get_delta_weight(active_adapter)
if not self.use_dora[active_adapter]:
weight.data -= delta_weight
else:
weight_norm = self._cache_pop(f"{active_adapter}-weight_norm")
dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm
weight_orig = weight.data / dora_factor.view(-1, 1) - delta_weight
weight.data = weight_orig
def get_delta_weight(self, adapter) -> torch.Tensor:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str):
The name of the adapter for which the delta weight should be computed.
"""
device = self.lora_B[adapter].weight.device
dtype = self.lora_B[adapter].weight.dtype
        # In case users want to merge the adapter weights that are in
        # (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
        # (b)float16 because some CPUs have slow bf16/fp16 matmuls.
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)
weight_A = self.lora_A[adapter].weight
weight_B = self.lora_B[adapter].weight
if cast_to_fp32:
weight_A = weight_A.float()
weight_B = weight_B.float()
output_tensor = transpose(weight_B @ weight_A, self.fan_in_fan_out) * self.scaling[adapter]
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
# cast back the weights
self.lora_A[adapter].weight.data = weight_A.to(dtype)
self.lora_B[adapter].weight.data = weight_B.to(dtype)
return output_tensor
def __repr__(self) -> str:
rep = super().__repr__()
return "lora." + rep
def dispatch_megatron(
target: torch.nn.Module,
adapter_name: str,
lora_config,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if lora_config.megatron_config:
megatron_core = importlib.import_module(lora_config.megatron_core)
else:
megatron_core = None
if megatron_core and isinstance(
target_base_layer,
(megatron_core.tensor_parallel.ColumnParallelLinear, megatron_core.tensor_parallel.RowParallelLinear),
):
megatron_kwargs = kwargs.copy()
megatron_config = lora_config.megatron_config
if isinstance(megatron_config, dict):
transformer_config_class = megatron_core.transformer.transformer_config.TransformerConfig
megatron_config = transformer_config_class(**lora_config.megatron_config)
megatron_kwargs["megatron_config"] = megatron_config
if megatron_kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to True but the target module is `ColumnParallelLinear` "
"or `RowParallelLinear`. "
"Setting fan_in_fan_out to False."
)
megatron_kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
new_module = LoraParallelLinear(
base_layer=target, adapter_name=adapter_name, backend=megatron_core.tensor_parallel, **megatron_kwargs
)
return new_module
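# A minimal sketch of a LoRA config that `dispatch_megatron` above could consume. The target module
# name and the fields passed via `megatron_config` (turned into a Megatron `TransformerConfig` at
# dispatch time) are illustrative assumptions; adapt them to the actual Megatron model being tuned.
def _example_megatron_lora_config():
    from peft.tuners.lora import LoraConfig

    return LoraConfig(
        r=8,
        lora_alpha=16,
        target_modules=["query_key_value"],  # assumed name of the fused attention projection
        megatron_config={"num_layers": 2, "hidden_size": 12, "num_attention_heads": 4},
        megatron_core="megatron.core",  # module from which the tensor-parallel backend is imported
    )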
| peft/src/peft/tuners/lora/tp_layer.py/0 | {
"file_path": "peft/src/peft/tuners/lora/tp_layer.py",
"repo_id": "peft",
"token_count": 8506
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Any
import torch
import torch.nn as nn
from peft.tuners.tuners_utils import BaseTunerLayer
from .config import PolyConfig
from .router import get_router
class PolyLayer(BaseTunerLayer):
# All names of layers that may contain (trainable) adapter weights
adapter_layer_names = ("poly_lora_A", "poly_lora_B", "poly_router")
# All names of other parameters that may contain adapter-related parameters
other_param_names = ("r", "n_tasks", "n_skills", "n_splits")
def __init__(self, base_layer: nn.Module, **kwargs):
self.base_layer = base_layer
self.r = {}
self.n_tasks = {}
self.n_skills = {}
self.n_splits = {}
self.poly_type = {}
self.poly_router = nn.ModuleDict()
self.poly_lora_A = nn.ParameterDict()
self.poly_lora_B = nn.ParameterDict()
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
in_features, out_features = base_layer.in_features, base_layer.out_features
else:
raise ValueError(f"Unsupported layer type {type(base_layer)}")
self.in_features = in_features
self.out_features = out_features
def update_layer(self, adapter_name, poly_config):
if poly_config.r <= 0:
raise ValueError(f"`r` should be a positive integer value but the value passed is {poly_config.r}")
self.r[adapter_name] = poly_config.r
self.n_tasks[adapter_name] = poly_config.n_tasks
self.n_skills[adapter_name] = poly_config.n_skills
self.n_splits[adapter_name] = poly_config.n_splits
self.poly_type[adapter_name] = poly_config.poly_type
self.poly_lora_A[adapter_name] = nn.Parameter(
torch.empty(
poly_config.n_splits,
poly_config.n_skills,
self.in_features // poly_config.n_splits,
poly_config.r,
)
)
self.poly_lora_B[adapter_name] = nn.Parameter(
torch.empty(
poly_config.n_splits,
poly_config.n_skills,
poly_config.r,
self.out_features // poly_config.n_splits,
)
)
self.poly_router[adapter_name] = get_router(poly_config)
self.reset_poly_parameters(adapter_name, init_weights=poly_config.init_weights)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_poly_parameters(self, adapter_name, init_weights):
if adapter_name in self.poly_lora_A.keys():
# initialize A the same way as the default for nn.Linear
# https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L269
n_splits, n_skills, d, r = self.poly_lora_A[adapter_name].shape
for skill in range(n_skills):
for split in range(n_splits):
param = torch.empty((r, d))
torch.nn.init.kaiming_uniform_(param, a=math.sqrt(5))
self.poly_lora_A[adapter_name].data[split, skill, :, :] = param.T
if init_weights:
# initialize B to zero
torch.nn.init.zeros_(self.poly_lora_B[adapter_name])
else:
# initialize B the same way as the default for nn.Linear
n_splits, n_skills, r, d = self.poly_lora_B[adapter_name].shape
for skill in range(n_skills):
for split in range(n_splits):
param = torch.empty((d, r))
torch.nn.init.kaiming_uniform_(param, a=math.sqrt(5))
self.poly_lora_B[adapter_name].data[split, skill, :, :] = param.T
            # initialize the router
self.poly_router[adapter_name].reset()
class Linear(nn.Module, PolyLayer):
    # Poly (mixture of LoRA modules) implemented in a dense layer
def __init__(
self,
base_layer,
adapter_name: str,
poly_config: PolyConfig,
**kwargs,
) -> None:
super().__init__()
PolyLayer.__init__(self, base_layer, **kwargs)
self._active_adapter = adapter_name
self.update_layer(adapter_name, poly_config)
def forward(self, x: torch.Tensor, *args: Any, task_ids: torch.Tensor = None, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.poly_lora_A.keys():
continue
r = self.r[active_adapter]
poly_router = self.poly_router[active_adapter]
poly_lora_A = self.poly_lora_A[active_adapter]
poly_lora_B = self.poly_lora_B[active_adapter]
# Combine the output of LoRAs
# https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L293
mixing_weights = poly_router(task_ids=task_ids, input_ids=x)
bs, n_splits, n_skills = mixing_weights.size()
# A is n_splits, n_skills, D // n_splits, rank
# we want bs, n_splits, D // n_splits, rank
A = torch.einsum("bqs,qsdr->bqdr", (mixing_weights, poly_lora_A))
B = torch.einsum("bqs,qsrd->bqrd", (mixing_weights, poly_lora_B))
A = A.reshape(bs, self.in_features, r)
B = B.transpose(1, 2).reshape(bs, r, self.out_features)
x = x.to(A.dtype)
result += x.bmm(A).bmm(B) / r
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "poly." + rep
| peft/src/peft/tuners/poly/layer.py/0 | {
"file_path": "peft/src/peft/tuners/poly/layer.py",
"repo_id": "peft",
"token_count": 3184
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class VeraConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a [`VeraModel`].
Paper: https://arxiv.org/abs/2310.11454.
Args:
r (`int`, *optional*, defaults to `256`):
VeRA parameter dimension ("rank"). Choose higher values than LoRA ranks here, since VeRA uses far fewer
parameters than LoRA (see Table 1).
target_modules (`Union[List[str], str]`):
The names of the modules to apply Vera to. Only linear layers are supported.
projection_prng_key (`int`):
Vera PRNG init key. Used for initialising vera_A and vera_B for new models or when loading a checkpoint
that did not include these projections. Defaults to `0`.
save_projection (`bool`):
Whether to save the vera_A / vera_B projections in the state dict alongside per layer lambda_b / lambda_d
weights. This will increase the size of the checkpoint, but guarantee that we can reload the checkpoint on
all system configurations. Defaults to `True`.
vera_dropout (`float`):
The dropout probability for Vera layers.
d_initial (`float`, *optional*, defaults to `0.1`):
            Initial value for the `vera_lambda_d` vector used when initializing the VeRA parameters. Small values
            (<=0.1) are recommended (see Table 6c in the paper).
fan_in_fan_out (`bool`):
Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses
`Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.
bias (`str`):
Bias type for Vera. Can be 'none', 'all' or 'vera_only'. If 'all' or 'vera_only', the corresponding biases
will be updated during training. Be aware that this means that, even when disabling the adapters, the model
will not produce the same output as the base model would have without adaptation.
modules_to_save (`List[str]`):
List of modules apart from Vera layers to be set as trainable and saved in the final checkpoint.
init_weights (`bool`):
Whether to initialize the weights of the Vera layers with their default initialization. Don't change this
setting, except if you know exactly what you're doing.
layers_to_transform (`Union[List[int],int]`):
The layer indexes to transform, if this argument is specified, it will apply the Vera transformations on
the layer indexes that are specified in this list. If a single integer is passed, it will apply the Vera
transformations on the layer at this index.
layers_pattern (`Optional[Union[List[str], str]]`):
The layer pattern name, used only if `layers_to_transform` is different from `None`. This should target the
`nn.ModuleList` of the model, which is often called `'layers'` or `'h'`.
"""
    r: int = field(default=256, metadata={"help": "Vera parameter dimension (rank)"})
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"List of module names or regex expression of the module names to replace with Vera."
"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. "
"Only linear layers are supported."
)
},
)
projection_prng_key: int = field(
default=0,
metadata={
"help": (
"Vera PRNG init key. Used for initialising vera_A and vera_B for new models or when loading a "
"checkpoint that did not include these projections."
)
},
)
save_projection: bool = field(
default=True,
metadata={
"help": (
"Whether to save the vera_A / vera_B projections in the state dict alongside per layer lambda_b / "
"lambda_d weights. This will increase the size of the checkpoint, but guarantee that we can reload "
"the checkpoint on all system configurations."
)
},
)
vera_dropout: float = field(default=0.0, metadata={"help": "Vera dropout"})
    d_initial: float = field(default=0.1, metadata={"help": "Initial value for the d vector."})
fan_in_fan_out: bool = field(
default=False,
metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
)
bias: str = field(default="none", metadata={"help": "Bias type for Vera. Can be 'none', 'all' or 'vera_only'"})
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": (
"List of modules apart from Vera layers to be set as trainable and saved in the final checkpoint. For"
" example, in Sequence Classification or Token Classification tasks, the final layer"
" `classifier/score` are randomly initialized and as such need to be trainable and saved."
)
},
)
init_weights: bool = field(
default=True,
metadata={
"help": (
"Whether to initialize the weights of the Vera layers with their default initialization. Don't change "
"this setting, except if you know exactly what you're doing."
),
},
)
layers_to_transform: Optional[Union[list[int], int]] = field(
default=None,
metadata={
"help": (
"The layer indexes to transform, is this argument is specified, PEFT will transform only the layers"
" indexes that are specified inside this list. If a single integer is passed, PEFT will transform only"
" the layer at this index."
)
},
)
layers_pattern: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"The layer pattern name, used only if `layers_to_transform` is different to None and if the layer "
"pattern is not in the common layers pattern. This should target the `nn.ModuleList` of the "
"model, which is often called `'layers'` or `'h'`."
)
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.VERA
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
# check for layers_to_transform and layers_pattern
if self.layers_pattern and not self.layers_to_transform:
raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ")
if not self.save_projection:
warnings.warn(
"Specified to not save vera_A and vera_B within the state dictionary, instead they will be restored "
"using the PRNG key store in `config.projection_prng_key`. Consider setting `config.save_projection` "
"to `True` to guarantee restoring the checkpoint correctly on all system configurations."
)
| peft/src/peft/tuners/vera/config.py/0 | {
"file_path": "peft/src/peft/tuners/vera/config.py",
"repo_id": "peft",
"token_count": 3107
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from typing import Optional
class PeftType(str, enum.Enum):
"""
Enum class for the different types of adapters in PEFT.
Supported PEFT types:
- PROMPT_TUNING
- MULTITASK_PROMPT_TUNING
- P_TUNING
- PREFIX_TUNING
- LORA
- ADALORA
- BOFT
- ADAPTION_PROMPT
- IA3
- LOHA
- LOKR
- OFT
- XLORA
- POLY
- LN_TUNING
- VERA
- FOURIERFT
- HRA
    - VBLORA
    - CPT
    - BONE
"""
PROMPT_TUNING = "PROMPT_TUNING"
MULTITASK_PROMPT_TUNING = "MULTITASK_PROMPT_TUNING"
P_TUNING = "P_TUNING"
PREFIX_TUNING = "PREFIX_TUNING"
LORA = "LORA"
ADALORA = "ADALORA"
BOFT = "BOFT"
ADAPTION_PROMPT = "ADAPTION_PROMPT"
IA3 = "IA3"
LOHA = "LOHA"
LOKR = "LOKR"
OFT = "OFT"
POLY = "POLY"
LN_TUNING = "LN_TUNING"
VERA = "VERA"
FOURIERFT = "FOURIERFT"
XLORA = "XLORA"
HRA = "HRA"
VBLORA = "VBLORA"
CPT = "CPT"
BONE = "BONE"
class TaskType(str, enum.Enum):
"""
Enum class for the different types of tasks supported by PEFT.
Overview of the supported task types:
- SEQ_CLS: Text classification.
- SEQ_2_SEQ_LM: Sequence-to-sequence language modeling.
- CAUSAL_LM: Causal language modeling.
- TOKEN_CLS: Token classification.
- QUESTION_ANS: Question answering.
- FEATURE_EXTRACTION: Feature extraction. Provides the hidden states which can be used as embeddings or features
for downstream tasks.
"""
SEQ_CLS = "SEQ_CLS"
SEQ_2_SEQ_LM = "SEQ_2_SEQ_LM"
CAUSAL_LM = "CAUSAL_LM"
TOKEN_CLS = "TOKEN_CLS"
QUESTION_ANS = "QUESTION_ANS"
FEATURE_EXTRACTION = "FEATURE_EXTRACTION"
def register_peft_method(
*, name: str, config_cls, model_cls, prefix: Optional[str] = None, is_mixed_compatible=False
) -> None:
"""
Function to register a finetuning method like LoRA to be available in PEFT.
This method takes care of registering the PEFT method's configuration class, the model class, and optionally the
prefix.
Args:
name (str):
The name of the PEFT method. It must be unique.
config_cls:
The configuration class of the PEFT method.
model_cls:
The model class of the PEFT method.
prefix (Optional[str], optional):
The prefix of the PEFT method. It should be unique. If not provided, the name of the PEFT method is used as
the prefix.
is_mixed_compatible (bool, optional):
Whether the PEFT method is compatible with `PeftMixedModel`. If you're not sure, leave it as False
(default).
Example:
```py
# inside of peft/tuners/my_peft_method/__init__.py
from peft.utils import register_peft_method
register_peft_method(name="my_peft_method", config_cls=MyConfig, model_cls=MyModel)
```
"""
from peft.mapping import (
PEFT_TYPE_TO_CONFIG_MAPPING,
PEFT_TYPE_TO_MIXED_MODEL_MAPPING,
PEFT_TYPE_TO_PREFIX_MAPPING,
PEFT_TYPE_TO_TUNER_MAPPING,
)
if name.endswith("_"):
raise ValueError(f"Please pass the name of the PEFT method without '_' suffix, got {name}.")
if not name.islower():
raise ValueError(f"The name of the PEFT method should be in lower case letters, got {name}.")
if name.upper() not in list(PeftType):
raise ValueError(f"Unknown PEFT type {name.upper()}, please add an entry to peft.utils.peft_types.PeftType.")
peft_type = getattr(PeftType, name.upper())
# model_cls can be None for prompt learning methods, which don't have dedicated model classes
if prefix is None:
prefix = name + "_"
if (
(peft_type in PEFT_TYPE_TO_CONFIG_MAPPING)
or (peft_type in PEFT_TYPE_TO_TUNER_MAPPING)
or (peft_type in PEFT_TYPE_TO_MIXED_MODEL_MAPPING)
):
raise KeyError(f"There is already PEFT method called '{name}', please choose a unique name.")
if prefix in PEFT_TYPE_TO_PREFIX_MAPPING:
raise KeyError(f"There is already a prefix called '{prefix}', please choose a unique prefix.")
model_cls_prefix = getattr(model_cls, "prefix", None)
if (model_cls_prefix is not None) and (model_cls_prefix != prefix):
raise ValueError(
f"Inconsistent prefixes found: '{prefix}' and '{model_cls_prefix}' (they should be the same)."
)
PEFT_TYPE_TO_PREFIX_MAPPING[peft_type] = prefix
PEFT_TYPE_TO_CONFIG_MAPPING[peft_type] = config_cls
PEFT_TYPE_TO_TUNER_MAPPING[peft_type] = model_cls
if is_mixed_compatible:
PEFT_TYPE_TO_MIXED_MODEL_MAPPING[peft_type] = model_cls
| peft/src/peft/utils/peft_types.py/0 | {
"file_path": "peft/src/peft/utils/peft_types.py",
"repo_id": "peft",
"token_count": 2234
} |
# Note: These tests were copied from test_common_gpu.py and test_gpu_examples.py as they can run on CPU too.
#
# Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import os
import tempfile
import unittest
import pytest
import torch
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
)
from peft import (
AdaLoraConfig,
LoraConfig,
PeftModel,
get_peft_model,
prepare_model_for_kbit_training,
)
from peft.utils import SAFETENSORS_WEIGHTS_NAME, infer_device
from .testing_utils import (
require_gptqmodel,
require_optimum,
require_torch_multi_gpu,
)
@require_gptqmodel
class PeftGPTQModelCommonTests(unittest.TestCase):
r"""
A common tester to run common operations that are performed on GPU/CPU such as generation, loading in 8bit, etc.
"""
def setUp(self):
self.causal_lm_model_id = "facebook/opt-350m"
self.device = infer_device()
def tearDown(self):
r"""
Efficient mechanism to free GPU memory after each test. Based on
https://github.com/huggingface/transformers/issues/21094
"""
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
def test_lora_gptq_quantization_from_pretrained_safetensors(self):
r"""
Tests that the gptqmodel quantization using LoRA works as expected with safetensors weights.
"""
from transformers import GPTQConfig
model_id = "marcsun13/opt-350m-gptq-4bit"
quantization_config = GPTQConfig(bits=4, use_exllama=False)
kwargs = {
"pretrained_model_name_or_path": model_id,
"torch_dtype": torch.float16,
"device_map": "auto",
"quantization_config": quantization_config,
}
model = AutoModelForCausalLM.from_pretrained(**kwargs)
model = prepare_model_for_kbit_training(model)
config = LoraConfig(task_type="CAUSAL_LM")
peft_model = get_peft_model(model, config)
peft_model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(peft_model.device))
with tempfile.TemporaryDirectory() as tmp_dir:
peft_model.save_pretrained(tmp_dir)
model = AutoModelForCausalLM.from_pretrained(**kwargs)
model = PeftModel.from_pretrained(model, tmp_dir)
model = prepare_model_for_kbit_training(model)
model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(peft_model.device))
# loading a 2nd adapter works, #1239
model.load_adapter(tmp_dir, "adapter2")
model.set_adapter("adapter2")
model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(peft_model.device))
# check that both adapters are in the same layer
assert "default" in model.base_model.model.model.decoder.layers[0].self_attn.q_proj.lora_A
assert "adapter2" in model.base_model.model.model.decoder.layers[0].self_attn.q_proj.lora_A
@require_gptqmodel
@require_optimum
class PeftGPTQModelTests(unittest.TestCase):
r"""
GPTQ + peft tests
"""
def setUp(self):
from transformers import GPTQConfig
self.causal_lm_model_id = "marcsun13/opt-350m-gptq-4bit"
self.quantization_config = GPTQConfig(bits=4, backend="auto_trainable")
self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
def tearDown(self):
r"""
Efficient mechanism to free GPU memory after each test. Based on
https://github.com/huggingface/transformers/issues/21094
"""
gc.collect()
torch.cuda.empty_cache()
def _check_inference_finite(self, model, batch):
# try inference without Trainer class
training = model.training
model.eval()
output = model(**batch.to(model.device))
assert torch.isfinite(output.logits).all()
model.train(training)
def test_causal_lm_training(self):
r"""
Test the CausalLM training on a single GPU device. The test would simply fail if the adapters are not set
correctly.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
torch_dtype=torch.float16,
device_map="auto",
quantization_config=self.quantization_config,
)
model = prepare_model_for_kbit_training(model)
config = LoraConfig(
r=16,
lora_alpha=32,
target_modules=["q_proj", "v_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
data = load_dataset("ybelkada/english_quotes_copy")
data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=TrainingArguments(
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
warmup_steps=2,
max_steps=3,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir=tmp_dir,
),
data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train()
model.cpu().save_pretrained(tmp_dir)
assert "adapter_config.json" in os.listdir(tmp_dir)
assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
# assert loss is not None
assert trainer.state.log_history[-1]["train_loss"] is not None
def test_adalora_causalLM(self):
r"""
Tests the gptq training with adalora
"""
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
torch_dtype=torch.float16,
device_map="auto",
quantization_config=self.quantization_config,
)
tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
model = prepare_model_for_kbit_training(model)
peft_config = AdaLoraConfig(
init_r=6,
target_r=4,
tinit=50,
tfinal=100,
deltaT=5,
beta1=0.3,
beta2=0.3,
orth_reg_weight=0.2,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, peft_config)
data = load_dataset("ybelkada/english_quotes_copy")
data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
batch = tokenizer(data["train"][:3]["quote"], return_tensors="pt", padding=True)
self._check_inference_finite(model, batch)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=TrainingArguments(
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
warmup_steps=2,
max_steps=3,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir=tmp_dir,
),
data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train()
model.cpu().save_pretrained(tmp_dir)
assert "adapter_config.json" in os.listdir(tmp_dir)
assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
# assert loss is not None
assert trainer.state.log_history[-1]["train_loss"] is not None
@pytest.mark.multi_gpu_tests
@require_torch_multi_gpu
def test_causal_lm_training_multi_gpu(self):
r"""
Test the CausalLM training on a multi-GPU device. The test would simply fail if the adapters are not set
correctly.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
torch_dtype=torch.float16,
device_map="auto",
quantization_config=self.quantization_config,
)
assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count()))
model = prepare_model_for_kbit_training(model)
setattr(model, "model_parallel", True)
setattr(model, "is_parallelizable", True)
config = LoraConfig(
r=16,
lora_alpha=32,
target_modules=["q_proj", "v_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
data = load_dataset("Abirate/english_quotes")
data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=TrainingArguments(
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
warmup_steps=2,
max_steps=3,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir=tmp_dir,
),
data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train()
model.cpu().save_pretrained(tmp_dir)
assert "adapter_config.json" in os.listdir(tmp_dir)
assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
# assert loss is not None
assert trainer.state.log_history[-1]["train_loss"] is not None
def test_non_default_adapter_name(self):
# See issue 1346
config = LoraConfig(
r=16,
target_modules=["q_proj", "v_proj"],
task_type="CAUSAL_LM",
)
# default adapter name
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
torch_dtype=torch.float16,
device_map="auto",
quantization_config=self.quantization_config,
)
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, config)
n_trainable_default, n_total_default = model.get_nb_trainable_parameters()
# other adapter name
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
torch_dtype=torch.float16,
device_map="auto",
quantization_config=self.quantization_config,
)
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, config, adapter_name="other")
n_trainable_other, n_total_other = model.get_nb_trainable_parameters()
assert n_trainable_other > 0
# sanity check
assert n_trainable_default == n_trainable_other
assert n_total_default == n_total_other
| peft/tests/test_gptqmodel.py/0 | {
"file_path": "peft/tests/test_gptqmodel.py",
"repo_id": "peft",
"token_count": 6031
} |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The intent of the tests contained in this file is to check as many PEFT features as possible with torch.compile. This
# is thus a document on how well torch.compile is supported by PEFT. Currently, we know that certain features do not
# work with torch.compile. The corresponding tests should be marked with `@pytest.mark.xfail(strict=True)`.
#
# When adding a new test that fails with torch.compile, please make sure first that it does NOT fail without
# torch.compile.
import gc
import os
import pytest
import torch
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
DataCollatorForLanguageModeling,
Trainer,
TrainerCallback,
TrainingArguments,
)
from peft import (
AdaLoraConfig,
BOFTConfig,
BoneConfig,
HRAConfig,
IA3Config,
LNTuningConfig,
LoHaConfig,
LoKrConfig,
LoraConfig,
OFTConfig,
PeftModel,
TaskType,
VBLoRAConfig,
VeraConfig,
get_peft_model,
)
from .testing_utils import require_bitsandbytes
# only run (very slow) torch.compile tests when explicitly asked to
if os.environ.get("PEFT_DEBUG_WITH_TORCH_COMPILE") != "1":
pytest.skip(allow_module_level=True)
# Mapping: name of the setting -> (Peft config instance, torch.compile kwargs)
SETTINGS = {
"adalora": (AdaLoraConfig(task_type=TaskType.CAUSAL_LM, total_step=5), {}),
"boft": (BOFTConfig(task_type=TaskType.CAUSAL_LM), {}),
"dora": (LoraConfig(task_type=TaskType.CAUSAL_LM, use_dora=True), {}),
"ia3": (IA3Config(task_type=TaskType.CAUSAL_LM), {}),
"ln_tuning": (LNTuningConfig(task_type=TaskType.CAUSAL_LM, target_modules=["final_layer_norm"]), {}),
"loha": (LoHaConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"]), {}),
"lokr": pytest.param(
(LoKrConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"]), {}),
),
"lora": (LoraConfig(task_type=TaskType.CAUSAL_LM), {}),
"lora-target-embeddings": pytest.param(
(LoraConfig(task_type=TaskType.CAUSAL_LM, target_modules=["embed_tokens"]), {}),
),
"lora-with-modules-to-save": (LoraConfig(task_type=TaskType.CAUSAL_LM, modules_to_save=["embed_tokens"]), {}),
"oft": (OFTConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"]), {}),
"vblora": (VBLoRAConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"], vector_length=2), {}),
"vera": (VeraConfig(task_type=TaskType.CAUSAL_LM), {}),
"hra": (HRAConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"]), {}),
"bone": (BoneConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"], r=2), {}),
"bone-bat": (
BoneConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"], r=2, init_weights="bat"),
{},
),
}
@pytest.mark.single_gpu_tests
class TestTorchCompileCausalLM:
"""
Tests for using torch.compile with causal LM.
Tip: When adding a new test, set `fake_compile = True` below. With this setting, torch.compile is being skipped.
This is useful for two reasons:
- compile is slow, so to quickly iterate on the test, it's best to disable it and only enable it at the very end
- even if you expect the test to fail with compile, as compile does not work with every PEFT feature, it still MUST
succeed without compile, otherwise the test is incorrect.
Before creating the PR, disable `fake_compile`.
"""
fake_compile = False
model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
max_train_loss = 15.0 # generous threshold for maximum loss after training
@pytest.fixture(autouse=True)
def teardown(self):
r"""
Efficient mechanism to free GPU memory after each test. Based on
https://github.com/huggingface/transformers/issues/21094
"""
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
@pytest.fixture(scope="class")
def tokenizer(self):
return AutoTokenizer.from_pretrained(self.model_id)
@pytest.fixture(scope="class")
def data(self, tokenizer):
def tokenize(samples):
# For some reason, the max sequence length is not honored by the tokenizer, resulting in IndexErrors. Thus,
# manually ensure that sequences are not too long.
tokenized = tokenizer(samples["quote"])
tokenized["input_ids"] = [input_ids[: tokenizer.model_max_length] for input_ids in tokenized["input_ids"]]
tokenized["attention_mask"] = [
input_ids[: tokenizer.model_max_length] for input_ids in tokenized["attention_mask"]
]
return tokenized
data = load_dataset("ybelkada/english_quotes_copy")
data = data.map(tokenize, batched=True)
# We need to manually remove unused columns. This is because we cannot use remove_unused_columns=True in the
# Trainer, as this leads to errors with torch.compile. We also cannot just leave them in, as they contain
# strings. Therefore, manually remove all unused columns.
data = data.remove_columns(["quote", "author", "tags"])
return data
def compile(self, model, compile_kwargs):
compile_kwargs = compile_kwargs.copy()
# those are only for the Trainer arguments
compile_kwargs.pop("torch_compile_backend", None)
compile_kwargs.pop("torch_compile_mode", None)
if self.fake_compile:
return model
return torch.compile(model, **compile_kwargs)
@pytest.mark.parametrize("settings", SETTINGS.values(), ids=SETTINGS.keys())
def test_causal_lm_training_trainer_compile(self, settings, tokenizer, data, tmp_path):
r"""Train a PEFT model with torch.compile using Trainer"""
tmp_dir = tmp_path / "model"
config, compile_kwargs = settings
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
)
model = get_peft_model(model, config)
# record outputs before training
model.eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_before = model(sample)
model.train()
train_kwargs = {
"per_device_train_batch_size": 4,
"max_steps": 5,
"learning_rate": 1e-3,
"logging_steps": 1,
"output_dir": tmp_dir,
"seed": 0,
}
if isinstance(config, AdaLoraConfig):
train_kwargs["learning_rate"] = 1e-2
training_args = TrainingArguments(
torch_compile=not self.fake_compile,
torch_compile_backend=compile_kwargs.get("torch_compile_backend", None),
torch_compile_mode=compile_kwargs.get("torch_compile_mode", None),
**train_kwargs,
)
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=training_args,
data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
model.config.use_cache = False
if isinstance(config, AdaLoraConfig):
class OptimizerStepCallback(TrainerCallback):
def on_optimizer_step(self, args, state, control, **kwargs):
model.update_and_allocate(state.global_step)
trainer.add_callback(OptimizerStepCallback())
trainer.train()
model.eval()
atol, rtol = 1e-4, 1e-4
with torch.inference_mode():
output_after = model(sample)
tokens_after = model.generate(sample)
assert torch.isfinite(output_after.logits).all()
# sanity check: model was updated
assert not torch.allclose(output_before.logits, output_after.logits, atol=atol, rtol=rtol)
assert trainer.state.log_history[-1]["train_loss"] < self.max_train_loss
# check saving the model and loading it without compile
model.save_pretrained(tmp_path)
del model
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(self.model_id, device_map="auto")
model = PeftModel.from_pretrained(model, tmp_path)
with torch.inference_mode():
output_loaded = model(sample)
tokens_loaded = model.generate(sample)
assert torch.allclose(output_after.logits, output_loaded.logits, atol=atol, rtol=rtol)
assert (tokens_after == tokens_loaded).all()
@pytest.mark.parametrize("settings", SETTINGS.values(), ids=SETTINGS.keys())
def test_causal_lm_training_pytorch_compile(self, settings, tokenizer, data, tmp_path):
r"""Train a PEFT model with torch.compile using PyTorch training loop"""
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
)
config, compile_kwargs = settings
model = get_peft_model(model, config)
if isinstance(config, AdaLoraConfig):
model.base_model.peft_config["default"].total_step = 5
model = self.compile(model, compile_kwargs)
# record outputs before training
model.eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_before = model(sample)
model.train()
model.config.use_cache = False
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
batch_size = 4
losses = []
max_steps = 5 * batch_size
for i in range(0, max_steps, batch_size):
batch = tokenizer.pad(data["train"][i : i + batch_size], return_tensors="pt").to(model.device)
# add targets
batch["labels"] = batch["input_ids"].clone()
optimizer.zero_grad()
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
losses.append(loss.item())
if isinstance(config, AdaLoraConfig):
model.base_model.update_and_allocate(i)
model.eval()
with torch.inference_mode():
output_after = model(sample)
tokens_after = model.generate(sample)
assert torch.isfinite(output_after.logits).all()
atol, rtol = 1e-4, 1e-4
# sanity check: model was updated
assert not torch.allclose(output_before.logits, output_after.logits, atol=atol, rtol=rtol)
assert losses[-1] < self.max_train_loss
# check saving the model and loading it without compile
model.save_pretrained(tmp_path)
del model
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(self.model_id, device_map="auto")
model = PeftModel.from_pretrained(model, tmp_path)
with torch.inference_mode():
output_loaded = model(sample)
tokens_loaded = model.generate(sample)
assert torch.allclose(output_after.logits, output_loaded.logits, atol=atol, rtol=rtol)
assert (tokens_after == tokens_loaded).all()
@require_bitsandbytes
def test_causal_lm_training_lora_bnb_compile(self, tokenizer, data, tmp_path):
r"""Train a bnb quantized LoRA model with torch.compile using PyTorch training loop"""
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)
config = LoraConfig(task_type=TaskType.CAUSAL_LM)
model = get_peft_model(model, config)
model = self.compile(model, {})
# record outputs before training
model.eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_before = model(sample)
model.train()
model.config.use_cache = False
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
batch_size = 4
losses = []
max_steps = 5 * batch_size
for i in range(0, max_steps, batch_size):
batch = tokenizer.pad(data["train"][i : i + batch_size], return_tensors="pt").to(model.device)
# add targets
batch["labels"] = batch["input_ids"].clone()
optimizer.zero_grad()
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
losses.append(loss.item())
model.eval()
with torch.inference_mode():
output_after = model(sample)
assert torch.isfinite(output_after.logits).all()
atol, rtol = 5e-4, 5e-4
# sanity check: model was updated
assert not torch.allclose(output_before.logits, output_after.logits, atol=atol, rtol=rtol)
assert losses[-1] < self.max_train_loss
# check saving the model and loading it without compile
model.save_pretrained(tmp_path)
del model
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True)
)
model = PeftModel.from_pretrained(model, tmp_path)
with torch.inference_mode():
# after loading, outputs are float32 for some reason
output_loaded = model(sample)
assert torch.allclose(output_after.logits, output_loaded.logits, atol=atol, rtol=rtol)
@require_bitsandbytes
def test_causal_lm_multiple_lora_adapter_compile(self, tokenizer, data):
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
).eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_base = model(sample)
config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False)
model = get_peft_model(model, config)
model.add_adapter("other", config)
model = self.compile(model, {})
model.eval()
with torch.inference_mode():
output_default_adapter = model(sample)
model.set_adapter("other")
with torch.inference_mode():
output_other_adapter = model(sample)
atol, rtol = 1e-4, 1e-4
# outputs of the base model != output of default adapter != output of other adapter
assert not torch.allclose(output_base.logits, output_default_adapter.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_base.logits, output_other_adapter.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_default_adapter.logits, output_other_adapter.logits, atol=atol, rtol=rtol)
# now delete the other adapter
model.delete_adapter("other")
model.set_adapter("default")
with torch.inference_mode():
output_after_delete = model(sample)
# outputs after delete == output of default adapter
assert torch.allclose(output_default_adapter.logits, output_after_delete.logits, atol=atol, rtol=rtol)
def test_causal_lm_disable_lora_adapter_compile(self, tokenizer, data):
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
).eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_base = model(sample)
config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False)
model = get_peft_model(model, config).eval()
model = self.compile(model, {})
output_lora = model(sample)
with model.disable_adapter():
with torch.inference_mode():
output_disabled = model(sample)
atol, rtol = 5e-4, 5e-4
# outputs of the base model == output disabled adapter != output of lora adapter
assert torch.allclose(output_base.logits, output_disabled.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_base.logits, output_lora.logits, atol=atol, rtol=rtol)
@require_bitsandbytes
def test_causal_lm_merging_lora_adapter_compile(self, tokenizer, data):
# merge the adapter
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
).eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_base = model(sample)
config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False)
model = get_peft_model(model, config).eval()
with torch.inference_mode():
output_lora = model(sample)
model.merge_adapter()
with torch.inference_mode():
output_merged = model(sample)
# merging is less precise, be more tolerant
atol, rtol = 1e-1, 1e-1
# outputs of the base model != output of lora adapter == output of merged adapter
assert not torch.allclose(output_base.logits, output_lora.logits, atol=atol, rtol=rtol)
assert torch.allclose(output_lora.logits, output_merged.logits, atol=atol, rtol=rtol)
@require_bitsandbytes
def test_causal_lm_merging_multiple_lora_adapters_compile(self, tokenizer, data):
# merge multiple adapters at once
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
).eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_base = model(sample)
config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False)
model = get_peft_model(model, config).eval()
model.add_adapter("other", config)
with torch.inference_mode():
output_default = model(sample)
model.set_adapter("other")
with torch.inference_mode():
output_other = model(sample)
model.base_model.merge_adapter(["default", "other"])
with torch.inference_mode():
output_merged = model(sample)
# merging is less precise, be more tolerant
atol, rtol = 1e-1, 1e-1
# outputs of the base model != output of default adapter != output of other adapter
assert not torch.allclose(output_base.logits, output_default.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_base.logits, output_other.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_default.logits, output_other.logits, atol=atol, rtol=rtol)
# outputs of merged adapter != all others
assert not torch.allclose(output_base.logits, output_merged.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_default.logits, output_merged.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_other.logits, output_merged.logits, atol=atol, rtol=rtol)
@require_bitsandbytes
def test_causal_lm_merge_and_unload_lora_adapter_compile(self, tokenizer, data):
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
).eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_base = model(sample)
config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False)
model = get_peft_model(model, config).eval()
model = self.compile(model, {})
with torch.inference_mode():
output_lora = model(sample)
unloaded = model.merge_and_unload()
with torch.inference_mode():
output_unloaded = unloaded(sample)
# merging is less precise, be more tolerant
atol, rtol = 1e-1, 1e-1
# outputs of the base model != output of lora adapter == output of unloaded adapter
assert not torch.allclose(output_base.logits, output_lora.logits, atol=atol, rtol=rtol)
assert torch.allclose(output_lora.logits, output_unloaded.logits, atol=atol, rtol=rtol)
@require_bitsandbytes
def test_causal_lm_mixed_batch_lora_adapter_compile(self, tokenizer, data):
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
).eval()
# we need at least 3 samples for this to work!
sample = {
"input_ids": torch.arange(12).reshape(3, 4).to("cuda"),
"attention_mask": torch.ones(3, 4).long().to("cuda"),
}
with torch.inference_mode():
output_base = model(**sample)
config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False)
model = get_peft_model(model, config).eval()
with torch.inference_mode():
output_default = model(**sample)
model.add_adapter("other", config)
model.set_adapter("other")
with torch.inference_mode():
output_other = model(**sample)
model = self.compile(model, {})
# set adapter_indices so that it alternates between 0 (base), lora 1, and lora 2
adapter_names = ["__base__", "default", "other"]
with torch.inference_mode():
output_mixed = model(**sample, adapter_names=adapter_names)
atol, rtol = 5e-4, 5e-4
# outputs of the base model != output of lora adapter 1 != output of other adapter
assert not torch.allclose(output_base.logits, output_default.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_default.logits, output_other.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_other.logits, output_mixed.logits, atol=atol, rtol=rtol)
# outputs of mixed adapter is mix of all 3
assert torch.allclose(output_base.logits[0], output_mixed.logits[0], atol=atol, rtol=rtol)
assert torch.allclose(output_default.logits[1], output_mixed.logits[1], atol=atol, rtol=rtol)
assert torch.allclose(output_other.logits[2], output_mixed.logits[2], atol=atol, rtol=rtol)
@require_bitsandbytes
def test_causal_lm_add_weighted_adapter_lora_adapter_compile(self, tokenizer, data):
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
).eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_base = model(sample)
config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False)
model = get_peft_model(model, config).eval()
model.add_adapter("other", config)
with torch.inference_mode():
output_default = model(sample)
model.set_adapter("other")
with torch.inference_mode():
output_other = model(sample)
model.add_weighted_adapter(["default", "other"], [0.5, 0.5], adapter_name="combined")
model.set_adapter("combined")
with torch.inference_mode():
output_combined = model(sample)
atol, rtol = 1e-4, 1e-4
# outputs of the base model != output of default adapter != output of other adapter
assert not torch.allclose(output_base.logits, output_default.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_base.logits, output_other.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_default.logits, output_other.logits, atol=atol, rtol=rtol)
# outputs of combined adapter != all others
assert not torch.allclose(output_base.logits, output_combined.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_default.logits, output_combined.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_other.logits, output_combined.logits, atol=atol, rtol=rtol)
| peft/tests/test_torch_compile.py/0 | {
"file_path": "peft/tests/test_torch_compile.py",
"repo_id": "peft",
"token_count": 11038
} |
*This guideline is very much a work-in-progress.*
Contributions to `timm` for code, documentation, and tests are more than welcome!
There haven't been any formal guidelines to date, so please bear with me, and feel free to add to this guide.
# Coding style
Code linting and auto-formatting (black) are not currently in place, but are open to consideration. In the meantime, the style to follow is (mostly) aligned with Google's guide: https://google.github.io/styleguide/pyguide.html.
A few specific differences from Google style (or black):
1. Line length is 120 characters. Going over is okay in some cases (e.g. I prefer not to break URLs across lines).
2. Hanging indents are always preferred; please avoid aligning arguments with closing brackets or braces.
Example from the Google guide, but this is a NO here:
```
# Aligned with opening delimiter.
foo = long_function_name(var_one, var_two,
                         var_three, var_four)
meal = (spam,
        beans)

# Aligned with opening delimiter in a dictionary.
foo = {
    'long_dictionary_key': value1 +
                           value2,
    ...
}
```
This is YES:
```
# 4-space hanging indent; nothing on first line,
# closing parenthesis on a new line.
foo = long_function_name(
    var_one, var_two, var_three,
    var_four
)
meal = (
    spam,
    beans,
)

# 4-space hanging indent in a dictionary.
foo = {
    'long_dictionary_key':
        long_dictionary_value,
    ...
}
```
When there is a discrepancy in a given source file (there are many origins for various bits of code and not all have been updated to what I consider the current goal), please follow the existing style in that file.
In general, if you add new code, formatting it with black using the following options should result in a style that is compatible with the rest of the code base:
```
black --skip-string-normalization --line-length 120 <path-to-file>
```
Avoid formatting code that is unrelated to your PR though.
PRs with pure formatting / style fixes will be accepted, but only in isolation from functional changes; it's best to ask before starting such a change.
# Documentation
As with code style, docstring style is based on the Google guide: https://google.github.io/styleguide/pyguide.html
The goal is to eventually have all major functions and `__init__` methods use PEP 484 type annotations.
When type annotations are used for a function, as per the Google pyguide, they should **NOT** be duplicated in the docstrings; please leave annotations as the single source of truth for typing.
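For illustration, here is a sketch of the intended style (a hypothetical function, not taken from the codebase): types live in the signature, and the Google-style docstring describes arguments without repeating those types:
```
import torch


def resize_feature_map(x: torch.Tensor, scale: float = 2.0) -> torch.Tensor:
    """Resize a feature map by a scale factor.

    Args:
        x: Input feature map in NCHW layout.
        scale: Multiplier applied to the spatial dimensions.

    Returns:
        The resized feature map.
    """
    return torch.nn.functional.interpolate(x, scale_factor=scale, mode='nearest')
```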
There are a LOT of gaps in the current documentation relative to the functionality in timm, so please, document away!
# Installation
Create a Python virtual environment using Python 3.10. Inside the environment, install `torch` and `torchvision` using the instructions matching your system as listed on the [PyTorch website](https://pytorch.org/).
Then install the remaining dependencies:
```
python -m pip install -r requirements.txt
python -m pip install -r requirements-dev.txt # for testing
python -m pip install -e .
```
## Unit tests
Run the tests using:
```
pytest tests/
```
Since the whole test suite takes a lot of time to run locally (a few hours), you may want to select a subset of tests relating to the changes you made by using the `-k` option of [`pytest`](https://docs.pytest.org/en/7.1.x/example/markers.html#using-k-expr-to-select-tests-based-on-their-name). Moreover, running tests in parallel (in this example 4 processes) with the `-n` option may help:
```
pytest -k "substring-to-match" -n 4 tests/
```
## Building documentation
Please refer to [this document](https://github.com/huggingface/pytorch-image-models/tree/main/hfdocs).
# Questions
If you have any questions about contributing, or where / how to contribute, please ask in the [Discussions](https://github.com/huggingface/pytorch-image-models/discussions/categories/contributing) (there is a `Contributing` topic).
| pytorch-image-models/CONTRIBUTING.md/0 | {
"file_path": "pytorch-image-models/CONTRIBUTING.md",
"repo_id": "pytorch-image-models",
"token_count": 1223
} |
# Sharing and Loading Models From the Hugging Face Hub
The `timm` library has a built-in integration with the Hugging Face Hub, making it easy to share and load models from the 🤗 Hub.
In this short guide, we'll see how to:
1. Share a `timm` model on the Hub
2. Load that model back from the Hub
## Authenticating
First, you'll need to make sure you have the `huggingface_hub` package installed.
```bash
pip install huggingface_hub
```
Then, you'll need to authenticate yourself. You can do this by running the following command:
```bash
huggingface-cli login
```
Or, if you're using a notebook, you can use the `notebook_login` helper:
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
## Sharing a Model
```py
>>> import timm
>>> model = timm.create_model('resnet18', pretrained=True, num_classes=4)
```
Here is where you would normally train or fine-tune the model. We'll skip that for the sake of this tutorial.
Let's pretend we've now fine-tuned the model. The next step would be to push it to the Hub! We can do this with the `timm.models.hub.push_to_hf_hub` function.
```py
>>> model_cfg = dict(label_names=['a', 'b', 'c', 'd'])
>>> timm.models.push_to_hf_hub(model, 'resnet18-random', model_config=model_cfg)
```
Running the above would push the model to `<your-username>/resnet18-random` on the Hub. You can now share this model with your friends, or use it in your own code!
## Loading a Model
Loading a model from the Hub is as simple as calling `timm.create_model` with the `pretrained` argument set to the name of the model you want to load. In this case, we'll use [`nateraw/resnet18-random`](https://huggingface.co/nateraw/resnet18-random), which is the model we just pushed to the Hub.
```py
>>> model_reloaded = timm.create_model('hf_hub:nateraw/resnet18-random', pretrained=True)
```
| pytorch-image-models/hfdocs/source/hf_hub.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/hf_hub.mdx",
"repo_id": "pytorch-image-models",
"token_count": 593
} |
# Ensemble Adversarial Inception ResNet v2
**Inception-ResNet-v2** is a convolutional neural architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture).
This particular model was trained for the study of adversarial examples (adversarial training).
The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).
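For background, here is a minimal sketch of how a single adversarial example can be generated with the fast gradient sign method (FGSM). This is purely illustrative and is not the exact ensemble procedure used to train these weights; `tensor` and `label` are assumed to be a preprocessed image batch (as produced in the usage example below) and its integer target class:
```py
>>> import torch
>>> tensor.requires_grad_(True)
>>> loss = torch.nn.functional.cross_entropy(model(tensor), label)
>>> loss.backward()
>>> epsilon = 0.03  # arbitrary perturbation budget, for illustration only
>>> adv_tensor = (tensor + epsilon * tensor.grad.sign()).detach()
```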
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('ens_adv_inception_resnet_v2', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `ens_adv_inception_resnet_v2`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('ens_adv_inception_resnet_v2', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/abs-1804-00097,
author = {Alexey Kurakin and
Ian J. Goodfellow and
Samy Bengio and
Yinpeng Dong and
Fangzhou Liao and
Ming Liang and
Tianyu Pang and
Jun Zhu and
Xiaolin Hu and
Cihang Xie and
Jianyu Wang and
Zhishuai Zhang and
Zhou Ren and
Alan L. Yuille and
Sangxia Huang and
Yao Zhao and
Yuzhe Zhao and
Zhonglin Han and
Junjiajia Long and
Yerkebulan Berdibekov and
Takuya Akiba and
Seiya Tokui and
Motoki Abe},
title = {Adversarial Attacks and Defences Competition},
journal = {CoRR},
volume = {abs/1804.00097},
year = {2018},
url = {http://arxiv.org/abs/1804.00097},
archivePrefix = {arXiv},
eprint = {1804.00097},
timestamp = {Thu, 31 Oct 2019 16:31:22 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1804-00097.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: Ensemble Adversarial
Paper:
Title: Adversarial Attacks and Defences Competition
URL: https://paperswithcode.com/paper/adversarial-attacks-and-defences-competition
Models:
- Name: ens_adv_inception_resnet_v2
In Collection: Ensemble Adversarial
Metadata:
FLOPs: 16959133120
Parameters: 55850000
File Size: 223774238
Architecture:
- 1x1 Convolution
- Auxiliary Classifier
- Average Pooling
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inception-v3 Module
- Max Pooling
- ReLU
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: ens_adv_inception_resnet_v2
Crop Pct: '0.897'
Image Size: '299'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_resnet_v2.py#L351
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ens_adv_inception_resnet_v2-2592a550.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 1.0%
Top 5 Accuracy: 17.32%
--> | pytorch-image-models/hfdocs/source/models/ensemble-adversarial.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/ensemble-adversarial.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2211
} |
# RexNet
**Rank Expansion Networks** (ReXNets) follow a set of new design principles for designing bottlenecks in image classification models. The authors refine each layer by 1) expanding the input channel size of the convolution layer and 2) replacing the [ReLU6](https://www.paperswithcode.com/method/relu6) activations.
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('rexnet_100', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `rexnet_100`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('rexnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{han2020rexnet,
title={ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network},
author={Dongyoon Han and Sangdoo Yun and Byeongho Heo and YoungJoon Yoo},
year={2020},
eprint={2007.00992},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: RexNet
Paper:
Title: 'ReXNet: Diminishing Representational Bottleneck on Convolutional Neural
Network'
URL: https://paperswithcode.com/paper/rexnet-diminishing-representational
Models:
- Name: rexnet_100
In Collection: RexNet
Metadata:
FLOPs: 509989377
Parameters: 4800000
File Size: 19417552
Architecture:
- Batch Normalization
- Convolution
- Dropout
- ReLU6
- Residual Connection
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- Linear Warmup With Cosine Annealing
- Nesterov Accelerated Gradient
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x NVIDIA V100 GPUs
ID: rexnet_100
LR: 0.5
Epochs: 400
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 512
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bicubic
Label Smoothing: 0.1
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L212
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_100-1b4dddf4.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.86%
Top 5 Accuracy: 93.88%
- Name: rexnet_130
In Collection: RexNet
Metadata:
FLOPs: 848364461
Parameters: 7560000
File Size: 30508197
Architecture:
- Batch Normalization
- Convolution
- Dropout
- ReLU6
- Residual Connection
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- Linear Warmup With Cosine Annealing
- Nesterov Accelerated Gradient
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x NVIDIA V100 GPUs
ID: rexnet_130
LR: 0.5
Epochs: 400
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 512
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bicubic
Label Smoothing: 0.1
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L218
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_130-590d768e.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.49%
Top 5 Accuracy: 94.67%
- Name: rexnet_150
In Collection: RexNet
Metadata:
FLOPs: 1122374469
Parameters: 9730000
File Size: 39227315
Architecture:
- Batch Normalization
- Convolution
- Dropout
- ReLU6
- Residual Connection
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- Linear Warmup With Cosine Annealing
- Nesterov Accelerated Gradient
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x NVIDIA V100 GPUs
ID: rexnet_150
LR: 0.5
Epochs: 400
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 512
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bicubic
Label Smoothing: 0.1
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L224
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_150-bd1a6aa8.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.31%
Top 5 Accuracy: 95.16%
- Name: rexnet_200
In Collection: RexNet
Metadata:
FLOPs: 1960224938
Parameters: 16370000
File Size: 65862221
Architecture:
- Batch Normalization
- Convolution
- Dropout
- ReLU6
- Residual Connection
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- Linear Warmup With Cosine Annealing
- Nesterov Accelerated Gradient
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x NVIDIA V100 GPUs
ID: rexnet_200
LR: 0.5
Epochs: 400
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 512
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bicubic
Label Smoothing: 0.1
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/rexnet.py#L230
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_200-8c0b7f2d.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 81.63%
Top 5 Accuracy: 95.67%
--> | pytorch-image-models/hfdocs/source/models/rexnet.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/rexnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 3086
} |
# TResNet
A **TResNet** is a variant of a [ResNet](https://paperswithcode.com/method/resnet) that aims to boost accuracy while maintaining GPU training and inference efficiency. TResNets incorporate several design tricks, including a SpaceToDepth stem, [Anti-Alias downsampling](https://paperswithcode.com/method/anti-alias-downsampling), In-Place Activated BatchNorm, block selection, and [squeeze-and-excitation layers](https://paperswithcode.com/method/squeeze-and-excitation-block).
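For intuition, here is a minimal sketch of the SpaceToDepth idea used in the stem (each 4x4 block of pixels is folded into the channel dimension); this is an illustration rather than the exact timm implementation:
```py
>>> import torch
>>> def space_to_depth(x, block_size=4):
...     n, c, h, w = x.shape
...     x = x.view(n, c, h // block_size, block_size, w // block_size, block_size)
...     x = x.permute(0, 3, 5, 1, 2, 4).contiguous()
...     return x.view(n, c * block_size ** 2, h // block_size, w // block_size)
>>> print(space_to_depth(torch.randn(1, 3, 224, 224)).shape)
>>> # prints: torch.Size([1, 48, 56, 56])
```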
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('tresnet_l', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `tresnet_l`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('tresnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{ridnik2020tresnet,
title={TResNet: High Performance GPU-Dedicated Architecture},
author={Tal Ridnik and Hussam Lawen and Asaf Noy and Emanuel Ben Baruch and Gilad Sharir and Itamar Friedman},
year={2020},
eprint={2003.13630},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: TResNet
Paper:
Title: 'TResNet: High Performance GPU-Dedicated Architecture'
URL: https://paperswithcode.com/paper/tresnet-high-performance-gpu-dedicated
Models:
- Name: tresnet_l
In Collection: TResNet
Metadata:
FLOPs: 10873416792
Parameters: 53456696
File Size: 224440219
Architecture:
- 1x1 Convolution
- Anti-Alias Downsampling
- Convolution
- Global Average Pooling
- InPlace-ABN
- Leaky ReLU
- ReLU
- Residual Connection
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Cutout
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA 100 GPUs
ID: tresnet_l
LR: 0.01
Epochs: 300
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L267
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_81_5-235b486c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 81.49%
Top 5 Accuracy: 95.62%
- Name: tresnet_l_448
In Collection: TResNet
Metadata:
FLOPs: 43488238584
Parameters: 53456696
File Size: 224440219
Architecture:
- 1x1 Convolution
- Anti-Alias Downsampling
- Convolution
- Global Average Pooling
- InPlace-ABN
- Leaky ReLU
- ReLU
- Residual Connection
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Cutout
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA 100 GPUs
ID: tresnet_l_448
LR: 0.01
Epochs: 300
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '448'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L285
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.26%
Top 5 Accuracy: 95.98%
- Name: tresnet_m
In Collection: TResNet
Metadata:
FLOPs: 5733048064
Parameters: 41282200
File Size: 125861314
Architecture:
- 1x1 Convolution
- Anti-Alias Downsampling
- Convolution
- Global Average Pooling
- InPlace-ABN
- Leaky ReLU
- ReLU
- Residual Connection
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Cutout
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA 100 GPUs
Training Time: < 24 hours
ID: tresnet_m
LR: 0.01
Epochs: 300
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L261
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_80_8-dbc13962.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.8%
Top 5 Accuracy: 94.86%
- Name: tresnet_m_448
In Collection: TResNet
Metadata:
FLOPs: 22929743104
Parameters: 29278464
File Size: 125861314
Architecture:
- 1x1 Convolution
- Anti-Alias Downsampling
- Convolution
- Global Average Pooling
- InPlace-ABN
- Leaky ReLU
- ReLU
- Residual Connection
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Cutout
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA 100 GPUs
ID: tresnet_m_448
LR: 0.01
Epochs: 300
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '448'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L279
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_448-bc359d10.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 81.72%
Top 5 Accuracy: 95.57%
- Name: tresnet_xl
In Collection: TResNet
Metadata:
FLOPs: 15162534034
Parameters: 75646610
File Size: 314378965
Architecture:
- 1x1 Convolution
- Anti-Alias Downsampling
- Convolution
- Global Average Pooling
- InPlace-ABN
- Leaky ReLU
- ReLU
- Residual Connection
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Cutout
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA 100 GPUs
ID: tresnet_xl
LR: 0.01
Epochs: 300
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L273
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_82_0-a2d51b00.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.05%
Top 5 Accuracy: 95.93%
- Name: tresnet_xl_448
In Collection: TResNet
Metadata:
FLOPs: 60641712730
Parameters: 75646610
File Size: 224440219
Architecture:
- 1x1 Convolution
- Anti-Alias Downsampling
- Convolution
- Global Average Pooling
- InPlace-ABN
- Leaky ReLU
- ReLU
- Residual Connection
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Cutout
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA 100 GPUs
ID: tresnet_xl_448
LR: 0.01
Epochs: 300
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '448'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L291
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 83.06%
Top 5 Accuracy: 96.19%
--> | pytorch-image-models/hfdocs/source/models/tresnet.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/tresnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 4202
} |
import numpy as np
import pandas as pd
results = {
'results-imagenet.csv': [
'results-imagenet-real.csv',
'results-imagenetv2-matched-frequency.csv',
'results-sketch.csv'
],
'results-imagenet-a-clean.csv': [
'results-imagenet-a.csv',
],
'results-imagenet-r-clean.csv': [
'results-imagenet-r.csv',
],
}
def diff(base_df, test_csv):
base_df['mi'] = base_df.model + '-' + base_df.img_size.astype('str')
base_models = base_df['mi'].values
test_df = pd.read_csv(test_csv)
test_df['mi'] = test_df.model + '-' + test_df.img_size.astype('str')
test_models = test_df['mi'].values
rank_diff = np.zeros_like(test_models, dtype='object')
top1_diff = np.zeros_like(test_models, dtype='object')
top5_diff = np.zeros_like(test_models, dtype='object')
for rank, model in enumerate(test_models):
if model in base_models:
base_rank = int(np.where(base_models == model)[0])
top1_d = test_df['top1'][rank] - base_df['top1'][base_rank]
top5_d = test_df['top5'][rank] - base_df['top5'][base_rank]
# rank_diff
if rank == base_rank:
rank_diff[rank] = f'0'
elif rank > base_rank:
rank_diff[rank] = f'-{rank - base_rank}'
else:
rank_diff[rank] = f'+{base_rank - rank}'
# top1_diff
if top1_d >= .0:
top1_diff[rank] = f'+{top1_d:.3f}'
else:
top1_diff[rank] = f'-{abs(top1_d):.3f}'
# top5_diff
if top5_d >= .0:
top5_diff[rank] = f'+{top5_d:.3f}'
else:
top5_diff[rank] = f'-{abs(top5_d):.3f}'
else:
rank_diff[rank] = ''
top1_diff[rank] = ''
top5_diff[rank] = ''
test_df['top1_diff'] = top1_diff
test_df['top5_diff'] = top5_diff
test_df['rank_diff'] = rank_diff
test_df.drop('mi', axis=1, inplace=True)
base_df.drop('mi', axis=1, inplace=True)
test_df['param_count'] = test_df['param_count'].map('{:,.2f}'.format)
test_df.sort_values(['top1', 'top5', 'model'], ascending=[False, False, True], inplace=True)
test_df.to_csv(test_csv, index=False, float_format='%.3f')
for base_results, test_results in results.items():
base_df = pd.read_csv(base_results)
base_df.sort_values(['top1', 'top5', 'model'], ascending=[False, False, True], inplace=True)
for test_csv in test_results:
diff(base_df, test_csv)
base_df['param_count'] = base_df['param_count'].map('{:,.2f}'.format)
base_df.to_csv(base_results, index=False, float_format='%.3f')
| pytorch-image-models/results/generate_csv_results.py/0 | {
"file_path": "pytorch-image-models/results/generate_csv_results.py",
"repo_id": "pytorch-image-models",
"token_count": 1453
} |
from .version import __version__ as __version__
from .layers import (
is_scriptable as is_scriptable,
is_exportable as is_exportable,
set_scriptable as set_scriptable,
set_exportable as set_exportable,
)
from .models import (
create_model as create_model,
list_models as list_models,
list_pretrained as list_pretrained,
is_model as is_model,
list_modules as list_modules,
model_entrypoint as model_entrypoint,
is_model_pretrained as is_model_pretrained,
get_pretrained_cfg as get_pretrained_cfg,
get_pretrained_cfg_value as get_pretrained_cfg_value,
)
| pytorch-image-models/timm/__init__.py/0 | {
"file_path": "pytorch-image-models/timm/__init__.py",
"repo_id": "pytorch-image-models",
"token_count": 219
} |
""" Mixup and Cutmix
Papers:
mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)
CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899)
Code Reference:
CutMix: https://github.com/clovaai/CutMix-PyTorch
Hacked together by / Copyright 2019, Ross Wightman
"""
import numpy as np
import torch
def one_hot(x, num_classes, on_value=1., off_value=0.):
x = x.long().view(-1, 1)
return torch.full((x.size()[0], num_classes), off_value, device=x.device).scatter_(1, x, on_value)
def mixup_target(target, num_classes, lam=1., smoothing=0.0):
off_value = smoothing / num_classes
on_value = 1. - smoothing + off_value
y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value)
y2 = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value)
return y1 * lam + y2 * (1. - lam)
def rand_bbox(img_shape, lam, margin=0., count=None):
""" Standard CutMix bounding-box
Generates a random square bbox based on lambda value. This impl includes
support for enforcing a border margin as percent of bbox dimensions.
Args:
img_shape (tuple): Image shape as tuple
lam (float): Cutmix lambda value
margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
count (int): Number of bbox to generate
"""
ratio = np.sqrt(1 - lam)
img_h, img_w = img_shape[-2:]
cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)
margin_y, margin_x = int(margin * cut_h), int(margin * cut_w)
cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)
cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)
yl = np.clip(cy - cut_h // 2, 0, img_h)
yh = np.clip(cy + cut_h // 2, 0, img_h)
xl = np.clip(cx - cut_w // 2, 0, img_w)
xh = np.clip(cx + cut_w // 2, 0, img_w)
return yl, yh, xl, xh
def rand_bbox_minmax(img_shape, minmax, count=None):
""" Min-Max CutMix bounding-box
Inspired by Darknet cutmix impl, generates a random rectangular bbox
based on min/max percent values applied to each dimension of the input image.
    Typical defaults for minmax are usually in the .2-.3 range for min and the .8-.9 range for max.
Args:
img_shape (tuple): Image shape as tuple
minmax (tuple or list): Min and max bbox ratios (as percent of image size)
count (int): Number of bbox to generate
"""
assert len(minmax) == 2
img_h, img_w = img_shape[-2:]
cut_h = np.random.randint(int(img_h * minmax[0]), int(img_h * minmax[1]), size=count)
cut_w = np.random.randint(int(img_w * minmax[0]), int(img_w * minmax[1]), size=count)
yl = np.random.randint(0, img_h - cut_h, size=count)
xl = np.random.randint(0, img_w - cut_w, size=count)
yu = yl + cut_h
xu = xl + cut_w
return yl, yu, xl, xu
def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None):
""" Generate bbox and apply lambda correction.
"""
if ratio_minmax is not None:
yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count)
else:
yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count)
if correct_lam or ratio_minmax is not None:
bbox_area = (yu - yl) * (xu - xl)
lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1])
return (yl, yu, xl, xu), lam
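# Worked example (illustrative, not part of the original module): for a 224x224 image and
# lam=0.75, ratio = sqrt(1 - 0.75) = 0.5, so the cut region is 112x112. If the box lies fully
# inside the image, the corrected lam = 1 - (112 * 112) / (224 * 224) = 0.75 as expected,
# while clipping at a border shrinks the box and pushes lam back toward 1.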
class Mixup:
""" Mixup/Cutmix that applies different params to each element or whole batch
Args:
mixup_alpha (float): mixup alpha value, mixup is active if > 0.
cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.
cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.
prob (float): probability of applying mixup or cutmix per batch or element
switch_prob (float): probability of switching to cutmix instead of mixup when both are active
        mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element))
correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders
label_smoothing (float): apply label smoothing to the mixed target tensor
num_classes (int): number of classes for target
"""
def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,
mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000):
self.mixup_alpha = mixup_alpha
self.cutmix_alpha = cutmix_alpha
self.cutmix_minmax = cutmix_minmax
if self.cutmix_minmax is not None:
assert len(self.cutmix_minmax) == 2
# force cutmix alpha == 1.0 when minmax active to keep logic simple & safe
self.cutmix_alpha = 1.0
self.mix_prob = prob
self.switch_prob = switch_prob
self.label_smoothing = label_smoothing
self.num_classes = num_classes
self.mode = mode
self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix
        self.mixup_enabled = True  # set to false to disable mixing (intended to be set by train loop)
def _params_per_elem(self, batch_size):
lam = np.ones(batch_size, dtype=np.float32)
use_cutmix = np.zeros(batch_size, dtype=bool)
if self.mixup_enabled:
if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
use_cutmix = np.random.rand(batch_size) < self.switch_prob
lam_mix = np.where(
use_cutmix,
np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size),
np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size))
elif self.mixup_alpha > 0.:
lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)
elif self.cutmix_alpha > 0.:
use_cutmix = np.ones(batch_size, dtype=bool)
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)
else:
assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)
return lam, use_cutmix
def _params_per_batch(self):
lam = 1.
use_cutmix = False
if self.mixup_enabled and np.random.rand() < self.mix_prob:
if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
use_cutmix = np.random.rand() < self.switch_prob
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \
np.random.beta(self.mixup_alpha, self.mixup_alpha)
elif self.mixup_alpha > 0.:
lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)
elif self.cutmix_alpha > 0.:
use_cutmix = True
lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)
else:
assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
lam = float(lam_mix)
return lam, use_cutmix
def _mix_elem(self, x):
batch_size = len(x)
lam_batch, use_cutmix = self._params_per_elem(batch_size)
x_orig = x.clone() # need to keep an unmodified original for mixing source
for i in range(batch_size):
j = batch_size - i - 1
lam = lam_batch[i]
if lam != 1.:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
x[i] = x[i] * lam + x_orig[j] * (1 - lam)
return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)
def _mix_pair(self, x):
batch_size = len(x)
lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
x_orig = x.clone() # need to keep an unmodified original for mixing source
for i in range(batch_size // 2):
j = batch_size - i - 1
lam = lam_batch[i]
if lam != 1.:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
x[i] = x[i] * lam + x_orig[j] * (1 - lam)
x[j] = x[j] * lam + x_orig[i] * (1 - lam)
lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)
def _mix_batch(self, x):
lam, use_cutmix = self._params_per_batch()
if lam == 1.:
return 1.
if use_cutmix:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]
else:
x_flipped = x.flip(0).mul_(1. - lam)
x.mul_(lam).add_(x_flipped)
return lam
def __call__(self, x, target):
assert len(x) % 2 == 0, 'Batch size should be even when using this'
if self.mode == 'elem':
lam = self._mix_elem(x)
elif self.mode == 'pair':
lam = self._mix_pair(x)
else:
lam = self._mix_batch(x)
target = mixup_target(target, self.num_classes, lam, self.label_smoothing)
return x, target
class FastCollateMixup(Mixup):
""" Fast Collate w/ Mixup/Cutmix that applies different params to each element or whole batch
A Mixup impl that's performed while collating the batches.
"""
def _mix_elem_collate(self, output, batch, half=False):
batch_size = len(batch)
num_elem = batch_size // 2 if half else batch_size
assert len(output) == num_elem
lam_batch, use_cutmix = self._params_per_elem(num_elem)
for i in range(num_elem):
j = batch_size - i - 1
lam = lam_batch[i]
mixed = batch[i][0]
if lam != 1.:
if use_cutmix[i]:
if not half:
mixed = mixed.copy()
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
np.rint(mixed, out=mixed)
output[i] += torch.from_numpy(mixed.astype(np.uint8))
if half:
lam_batch = np.concatenate((lam_batch, np.ones(num_elem)))
return torch.tensor(lam_batch).unsqueeze(1)
def _mix_pair_collate(self, output, batch):
batch_size = len(batch)
lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
for i in range(batch_size // 2):
j = batch_size - i - 1
lam = lam_batch[i]
mixed_i = batch[i][0]
mixed_j = batch[j][0]
assert 0 <= lam <= 1.0
if lam < 1.:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
patch_i = mixed_i[:, yl:yh, xl:xh].copy()
mixed_i[:, yl:yh, xl:xh] = mixed_j[:, yl:yh, xl:xh]
mixed_j[:, yl:yh, xl:xh] = patch_i
lam_batch[i] = lam
else:
mixed_temp = mixed_i.astype(np.float32) * lam + mixed_j.astype(np.float32) * (1 - lam)
mixed_j = mixed_j.astype(np.float32) * lam + mixed_i.astype(np.float32) * (1 - lam)
mixed_i = mixed_temp
np.rint(mixed_j, out=mixed_j)
np.rint(mixed_i, out=mixed_i)
output[i] += torch.from_numpy(mixed_i.astype(np.uint8))
output[j] += torch.from_numpy(mixed_j.astype(np.uint8))
lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
return torch.tensor(lam_batch).unsqueeze(1)
def _mix_batch_collate(self, output, batch):
batch_size = len(batch)
lam, use_cutmix = self._params_per_batch()
if use_cutmix:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
for i in range(batch_size):
j = batch_size - i - 1
mixed = batch[i][0]
if lam != 1.:
if use_cutmix:
mixed = mixed.copy() # don't want to modify the original while iterating
mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
else:
mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
np.rint(mixed, out=mixed)
output[i] += torch.from_numpy(mixed.astype(np.uint8))
return lam
def __call__(self, batch, _=None):
batch_size = len(batch)
assert batch_size % 2 == 0, 'Batch size should be even when using this'
half = 'half' in self.mode
if half:
batch_size //= 2
output = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
if self.mode == 'elem' or self.mode == 'half':
lam = self._mix_elem_collate(output, batch, half=half)
elif self.mode == 'pair':
lam = self._mix_pair_collate(output, batch)
else:
lam = self._mix_batch_collate(output, batch)
target = torch.tensor([b[1] for b in batch], dtype=torch.int64)
target = mixup_target(target, self.num_classes, lam, self.label_smoothing)
target = target[:batch_size]
return output, target
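# --- Illustrative usage sketch (not part of the original module) ---
# Mixing a batch of float images with integer labels; all values below are made up.
if __name__ == '__main__':
    mixup_fn = Mixup(mixup_alpha=0.8, cutmix_alpha=1.0, label_smoothing=0.1, num_classes=10)
    images = torch.rand(8, 3, 224, 224)  # batch size must be even
    labels = torch.randint(0, 10, (8,))
    mixed_images, soft_labels = mixup_fn(images, labels)
    print(mixed_images.shape, soft_labels.shape)  # (8, 3, 224, 224) and (8, 10) soft targets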
| pytorch-image-models/timm/data/mixup.py/0 | {
"file_path": "pytorch-image-models/timm/data/mixup.py",
"repo_id": "pytorch-image-models",
"token_count": 7225
} |
""" Tensorflow Preprocessing Adapter
Allows use of Tensorflow preprocessing pipeline in PyTorch Transform
Copyright of original Tensorflow code below.
Hacked together by / Copyright 2020 Ross Wightman
"""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ImageNet preprocessing for MnasNet."""
import tensorflow.compat.v1 as tf
import numpy as np
IMAGE_SIZE = 224
CROP_PADDING = 32
tf.compat.v1.disable_eager_execution()
def distorted_bounding_box_crop(image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image_bytes: `Tensor` of binary image data.
bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
where each coordinate is [0, 1) and the coordinates are arranged
as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding
box supplied.
aspect_ratio_range: An optional list of `float`s. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `float`s. The cropped area of the image
      must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional `str` for name scope.
Returns:
cropped image `Tensor`
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]):
shape = tf.image.extract_jpeg_shape(image_bytes)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
def _at_least_x_are_equal(a, b, x):
"""At least `x` of `a` and `b` `Tensors` are equal."""
match = tf.equal(a, b)
match = tf.cast(match, tf.int32)
return tf.greater_equal(tf.reduce_sum(match), x)
def _decode_and_random_crop(image_bytes, image_size, resize_method):
"""Make a random crop of image_size."""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = distorted_bounding_box_crop(
image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4, 4. / 3.),
area_range=(0.08, 1.0),
max_attempts=10,
scope=None)
original_shape = tf.image.extract_jpeg_shape(image_bytes)
bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
  image = tf.cond(
      bad,
      lambda: _decode_and_center_crop(image_bytes, image_size, resize_method),
      lambda: tf.image.resize([image], [image_size, image_size], resize_method)[0])
return image
def _decode_and_center_crop(image_bytes, image_size, resize_method):
"""Crops to center of image with padding then scales image_size."""
shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + CROP_PADDING)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
image = tf.image.resize([image], [image_size, image_size], resize_method)[0]
return image
def _flip(image):
"""Random horizontal image flip."""
image = tf.image.random_flip_left_right(image)
return image
def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
interpolation: image interpolation method
Returns:
A preprocessed image `Tensor`.
"""
resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR
image = _decode_and_random_crop(image_bytes, image_size, resize_method)
image = _flip(image)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.image.convert_image_dtype(
image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
return image
def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
interpolation: image interpolation method
Returns:
A preprocessed image `Tensor`.
"""
resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR
image = _decode_and_center_crop(image_bytes, image_size, resize_method)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.image.convert_image_dtype(
image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
return image
def preprocess_image(image_bytes,
is_training=False,
use_bfloat16=False,
image_size=IMAGE_SIZE,
interpolation='bicubic'):
"""Preprocesses the given image.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
is_training: `bool` for whether the preprocessing is for training.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
interpolation: image interpolation method
Returns:
A preprocessed image `Tensor` with value range of [0, 255].
"""
if is_training:
return preprocess_for_train(image_bytes, use_bfloat16, image_size, interpolation)
else:
return preprocess_for_eval(image_bytes, use_bfloat16, image_size, interpolation)
class TfPreprocessTransform:
def __init__(self, is_training=False, size=224, interpolation='bicubic'):
self.is_training = is_training
self.size = size[0] if isinstance(size, tuple) else size
self.interpolation = interpolation
self._image_bytes = None
self.process_image = self._build_tf_graph()
self.sess = None
def _build_tf_graph(self):
with tf.device('/cpu:0'):
self._image_bytes = tf.placeholder(
shape=[],
dtype=tf.string,
)
img = preprocess_image(
self._image_bytes, self.is_training, False, self.size, self.interpolation)
return img
def __call__(self, image_bytes):
if self.sess is None:
self.sess = tf.Session()
img = self.sess.run(self.process_image, feed_dict={self._image_bytes: image_bytes})
img = img.round().clip(0, 255).astype(np.uint8)
if img.ndim < 3:
img = np.expand_dims(img, axis=-1)
img = np.rollaxis(img, 2) # HWC to CHW
return img
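# --- Illustrative usage sketch (not part of the original module) ---
# Converts raw JPEG bytes to a CHW uint8 numpy array; 'dog.jpg' is a placeholder path.
if __name__ == '__main__':
    transform = TfPreprocessTransform(is_training=False, size=224, interpolation='bicubic')
    with open('dog.jpg', 'rb') as f:
        chw_uint8 = transform(f.read())
    print(chw_uint8.shape)  # (3, 224, 224)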
| pytorch-image-models/timm/data/tf_preprocessing.py/0 | {
"file_path": "pytorch-image-models/timm/data/tf_preprocessing.py",
"repo_id": "pytorch-image-models",
"token_count": 3775
} |
""" Conv2d w/ Same Padding
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple, Optional
from .config import is_exportable, is_scriptable
from .padding import pad_same, pad_same_arg, get_padding_value
_USE_EXPORT_CONV = False
def conv2d_same(
x,
weight: torch.Tensor,
bias: Optional[torch.Tensor] = None,
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
dilation: Tuple[int, int] = (1, 1),
groups: int = 1,
):
x = pad_same(x, weight.shape[-2:], stride, dilation)
return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
class Conv2dSame(nn.Conv2d):
""" Tensorflow like 'SAME' convolution wrapper for 2D convolutions
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
):
super(Conv2dSame, self).__init__(
in_channels, out_channels, kernel_size,
stride, 0, dilation, groups, bias,
)
def forward(self, x):
return conv2d_same(
x, self.weight, self.bias,
self.stride, self.padding, self.dilation, self.groups,
)
class Conv2dSameExport(nn.Conv2d):
""" ONNX export friendly Tensorflow like 'SAME' convolution wrapper for 2D convolutions
NOTE: This does not currently work with torch.jit.script
"""
# pylint: disable=unused-argument
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
):
super(Conv2dSameExport, self).__init__(
in_channels, out_channels, kernel_size,
stride, 0, dilation, groups, bias,
)
self.pad = None
self.pad_input_size = (0, 0)
def forward(self, x):
input_size = x.size()[-2:]
if self.pad is None:
pad_arg = pad_same_arg(input_size, self.weight.size()[-2:], self.stride, self.dilation)
self.pad = nn.ZeroPad2d(pad_arg)
self.pad_input_size = input_size
x = self.pad(x)
return F.conv2d(
x, self.weight, self.bias,
self.stride, self.padding, self.dilation, self.groups,
)
def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
padding = kwargs.pop('padding', '')
kwargs.setdefault('bias', False)
padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
if is_dynamic:
if _USE_EXPORT_CONV and is_exportable():
# older PyTorch ver needed this to export same padding reasonably
assert not is_scriptable() # Conv2DSameExport does not work with jit
return Conv2dSameExport(in_chs, out_chs, kernel_size, **kwargs)
else:
return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs)
else:
return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
| pytorch-image-models/timm/layers/conv2d_same.py/0 | {
"file_path": "pytorch-image-models/timm/layers/conv2d_same.py",
"repo_id": "pytorch-image-models",
"token_count": 1560
} |
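A short sketch of the 'SAME' convolution wrappers above, showing that the output spatial size is ceil(input / stride) regardless of kernel size. The import path follows the metadata file_path; the tensor sizes are arbitrary.

import torch
from timm.layers.conv2d_same import Conv2dSame, create_conv2d_pad

x = torch.randn(1, 8, 15, 15)                      # odd spatial size to exercise asymmetric padding

conv = Conv2dSame(8, 16, kernel_size=3, stride=2)
print(conv(x).shape)                               # torch.Size([1, 16, 8, 8]) == ceil(15 / 2)

# create_conv2d_pad resolves padding='same' to static padding when possible,
# otherwise it falls back to the dynamic Conv2dSame module (as here, with stride=2).
conv_dyn = create_conv2d_pad(8, 16, kernel_size=3, stride=2, padding='same')
print(type(conv_dyn).__name__, conv_dyn(x).shape)  # Conv2dSame torch.Size([1, 16, 8, 8])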
""" Global Response Normalization Module
Based on the GRN layer presented in
`ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808
This implementation
* works for both NCHW and NHWC tensor layouts
* uses affine param names matching existing torch norm layers
* slightly improves eager mode performance via fused addcmul
Hacked together by / Copyright 2023 Ross Wightman
"""
import torch
from torch import nn as nn
class GlobalResponseNorm(nn.Module):
""" Global Response Normalization layer
"""
def __init__(self, dim, eps=1e-6, channels_last=True):
super().__init__()
self.eps = eps
if channels_last:
self.spatial_dim = (1, 2)
self.channel_dim = -1
self.wb_shape = (1, 1, 1, -1)
else:
self.spatial_dim = (2, 3)
self.channel_dim = 1
self.wb_shape = (1, -1, 1, 1)
self.weight = nn.Parameter(torch.zeros(dim))
self.bias = nn.Parameter(torch.zeros(dim))
def forward(self, x):
x_g = x.norm(p=2, dim=self.spatial_dim, keepdim=True)
x_n = x_g / (x_g.mean(dim=self.channel_dim, keepdim=True) + self.eps)
return x + torch.addcmul(self.bias.view(self.wb_shape), self.weight.view(self.wb_shape), x * x_n)
| pytorch-image-models/timm/layers/grn.py/0 | {
"file_path": "pytorch-image-models/timm/layers/grn.py",
"repo_id": "pytorch-image-models",
"token_count": 565
} |
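A brief sketch of GlobalResponseNorm in both supported layouts. Since weight and bias are zero-initialised, the layer is an identity mapping at initialisation; the import path follows the metadata file_path.

import torch
from timm.layers.grn import GlobalResponseNorm

x_nhwc = torch.randn(2, 14, 14, 64)
grn_cl = GlobalResponseNorm(64, channels_last=True)
print(torch.allclose(grn_cl(x_nhwc), x_nhwc))   # True: x + 0 + 0 * (x * x_n) == x at init

x_nchw = torch.randn(2, 64, 14, 14)
grn_cf = GlobalResponseNorm(64, channels_last=False)
print(grn_cf(x_nchw).shape)                     # torch.Size([2, 64, 14, 14])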
""" Padding Helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
from typing import List, Tuple, Union
import torch
import torch.nn.functional as F
from .helpers import to_2tuple
# Calculate symmetric padding for a convolution
def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> Union[int, List[int]]:
if any([isinstance(v, (tuple, list)) for v in [kernel_size, stride, dilation]]):
kernel_size, stride, dilation = to_2tuple(kernel_size), to_2tuple(stride), to_2tuple(dilation)
return [get_padding(*a) for a in zip(kernel_size, stride, dilation)]
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
return padding
# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution
def get_same_padding(x: int, kernel_size: int, stride: int, dilation: int):
if isinstance(x, torch.Tensor):
return torch.clamp(((x / stride).ceil() - 1) * stride + (kernel_size - 1) * dilation + 1 - x, min=0)
else:
return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0)
# Can SAME padding for given args be done statically?
def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_):
if any([isinstance(v, (tuple, list)) for v in [kernel_size, stride, dilation]]):
kernel_size, stride, dilation = to_2tuple(kernel_size), to_2tuple(stride), to_2tuple(dilation)
return all([is_static_pad(*a) for a in zip(kernel_size, stride, dilation)])
return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0
def pad_same_arg(
input_size: List[int],
kernel_size: List[int],
stride: List[int],
dilation: List[int] = (1, 1),
) -> List[int]:
ih, iw = input_size
kh, kw = kernel_size
pad_h = get_same_padding(ih, kh, stride[0], dilation[0])
pad_w = get_same_padding(iw, kw, stride[1], dilation[1])
return [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]
# Dynamically pad input x with 'SAME' padding for conv with specified args
def pad_same(
x,
kernel_size: List[int],
stride: List[int],
dilation: List[int] = (1, 1),
value: float = 0,
):
ih, iw = x.size()[-2:]
pad_h = get_same_padding(ih, kernel_size[0], stride[0], dilation[0])
pad_w = get_same_padding(iw, kernel_size[1], stride[1], dilation[1])
x = F.pad(x, (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2), value=value)
return x
def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]:
dynamic = False
if isinstance(padding, str):
# for any string padding, the padding will be calculated for you, one of three ways
padding = padding.lower()
if padding == 'same':
# TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
if is_static_pad(kernel_size, **kwargs):
# static case, no extra overhead
padding = get_padding(kernel_size, **kwargs)
else:
# dynamic 'SAME' padding, has runtime/GPU memory overhead
padding = 0
dynamic = True
elif padding == 'valid':
# 'VALID' padding, same as padding=0
padding = 0
else:
# Default to PyTorch style 'same'-ish symmetric padding
padding = get_padding(kernel_size, **kwargs)
return padding, dynamic
| pytorch-image-models/timm/layers/padding.py/0 | {
"file_path": "pytorch-image-models/timm/layers/padding.py",
"repo_id": "pytorch-image-models",
"token_count": 1439
} |
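A worked example for the 'SAME' padding helpers above: for input size 15, kernel 3, stride 2, dilation 1, the total padding is (ceil(15 / 2) - 1) * 2 + (3 - 1) * 1 + 1 - 15 = 2, which pad_same_arg splits as 1 before / 1 after on each axis. The import path follows the metadata file_path; sizes are arbitrary.

import torch
from timm.layers.padding import get_same_padding, pad_same_arg, pad_same

print(get_same_padding(15, kernel_size=3, stride=2, dilation=1))   # 2
print(pad_same_arg((15, 15), (3, 3), stride=(2, 2)))               # [1, 1, 1, 1]

x = torch.randn(1, 8, 15, 15)
print(pad_same(x, (3, 3), stride=(2, 2)).shape)                    # torch.Size([1, 8, 17, 17])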
import collections.abc
import math
import re
from collections import defaultdict
from itertools import chain
from typing import Any, Callable, Dict, Iterator, Optional, Tuple, Type, Union
import torch
import torch.utils.checkpoint
from torch import nn as nn
from timm.layers import use_reentrant_ckpt
__all__ = ['model_parameters', 'named_apply', 'named_modules', 'named_modules_with_params', 'adapt_input_conv',
'group_with_matcher', 'group_modules', 'group_parameters', 'flatten_modules', 'checkpoint_seq', 'checkpoint']
def model_parameters(model: nn.Module, exclude_head: bool = False):
if exclude_head:
        # FIXME this is a bit of a quick and dirty hack to skip classifier head params based on ordering
return [p for p in model.parameters()][:-2]
else:
return model.parameters()
def named_apply(
fn: Callable,
module: nn.Module, name='',
depth_first: bool = True,
include_root: bool = False,
) -> nn.Module:
if not depth_first and include_root:
fn(module=module, name=name)
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if depth_first and include_root:
fn(module=module, name=name)
return module
def named_modules(
module: nn.Module,
name: str = '',
depth_first: bool = True,
include_root: bool = False,
):
if not depth_first and include_root:
yield name, module
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
yield from named_modules(
module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if depth_first and include_root:
yield name, module
def named_modules_with_params(
module: nn.Module,
name: str = '',
depth_first: bool = True,
include_root: bool = False,
):
if module._parameters and not depth_first and include_root:
yield name, module
for child_name, child_module in module.named_children():
child_name = '.'.join((name, child_name)) if name else child_name
yield from named_modules_with_params(
module=child_module, name=child_name, depth_first=depth_first, include_root=True)
if module._parameters and depth_first and include_root:
yield name, module
MATCH_PREV_GROUP = (99999,)
def group_with_matcher(
named_objects: Iterator[Tuple[str, Any]],
group_matcher: Union[Dict, Callable],
return_values: bool = False,
reverse: bool = False
):
if isinstance(group_matcher, dict):
# dictionary matcher contains a dict of raw-string regex expr that must be compiled
compiled = []
for group_ordinal, (group_name, mspec) in enumerate(group_matcher.items()):
if mspec is None:
continue
# map all matching specifications into 3-tuple (compiled re, prefix, suffix)
if isinstance(mspec, (tuple, list)):
# multi-entry match specifications require each sub-spec to be a 2-tuple (re, suffix)
for sspec in mspec:
compiled += [(re.compile(sspec[0]), (group_ordinal,), sspec[1])]
else:
compiled += [(re.compile(mspec), (group_ordinal,), None)]
group_matcher = compiled
def _get_grouping(name):
if isinstance(group_matcher, (list, tuple)):
for match_fn, prefix, suffix in group_matcher:
r = match_fn.match(name)
if r:
parts = (prefix, r.groups(), suffix)
# map all tuple elem to int for numeric sort, filter out None entries
return tuple(map(float, chain.from_iterable(filter(None, parts))))
return float('inf'), # un-matched layers (neck, head) mapped to largest ordinal
else:
ord = group_matcher(name)
if not isinstance(ord, collections.abc.Iterable):
return ord,
return tuple(ord)
# map layers into groups via ordinals (ints or tuples of ints) from matcher
grouping = defaultdict(list)
for k, v in named_objects:
grouping[_get_grouping(k)].append(v if return_values else k)
# remap to integers
layer_id_to_param = defaultdict(list)
lid = -1
for k in sorted(filter(lambda x: x is not None, grouping.keys())):
if lid < 0 or k[-1] != MATCH_PREV_GROUP[0]:
lid += 1
layer_id_to_param[lid].extend(grouping[k])
if reverse:
assert not return_values, "reverse mapping only sensible for name output"
# output reverse mapping
param_to_layer_id = {}
for lid, lm in layer_id_to_param.items():
for n in lm:
param_to_layer_id[n] = lid
return param_to_layer_id
return layer_id_to_param
def group_parameters(
module: nn.Module,
group_matcher,
return_values: bool = False,
reverse: bool = False,
):
return group_with_matcher(
module.named_parameters(), group_matcher, return_values=return_values, reverse=reverse)
def group_modules(
module: nn.Module,
group_matcher,
return_values: bool = False,
reverse: bool = False,
):
return group_with_matcher(
named_modules_with_params(module), group_matcher, return_values=return_values, reverse=reverse)
def flatten_modules(
named_modules: Iterator[Tuple[str, nn.Module]],
depth: int = 1,
prefix: Union[str, Tuple[str, ...]] = '',
module_types: Union[str, Tuple[Type[nn.Module]]] = 'sequential',
):
prefix_is_tuple = isinstance(prefix, tuple)
if isinstance(module_types, str):
if module_types == 'container':
module_types = (nn.Sequential, nn.ModuleList, nn.ModuleDict)
else:
module_types = (nn.Sequential,)
for name, module in named_modules:
if depth and isinstance(module, module_types):
yield from flatten_modules(
module.named_children(),
depth - 1,
prefix=(name,) if prefix_is_tuple else name,
module_types=module_types,
)
else:
if prefix_is_tuple:
name = prefix + (name,)
yield name, module
else:
if prefix:
name = '.'.join([prefix, name])
yield name, module
def checkpoint(
function,
*args,
use_reentrant: Optional[bool] = None,
**kwargs,
):
""" checkpoint wrapper fn
A thin wrapper around torch.utils.checkpoint.checkpoint to default
use_reentrant to False
"""
if use_reentrant is None:
use_reentrant = use_reentrant_ckpt()
return torch.utils.checkpoint.checkpoint(
function,
*args,
use_reentrant=use_reentrant,
**kwargs,
)
def checkpoint_seq(
functions,
x,
every: int = 1,
flatten: bool = False,
skip_last: bool = False,
use_reentrant: Optional[bool] = None,
):
r"""A helper function for checkpointing sequential models.
Sequential models execute a list of modules/functions in order
(sequentially). Therefore, we can divide such a sequence into segments
    and checkpoint each segment. All checkpointed segments run in :func:`torch.no_grad`
manner, i.e., not storing the intermediate activations. The inputs of each
checkpointed segment will be saved for re-running the segment in the backward pass.
See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works.
.. warning::
Checkpointing currently only supports :func:`torch.autograd.backward`
and only if its `inputs` argument is not passed. :func:`torch.autograd.grad`
is not supported.
    .. warning::
At least one of the inputs needs to have :code:`requires_grad=True` if
grads are needed for model inputs, otherwise the checkpointed part of the
model won't have gradients.
Args:
functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially.
x: A Tensor that is input to :attr:`functions`
every: checkpoint every-n functions (default: 1)
flatten: flatten nn.Sequential of nn.Sequentials
skip_last: skip checkpointing the last function in the sequence if True
use_reentrant: Use re-entrant checkpointing
Returns:
        Output of running :attr:`functions` sequentially on :attr:`x`
Example:
>>> model = nn.Sequential(...)
>>> input_var = checkpoint_seq(model, input_var, every=2)
"""
if use_reentrant is None:
use_reentrant = use_reentrant_ckpt()
def run_function(start, end, functions):
def forward(_x):
for j in range(start, end + 1):
_x = functions[j](_x)
return _x
return forward
if isinstance(functions, torch.nn.Sequential):
functions = functions.children()
if flatten:
functions = chain.from_iterable(functions)
if not isinstance(functions, (tuple, list)):
functions = tuple(functions)
num_checkpointed = len(functions)
if skip_last:
num_checkpointed -= 1
end = -1
for start in range(0, num_checkpointed, every):
end = min(start + every - 1, num_checkpointed - 1)
x = torch.utils.checkpoint.checkpoint(
run_function(start, end, functions),
x,
use_reentrant=use_reentrant,
)
if skip_last:
return run_function(end + 1, len(functions) - 1, functions)(x)
return x
def adapt_input_conv(in_chans, conv_weight):
conv_type = conv_weight.dtype
conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU
O, I, J, K = conv_weight.shape
if in_chans == 1:
if I > 3:
assert conv_weight.shape[1] % 3 == 0
# For models with space2depth stems
conv_weight = conv_weight.reshape(O, I // 3, 3, J, K)
conv_weight = conv_weight.sum(dim=2, keepdim=False)
else:
conv_weight = conv_weight.sum(dim=1, keepdim=True)
elif in_chans != 3:
if I != 3:
raise NotImplementedError('Weight format not supported by conversion.')
else:
# NOTE this strategy should be better than random init, but there could be other combinations of
# the original RGB input layer weights that'd work better for specific cases.
repeat = int(math.ceil(in_chans / 3))
conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
conv_weight *= (3 / float(in_chans))
conv_weight = conv_weight.to(conv_type)
return conv_weight
| pytorch-image-models/timm/models/_manipulate.py/0 | {
"file_path": "pytorch-image-models/timm/models/_manipulate.py",
"repo_id": "pytorch-image-models",
"token_count": 4675
} |
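A small sketch for two of the helpers above: checkpointing a Sequential in segments with checkpoint_seq, and adapting a pretrained RGB stem conv to a different input channel count with adapt_input_conv. The import path follows the metadata file_path; the module and weight shapes are arbitrary.

import torch
import torch.nn as nn
from timm.models._manipulate import checkpoint_seq, adapt_input_conv

blocks = nn.Sequential(*[nn.Sequential(nn.Conv2d(16, 16, 3, padding=1), nn.ReLU()) for _ in range(4)])
x = torch.randn(2, 16, 32, 32, requires_grad=True)  # requires_grad so the checkpointed segments have gradients to recompute
out = checkpoint_seq(blocks, x, every=2)            # two checkpointed segments of two blocks each
out.mean().backward()

w_rgb = torch.randn(64, 3, 7, 7)                    # e.g. a pretrained stem conv weight
print(adapt_input_conv(1, w_rgb).shape)             # torch.Size([64, 1, 7, 7]) - summed over RGB
print(adapt_input_conv(6, w_rgb).shape)             # torch.Size([64, 6, 7, 7]) - tiled and rescaled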
""" ConvNeXt
Papers:
* `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf
@Article{liu2022convnet,
author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie},
title = {A ConvNet for the 2020s},
journal = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
year = {2022},
}
* `ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808
@article{Woo2023ConvNeXtV2,
title={ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders},
author={Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon and Saining Xie},
year={2023},
journal={arXiv preprint arXiv:2301.00808},
}
Original code and weights from:
* https://github.com/facebookresearch/ConvNeXt, original copyright below
* https://github.com/facebookresearch/ConvNeXt-V2, original copyright below
Model defs atto, femto, pico, nano and _ols / _hnf variants are timm originals.
Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman
"""
# ConvNeXt
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the MIT license
# ConvNeXt-V2
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree (Attribution-NonCommercial 4.0 International (CC BY-NC 4.0))
# No code was used directly from ConvNeXt-V2, however the weights are CC BY-NC 4.0 so beware if using commercially.
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from timm.layers import trunc_normal_, AvgPool2dSame, DropPath, Mlp, GlobalResponseNormMlp, \
LayerNorm2d, LayerNorm, RmsNorm2d, RmsNorm, create_conv2d, get_act_layer, get_norm_layer, make_divisible, to_ntuple
from timm.layers import SimpleNorm2d, SimpleNorm
from timm.layers import NormMlpClassifierHead, ClassifierHead
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import named_apply, checkpoint_seq
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['ConvNeXt'] # model_registry will add each entrypoint fn to this
class Downsample(nn.Module):
def __init__(self, in_chs, out_chs, stride=1, dilation=1):
super().__init__()
avg_stride = stride if dilation == 1 else 1
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
else:
self.pool = nn.Identity()
if in_chs != out_chs:
self.conv = create_conv2d(in_chs, out_chs, 1, stride=1)
else:
self.conv = nn.Identity()
def forward(self, x):
x = self.pool(x)
x = self.conv(x)
return x
class ConvNeXtBlock(nn.Module):
""" ConvNeXt Block
There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
    Unlike the official impl, this one allows a choice of (1) or (2). The 1x1 conv can be faster with an
    appropriate choice of LayerNorm impl; however, as model size increases the tradeoffs appear to change and
    nn.Linear is a better choice. This was observed with PyTorch 1.10 on a 3090 GPU; it could change over time & w/ different HW.
"""
def __init__(
self,
in_chs: int,
out_chs: Optional[int] = None,
kernel_size: int = 7,
stride: int = 1,
dilation: Union[int, Tuple[int, int]] = (1, 1),
mlp_ratio: float = 4,
conv_mlp: bool = False,
conv_bias: bool = True,
use_grn: bool = False,
ls_init_value: Optional[float] = 1e-6,
act_layer: Union[str, Callable] = 'gelu',
norm_layer: Optional[Callable] = None,
drop_path: float = 0.,
):
"""
Args:
in_chs: Block input channels.
out_chs: Block output channels (same as in_chs if None).
kernel_size: Depthwise convolution kernel size.
stride: Stride of depthwise convolution.
dilation: Tuple specifying input and output dilation of block.
mlp_ratio: MLP expansion ratio.
            conv_mlp: Use 1x1 convolutions for MLP and an NCHW-compatible norm layer if True.
conv_bias: Apply bias for all convolution (linear) layers.
use_grn: Use GlobalResponseNorm in MLP (from ConvNeXt-V2)
ls_init_value: Layer-scale init values, layer-scale applied if not None.
act_layer: Activation layer.
norm_layer: Normalization layer (defaults to LN if not specified).
drop_path: Stochastic depth probability.
"""
super().__init__()
out_chs = out_chs or in_chs
dilation = to_ntuple(2)(dilation)
act_layer = get_act_layer(act_layer)
if not norm_layer:
norm_layer = LayerNorm2d if conv_mlp else LayerNorm
mlp_layer = partial(GlobalResponseNormMlp if use_grn else Mlp, use_conv=conv_mlp)
self.use_conv_mlp = conv_mlp
self.conv_dw = create_conv2d(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=stride,
dilation=dilation[0],
depthwise=True,
bias=conv_bias,
)
self.norm = norm_layer(out_chs)
self.mlp = mlp_layer(out_chs, int(mlp_ratio * out_chs), act_layer=act_layer)
self.gamma = nn.Parameter(ls_init_value * torch.ones(out_chs)) if ls_init_value is not None else None
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = Downsample(in_chs, out_chs, stride=stride, dilation=dilation[0])
else:
self.shortcut = nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
x = self.conv_dw(x)
if self.use_conv_mlp:
x = self.norm(x)
x = self.mlp(x)
else:
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.mlp(x)
x = x.permute(0, 3, 1, 2)
if self.gamma is not None:
x = x.mul(self.gamma.reshape(1, -1, 1, 1))
x = self.drop_path(x) + self.shortcut(shortcut)
return x
class ConvNeXtStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size=7,
stride=2,
depth=2,
dilation=(1, 1),
drop_path_rates=None,
ls_init_value=1.0,
conv_mlp=False,
conv_bias=True,
use_grn=False,
act_layer='gelu',
norm_layer=None,
norm_layer_cl=None
):
super().__init__()
self.grad_checkpointing = False
if in_chs != out_chs or stride > 1 or dilation[0] != dilation[1]:
ds_ks = 2 if stride > 1 or dilation[0] != dilation[1] else 1
pad = 'same' if dilation[1] > 1 else 0 # same padding needed if dilation used
self.downsample = nn.Sequential(
norm_layer(in_chs),
create_conv2d(
in_chs,
out_chs,
kernel_size=ds_ks,
stride=stride,
dilation=dilation[0],
padding=pad,
bias=conv_bias,
),
)
in_chs = out_chs
else:
self.downsample = nn.Identity()
drop_path_rates = drop_path_rates or [0.] * depth
stage_blocks = []
for i in range(depth):
stage_blocks.append(ConvNeXtBlock(
in_chs=in_chs,
out_chs=out_chs,
kernel_size=kernel_size,
dilation=dilation[1],
drop_path=drop_path_rates[i],
ls_init_value=ls_init_value,
conv_mlp=conv_mlp,
conv_bias=conv_bias,
use_grn=use_grn,
act_layer=act_layer,
norm_layer=norm_layer if conv_mlp else norm_layer_cl,
))
in_chs = out_chs
self.blocks = nn.Sequential(*stage_blocks)
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
# map of norm layers with NCHW (2D) and channels last variants
_NORM_MAP = {
'layernorm': (LayerNorm2d, LayerNorm),
'layernorm2d': (LayerNorm2d, LayerNorm),
'simplenorm': (SimpleNorm2d, SimpleNorm),
'simplenorm2d': (SimpleNorm2d, SimpleNorm),
'rmsnorm': (RmsNorm2d, RmsNorm),
'rmsnorm2d': (RmsNorm2d, RmsNorm),
}
def _get_norm_layers(norm_layer: Union[Callable, str], conv_mlp: bool, norm_eps: float):
norm_layer = norm_layer or 'layernorm'
if norm_layer in _NORM_MAP:
norm_layer_cl = _NORM_MAP[norm_layer][0] if conv_mlp else _NORM_MAP[norm_layer][1]
norm_layer = _NORM_MAP[norm_layer][0]
if norm_eps is not None:
norm_layer = partial(norm_layer, eps=norm_eps)
norm_layer_cl = partial(norm_layer_cl, eps=norm_eps)
else:
        assert conv_mlp, \
            'If a norm_layer is specified, conv MLP must be used so all norms expect rank-4, channels-first input'
norm_layer = get_norm_layer(norm_layer)
norm_layer_cl = norm_layer
if norm_eps is not None:
norm_layer_cl = partial(norm_layer_cl, eps=norm_eps)
return norm_layer, norm_layer_cl
class ConvNeXt(nn.Module):
r""" ConvNeXt
A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf
"""
def __init__(
self,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
output_stride: int = 32,
depths: Tuple[int, ...] = (3, 3, 9, 3),
dims: Tuple[int, ...] = (96, 192, 384, 768),
kernel_sizes: Union[int, Tuple[int, ...]] = 7,
ls_init_value: Optional[float] = 1e-6,
stem_type: str = 'patch',
patch_size: int = 4,
head_init_scale: float = 1.,
head_norm_first: bool = False,
head_hidden_size: Optional[int] = None,
conv_mlp: bool = False,
conv_bias: bool = True,
use_grn: bool = False,
act_layer: Union[str, Callable] = 'gelu',
norm_layer: Optional[Union[str, Callable]] = None,
norm_eps: Optional[float] = None,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
):
"""
Args:
in_chans: Number of input image channels.
num_classes: Number of classes for classification head.
global_pool: Global pooling type.
output_stride: Output stride of network, one of (8, 16, 32).
depths: Number of blocks at each stage.
dims: Feature dimension at each stage.
kernel_sizes: Depthwise convolution kernel-sizes for each stage.
ls_init_value: Init value for Layer Scale, disabled if None.
stem_type: Type of stem.
patch_size: Stem patch size for patch stem.
head_init_scale: Init scaling value for classifier weights and biases.
head_norm_first: Apply normalization before global pool + head.
head_hidden_size: Size of MLP hidden layer in head if not None and head_norm_first == False.
conv_mlp: Use 1x1 conv in MLP, improves speed for small networks w/ chan last.
conv_bias: Use bias layers w/ all convolutions.
use_grn: Use Global Response Norm (ConvNeXt-V2) in MLP.
act_layer: Activation layer type.
norm_layer: Normalization layer type.
drop_rate: Head pre-classifier dropout rate.
drop_path_rate: Stochastic depth drop rate.
"""
super().__init__()
assert output_stride in (8, 16, 32)
kernel_sizes = to_ntuple(4)(kernel_sizes)
norm_layer, norm_layer_cl = _get_norm_layers(norm_layer, conv_mlp, norm_eps)
act_layer = get_act_layer(act_layer)
self.num_classes = num_classes
self.drop_rate = drop_rate
self.feature_info = []
assert stem_type in ('patch', 'overlap', 'overlap_tiered', 'overlap_act')
if stem_type == 'patch':
# NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=patch_size, stride=patch_size, bias=conv_bias),
norm_layer(dims[0]),
)
stem_stride = patch_size
else:
mid_chs = make_divisible(dims[0] // 2) if 'tiered' in stem_type else dims[0]
self.stem = nn.Sequential(*filter(None, [
nn.Conv2d(in_chans, mid_chs, kernel_size=3, stride=2, padding=1, bias=conv_bias),
act_layer() if 'act' in stem_type else None,
nn.Conv2d(mid_chs, dims[0], kernel_size=3, stride=2, padding=1, bias=conv_bias),
norm_layer(dims[0]),
]))
stem_stride = 4
self.stages = nn.Sequential()
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
stages = []
prev_chs = dims[0]
curr_stride = stem_stride
dilation = 1
# 4 feature resolution stages, each consisting of multiple residual blocks
for i in range(4):
stride = 2 if curr_stride == 2 or i > 0 else 1
if curr_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
curr_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
out_chs = dims[i]
stages.append(ConvNeXtStage(
prev_chs,
out_chs,
kernel_size=kernel_sizes[i],
stride=stride,
dilation=(first_dilation, dilation),
depth=depths[i],
drop_path_rates=dp_rates[i],
ls_init_value=ls_init_value,
conv_mlp=conv_mlp,
conv_bias=conv_bias,
use_grn=use_grn,
act_layer=act_layer,
norm_layer=norm_layer,
norm_layer_cl=norm_layer_cl,
))
prev_chs = out_chs
# NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2
self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.num_features = self.head_hidden_size = prev_chs
# if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets
# otherwise pool -> norm -> fc, the default ConvNeXt ordering (pretrained FB weights)
if head_norm_first:
assert not head_hidden_size
self.norm_pre = norm_layer(self.num_features)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
)
else:
self.norm_pre = nn.Identity()
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
hidden_size=head_hidden_size,
pool_type=global_pool,
drop_rate=self.drop_rate,
norm_layer=norm_layer,
act_layer='gelu',
)
self.head_hidden_size = self.head.num_features
named_apply(partial(_init_weights, head_init_scale=head_init_scale), self)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.downsample', (0,)), # blocks
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^norm_pre', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
Returns:
"""
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.stages) + 1, indices)
# forward pass
feat_idx = 0 # stem is index 0
x = self.stem(x)
if feat_idx in take_indices:
intermediates.append(x)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.stages
else:
stages = self.stages[:max_index]
for stage in stages:
feat_idx += 1
x = stage(x)
if feat_idx in take_indices:
# NOTE not bothering to apply norm_pre when norm=True as almost no models have it enabled
intermediates.append(x)
if intermediates_only:
return intermediates
x = self.norm_pre(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.stages) + 1, indices)
self.stages = self.stages[:max_index] # truncate blocks w/ stem as idx 0
if prune_norm:
self.norm_pre = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm_pre(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _init_weights(module, name=None, head_init_scale=1.0):
if isinstance(module, nn.Conv2d):
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=.02)
nn.init.zeros_(module.bias)
if name and 'head.' in name:
module.weight.data.mul_(head_init_scale)
module.bias.data.mul_(head_init_scale)
def checkpoint_filter_fn(state_dict, model):
""" Remap FB checkpoints -> timm """
if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict:
return state_dict # non-FB checkpoint
if 'model' in state_dict:
state_dict = state_dict['model']
out_dict = {}
if 'visual.trunk.stem.0.weight' in state_dict:
out_dict = {k.replace('visual.trunk.', ''): v for k, v in state_dict.items() if k.startswith('visual.trunk.')}
if 'visual.head.proj.weight' in state_dict:
out_dict['head.fc.weight'] = state_dict['visual.head.proj.weight']
out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.proj.weight'].shape[0])
elif 'visual.head.mlp.fc1.weight' in state_dict:
out_dict['head.pre_logits.fc.weight'] = state_dict['visual.head.mlp.fc1.weight']
out_dict['head.pre_logits.fc.bias'] = state_dict['visual.head.mlp.fc1.bias']
out_dict['head.fc.weight'] = state_dict['visual.head.mlp.fc2.weight']
out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.mlp.fc2.weight'].shape[0])
return out_dict
import re
for k, v in state_dict.items():
k = k.replace('downsample_layers.0.', 'stem.')
k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k)
k = k.replace('dwconv', 'conv_dw')
k = k.replace('pwconv', 'mlp.fc')
if 'grn' in k:
k = k.replace('grn.beta', 'mlp.grn.bias')
k = k.replace('grn.gamma', 'mlp.grn.weight')
v = v.reshape(v.shape[-1])
k = k.replace('head.', 'head.fc.')
if k.startswith('norm.'):
k = k.replace('norm', 'head.norm')
if v.ndim == 2 and 'head' not in k:
model_shape = model.state_dict()[k].shape
v = v.reshape(model_shape)
out_dict[k] = v
return out_dict
def _create_convnext(variant, pretrained=False, **kwargs):
if kwargs.get('pretrained_cfg', '') == 'fcmae':
# NOTE fcmae pretrained weights have no classifier or final norm-layer (`head.norm`)
        # This is a workaround for loading with num_classes=0 w/o removing the norm-layer.
kwargs.setdefault('pretrained_strict', False)
model = build_model_with_cfg(
ConvNeXt, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc',
**kwargs
}
def _cfgv2(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc',
'license': 'cc-by-nc-4.0', 'paper_ids': 'arXiv:2301.00808',
'paper_name': 'ConvNeXt-V2: Co-designing and Scaling ConvNets with Masked Autoencoders',
'origin_url': 'https://github.com/facebookresearch/ConvNeXt-V2',
**kwargs
}
default_cfgs = generate_default_cfgs({
# timm specific variants
'convnext_tiny.in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_small.in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_zepto_rms.ra4_e3600_r224_in1k': _cfg(
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
'convnext_zepto_rms_ols.ra4_e3600_r224_in1k': _cfg(
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
crop_pct=0.9),
'convnext_atto.d2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_d2-01bb0f51.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_atto_ols.a2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_ols_a2-78d1c8f3.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_atto_rms.untrained': _cfg(
#hf_hub_id='timm/',
test_input_size=(3, 256, 256), test_crop_pct=0.95),
'convnext_femto.d1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_d1-d71d5b4c.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_femto_ols.d1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_ols_d1-246bf2ed.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_pico.d1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_d1-10ad7f0d.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnext_pico_ols.d1_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_ols_d1-611f0ca7.pth',
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_nano.in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_nano.d1h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_d1h-7eb4bdea.pth',
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_nano_ols.d1h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_ols_d1h-ae424a9a.pth',
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_tiny_hnf.a2h_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_tiny_hnf_a2h-ab7e9df2.pth',
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_nano.r384_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'convnext_tiny.in12k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_small.in12k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_nano.in12k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, num_classes=11821),
'convnext_nano.r384_in12k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=11821),
'convnext_nano.r384_ad_in12k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=11821),
'convnext_tiny.in12k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, num_classes=11821),
'convnext_small.in12k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, num_classes=11821),
'convnext_tiny.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_224.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_small.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_224.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_base.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_large.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_xlarge.fb_in22k_ft_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_tiny.fb_in1k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_small.fb_in1k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_base.fb_in1k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_large.fb_in1k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnext_tiny.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_384.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_small.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_384.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_base.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_large.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_xlarge.fb_in22k_ft_in1k_384': _cfg(
url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_tiny.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnext_small.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnext_base.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnext_large.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnext_xlarge.fb_in22k': _cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
hf_hub_id='timm/',
num_classes=21841),
'convnextv2_nano.fcmae_ft_in22k_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_nano.fcmae_ft_in22k_in1k_384': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_384_ema.pt',
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_tiny.fcmae_ft_in22k_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_tiny.fcmae_ft_in22k_in1k_384': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_384_ema.pt",
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_base.fcmae_ft_in22k_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_base.fcmae_ft_in22k_in1k_384': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_384_ema.pt",
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_large.fcmae_ft_in22k_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_large.fcmae_ft_in22k_in1k_384': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_384_ema.pt",
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_huge.fcmae_ft_in22k_in1k_384': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_384_ema.pt",
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnextv2_huge.fcmae_ft_in22k_in1k_512': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_512_ema.pt",
hf_hub_id='timm/',
input_size=(3, 512, 512), pool_size=(15, 15), crop_pct=1.0, crop_mode='squash'),
'convnextv2_atto.fcmae_ft_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnextv2_femto.fcmae_ft_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_femto_1k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnextv2_pico.fcmae_ft_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_pico_1k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'convnextv2_nano.fcmae_ft_in1k': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_nano_1k_224_ema.pt',
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_tiny.fcmae_ft_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_tiny_1k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_base.fcmae_ft_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_base_1k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_large.fcmae_ft_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_large_1k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_huge.fcmae_ft_in1k': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_huge_1k_224_ema.pt",
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'convnextv2_atto.fcmae': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_atto_1k_224_fcmae.pt',
hf_hub_id='timm/',
num_classes=0),
'convnextv2_femto.fcmae': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_femto_1k_224_fcmae.pt',
hf_hub_id='timm/',
num_classes=0),
'convnextv2_pico.fcmae': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_pico_1k_224_fcmae.pt',
hf_hub_id='timm/',
num_classes=0),
'convnextv2_nano.fcmae': _cfgv2(
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_nano_1k_224_fcmae.pt',
hf_hub_id='timm/',
num_classes=0),
'convnextv2_tiny.fcmae': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_tiny_1k_224_fcmae.pt",
hf_hub_id='timm/',
num_classes=0),
'convnextv2_base.fcmae': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_base_1k_224_fcmae.pt",
hf_hub_id='timm/',
num_classes=0),
'convnextv2_large.fcmae': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_large_1k_224_fcmae.pt",
hf_hub_id='timm/',
num_classes=0),
'convnextv2_huge.fcmae': _cfgv2(
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_huge_1k_224_fcmae.pt",
hf_hub_id='timm/',
num_classes=0),
'convnextv2_small.untrained': _cfg(),
# CLIP weights, fine-tuned on in1k or in12k + in1k
'convnext_base.clip_laion2b_augreg_ft_in12k_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
'convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0),
'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_base.clip_laion2b_augreg_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
'convnext_base.clip_laiona_augreg_ft_in1k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'convnext_large_mlp.clip_laion2b_augreg_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0
),
'convnext_large_mlp.clip_laion2b_augreg_ft_in1k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'
),
'convnext_xxlarge.clip_laion2b_soup_ft_in1k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
'convnext_base.clip_laion2b_augreg_ft_in12k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
'convnext_large_mlp.clip_laion2b_soup_ft_in12k_320': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0),
'convnext_large_mlp.clip_laion2b_augreg_ft_in12k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_large_mlp.clip_laion2b_soup_ft_in12k_384': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
'convnext_xxlarge.clip_laion2b_soup_ft_in12k': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0),
# CLIP original image tower weights
'convnext_base.clip_laion2b': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640),
'convnext_base.clip_laion2b_augreg': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640),
'convnext_base.clip_laiona': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640),
'convnext_base.clip_laiona_320': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640),
'convnext_base.clip_laiona_augreg_320': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640),
'convnext_large_mlp.clip_laion2b_augreg': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=768),
'convnext_large_mlp.clip_laion2b_ft_320': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768),
'convnext_large_mlp.clip_laion2b_ft_soup_320': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768),
'convnext_xxlarge.clip_laion2b_soup': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024),
'convnext_xxlarge.clip_laion2b_rewind': _cfg(
hf_hub_id='timm/',
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024),
"test_convnext.r160_in1k": _cfg(
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95),
"test_convnext2.r160_in1k": _cfg(
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95),
"test_convnext3.r160_in1k": _cfg(
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95),
})
@register_model
def convnext_zepto_rms(pretrained=False, **kwargs) -> ConvNeXt:
    # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M)
model_args = dict(depths=(2, 2, 4, 2), dims=(32, 64, 128, 256), conv_mlp=True, norm_layer='simplenorm')
model = _create_convnext('convnext_zepto_rms', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_zepto_rms_ols(pretrained=False, **kwargs) -> ConvNeXt:
    # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M)
model_args = dict(
depths=(2, 2, 4, 2), dims=(32, 64, 128, 256), conv_mlp=True, norm_layer='simplenorm', stem_type='overlap_act')
model = _create_convnext('convnext_zepto_rms_ols', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_atto(pretrained=False, **kwargs) -> ConvNeXt:
    # timm atto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M)
model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True)
model = _create_convnext('convnext_atto', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_atto_ols(pretrained=False, **kwargs) -> ConvNeXt:
# timm femto variant with overlapping 3x3 conv stem, wider than non-ols femto above, current param count 3.7M
model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True, stem_type='overlap_tiered')
model = _create_convnext('convnext_atto_ols', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_atto_rms(pretrained=False, **kwargs) -> ConvNeXt:
    # timm atto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M)
model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True, norm_layer='rmsnorm2d')
model = _create_convnext('convnext_atto_rms', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_femto(pretrained=False, **kwargs) -> ConvNeXt:
# timm femto variant
model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True)
model = _create_convnext('convnext_femto', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_femto_ols(pretrained=False, **kwargs) -> ConvNeXt:
# timm femto variant
model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True, stem_type='overlap_tiered')
model = _create_convnext('convnext_femto_ols', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_pico(pretrained=False, **kwargs) -> ConvNeXt:
# timm pico variant
model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True)
model = _create_convnext('convnext_pico', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_pico_ols(pretrained=False, **kwargs) -> ConvNeXt:
    # timm pico variant with overlapping 3x3 conv stem
model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True, stem_type='overlap_tiered')
model = _create_convnext('convnext_pico_ols', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_nano(pretrained=False, **kwargs) -> ConvNeXt:
# timm nano variant with standard stem and head
model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True)
model = _create_convnext('convnext_nano', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_nano_ols(pretrained=False, **kwargs) -> ConvNeXt:
# experimental nano variant with overlapping conv stem
model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True, stem_type='overlap')
model = _create_convnext('convnext_nano_ols', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_tiny_hnf(pretrained=False, **kwargs) -> ConvNeXt:
# experimental tiny variant with norm before pooling in head (head norm first)
model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), head_norm_first=True, conv_mlp=True)
model = _create_convnext('convnext_tiny_hnf', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_tiny(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768))
model = _create_convnext('convnext_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_small(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768])
model = _create_convnext('convnext_small', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_base(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024])
model = _create_convnext('convnext_base', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_large(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536])
model = _create_convnext('convnext_large', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_large_mlp(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], head_hidden_size=1536)
model = _create_convnext('convnext_large_mlp', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_xlarge(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048])
model = _create_convnext('convnext_xlarge', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnext_xxlarge(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 4, 30, 3], dims=[384, 768, 1536, 3072], norm_eps=kwargs.pop('norm_eps', 1e-5))
model = _create_convnext('convnext_xxlarge', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_atto(pretrained=False, **kwargs) -> ConvNeXt:
    # timm atto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M)
model_args = dict(
depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), use_grn=True, ls_init_value=None, conv_mlp=True)
model = _create_convnext('convnextv2_atto', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_femto(pretrained=False, **kwargs) -> ConvNeXt:
# timm femto variant
model_args = dict(
depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), use_grn=True, ls_init_value=None, conv_mlp=True)
model = _create_convnext('convnextv2_femto', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_pico(pretrained=False, **kwargs) -> ConvNeXt:
# timm pico variant
model_args = dict(
depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), use_grn=True, ls_init_value=None, conv_mlp=True)
model = _create_convnext('convnextv2_pico', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_nano(pretrained=False, **kwargs) -> ConvNeXt:
# timm nano variant with standard stem and head
model_args = dict(
depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), use_grn=True, ls_init_value=None, conv_mlp=True)
model = _create_convnext('convnextv2_nano', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_tiny(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_small(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_small', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_base(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_base', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_large(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_large', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def convnextv2_huge(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[3, 3, 27, 3], dims=[352, 704, 1408, 2816], use_grn=True, ls_init_value=None)
model = _create_convnext('convnextv2_huge', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def test_convnext(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[1, 2, 4, 2], dims=[24, 32, 48, 64], norm_eps=kwargs.pop('norm_eps', 1e-5), act_layer='gelu_tanh')
model = _create_convnext('test_convnext', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def test_convnext2(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(depths=[1, 1, 1, 1], dims=[32, 64, 96, 128], norm_eps=kwargs.pop('norm_eps', 1e-5), act_layer='gelu_tanh')
model = _create_convnext('test_convnext2', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def test_convnext3(pretrained=False, **kwargs) -> ConvNeXt:
model_args = dict(
depths=[1, 1, 1, 1], dims=[32, 64, 96, 128], norm_eps=kwargs.pop('norm_eps', 1e-5), kernel_sizes=(7, 5, 5, 3), act_layer='silu')
model = _create_convnext('test_convnext3', pretrained=pretrained, **dict(model_args, **kwargs))
return model
register_model_deprecations(__name__, {
'convnext_tiny_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k',
'convnext_small_in22ft1k': 'convnext_small.fb_in22k_ft_in1k',
'convnext_base_in22ft1k': 'convnext_base.fb_in22k_ft_in1k',
'convnext_large_in22ft1k': 'convnext_large.fb_in22k_ft_in1k',
'convnext_xlarge_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k',
'convnext_tiny_384_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k_384',
'convnext_small_384_in22ft1k': 'convnext_small.fb_in22k_ft_in1k_384',
'convnext_base_384_in22ft1k': 'convnext_base.fb_in22k_ft_in1k_384',
'convnext_large_384_in22ft1k': 'convnext_large.fb_in22k_ft_in1k_384',
'convnext_xlarge_384_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k_384',
'convnext_tiny_in22k': 'convnext_tiny.fb_in22k',
'convnext_small_in22k': 'convnext_small.fb_in22k',
'convnext_base_in22k': 'convnext_base.fb_in22k',
'convnext_large_in22k': 'convnext_large.fb_in22k',
'convnext_xlarge_in22k': 'convnext_xlarge.fb_in22k',
})
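# Hedged usage sketch (not part of the upstream module): the functions above are
# registry entry points and are normally reached via timm.create_model() rather than
# called directly. The helper name below is illustrative only.
def _example_create_convnext():
    import timm
    # any registered variant name above works, e.g. 'convnext_atto', 'convnextv2_tiny'
    model = timm.create_model('convnext_tiny', pretrained=False, num_classes=10)
    x = torch.randn(1, 3, 224, 224)
    return model(x)  # -> logits of shape (1, 10)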
| pytorch-image-models/timm/models/convnext.py/0 | {
"file_path": "pytorch-image-models/timm/models/convnext.py",
"repo_id": "pytorch-image-models",
"token_count": 27273
} |
# FastViT for PyTorch
#
# Original implementation and weights from https://github.com/apple/ml-fastvit
#
# For licensing see accompanying LICENSE file at https://github.com/apple/ml-fastvit/tree/main
# Original work is copyright (C) 2023 Apple Inc. All Rights Reserved.
#
import os
from functools import partial
from typing import List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, trunc_normal_, create_conv2d, ConvNormAct, SqueezeExcite, use_fused_attn, \
ClassifierHead
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['FastVit']
def num_groups(group_size, channels):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
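# Hedged sketch (not part of the upstream module): illustrates the group_size
# convention used by the blocks below.
def _example_num_groups():
    assert num_groups(0, 64) == 1       # 0 or None -> ordinary conv, single group
    assert num_groups(None, 64) == 1
    assert num_groups(1, 64) == 64      # group_size=1 -> depthwise conv
    assert num_groups(16, 64) == 4      # 4 groups of 16 channels each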
class MobileOneBlock(nn.Module):
"""MobileOne building block.
This block has a multi-branched architecture at train-time
and plain-CNN style architecture at inference time
For more details, please refer to our paper:
    `MobileOne: An Improved One millisecond Mobile Backbone` -
https://arxiv.org/pdf/2206.04040.pdf
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int,
stride: int = 1,
dilation: int = 1,
group_size: int = 0,
inference_mode: bool = False,
use_se: bool = False,
use_act: bool = True,
use_scale_branch: bool = True,
num_conv_branches: int = 1,
act_layer: Type[nn.Module] = nn.GELU,
) -> None:
"""Construct a MobileOneBlock module.
Args:
in_chs: Number of channels in the input.
out_chs: Number of channels produced by the block.
kernel_size: Size of the convolution kernel.
stride: Stride size.
dilation: Kernel dilation factor.
group_size: Convolution group size.
inference_mode: If True, instantiates model in inference mode.
            use_se: Whether to use a Squeeze-and-Excite block after the conv. Default: ``False``
            use_act: Whether to use activation. Default: ``True``
            use_scale_branch: Whether to use scale branch. Default: ``True``
            num_conv_branches: Number of linear conv branches.
            act_layer: Activation layer. Default: ``nn.GELU``
"""
super(MobileOneBlock, self).__init__()
self.inference_mode = inference_mode
self.groups = num_groups(group_size, in_chs)
self.stride = stride
self.dilation = dilation
self.kernel_size = kernel_size
self.in_chs = in_chs
self.out_chs = out_chs
self.num_conv_branches = num_conv_branches
# Check if SE-ReLU is requested
self.se = SqueezeExcite(out_chs, rd_divisor=1) if use_se else nn.Identity()
if inference_mode:
self.reparam_conv = create_conv2d(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
groups=self.groups,
bias=True,
)
else:
# Re-parameterizable skip connection
self.reparam_conv = None
self.identity = (
nn.BatchNorm2d(num_features=in_chs)
if out_chs == in_chs and stride == 1
else None
)
# Re-parameterizable conv branches
if num_conv_branches > 0:
self.conv_kxk = nn.ModuleList([
ConvNormAct(
self.in_chs,
self.out_chs,
kernel_size=kernel_size,
stride=self.stride,
groups=self.groups,
apply_act=False,
) for _ in range(self.num_conv_branches)
])
else:
self.conv_kxk = None
# Re-parameterizable scale branch
self.conv_scale = None
if kernel_size > 1 and use_scale_branch:
self.conv_scale = ConvNormAct(
self.in_chs,
self.out_chs,
kernel_size=1,
stride=self.stride,
groups=self.groups,
apply_act=False
)
self.act = act_layer() if use_act else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Apply forward pass."""
# Inference mode forward pass.
if self.reparam_conv is not None:
return self.act(self.se(self.reparam_conv(x)))
# Multi-branched train-time forward pass.
# Identity branch output
identity_out = 0
if self.identity is not None:
identity_out = self.identity(x)
# Scale branch output
scale_out = 0
if self.conv_scale is not None:
scale_out = self.conv_scale(x)
# Other kxk conv branches
out = scale_out + identity_out
if self.conv_kxk is not None:
for rc in self.conv_kxk:
out += rc(x)
return self.act(self.se(out))
def reparameterize(self):
"""Following works like `RepVGG: Making VGG-style ConvNets Great Again` -
https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched
architecture used at training time to obtain a plain CNN-like structure
for inference.
"""
if self.reparam_conv is not None:
return
kernel, bias = self._get_kernel_bias()
self.reparam_conv = create_conv2d(
in_channels=self.in_chs,
out_channels=self.out_chs,
kernel_size=self.kernel_size,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
bias=True,
)
self.reparam_conv.weight.data = kernel
self.reparam_conv.bias.data = bias
# Delete un-used branches
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__("conv_kxk")
self.__delattr__("conv_scale")
if hasattr(self, "identity"):
self.__delattr__("identity")
self.inference_mode = True
def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to obtain re-parameterized kernel and bias.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83
Returns:
Tuple of (kernel, bias) after fusing branches.
"""
# get weights and bias of scale branch
kernel_scale = 0
bias_scale = 0
if self.conv_scale is not None:
kernel_scale, bias_scale = self._fuse_bn_tensor(self.conv_scale)
# Pad scale branch kernel to match conv branch kernel size.
pad = self.kernel_size // 2
kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, pad, pad])
# get weights and bias of skip branch
kernel_identity = 0
bias_identity = 0
if self.identity is not None:
kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity)
# get weights and bias of conv branches
kernel_conv = 0
bias_conv = 0
if self.conv_kxk is not None:
for ix in range(self.num_conv_branches):
_kernel, _bias = self._fuse_bn_tensor(self.conv_kxk[ix])
kernel_conv += _kernel
bias_conv += _bias
kernel_final = kernel_conv + kernel_scale + kernel_identity
bias_final = bias_conv + bias_scale + bias_identity
return kernel_final, bias_final
def _fuse_bn_tensor(
self, branch: Union[nn.Sequential, nn.BatchNorm2d]
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to fuse batchnorm layer with preceding conv layer.
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95
Args:
branch: Sequence of ops to be fused.
Returns:
Tuple of (kernel, bias) after fusing batchnorm.
"""
if isinstance(branch, ConvNormAct):
kernel = branch.conv.weight
running_mean = branch.bn.running_mean
running_var = branch.bn.running_var
gamma = branch.bn.weight
beta = branch.bn.bias
eps = branch.bn.eps
else:
assert isinstance(branch, nn.BatchNorm2d)
if not hasattr(self, "id_tensor"):
input_dim = self.in_chs // self.groups
kernel_value = torch.zeros(
(self.in_chs, input_dim, self.kernel_size, self.kernel_size),
dtype=branch.weight.dtype,
device=branch.weight.device,
)
for i in range(self.in_chs):
kernel_value[
i, i % input_dim, self.kernel_size // 2, self.kernel_size // 2
] = 1
self.id_tensor = kernel_value
kernel = self.id_tensor
running_mean = branch.running_mean
running_var = branch.running_var
gamma = branch.weight
beta = branch.bias
eps = branch.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
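# Hedged equivalence check (not part of the upstream module): in eval mode, the
# multi-branch forward and the single fused conv produced by reparameterize() should
# agree to within floating point tolerance. The helper name is illustrative only.
def _example_check_mobileone_reparam():
    blk = MobileOneBlock(in_chs=8, out_chs=8, kernel_size=3, stride=1, group_size=1)
    blk.eval()  # BatchNorm uses running stats, matching what gets fused
    x = torch.randn(2, 8, 16, 16)
    with torch.no_grad():
        y_branches = blk(x)
        blk.reparameterize()
        y_fused = blk(x)
    assert torch.allclose(y_branches, y_fused, atol=1e-5)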
class ReparamLargeKernelConv(nn.Module):
"""Building Block of RepLKNet
This class defines overparameterized large kernel conv block
introduced in `RepLKNet <https://arxiv.org/abs/2203.06717>`_
Reference: https://github.com/DingXiaoH/RepLKNet-pytorch
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int,
stride: int,
group_size: int,
small_kernel: Optional[int] = None,
use_se: bool = False,
act_layer: Optional[nn.Module] = None,
inference_mode: bool = False,
) -> None:
"""Construct a ReparamLargeKernelConv module.
Args:
in_chs: Number of input channels.
out_chs: Number of output channels.
kernel_size: Kernel size of the large kernel conv branch.
            stride: Stride size.
            group_size: Convolution group size.
            small_kernel: Kernel size of small kernel conv branch.
            use_se: Whether to use a Squeeze-and-Excite block. Default: ``False``
            act_layer: Activation module. Default: ``None`` (no activation)
inference_mode: If True, instantiates model in inference mode. Default: ``False``
"""
super(ReparamLargeKernelConv, self).__init__()
self.stride = stride
self.groups = num_groups(group_size, in_chs)
self.in_chs = in_chs
self.out_chs = out_chs
self.kernel_size = kernel_size
self.small_kernel = small_kernel
if inference_mode:
self.reparam_conv = create_conv2d(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=stride,
dilation=1,
groups=self.groups,
bias=True,
)
else:
self.reparam_conv = None
self.large_conv = ConvNormAct(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=self.stride,
groups=self.groups,
apply_act=False,
)
if small_kernel is not None:
assert (
small_kernel <= kernel_size
), "The kernel size for re-param cannot be larger than the large kernel!"
self.small_conv = ConvNormAct(
in_chs,
out_chs,
kernel_size=small_kernel,
stride=self.stride,
groups=self.groups,
apply_act=False,
)
self.se = SqueezeExcite(out_chs, rd_ratio=0.25) if use_se else nn.Identity()
# FIXME output of this act was not used in original impl, likely due to bug
self.act = act_layer() if act_layer is not None else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.reparam_conv is not None:
out = self.reparam_conv(x)
else:
out = self.large_conv(x)
if self.small_conv is not None:
out = out + self.small_conv(x)
out = self.se(out)
out = self.act(out)
return out
def get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to obtain re-parameterized kernel and bias.
Reference: https://github.com/DingXiaoH/RepLKNet-pytorch
Returns:
Tuple of (kernel, bias) after fusing branches.
"""
eq_k, eq_b = self._fuse_bn(self.large_conv.conv, self.large_conv.bn)
if hasattr(self, "small_conv"):
small_k, small_b = self._fuse_bn(self.small_conv.conv, self.small_conv.bn)
eq_b += small_b
eq_k += nn.functional.pad(
small_k, [(self.kernel_size - self.small_kernel) // 2] * 4
)
return eq_k, eq_b
def reparameterize(self) -> None:
"""
Following works like `RepVGG: Making VGG-style ConvNets Great Again` -
https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched
architecture used at training time to obtain a plain CNN-like structure
for inference.
"""
eq_k, eq_b = self.get_kernel_bias()
self.reparam_conv = create_conv2d(
self.in_chs,
self.out_chs,
kernel_size=self.kernel_size,
stride=self.stride,
groups=self.groups,
bias=True,
)
self.reparam_conv.weight.data = eq_k
self.reparam_conv.bias.data = eq_b
self.__delattr__("large_conv")
if hasattr(self, "small_conv"):
self.__delattr__("small_conv")
@staticmethod
def _fuse_bn(
conv: nn.Conv2d, bn: nn.BatchNorm2d
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Method to fuse batchnorm layer with conv layer.
Args:
conv: Convolutional kernel weights.
bn: Batchnorm 2d layer.
Returns:
Tuple of (kernel, bias) after fusing batchnorm.
"""
kernel = conv.weight
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
def convolutional_stem(
in_chs: int,
out_chs: int,
act_layer: Type[nn.Module] = nn.GELU,
inference_mode: bool = False
) -> nn.Sequential:
"""Build convolutional stem with MobileOne blocks.
Args:
in_chs: Number of input channels.
out_chs: Number of output channels.
inference_mode: Flag to instantiate model in inference mode. Default: ``False``
Returns:
nn.Sequential object with stem elements.
"""
return nn.Sequential(
MobileOneBlock(
in_chs=in_chs,
out_chs=out_chs,
kernel_size=3,
stride=2,
act_layer=act_layer,
inference_mode=inference_mode,
),
MobileOneBlock(
in_chs=out_chs,
out_chs=out_chs,
kernel_size=3,
stride=2,
group_size=1,
act_layer=act_layer,
inference_mode=inference_mode,
),
MobileOneBlock(
in_chs=out_chs,
out_chs=out_chs,
kernel_size=1,
stride=1,
act_layer=act_layer,
inference_mode=inference_mode,
),
)
class Attention(nn.Module):
"""Multi-headed Self Attention module.
Source modified from:
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
head_dim: int = 32,
qkv_bias: bool = False,
attn_drop: float = 0.0,
proj_drop: float = 0.0,
) -> None:
"""Build MHSA module that can handle 3D or 4D input tensors.
Args:
dim: Number of embedding dimensions.
head_dim: Number of hidden dimensions per head. Default: ``32``
qkv_bias: Use bias or not. Default: ``False``
attn_drop: Dropout rate for attention tensor.
proj_drop: Dropout rate for projection tensor.
"""
super().__init__()
assert dim % head_dim == 0, "dim should be divisible by head_dim"
self.head_dim = head_dim
self.num_heads = dim // head_dim
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, C, H, W = x.shape
N = H * W
x = x.flatten(2).transpose(-2, -1) # (B, N, C)
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, self.head_dim)
.permute(2, 0, 3, 1, 4)
)
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
if self.fused_attn:
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
x = x.transpose(-2, -1).reshape(B, C, H, W)
return x
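# Hedged shape sketch (not part of the upstream module): unlike the ViT attention it
# was adapted from, this module consumes and returns NCHW feature maps, flattening
# the spatial dims internally.
def _example_attention_shapes():
    attn = Attention(dim=256, head_dim=32)  # 256 // 32 = 8 heads
    x = torch.randn(2, 256, 8, 8)
    y = attn(x)
    assert y.shape == (2, 256, 8, 8)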
class PatchEmbed(nn.Module):
"""Convolutional patch embedding layer."""
def __init__(
self,
patch_size: int,
stride: int,
in_chs: int,
embed_dim: int,
act_layer: Type[nn.Module] = nn.GELU,
lkc_use_act: bool = False,
use_se: bool = False,
inference_mode: bool = False,
) -> None:
"""Build patch embedding layer.
Args:
patch_size: Patch size for embedding computation.
stride: Stride for convolutional embedding layer.
in_chs: Number of channels of input tensor.
            embed_dim: Number of embedding dimensions.
            act_layer: Activation layer. Default: ``nn.GELU``
            lkc_use_act: Whether to apply the activation at the end of the large kernel conv block. Default: ``False``
            use_se: Whether to use a Squeeze-and-Excite block in the large kernel conv block. Default: ``False``
            inference_mode: Flag to instantiate model in inference mode. Default: ``False``
"""
super().__init__()
self.proj = nn.Sequential(
ReparamLargeKernelConv(
in_chs=in_chs,
out_chs=embed_dim,
kernel_size=patch_size,
stride=stride,
group_size=1,
small_kernel=3,
use_se=use_se,
act_layer=act_layer if lkc_use_act else None, # NOTE original weights didn't use this act
inference_mode=inference_mode,
),
MobileOneBlock(
in_chs=embed_dim,
out_chs=embed_dim,
kernel_size=1,
stride=1,
use_se=False,
act_layer=act_layer,
inference_mode=inference_mode,
)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.proj(x)
return x
class LayerScale2d(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim, 1, 1))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class RepMixer(nn.Module):
"""Reparameterizable token mixer.
For more details, please refer to our paper:
`FastViT: A Fast Hybrid Vision Transformer using Structural Reparameterization <https://arxiv.org/pdf/2303.14189.pdf>`_
"""
def __init__(
self,
dim,
kernel_size=3,
layer_scale_init_value=1e-5,
inference_mode: bool = False,
):
"""Build RepMixer Module.
Args:
dim: Input feature map dimension. :math:`C_{in}` from an expected input of size :math:`(B, C_{in}, H, W)`.
kernel_size: Kernel size for spatial mixing. Default: 3
layer_scale_init_value: Initial value for layer scale. Default: 1e-5
inference_mode: If True, instantiates model in inference mode. Default: ``False``
"""
super().__init__()
self.dim = dim
self.kernel_size = kernel_size
self.inference_mode = inference_mode
if inference_mode:
self.reparam_conv = nn.Conv2d(
self.dim,
self.dim,
kernel_size=self.kernel_size,
stride=1,
padding=self.kernel_size // 2,
groups=self.dim,
bias=True,
)
else:
self.reparam_conv = None
self.norm = MobileOneBlock(
dim,
dim,
kernel_size,
group_size=1,
use_act=False,
use_scale_branch=False,
num_conv_branches=0,
)
self.mixer = MobileOneBlock(
dim,
dim,
kernel_size,
group_size=1,
use_act=False,
)
if layer_scale_init_value is not None:
self.layer_scale = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale = nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.reparam_conv is not None:
x = self.reparam_conv(x)
else:
x = x + self.layer_scale(self.mixer(x) - self.norm(x))
return x
def reparameterize(self) -> None:
"""Reparameterize mixer and norm into a single
convolutional layer for efficient inference.
"""
if self.inference_mode:
return
self.mixer.reparameterize()
self.norm.reparameterize()
if isinstance(self.layer_scale, LayerScale2d):
w = self.mixer.id_tensor + self.layer_scale.gamma.unsqueeze(-1) * (
self.mixer.reparam_conv.weight - self.norm.reparam_conv.weight
)
b = torch.squeeze(self.layer_scale.gamma) * (
self.mixer.reparam_conv.bias - self.norm.reparam_conv.bias
)
else:
w = (
self.mixer.id_tensor
+ self.mixer.reparam_conv.weight
- self.norm.reparam_conv.weight
)
b = self.mixer.reparam_conv.bias - self.norm.reparam_conv.bias
self.reparam_conv = create_conv2d(
self.dim,
self.dim,
kernel_size=self.kernel_size,
stride=1,
groups=self.dim,
bias=True,
)
self.reparam_conv.weight.data = w
self.reparam_conv.bias.data = b
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__("mixer")
self.__delattr__("norm")
self.__delattr__("layer_scale")
class ConvMlp(nn.Module):
"""Convolutional FFN Module."""
def __init__(
self,
in_chs: int,
hidden_channels: Optional[int] = None,
out_chs: Optional[int] = None,
act_layer: Type[nn.Module] = nn.GELU,
drop: float = 0.0,
) -> None:
"""Build convolutional FFN module.
Args:
in_chs: Number of input channels.
hidden_channels: Number of channels after expansion. Default: None
out_chs: Number of output channels. Default: None
act_layer: Activation layer. Default: ``GELU``
drop: Dropout rate. Default: ``0.0``.
"""
super().__init__()
out_chs = out_chs or in_chs
hidden_channels = hidden_channels or in_chs
self.conv = ConvNormAct(
in_chs,
out_chs,
kernel_size=7,
groups=in_chs,
apply_act=False,
)
self.fc1 = nn.Conv2d(in_chs, hidden_channels, kernel_size=1)
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_channels, out_chs, kernel_size=1)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m: nn.Module) -> None:
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv(x)
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class RepConditionalPosEnc(nn.Module):
"""Implementation of conditional positional encoding.
For more details refer to paper:
`Conditional Positional Encodings for Vision Transformers <https://arxiv.org/pdf/2102.10882.pdf>`_
In our implementation, we can reparameterize this module to eliminate a skip connection.
"""
def __init__(
self,
dim: int,
dim_out: Optional[int] = None,
spatial_shape: Union[int, Tuple[int, int]] = (7, 7),
inference_mode=False,
) -> None:
"""Build reparameterizable conditional positional encoding
Args:
dim: Number of input channels.
            dim_out: Number of output channels. Default: ``None`` (falls back to ``dim``)
spatial_shape: Spatial shape of kernel for positional encoding. Default: (7, 7)
inference_mode: Flag to instantiate block in inference mode. Default: ``False``
"""
super(RepConditionalPosEnc, self).__init__()
if isinstance(spatial_shape, int):
spatial_shape = tuple([spatial_shape] * 2)
        assert isinstance(spatial_shape, tuple), (
            f'"spatial_shape" must be a sequence or int, '
            f"got {type(spatial_shape)} instead."
        )
assert len(spatial_shape) == 2, (
f'Length of "spatial_shape" should be 2, '
f"got {len(spatial_shape)} instead."
)
self.spatial_shape = spatial_shape
self.dim = dim
self.dim_out = dim_out or dim
self.groups = dim
if inference_mode:
self.reparam_conv = nn.Conv2d(
self.dim,
self.dim_out,
kernel_size=self.spatial_shape,
stride=1,
padding=spatial_shape[0] // 2,
groups=self.groups,
bias=True,
)
else:
self.reparam_conv = None
self.pos_enc = nn.Conv2d(
self.dim,
self.dim_out,
spatial_shape,
1,
int(spatial_shape[0] // 2),
groups=self.groups,
bias=True,
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.reparam_conv is not None:
x = self.reparam_conv(x)
else:
x = self.pos_enc(x) + x
return x
def reparameterize(self) -> None:
# Build equivalent Id tensor
input_dim = self.dim // self.groups
kernel_value = torch.zeros(
(
self.dim,
input_dim,
self.spatial_shape[0],
self.spatial_shape[1],
),
dtype=self.pos_enc.weight.dtype,
device=self.pos_enc.weight.device,
)
for i in range(self.dim):
kernel_value[
i,
i % input_dim,
self.spatial_shape[0] // 2,
self.spatial_shape[1] // 2,
] = 1
id_tensor = kernel_value
# Reparameterize Id tensor and conv
w_final = id_tensor + self.pos_enc.weight
b_final = self.pos_enc.bias
# Introduce reparam conv
self.reparam_conv = nn.Conv2d(
self.dim,
self.dim_out,
kernel_size=self.spatial_shape,
stride=1,
padding=int(self.spatial_shape[0] // 2),
groups=self.groups,
bias=True,
)
self.reparam_conv.weight.data = w_final
self.reparam_conv.bias.data = b_final
for name, para in self.named_parameters():
if 'reparam_conv' in name:
continue
para.detach_()
self.__delattr__("pos_enc")
class RepMixerBlock(nn.Module):
"""Implementation of Metaformer block with RepMixer as token mixer.
For more details on Metaformer structure, please refer to:
`MetaFormer Is Actually What You Need for Vision <https://arxiv.org/pdf/2111.11418.pdf>`_
"""
def __init__(
self,
dim: int,
kernel_size: int = 3,
mlp_ratio: float = 4.0,
act_layer: Type[nn.Module] = nn.GELU,
proj_drop: float = 0.0,
drop_path: float = 0.0,
layer_scale_init_value: float = 1e-5,
inference_mode: bool = False,
):
"""Build RepMixer Block.
Args:
dim: Number of embedding dimensions.
kernel_size: Kernel size for repmixer. Default: 3
mlp_ratio: MLP expansion ratio. Default: 4.0
act_layer: Activation layer. Default: ``nn.GELU``
proj_drop: Dropout rate. Default: 0.0
drop_path: Drop path rate. Default: 0.0
layer_scale_init_value: Layer scale value at initialization. Default: 1e-5
inference_mode: Flag to instantiate block in inference mode. Default: ``False``
"""
super().__init__()
self.token_mixer = RepMixer(
dim,
kernel_size=kernel_size,
layer_scale_init_value=layer_scale_init_value,
inference_mode=inference_mode,
)
self.mlp = ConvMlp(
in_chs=dim,
hidden_channels=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
if layer_scale_init_value is not None:
self.layer_scale = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale = nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
x = self.token_mixer(x)
x = x + self.drop_path(self.layer_scale(self.mlp(x)))
return x
class AttentionBlock(nn.Module):
"""Implementation of metaformer block with MHSA as token mixer.
For more details on Metaformer structure, please refer to:
`MetaFormer Is Actually What You Need for Vision <https://arxiv.org/pdf/2111.11418.pdf>`_
"""
def __init__(
self,
dim: int,
mlp_ratio: float = 4.0,
act_layer: Type[nn.Module] = nn.GELU,
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
proj_drop: float = 0.0,
drop_path: float = 0.0,
layer_scale_init_value: float = 1e-5,
):
"""Build Attention Block.
Args:
dim: Number of embedding dimensions.
mlp_ratio: MLP expansion ratio. Default: 4.0
act_layer: Activation layer. Default: ``nn.GELU``
norm_layer: Normalization layer. Default: ``nn.BatchNorm2d``
proj_drop: Dropout rate. Default: 0.0
drop_path: Drop path rate. Default: 0.0
layer_scale_init_value: Layer scale value at initialization. Default: 1e-5
"""
super().__init__()
self.norm = norm_layer(dim)
self.token_mixer = Attention(dim=dim)
if layer_scale_init_value is not None:
self.layer_scale_1 = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale_1 = nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.mlp = ConvMlp(
in_chs=dim,
hidden_channels=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
if layer_scale_init_value is not None:
self.layer_scale_2 = LayerScale2d(dim, layer_scale_init_value)
else:
self.layer_scale_2 = nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
x = x + self.drop_path1(self.layer_scale_1(self.token_mixer(self.norm(x))))
x = x + self.drop_path2(self.layer_scale_2(self.mlp(x)))
return x
class FastVitStage(nn.Module):
def __init__(
self,
dim: int,
dim_out: int,
depth: int,
token_mixer_type: str,
downsample: bool = True,
se_downsample: bool = False,
down_patch_size: int = 7,
down_stride: int = 2,
pos_emb_layer: Optional[nn.Module] = None,
kernel_size: int = 3,
mlp_ratio: float = 4.0,
act_layer: Type[nn.Module] = nn.GELU,
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
proj_drop_rate: float = 0.0,
drop_path_rate: float = 0.0,
layer_scale_init_value: Optional[float] = 1e-5,
lkc_use_act=False,
inference_mode=False,
):
"""FastViT stage.
Args:
dim: Number of embedding dimensions.
depth: Number of blocks in stage
token_mixer_type: Token mixer type.
kernel_size: Kernel size for repmixer.
mlp_ratio: MLP expansion ratio.
act_layer: Activation layer.
norm_layer: Normalization layer.
proj_drop_rate: Dropout rate.
drop_path_rate: Drop path rate.
layer_scale_init_value: Layer scale value at initialization.
inference_mode: Flag to instantiate block in inference mode.
"""
super().__init__()
self.grad_checkpointing = False
if downsample:
self.downsample = PatchEmbed(
patch_size=down_patch_size,
stride=down_stride,
in_chs=dim,
embed_dim=dim_out,
use_se=se_downsample,
act_layer=act_layer,
lkc_use_act=lkc_use_act,
inference_mode=inference_mode,
)
else:
assert dim == dim_out
self.downsample = nn.Identity()
if pos_emb_layer is not None:
self.pos_emb = pos_emb_layer(dim_out, inference_mode=inference_mode)
else:
self.pos_emb = nn.Identity()
blocks = []
for block_idx in range(depth):
if token_mixer_type == "repmixer":
blocks.append(RepMixerBlock(
dim_out,
kernel_size=kernel_size,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
proj_drop=proj_drop_rate,
drop_path=drop_path_rate[block_idx],
layer_scale_init_value=layer_scale_init_value,
inference_mode=inference_mode,
))
elif token_mixer_type == "attention":
blocks.append(AttentionBlock(
dim_out,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
norm_layer=norm_layer,
proj_drop=proj_drop_rate,
drop_path=drop_path_rate[block_idx],
layer_scale_init_value=layer_scale_init_value,
))
else:
raise ValueError(
"Token mixer type: {} not supported".format(token_mixer_type)
)
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
x = self.pos_emb(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class FastVit(nn.Module):
    """
    This class implements the `FastViT architecture <https://arxiv.org/pdf/2303.14189.pdf>`_
    """
    fork_feat: torch.jit.Final[bool]
def __init__(
self,
in_chans: int = 3,
layers: Tuple[int, ...] = (2, 2, 6, 2),
token_mixers: Tuple[str, ...] = ("repmixer", "repmixer", "repmixer", "repmixer"),
embed_dims: Tuple[int, ...] = (64, 128, 256, 512),
mlp_ratios: Tuple[float, ...] = (4,) * 4,
downsamples: Tuple[bool, ...] = (False, True, True, True),
se_downsamples: Tuple[bool, ...] = (False, False, False, False),
repmixer_kernel_size: int = 3,
num_classes: int = 1000,
pos_embs: Tuple[Optional[nn.Module], ...] = (None,) * 4,
down_patch_size: int = 7,
down_stride: int = 2,
drop_rate: float = 0.0,
proj_drop_rate: float = 0.0,
drop_path_rate: float = 0.0,
layer_scale_init_value: float = 1e-5,
lkc_use_act: bool = False,
fork_feat: bool = False,
cls_ratio: float = 2.0,
global_pool: str = 'avg',
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
act_layer: Type[nn.Module] = nn.GELU,
inference_mode: bool = False,
) -> None:
super().__init__()
self.num_classes = 0 if fork_feat else num_classes
self.fork_feat = fork_feat
self.global_pool = global_pool
self.feature_info = []
# Convolutional stem
self.stem = convolutional_stem(
in_chans,
embed_dims[0],
act_layer,
inference_mode,
)
# Build the main stages of the network architecture
prev_dim = embed_dims[0]
scale = 1
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)]
stages = []
for i in range(len(layers)):
downsample = downsamples[i] or prev_dim != embed_dims[i]
stage = FastVitStage(
dim=prev_dim,
dim_out=embed_dims[i],
depth=layers[i],
downsample=downsample,
se_downsample=se_downsamples[i],
down_patch_size=down_patch_size,
down_stride=down_stride,
pos_emb_layer=pos_embs[i],
token_mixer_type=token_mixers[i],
kernel_size=repmixer_kernel_size,
mlp_ratio=mlp_ratios[i],
act_layer=act_layer,
norm_layer=norm_layer,
proj_drop_rate=proj_drop_rate,
drop_path_rate=dpr[i],
layer_scale_init_value=layer_scale_init_value,
lkc_use_act=lkc_use_act,
inference_mode=inference_mode,
)
stages.append(stage)
prev_dim = embed_dims[i]
if downsample:
scale *= 2
self.feature_info += [dict(num_chs=prev_dim, reduction=4 * scale, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.num_stages = len(self.stages)
self.num_features = self.head_hidden_size = prev_dim
# For segmentation and detection, extract intermediate output
if self.fork_feat:
# Add a norm layer for each output. self.stages is slightly different than self.network
# in the original code, the PatchEmbed layer is part of self.stages in this code where
# it was part of self.network in the original code. So we do not need to skip out indices.
self.out_indices = [0, 1, 2, 3]
for i_emb, i_layer in enumerate(self.out_indices):
if i_emb == 0 and os.environ.get("FORK_LAST3", None):
"""For RetinaNet, `start_level=1`. The first norm layer will not used.
cmd: `FORK_LAST3=1 python -m torch.distributed.launch ...`
"""
layer = nn.Identity()
else:
layer = norm_layer(embed_dims[i_emb])
layer_name = f"norm{i_layer}"
self.add_module(layer_name, layer)
else:
# Classifier head
self.num_features = self.head_hidden_size = final_features = int(embed_dims[-1] * cls_ratio)
self.final_conv = MobileOneBlock(
in_chs=embed_dims[-1],
out_chs=final_features,
kernel_size=3,
stride=1,
group_size=1,
inference_mode=inference_mode,
use_se=True,
act_layer=act_layer,
num_conv_branches=1,
)
self.head = ClassifierHead(
final_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
self.apply(self._init_weights)
def _init_weights(self, m: nn.Module) -> None:
"""Init. for classification"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return set()
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem', # stem and embed
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+).pos_emb', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            List of intermediate feature maps if ``intermediates_only`` is True,
            otherwise a tuple of (final features, intermediates).
        """
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.stages), indices)
# forward pass
x = self.stem(x)
last_idx = self.num_stages - 1
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.stages
else:
stages = self.stages[:max_index + 1]
feat_idx = 0
for feat_idx, stage in enumerate(stages):
x = stage(x)
if feat_idx in take_indices:
intermediates.append(x)
if intermediates_only:
return intermediates
if feat_idx == last_idx:
x = self.final_conv(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.stages), indices)
self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
# input embedding
x = self.stem(x)
outs = []
for idx, block in enumerate(self.stages):
x = block(x)
if self.fork_feat:
if idx in self.out_indices:
norm_layer = getattr(self, f"norm{idx}")
x_out = norm_layer(x)
outs.append(x_out)
if self.fork_feat:
# output the features of four stages for dense prediction
return outs
x = self.final_conv(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
if self.fork_feat:
return x
x = self.forward_head(x)
return x
def _cfg(url="", **kwargs):
return {
"url": url,
"num_classes": 1000,
"input_size": (3, 256, 256),
"pool_size": (8, 8),
"crop_pct": 0.9,
"interpolation": "bicubic",
"mean": IMAGENET_DEFAULT_MEAN,
"std": IMAGENET_DEFAULT_STD,
'first_conv': ('stem.0.conv_kxk.0.conv', 'stem.0.conv_scale.conv'),
"classifier": "head.fc",
**kwargs,
}
default_cfgs = generate_default_cfgs({
"fastvit_t8.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_t12.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_s12.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_sa12.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_sa24.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_sa36.apple_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_ma36.apple_in1k": _cfg(
hf_hub_id='timm/',
crop_pct=0.95),
"fastvit_t8.apple_dist_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_t12.apple_dist_in1k": _cfg(
hf_hub_id='timm/'),
"fastvit_s12.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_sa12.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_sa24.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_sa36.apple_dist_in1k": _cfg(
hf_hub_id='timm/',),
"fastvit_ma36.apple_dist_in1k": _cfg(
hf_hub_id='timm/',
crop_pct=0.95
),
"fastvit_mci0.apple_mclip": _cfg(
hf_hub_id='apple/mobileclip_s0_timm',
url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s0.pt',
crop_pct=0.95,
num_classes=512, # CLIP proj dim
mean=(0., 0., 0.), std=(1., 1., 1.)
),
"fastvit_mci1.apple_mclip": _cfg(
hf_hub_id='apple/mobileclip_s1_timm',
url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s1.pt',
crop_pct=0.95,
num_classes=512, # CLIP proj dim
mean=(0., 0., 0.), std=(1., 1., 1.)
),
"fastvit_mci2.apple_mclip": _cfg(
hf_hub_id='apple/mobileclip_s2_timm',
url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s2.pt',
crop_pct=0.95,
num_classes=512, # CLIP proj dim
mean=(0., 0., 0.), std=(1., 1., 1.)
),
})
def checkpoint_filter_fn(state_dict, model):
""" Remap original checkpoints -> timm """
if 'stem.0.conv_kxk.0.conv.weight' in state_dict:
return state_dict # non-original checkpoint, no remapping needed
state_dict = state_dict.get('state_dict', state_dict)
if 'image_encoder.model.patch_embed.0.rbr_conv.0.conv.weight' in state_dict:
# remap MobileCLIP checkpoints
prefix = 'image_encoder.model.'
else:
prefix = ''
import re
import bisect
# find stage ends by locating downsample layers
stage_ends = []
for k, v in state_dict.items():
match = re.match(r'^(.*?)network\.(\d+)\.proj.*', k)
if match:
stage_ends.append(int(match.group(2)))
stage_ends = list(sorted(set(stage_ends)))
out_dict = {}
for k, v in state_dict.items():
if prefix:
if prefix not in k:
continue
k = k.replace(prefix, '')
# remap renamed layers
k = k.replace('patch_embed', 'stem')
k = k.replace('rbr_conv', 'conv_kxk')
k = k.replace('rbr_scale', 'conv_scale')
k = k.replace('rbr_skip', 'identity')
k = k.replace('conv_exp', 'final_conv') # to match byobnet, regnet, nfnet
k = k.replace('lkb_origin', 'large_conv')
k = k.replace('convffn', 'mlp')
k = k.replace('se.reduce', 'se.fc1')
k = k.replace('se.expand', 'se.fc2')
k = re.sub(r'layer_scale_([0-9])', r'layer_scale_\1.gamma', k)
if k.endswith('layer_scale'):
k = k.replace('layer_scale', 'layer_scale.gamma')
k = k.replace('dist_head', 'head_dist')
if k.startswith('head.'):
if k == 'head.proj' and hasattr(model.head, 'fc') and isinstance(model.head.fc, nn.Linear):
# if CLIP projection, map to head.fc w/ bias = zeros
k = k.replace('head.proj', 'head.fc.weight')
v = v.T
out_dict['head.fc.bias'] = torch.zeros(v.shape[0])
else:
k = k.replace('head.', 'head.fc.')
# remap flat sequential network to stages
match = re.match(r'^network\.(\d+)', k)
stage_idx, net_idx = None, None
if match:
net_idx = int(match.group(1))
stage_idx = bisect.bisect_right(stage_ends, net_idx)
if stage_idx is not None:
net_prefix = f'network.{net_idx}'
stage_prefix = f'stages.{stage_idx}'
if net_prefix + '.proj' in k:
k = k.replace(net_prefix + '.proj', stage_prefix + '.downsample.proj')
elif net_prefix + '.pe' in k:
k = k.replace(net_prefix + '.pe', stage_prefix + '.pos_emb.pos_enc')
else:
k = k.replace(net_prefix, stage_prefix + '.blocks')
out_dict[k] = v
return out_dict
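# Hedged illustration (not part of the upstream module): for a checkpoint whose flat
# 'network' list has downsample ('proj') layers at indices 1, 3 and 5, the filter
# above produces remappings such as:
#     'patch_embed.0.rbr_conv.0.conv.weight'    -> 'stem.0.conv_kxk.0.conv.weight'
#     'network.0.0.convffn.fc1.weight'          -> 'stages.0.blocks.0.mlp.fc1.weight'
#     'network.1.proj.0.lkb_origin.conv.weight' -> 'stages.1.downsample.proj.0.large_conv.conv.weight'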
def _create_fastvit(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
model = build_model_with_cfg(
FastVit,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs
)
return model
@register_model
def fastvit_t8(pretrained=False, **kwargs):
"""Instantiate FastViT-T8 model variant."""
model_args = dict(
layers=(2, 2, 4, 2),
embed_dims=(48, 96, 192, 384),
mlp_ratios=(3, 3, 3, 3),
token_mixers=("repmixer", "repmixer", "repmixer", "repmixer")
)
return _create_fastvit('fastvit_t8', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_t12(pretrained=False, **kwargs):
"""Instantiate FastViT-T12 model variant."""
model_args = dict(
layers=(2, 2, 6, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(3, 3, 3, 3),
token_mixers=("repmixer", "repmixer", "repmixer", "repmixer"),
)
return _create_fastvit('fastvit_t12', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_s12(pretrained=False, **kwargs):
"""Instantiate FastViT-S12 model variant."""
model_args = dict(
layers=(2, 2, 6, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
token_mixers=("repmixer", "repmixer", "repmixer", "repmixer"),
)
return _create_fastvit('fastvit_s12', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_sa12(pretrained=False, **kwargs):
"""Instantiate FastViT-SA12 model variant."""
model_args = dict(
layers=(2, 2, 6, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
)
return _create_fastvit('fastvit_sa12', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_sa24(pretrained=False, **kwargs):
"""Instantiate FastViT-SA24 model variant."""
model_args = dict(
layers=(4, 4, 12, 4),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
)
return _create_fastvit('fastvit_sa24', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_sa36(pretrained=False, **kwargs):
"""Instantiate FastViT-SA36 model variant."""
model_args = dict(
layers=(6, 6, 18, 6),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
)
return _create_fastvit('fastvit_sa36', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_ma36(pretrained=False, **kwargs):
"""Instantiate FastViT-MA36 model variant."""
model_args = dict(
layers=(6, 6, 18, 6),
embed_dims=(76, 152, 304, 608),
mlp_ratios=(4, 4, 4, 4),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention")
)
return _create_fastvit('fastvit_ma36', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_mci0(pretrained=False, **kwargs):
"""Instantiate MCi0 model variant."""
model_args = dict(
layers=(2, 6, 10, 2),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(3, 3, 3, 3),
se_downsamples=(False, False, True, True),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
lkc_use_act=True,
)
return _create_fastvit('fastvit_mci0', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_mci1(pretrained=False, **kwargs):
"""Instantiate MCi1 model variant."""
model_args = dict(
layers=(4, 12, 20, 4),
embed_dims=(64, 128, 256, 512),
mlp_ratios=(3, 3, 3, 3),
se_downsamples=(False, False, True, True),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
lkc_use_act=True,
)
return _create_fastvit('fastvit_mci1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fastvit_mci2(pretrained=False, **kwargs):
"""Instantiate MCi2 model variant."""
model_args = dict(
layers=(4, 12, 24, 4),
embed_dims=(80, 160, 320, 640),
mlp_ratios=(3, 3, 3, 3),
se_downsamples=(False, False, True, True),
pos_embs=(None, None, None, partial(RepConditionalPosEnc, spatial_shape=(7, 7))),
token_mixers=("repmixer", "repmixer", "repmixer", "attention"),
lkc_use_act=True,
)
return _create_fastvit('fastvit_mci2', pretrained=pretrained, **dict(model_args, **kwargs))
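# Hedged usage sketch (not part of the upstream module): a common deployment pattern
# is to build a variant normally, then fuse every re-parameterizable branch into a
# plain conv for inference. timm ships a utility for this; the explicit loop keeps
# the sketch self-contained. The helper name below is illustrative only.
def _example_fastvit_inference():
    model = fastvit_t8(pretrained=False)
    model.eval()
    for m in list(model.modules()):
        if hasattr(m, 'reparameterize'):
            m.reparameterize()  # MobileOneBlock, RepMixer, ReparamLargeKernelConv, RepConditionalPosEnc
    x = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        logits = model(x)  # -> shape (1, 1000)
    return logits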
| pytorch-image-models/timm/models/fastvit.py/0 | {
"file_path": "pytorch-image-models/timm/models/fastvit.py",
"repo_id": "pytorch-image-models",
"token_count": 29338
} |
""" Pytorch Inception-V4 implementation
Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is
based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License)
"""
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import create_classifier, ConvNormAct
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['InceptionV4']
class Mixed3a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed3a, self).__init__()
self.maxpool = nn.MaxPool2d(3, stride=2)
self.conv = conv_block(64, 96, kernel_size=3, stride=2)
def forward(self, x):
x0 = self.maxpool(x)
x1 = self.conv(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed4a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed4a, self).__init__()
self.branch0 = nn.Sequential(
conv_block(160, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1)
)
self.branch1 = nn.Sequential(
conv_block(160, 64, kernel_size=1, stride=1),
conv_block(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(64, 96, kernel_size=(3, 3), stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed5a(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(Mixed5a, self).__init__()
self.conv = conv_block(192, 192, kernel_size=3, stride=2)
self.maxpool = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.conv(x)
x1 = self.maxpool(x)
out = torch.cat((x0, x1), 1)
return out
class InceptionA(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionA, self).__init__()
self.branch0 = conv_block(384, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
conv_block(384, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
conv_block(384, 64, kernel_size=1, stride=1),
conv_block(64, 96, kernel_size=3, stride=1, padding=1),
conv_block(96, 96, kernel_size=3, stride=1, padding=1)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(384, 96, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class ReductionA(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(ReductionA, self).__init__()
self.branch0 = conv_block(384, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
conv_block(384, 192, kernel_size=1, stride=1),
conv_block(192, 224, kernel_size=3, stride=1, padding=1),
conv_block(224, 256, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class InceptionB(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionB, self).__init__()
self.branch0 = conv_block(1024, 384, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0))
)
self.branch2 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3))
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(1024, 128, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class ReductionB(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(ReductionB, self).__init__()
self.branch0 = nn.Sequential(
conv_block(1024, 192, kernel_size=1, stride=1),
conv_block(192, 192, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
conv_block(1024, 256, kernel_size=1, stride=1),
conv_block(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)),
conv_block(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)),
conv_block(320, 320, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class InceptionC(nn.Module):
def __init__(self, conv_block=ConvNormAct):
super(InceptionC, self).__init__()
self.branch0 = conv_block(1536, 256, kernel_size=1, stride=1)
self.branch1_0 = conv_block(1536, 384, kernel_size=1, stride=1)
self.branch1_1a = conv_block(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch1_1b = conv_block(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch2_0 = conv_block(1536, 384, kernel_size=1, stride=1)
self.branch2_1 = conv_block(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch2_2 = conv_block(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch2_3a = conv_block(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch2_3b = conv_block(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
conv_block(1536, 256, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1_0 = self.branch1_0(x)
x1_1a = self.branch1_1a(x1_0)
x1_1b = self.branch1_1b(x1_0)
x1 = torch.cat((x1_1a, x1_1b), 1)
x2_0 = self.branch2_0(x)
x2_1 = self.branch2_1(x2_0)
x2_2 = self.branch2_2(x2_1)
x2_3a = self.branch2_3a(x2_2)
x2_3b = self.branch2_3b(x2_2)
x2 = torch.cat((x2_3a, x2_3b), 1)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class InceptionV4(nn.Module):
def __init__(
self,
num_classes=1000,
in_chans=3,
output_stride=32,
drop_rate=0.,
global_pool='avg',
norm_layer='batchnorm2d',
norm_eps=1e-3,
act_layer='relu',
):
super(InceptionV4, self).__init__()
assert output_stride == 32
self.num_classes = num_classes
self.num_features = self.head_hidden_size = 1536
conv_block = partial(
ConvNormAct,
padding=0,
norm_layer=norm_layer,
act_layer=act_layer,
norm_kwargs=dict(eps=norm_eps),
act_kwargs=dict(inplace=True),
)
features = [
conv_block(in_chans, 32, kernel_size=3, stride=2),
conv_block(32, 32, kernel_size=3, stride=1),
conv_block(32, 64, kernel_size=3, stride=1, padding=1),
Mixed3a(conv_block),
Mixed4a(conv_block),
Mixed5a(conv_block),
]
features += [InceptionA(conv_block) for _ in range(4)]
features += [ReductionA(conv_block)] # Mixed6a
features += [InceptionB(conv_block) for _ in range(7)]
features += [ReductionB(conv_block)] # Mixed7a
features += [InceptionC(conv_block) for _ in range(3)]
self.features = nn.Sequential(*features)
self.feature_info = [
dict(num_chs=64, reduction=2, module='features.2'),
dict(num_chs=160, reduction=4, module='features.3'),
dict(num_chs=384, reduction=8, module='features.9'),
dict(num_chs=1024, reduction=16, module='features.17'),
dict(num_chs=1536, reduction=32, module='features.21'),
]
self.global_pool, self.head_drop, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^features\.[012]\.',
blocks=r'^features\.(\d+)'
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.last_linear
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
return self.features(x)
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.last_linear(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_inception_v4(variant, pretrained=False, **kwargs) -> InceptionV4:
return build_model_with_cfg(
InceptionV4,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True),
**kwargs,
)
default_cfgs = generate_default_cfgs({
'inception_v4.tf_in1k': {
'hf_hub_id': 'timm/',
'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'features.0.conv', 'classifier': 'last_linear',
}
})
@register_model
def inception_v4(pretrained=False, **kwargs):
return _create_inception_v4('inception_v4', pretrained, **kwargs)
| pytorch-image-models/timm/models/inception_v4.py/0 | {
"file_path": "pytorch-image-models/timm/models/inception_v4.py",
"repo_id": "pytorch-image-models",
"token_count": 5547
} |
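A minimal usage sketch for the Inception-V4 entrypoint registered above. This is a sketch only, assuming the timm package is importable; shapes follow the default_cfgs input size of 299x299 and the channel counts listed in self.feature_info.
import torch
import timm

# classification: head size set via num_classes (10 here is an arbitrary example)
model = timm.create_model('inception_v4', pretrained=False, num_classes=10).eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 299, 299))
print(logits.shape)  # torch.Size([1, 10])

# feature extraction: taps follow self.feature_info (reductions 2, 4, 8, 16, 32)
feat_model = timm.create_model('inception_v4', pretrained=False, features_only=True).eval()
with torch.no_grad():
    feats = feat_model(torch.randn(1, 3, 299, 299))
print([f.shape[1] for f in feats])  # expected channel counts: [64, 160, 384, 1024, 1536]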
""" Pyramid Vision Transformer v2
@misc{wang2021pvtv2,
title={PVTv2: Improved Baselines with Pyramid Vision Transformer},
author={Wenhai Wang and Enze Xie and Xiang Li and Deng-Ping Fan and Kaitao Song and Ding Liang and
Tong Lu and Ping Luo and Ling Shao},
year={2021},
eprint={2106.13797},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
Based on Apache 2.0 licensed code at https://github.com/whai362/PVT
Modifications and timm support by / Copyright 2022, Ross Wightman
"""
import math
from typing import Callable, List, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, to_2tuple, to_ntuple, trunc_normal_, LayerNorm, use_fused_attn
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint
from ._registry import register_model, generate_default_cfgs
__all__ = ['PyramidVisionTransformerV2']
class MlpWithDepthwiseConv(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.,
extra_relu=False,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.relu = nn.ReLU() if extra_relu else nn.Identity()
self.dwconv = nn.Conv2d(hidden_features, hidden_features, 3, 1, 1, bias=True, groups=hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x, feat_size: List[int]):
x = self.fc1(x)
B, N, C = x.shape
x = x.transpose(1, 2).view(B, C, feat_size[0], feat_size[1])
x = self.relu(x)
x = self.dwconv(x)
x = x.flatten(2).transpose(1, 2)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
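# Note (added): MlpWithDepthwiseConv inserts a 3x3 depthwise conv between fc1 and the
# activation, reshaping tokens back to a (B, C, H, W) grid via feat_size. In PVTv2 this
# convolution (with its zero padding) supplies positional information in place of fixed
# position embeddings. The optional extra ReLU (extra_relu=True) is only enabled for the
# linear-attention variants (see Block below, extra_relu=linear_attn).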
class Attention(nn.Module):
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim,
num_heads=8,
sr_ratio=1,
linear_attn=False,
qkv_bias=True,
attn_drop=0.,
proj_drop=0.
):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
if not linear_attn:
self.pool = None
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.LayerNorm(dim)
else:
self.sr = None
self.norm = None
self.act = None
else:
self.pool = nn.AdaptiveAvgPool2d(7)
self.sr = nn.Conv2d(dim, dim, kernel_size=1, stride=1)
self.norm = nn.LayerNorm(dim)
self.act = nn.GELU()
def forward(self, x, feat_size: List[int]):
B, N, C = x.shape
H, W = feat_size
q = self.q(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
if self.pool is not None:
x = x.permute(0, 2, 1).reshape(B, C, H, W)
x = self.sr(self.pool(x)).reshape(B, C, -1).permute(0, 2, 1)
x = self.norm(x)
x = self.act(x)
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
else:
if self.sr is not None:
x = x.permute(0, 2, 1).reshape(B, C, H, W)
x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1)
x = self.norm(x)
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
else:
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
k, v = kv.unbind(0)
if self.fused_attn:
x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
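# Note (added): the Attention module above implements PVTv2 spatial-reduction attention (SRA).
# - With linear_attn=False and sr_ratio > 1, keys/values are computed from a strided
#   conv-downsampled copy of the feature map (self.sr), shrinking the K/V length by sr_ratio**2.
# - With linear_attn=True, keys/values always come from a fixed 7x7 adaptive-average-pooled map
#   followed by a 1x1 conv, LayerNorm and GELU, giving the "linear" variant used by the *_li models.
# Queries always keep full resolution, so the output token count is unchanged.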
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
sr_ratio=1,
linear_attn=False,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
sr_ratio=sr_ratio,
linear_attn=linear_attn,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = MlpWithDepthwiseConv(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
extra_relu=linear_attn,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x, feat_size: List[int]):
x = x + self.drop_path1(self.attn(self.norm1(x), feat_size))
x = x + self.drop_path2(self.mlp(self.norm2(x), feat_size))
return x
class OverlapPatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, patch_size=7, stride=4, in_chans=3, embed_dim=768):
super().__init__()
patch_size = to_2tuple(patch_size)
assert max(patch_size) > stride, "Set larger patch_size than stride"
self.patch_size = patch_size
self.proj = nn.Conv2d(
in_chans, embed_dim, patch_size,
stride=stride, padding=(patch_size[0] // 2, patch_size[1] // 2))
self.norm = nn.LayerNorm(embed_dim)
def forward(self, x):
x = self.proj(x)
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
return x
class PyramidVisionTransformerStage(nn.Module):
def __init__(
self,
dim: int,
dim_out: int,
depth: int,
downsample: bool = True,
num_heads: int = 8,
sr_ratio: int = 1,
linear_attn: bool = False,
mlp_ratio: float = 4.0,
qkv_bias: bool = True,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: Union[List[float], float] = 0.0,
norm_layer: Callable = LayerNorm,
):
super().__init__()
self.grad_checkpointing = False
if downsample:
self.downsample = OverlapPatchEmbed(
patch_size=3,
stride=2,
in_chans=dim,
embed_dim=dim_out,
)
else:
assert dim == dim_out
self.downsample = None
self.blocks = nn.ModuleList([Block(
dim=dim_out,
num_heads=num_heads,
sr_ratio=sr_ratio,
linear_attn=linear_attn,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
) for i in range(depth)])
self.norm = norm_layer(dim_out)
def forward(self, x):
# x is either B, C, H, W (if downsample) or B, H, W, C if not
if self.downsample is not None:
# input to downsample is B, C, H, W
x = self.downsample(x) # output B, H, W, C
B, H, W, C = x.shape
feat_size = (H, W)
x = x.reshape(B, -1, C)
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint.checkpoint(blk, x, feat_size)
else:
x = blk(x, feat_size)
x = self.norm(x)
x = x.reshape(B, feat_size[0], feat_size[1], -1).permute(0, 3, 1, 2).contiguous()
return x
class PyramidVisionTransformerV2(nn.Module):
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
depths=(3, 4, 6, 3),
embed_dims=(64, 128, 256, 512),
num_heads=(1, 2, 4, 8),
sr_ratios=(8, 4, 2, 1),
mlp_ratios=(8., 8., 4., 4.),
qkv_bias=True,
linear=False,
drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=LayerNorm,
):
super().__init__()
self.num_classes = num_classes
assert global_pool in ('avg', '')
self.global_pool = global_pool
self.depths = depths
num_stages = len(depths)
mlp_ratios = to_ntuple(num_stages)(mlp_ratios)
num_heads = to_ntuple(num_stages)(num_heads)
sr_ratios = to_ntuple(num_stages)(sr_ratios)
assert len(embed_dims) == num_stages
self.feature_info = []
self.patch_embed = OverlapPatchEmbed(
patch_size=7,
stride=4,
in_chans=in_chans,
embed_dim=embed_dims[0],
)
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
cur = 0
prev_dim = embed_dims[0]
stages = []
for i in range(num_stages):
stages += [PyramidVisionTransformerStage(
dim=prev_dim,
dim_out=embed_dims[i],
depth=depths[i],
downsample=i > 0,
num_heads=num_heads[i],
sr_ratio=sr_ratios[i],
mlp_ratio=mlp_ratios[i],
linear_attn=linear,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
)]
prev_dim = embed_dims[i]
cur += depths[i]
self.feature_info += [dict(num_chs=prev_dim, reduction=4 * 2**i, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
# classification head
self.num_features = self.head_hidden_size = embed_dims[-1]
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def freeze_patch_emb(self):
self.patch_embed.requires_grad = False
@torch.jit.ignore
def no_weight_decay(self):
return {}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^patch_embed', # stem and embed
blocks=r'^stages\.(\d+)'
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('avg', '')
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x.mean(dim=(-1, -2))
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
""" Remap original checkpoints -> timm """
if 'patch_embed.proj.weight' in state_dict:
return state_dict # non-original checkpoint, no remapping needed
out_dict = {}
import re
for k, v in state_dict.items():
if k.startswith('patch_embed'):
k = k.replace('patch_embed1', 'patch_embed')
k = k.replace('patch_embed2', 'stages.1.downsample')
k = k.replace('patch_embed3', 'stages.2.downsample')
k = k.replace('patch_embed4', 'stages.3.downsample')
k = k.replace('dwconv.dwconv', 'dwconv')
k = re.sub(r'block(\d+).(\d+)', lambda x: f'stages.{int(x.group(1)) - 1}.blocks.{x.group(2)}', k)
k = re.sub(r'^norm(\d+)', lambda x: f'stages.{int(x.group(1)) - 1}.norm', k)
out_dict[k] = v
return out_dict
def _create_pvt2(variant, pretrained=False, **kwargs):
default_out_indices = tuple(range(4))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
PyramidVisionTransformerV2,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head', 'fixed_input_size': False,
**kwargs
}
default_cfgs = generate_default_cfgs({
'pvt_v2_b0.in1k': _cfg(hf_hub_id='timm/'),
'pvt_v2_b1.in1k': _cfg(hf_hub_id='timm/'),
'pvt_v2_b2.in1k': _cfg(hf_hub_id='timm/'),
'pvt_v2_b3.in1k': _cfg(hf_hub_id='timm/'),
'pvt_v2_b4.in1k': _cfg(hf_hub_id='timm/'),
'pvt_v2_b5.in1k': _cfg(hf_hub_id='timm/'),
'pvt_v2_b2_li.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def pvt_v2_b0(pretrained=False, **kwargs) -> PyramidVisionTransformerV2:
model_args = dict(depths=(2, 2, 2, 2), embed_dims=(32, 64, 160, 256), num_heads=(1, 2, 5, 8))
return _create_pvt2('pvt_v2_b0', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def pvt_v2_b1(pretrained=False, **kwargs) -> PyramidVisionTransformerV2:
model_args = dict(depths=(2, 2, 2, 2), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8))
return _create_pvt2('pvt_v2_b1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def pvt_v2_b2(pretrained=False, **kwargs) -> PyramidVisionTransformerV2:
model_args = dict(depths=(3, 4, 6, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8))
return _create_pvt2('pvt_v2_b2', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def pvt_v2_b3(pretrained=False, **kwargs) -> PyramidVisionTransformerV2:
model_args = dict(depths=(3, 4, 18, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8))
return _create_pvt2('pvt_v2_b3', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def pvt_v2_b4(pretrained=False, **kwargs) -> PyramidVisionTransformerV2:
model_args = dict(depths=(3, 8, 27, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8))
return _create_pvt2('pvt_v2_b4', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def pvt_v2_b5(pretrained=False, **kwargs) -> PyramidVisionTransformerV2:
model_args = dict(
depths=(3, 6, 40, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), mlp_ratios=(4, 4, 4, 4))
return _create_pvt2('pvt_v2_b5', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def pvt_v2_b2_li(pretrained=False, **kwargs) -> PyramidVisionTransformerV2:
model_args = dict(
depths=(3, 4, 6, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), linear=True)
return _create_pvt2('pvt_v2_b2_li', pretrained=pretrained, **dict(model_args, **kwargs))
| pytorch-image-models/timm/models/pvt_v2.py/0 | {
"file_path": "pytorch-image-models/timm/models/pvt_v2.py",
"repo_id": "pytorch-image-models",
"token_count": 9062
} |
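A minimal, hedged usage sketch for the PVTv2 entrypoints registered above (pvt_v2_b0 through pvt_v2_b5 and pvt_v2_b2_li), assuming timm is importable; the channel counts follow the embed_dims of the chosen variant.
import torch
import timm

# 4-stage hierarchical backbone, strides 4/8/16/32 (see self.feature_info above)
backbone = timm.create_model('pvt_v2_b0', pretrained=False, features_only=True).eval()
print(backbone.feature_info.channels())   # [32, 64, 160, 256] for pvt_v2_b0
print(backbone.feature_info.reduction())  # [4, 8, 16, 32]
with torch.no_grad():
    feats = backbone(torch.randn(1, 3, 224, 224))
for f in feats:
    print(tuple(f.shape))  # (1, 32, 56, 56), (1, 64, 28, 28), (1, 160, 14, 14), (1, 256, 7, 7)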
""" Swin Transformer V2
A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution`
- https://arxiv.org/abs/2111.09883
Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below
Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman
"""
# --------------------------------------------------------
# Swin Transformer V2
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import math
from typing import Callable, List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_, ClassifierHead,\
resample_patch_embed, ndgrid, get_act_layer, LayerType
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._features_fx import register_notrace_function
from ._manipulate import checkpoint
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['SwinTransformerV2'] # model_registry will add each entrypoint fn to this
_int_or_tuple_2_t = Union[int, Tuple[int, int]]
def window_partition(x: torch.Tensor, window_size: Tuple[int, int]) -> torch.Tensor:
"""
Args:
x: (B, H, W, C)
window_size (Tuple[int, int]): window size (height, width)
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows: torch.Tensor, window_size: Tuple[int, int], img_size: Tuple[int, int]) -> torch.Tensor:
"""
Args:
windows: (num_windows * B, window_size[0], window_size[1], C)
window_size (Tuple[int, int]): Window size
img_size (Tuple[int, int]): Image size
Returns:
x: (B, H, W, C)
"""
H, W = img_size
C = windows.shape[-1]
x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
return x
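# Worked shape example (added note): with x of shape (2, 56, 56, 96) and window_size=(7, 7),
# window_partition yields (2 * 8 * 8, 7, 7, 96) = (128, 7, 7, 96), i.e. 64 windows per image.
# window_reverse(windows, (7, 7), (56, 56)) inverts this exactly, returning (2, 56, 56, 96).
# Both functions are only applied to inputs already padded to a multiple of the window size.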
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
pretrained_window_size (tuple[int]): The height and width of the window in pre-training.
"""
def __init__(
self,
dim: int,
window_size: Tuple[int, int],
num_heads: int,
qkv_bias: bool = True,
qkv_bias_separate: bool = False,
attn_drop: float = 0.,
proj_drop: float = 0.,
pretrained_window_size: Tuple[int, int] = (0, 0),
) -> None:
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.pretrained_window_size = to_2tuple(pretrained_window_size)
self.num_heads = num_heads
self.qkv_bias_separate = qkv_bias_separate
self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
# mlp to generate continuous relative position bias
self.cpb_mlp = nn.Sequential(
nn.Linear(2, 512, bias=True),
nn.ReLU(inplace=True),
nn.Linear(512, num_heads, bias=False)
)
self.qkv = nn.Linear(dim, dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(dim))
self.register_buffer('k_bias', torch.zeros(dim), persistent=False)
self.v_bias = nn.Parameter(torch.zeros(dim))
else:
self.q_bias = None
self.k_bias = None
self.v_bias = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.softmax = nn.Softmax(dim=-1)
self._make_pair_wise_relative_positions()
def _make_pair_wise_relative_positions(self):
# get relative_coords_table
relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0]).to(torch.float32)
relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1]).to(torch.float32)
relative_coords_table = torch.stack(ndgrid(relative_coords_h, relative_coords_w))
relative_coords_table = relative_coords_table.permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2
if self.pretrained_window_size[0] > 0:
relative_coords_table[:, :, :, 0] /= (self.pretrained_window_size[0] - 1)
relative_coords_table[:, :, :, 1] /= (self.pretrained_window_size[1] - 1)
else:
relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1)
relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1)
relative_coords_table *= 8 # normalize to -8, 8
relative_coords_table = torch.sign(relative_coords_table) * torch.log2(
torch.abs(relative_coords_table) + 1.0) / math.log2(8)
self.register_buffer("relative_coords_table", relative_coords_table, persistent=False)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(ndgrid(coords_h, coords_w)) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index, persistent=False)
def set_window_size(self, window_size: Tuple[int, int]) -> None:
"""Update window size & interpolate position embeddings
Args:
window_size (Tuple[int, int]): New window size
"""
window_size = to_2tuple(window_size)
if window_size != self.window_size:
self.window_size = window_size
self._make_pair_wise_relative_positions()
def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
if self.q_bias is None:
qkv = self.qkv(x)
else:
qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias))
if self.qkv_bias_separate:
qkv = self.qkv(x)
qkv += qkv_bias
else:
qkv = F.linear(x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
# cosine attention
attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1))
logit_scale = torch.clamp(self.logit_scale, max=math.log(1. / 0.01)).exp()
attn = attn * logit_scale
relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads)
relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
num_win = mask.shape[0]
attn = attn.view(-1, num_win, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
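# Note (added): relative to Swin V1, the attention above uses
#   (1) cosine attention: q and k are L2-normalized and scaled by a learnable per-head
#       logit_scale, clamped so that exp(logit_scale) <= 100, and
#   (2) a continuous relative position bias: a small MLP (cpb_mlp) maps log-spaced relative
#       coordinates to per-head biases, and 16 * sigmoid(.) bounds them to (0, 16).
# These changes are what let pretrained weights transfer across window sizes and resolutions.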
class SwinTransformerV2Block(nn.Module):
""" Swin Transformer Block.
"""
def __init__(
self,
dim: int,
input_resolution: _int_or_tuple_2_t,
num_heads: int,
window_size: _int_or_tuple_2_t = 7,
shift_size: _int_or_tuple_2_t = 0,
always_partition: bool = False,
dynamic_mask: bool = False,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
act_layer: LayerType = "gelu",
norm_layer: Type[nn.Module] = nn.LayerNorm,
pretrained_window_size: _int_or_tuple_2_t = 0,
):
"""
Args:
dim: Number of input channels.
input_resolution: Input resolution.
num_heads: Number of attention heads.
window_size: Window size.
shift_size: Shift size for SW-MSA.
always_partition: Always partition into full windows and shift
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: If True, add a learnable bias to query, key, value.
proj_drop: Dropout rate.
attn_drop: Attention dropout rate.
drop_path: Stochastic depth rate.
act_layer: Activation layer.
norm_layer: Normalization layer.
pretrained_window_size: Window size in pretraining.
"""
super().__init__()
self.dim = dim
self.input_resolution = to_2tuple(input_resolution)
self.num_heads = num_heads
self.target_shift_size = to_2tuple(shift_size) # store for later resize
self.always_partition = always_partition
self.dynamic_mask = dynamic_mask
self.window_size, self.shift_size = self._calc_window_shift(window_size, shift_size)
self.window_area = self.window_size[0] * self.window_size[1]
self.mlp_ratio = mlp_ratio
act_layer = get_act_layer(act_layer)
self.attn = WindowAttention(
dim,
window_size=to_2tuple(self.window_size),
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
pretrained_window_size=to_2tuple(pretrained_window_size),
)
self.norm1 = norm_layer(dim)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.norm2 = norm_layer(dim)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.register_buffer(
"attn_mask",
None if self.dynamic_mask else self.get_attn_mask(),
persistent=False,
)
def get_attn_mask(self, x: Optional[torch.Tensor] = None) -> Optional[torch.Tensor]:
if any(self.shift_size):
# calculate attention mask for SW-MSA
if x is None:
img_mask = torch.zeros((1, *self.input_resolution, 1)) # 1 H W 1
else:
img_mask = torch.zeros((1, x.shape[1], x.shape[2], 1), dtype=x.dtype, device=x.device) # 1 H W 1
cnt = 0
for h in (
(0, -self.window_size[0]),
(-self.window_size[0], -self.shift_size[0]),
(-self.shift_size[0], None),
):
for w in (
(0, -self.window_size[1]),
(-self.window_size[1], -self.shift_size[1]),
(-self.shift_size[1], None),
):
img_mask[:, h[0]:h[1], w[0]:w[1], :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_area)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
return attn_mask
def _calc_window_shift(
self,
target_window_size: _int_or_tuple_2_t,
target_shift_size: Optional[_int_or_tuple_2_t] = None,
) -> Tuple[Tuple[int, int], Tuple[int, int]]:
target_window_size = to_2tuple(target_window_size)
if target_shift_size is None:
# if passed value is None, recalculate from default window_size // 2 if it was active
target_shift_size = self.target_shift_size
if any(target_shift_size):
# if there was previously a non-zero shift, recalculate based on current window_size
target_shift_size = (target_window_size[0] // 2, target_window_size[1] // 2)
else:
target_shift_size = to_2tuple(target_shift_size)
if self.always_partition:
return target_window_size, target_shift_size
target_window_size = to_2tuple(target_window_size)
target_shift_size = to_2tuple(target_shift_size)
window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)]
shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)]
return tuple(window_size), tuple(shift_size)
def set_input_size(
self,
feat_size: Tuple[int, int],
window_size: Tuple[int, int],
always_partition: Optional[bool] = None,
):
""" Updates the input resolution, window size.
Args:
feat_size (Tuple[int, int]): New input resolution
window_size (Tuple[int, int]): New window size
always_partition: Change always_partition attribute if not None
"""
# Update input resolution
self.input_resolution = feat_size
if always_partition is not None:
self.always_partition = always_partition
self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(window_size))
self.window_area = self.window_size[0] * self.window_size[1]
self.attn.set_window_size(self.window_size)
self.register_buffer(
"attn_mask",
None if self.dynamic_mask else self.get_attn_mask(),
persistent=False,
)
def _attn(self, x: torch.Tensor) -> torch.Tensor:
B, H, W, C = x.shape
# cyclic shift
has_shift = any(self.shift_size)
if has_shift:
shifted_x = torch.roll(x, shifts=(-self.shift_size[0], -self.shift_size[1]), dims=(1, 2))
else:
shifted_x = x
pad_h = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0]
pad_w = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1]
shifted_x = torch.nn.functional.pad(shifted_x, (0, 0, 0, pad_w, 0, pad_h))
_, Hp, Wp, _ = shifted_x.shape
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_area, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
if getattr(self, 'dynamic_mask', False):
attn_mask = self.get_attn_mask(shifted_x)
else:
attn_mask = self.attn_mask
attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C)
shifted_x = window_reverse(attn_windows, self.window_size, (Hp, Wp)) # B H' W' C
shifted_x = shifted_x[:, :H, :W, :].contiguous()
# reverse cyclic shift
if has_shift:
x = torch.roll(shifted_x, shifts=self.shift_size, dims=(1, 2))
else:
x = shifted_x
return x
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, H, W, C = x.shape
x = x + self.drop_path1(self.norm1(self._attn(x)))
x = x.reshape(B, -1, C)
x = x + self.drop_path2(self.norm2(self.mlp(x)))
x = x.reshape(B, H, W, C)
return x
class PatchMerging(nn.Module):
""" Patch Merging Layer.
"""
def __init__(
self,
dim: int,
out_dim: Optional[int] = None,
norm_layer: Type[nn.Module] = nn.LayerNorm
):
"""
Args:
dim (int): Number of input channels.
out_dim (int): Number of output channels (or 2 * dim if None)
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
super().__init__()
self.dim = dim
self.out_dim = out_dim or 2 * dim
self.reduction = nn.Linear(4 * dim, self.out_dim, bias=False)
self.norm = norm_layer(self.out_dim)
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, H, W, C = x.shape
pad_values = (0, 0, 0, W % 2, 0, H % 2)
x = nn.functional.pad(x, pad_values)
_, H, W, _ = x.shape
x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3)
x = self.reduction(x)
x = self.norm(x)
return x
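# Note (added): PatchMerging pads H/W to even values, concatenates each 2x2 neighborhood
# (4*dim channels), projects to out_dim (default 2*dim) with a bias-free linear layer, and
# then normalizes. SwinV2 places the norm after the reduction, whereas V1 normalized before it.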
class SwinTransformerV2Stage(nn.Module):
""" A Swin Transformer V2 Stage.
"""
def __init__(
self,
dim: int,
out_dim: int,
input_resolution: _int_or_tuple_2_t,
depth: int,
num_heads: int,
window_size: _int_or_tuple_2_t,
always_partition: bool = False,
dynamic_mask: bool = False,
downsample: bool = False,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
act_layer: Union[str, Callable] = 'gelu',
norm_layer: Type[nn.Module] = nn.LayerNorm,
pretrained_window_size: _int_or_tuple_2_t = 0,
output_nchw: bool = False,
) -> None:
"""
Args:
dim: Number of input channels.
out_dim: Number of output channels.
input_resolution: Input resolution.
depth: Number of blocks.
num_heads: Number of attention heads.
window_size: Local window size.
always_partition: Always partition into full windows and shift
dynamic_mask: Create attention mask in forward based on current input size
downsample: Use downsample layer at start of the block.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: If True, add a learnable bias to query, key, value.
proj_drop: Projection dropout rate
attn_drop: Attention dropout rate.
drop_path: Stochastic depth rate.
act_layer: Activation layer type.
norm_layer: Normalization layer.
pretrained_window_size: Local window size in pretraining.
output_nchw: Output tensors on NCHW format instead of NHWC.
"""
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.output_resolution = tuple(i // 2 for i in input_resolution) if downsample else input_resolution
self.depth = depth
self.output_nchw = output_nchw
self.grad_checkpointing = False
window_size = to_2tuple(window_size)
shift_size = tuple([w // 2 for w in window_size])
# patch merging / downsample layer
if downsample:
self.downsample = PatchMerging(dim=dim, out_dim=out_dim, norm_layer=norm_layer)
else:
assert dim == out_dim
self.downsample = nn.Identity()
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerV2Block(
dim=out_dim,
input_resolution=self.output_resolution,
num_heads=num_heads,
window_size=window_size,
shift_size=0 if (i % 2 == 0) else shift_size,
always_partition=always_partition,
dynamic_mask=dynamic_mask,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
act_layer=act_layer,
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size,
)
for i in range(depth)])
def set_input_size(
self,
feat_size: Tuple[int, int],
window_size: int,
always_partition: Optional[bool] = None,
):
""" Updates the resolution, window size and so the pair-wise relative positions.
Args:
feat_size: New input (feature) resolution
window_size: New window size
always_partition: Always partition / shift the window
"""
self.input_resolution = feat_size
if isinstance(self.downsample, nn.Identity):
self.output_resolution = feat_size
else:
assert isinstance(self.downsample, PatchMerging)
self.output_resolution = tuple(i // 2 for i in feat_size)
for block in self.blocks:
block.set_input_size(
feat_size=self.output_resolution,
window_size=window_size,
always_partition=always_partition,
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.downsample(x)
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
return x
def _init_respostnorm(self) -> None:
for blk in self.blocks:
nn.init.constant_(blk.norm1.bias, 0)
nn.init.constant_(blk.norm1.weight, 0)
nn.init.constant_(blk.norm2.bias, 0)
nn.init.constant_(blk.norm2.weight, 0)
class SwinTransformerV2(nn.Module):
""" Swin Transformer V2
A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution`
- https://arxiv.org/abs/2111.09883
"""
def __init__(
self,
img_size: _int_or_tuple_2_t = 224,
patch_size: int = 4,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
embed_dim: int = 96,
depths: Tuple[int, ...] = (2, 2, 6, 2),
num_heads: Tuple[int, ...] = (3, 6, 12, 24),
window_size: _int_or_tuple_2_t = 7,
always_partition: bool = False,
strict_img_size: bool = True,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.1,
act_layer: Union[str, Callable] = 'gelu',
norm_layer: Callable = nn.LayerNorm,
pretrained_window_sizes: Tuple[int, ...] = (0, 0, 0, 0),
**kwargs,
):
"""
Args:
img_size: Input image size.
patch_size: Patch size.
in_chans: Number of input image channels.
num_classes: Number of classes for classification head.
embed_dim: Patch embedding dimension.
depths: Depth of each Swin Transformer stage (layer).
num_heads: Number of attention heads in different layers.
window_size: Window size.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: If True, add a learnable bias to query, key, value.
drop_rate: Head dropout rate.
proj_drop_rate: Projection dropout rate.
attn_drop_rate: Attention dropout rate.
drop_path_rate: Stochastic depth rate.
norm_layer: Normalization layer.
act_layer: Activation layer type.
global_pool: Global pooling type ('avg' or '').
always_partition: Always partition into full windows and shift, even if the window exceeds the feature size.
strict_img_size: Enforce exact input image size in the patch embed (disable to allow dynamic input sizes).
pretrained_window_sizes: Pretrained window sizes of each layer.
"""
super().__init__()
self.num_classes = num_classes
assert global_pool in ('', 'avg')
self.global_pool = global_pool
self.output_fmt = 'NHWC'
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (self.num_layers - 1))
self.feature_info = []
if not isinstance(embed_dim, (tuple, list)):
embed_dim = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
norm_layer=norm_layer,
strict_img_size=strict_img_size,
output_fmt='NHWC',
)
grid_size = self.patch_embed.grid_size
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
layers = []
in_dim = embed_dim[0]
scale = 1
for i in range(self.num_layers):
out_dim = embed_dim[i]
layers += [SwinTransformerV2Stage(
dim=in_dim,
out_dim=out_dim,
input_resolution=(grid_size[0] // scale, grid_size[1] // scale),
depth=depths[i],
downsample=i > 0,
num_heads=num_heads[i],
window_size=window_size,
always_partition=always_partition,
dynamic_mask=not strict_img_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
act_layer=act_layer,
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_sizes[i],
)]
in_dim = out_dim
if i > 0:
scale *= 2
self.feature_info += [dict(num_chs=out_dim, reduction=4 * scale, module=f'layers.{i}')]
self.layers = nn.Sequential(*layers)
self.norm = norm_layer(self.num_features)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
input_fmt=self.output_fmt,
)
self.apply(self._init_weights)
for bly in self.layers:
bly._init_respostnorm()
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def set_input_size(
self,
img_size: Optional[Tuple[int, int]] = None,
patch_size: Optional[Tuple[int, int]] = None,
window_size: Optional[Tuple[int, int]] = None,
window_ratio: Optional[int] = 8,
always_partition: Optional[bool] = None,
):
"""Updates the image resolution, window size, and so the pair-wise relative positions.
Args:
img_size (Optional[Tuple[int, int]]): New input resolution, if None current resolution is used
patch_size (Optional[Tuple[int, int]]): New patch size, if None use current patch size
window_size (Optional[Tuple[int, int]]): New window size, if None computed from the patch grid size // window_ratio
window_ratio (int): divisor for calculating window size from patch grid size
always_partition: always partition / shift windows even if feat size is < window
"""
if img_size is not None or patch_size is not None:
self.patch_embed.set_input_size(img_size=img_size, patch_size=patch_size)
grid_size = self.patch_embed.grid_size
if window_size is None and window_ratio is not None:
window_size = tuple([s // window_ratio for s in grid_size])
for index, stage in enumerate(self.layers):
stage_scale = 2 ** max(index - 1, 0)
stage.set_input_size(
feat_size=(grid_size[0] // stage_scale, grid_size[1] // stage_scale),
window_size=window_size,
always_partition=always_partition,
)
@torch.jit.ignore
def no_weight_decay(self):
nod = set()
for n, m in self.named_modules():
if any([kw in n for kw in ("cpb_mlp", "logit_scale")]):
nod.add(n)
return nod
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^absolute_pos_embed|patch_embed', # stem and embed
blocks=r'^layers\.(\d+)' if coarse else [
(r'^layers\.(\d+).downsample', (0,)),
(r'^layers\.(\d+)\.\w+\.(\d+)', None),
(r'^norm', (99999,)),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for l in self.layers:
l.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
Returns:
List of intermediate feature maps if intermediates_only, else a tuple of (final features, list of intermediates).
"""
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.layers), indices)
# forward pass
x = self.patch_embed(x)
num_stages = len(self.layers)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.layers
else:
stages = self.layers[:max_index + 1]
for i, stage in enumerate(stages):
x = stage(x)
if i in take_indices:
if norm and i == num_stages - 1:
x_inter = self.norm(x)  # apply the final norm to the last intermediate
else:
x_inter = x
x_inter = x_inter.permute(0, 3, 1, 2).contiguous()
intermediates.append(x_inter)
if intermediates_only:
return intermediates
x = self.norm(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.layers), indices)
self.layers = self.layers[:max_index + 1] # truncate blocks
if prune_norm:
self.norm = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.patch_embed(x)
x = self.layers(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('state_dict', state_dict)
native_checkpoint = 'head.fc.weight' in state_dict
out_dict = {}
import re
for k, v in state_dict.items():
if any([n in k for n in ('relative_position_index', 'relative_coords_table', 'attn_mask')]):
continue # skip buffers that should not be persistent
if 'patch_embed.proj.weight' in k:
_, _, H, W = model.patch_embed.proj.weight.shape
if v.shape[-2] != H or v.shape[-1] != W:
v = resample_patch_embed(
v,
(H, W),
interpolation='bicubic',
antialias=True,
verbose=True,
)
if not native_checkpoint:
# original (MSFT) checkpoint: remap layer/head names to the timm layout; native timm checkpoints skip this
k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k)
k = k.replace('head.', 'head.fc.')
out_dict[k] = v
return out_dict
def _create_swin_transformer_v2(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 1, 1))))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
SwinTransformerV2, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8),
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head.fc',
'license': 'mit', **kwargs
}
default_cfgs = generate_default_cfgs({
'swinv2_base_window12to16_192to256.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to16_192to256_22kto1k_ft.pth',
),
'swinv2_base_window12to24_192to384.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to24_192to384_22kto1k_ft.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
'swinv2_large_window12to16_192to256.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to16_192to256_22kto1k_ft.pth',
),
'swinv2_large_window12to24_192to384.ms_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to24_192to384_22kto1k_ft.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
'swinv2_tiny_window8_256.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window8_256.pth',
),
'swinv2_tiny_window16_256.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window16_256.pth',
),
'swinv2_small_window8_256.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window8_256.pth',
),
'swinv2_small_window16_256.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window16_256.pth',
),
'swinv2_base_window8_256.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window8_256.pth',
),
'swinv2_base_window16_256.ms_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window16_256.pth',
),
'swinv2_base_window12_192.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12_192_22k.pth',
num_classes=21841, input_size=(3, 192, 192), pool_size=(6, 6)
),
'swinv2_large_window12_192.ms_in22k': _cfg(
hf_hub_id='timm/',
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12_192_22k.pth',
num_classes=21841, input_size=(3, 192, 192), pool_size=(6, 6)
),
})
@register_model
def swinv2_tiny_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=16, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer_v2(
'swinv2_tiny_window16_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_tiny_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=8, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer_v2(
'swinv2_tiny_window8_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_small_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=16, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer_v2(
'swinv2_small_window16_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_small_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=8, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24))
return _create_swin_transformer_v2(
'swinv2_small_window8_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_base_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32))
return _create_swin_transformer_v2(
'swinv2_base_window16_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_base_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=8, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32))
return _create_swin_transformer_v2(
'swinv2_base_window8_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_base_window12_192(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32))
return _create_swin_transformer_v2(
'swinv2_base_window12_192', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_base_window12to16_192to256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(
window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32),
pretrained_window_sizes=(12, 12, 12, 6))
return _create_swin_transformer_v2(
'swinv2_base_window12to16_192to256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_base_window12to24_192to384(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(
window_size=24, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32),
pretrained_window_sizes=(12, 12, 12, 6))
return _create_swin_transformer_v2(
'swinv2_base_window12to24_192to384', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_large_window12_192(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48))
return _create_swin_transformer_v2(
'swinv2_large_window12_192', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_large_window12to16_192to256(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(
window_size=16, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48),
pretrained_window_sizes=(12, 12, 12, 6))
return _create_swin_transformer_v2(
'swinv2_large_window12to16_192to256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def swinv2_large_window12to24_192to384(pretrained=False, **kwargs) -> SwinTransformerV2:
"""
"""
model_args = dict(
window_size=24, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48),
pretrained_window_sizes=(12, 12, 12, 6))
return _create_swin_transformer_v2(
'swinv2_large_window12to24_192to384', pretrained=pretrained, **dict(model_args, **kwargs))
register_model_deprecations(__name__, {
'swinv2_base_window12_192_22k': 'swinv2_base_window12_192.ms_in22k',
'swinv2_base_window12to16_192to256_22kft1k': 'swinv2_base_window12to16_192to256.ms_in22k_ft_in1k',
'swinv2_base_window12to24_192to384_22kft1k': 'swinv2_base_window12to24_192to384.ms_in22k_ft_in1k',
'swinv2_large_window12_192_22k': 'swinv2_large_window12_192.ms_in22k',
'swinv2_large_window12to16_192to256_22kft1k': 'swinv2_large_window12to16_192to256.ms_in22k_ft_in1k',
'swinv2_large_window12to24_192to384_22kft1k': 'swinv2_large_window12to24_192to384.ms_in22k_ft_in1k',
})
| pytorch-image-models/timm/models/swin_transformer_v2.py/0 | {
"file_path": "pytorch-image-models/timm/models/swin_transformer_v2.py",
"repo_id": "pytorch-image-models",
"token_count": 21258
} |
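A minimal usage sketch for the SwinV2 entrypoints registered above, assuming timm is importable. The set_input_size call mirrors the method defined on SwinTransformerV2 and is shown only as an illustration of changing resolution after construction.
import torch
import timm

# pre-logits features: num_classes=0 leaves the pooled 768-dim output (embed_dim 96 * 2**3)
model = timm.create_model('swinv2_tiny_window8_256', pretrained=False, num_classes=0).eval()
with torch.no_grad():
    feats = model(torch.randn(1, 3, 256, 256))
print(feats.shape)  # torch.Size([1, 768])

# adapt resolution and window size after construction; relative position tables are rebuilt
model.set_input_size(img_size=(384, 384), window_ratio=8)  # window becomes (384 / 4) / 8 = 12
with torch.no_grad():
    feats = model(torch.randn(1, 3, 384, 384))
print(feats.shape)  # torch.Size([1, 768])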
"""Pytorch impl of Aligned Xception 41, 65, 71
This is a correct, from scratch impl of Aligned Xception (Deeplab) models compatible with TF weights at
https://github.com/tensorflow/models/blob/master/research/deeplab/g3doc/model_zoo.md
Hacked together by / Copyright 2020 Ross Wightman
"""
from functools import partial
from typing import List, Dict, Type, Optional
import torch
import torch.nn as nn
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import ClassifierHead, ConvNormAct, DropPath, PadType, create_conv2d, get_norm_act_layer
from timm.layers.helpers import to_3tuple
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['XceptionAligned']
class SeparableConv2d(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: int = 1,
padding: PadType = '',
act_layer: Type[nn.Module] = nn.ReLU,
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
):
super(SeparableConv2d, self).__init__()
self.kernel_size = kernel_size
self.dilation = dilation
# depthwise convolution
self.conv_dw = create_conv2d(
in_chs, in_chs, kernel_size, stride=stride,
padding=padding, dilation=dilation, depthwise=True)
self.bn_dw = norm_layer(in_chs)
self.act_dw = act_layer(inplace=True) if act_layer is not None else nn.Identity()
# pointwise convolution
self.conv_pw = create_conv2d(in_chs, out_chs, kernel_size=1)
self.bn_pw = norm_layer(out_chs)
self.act_pw = act_layer(inplace=True) if act_layer is not None else nn.Identity()
def forward(self, x):
x = self.conv_dw(x)
x = self.bn_dw(x)
x = self.act_dw(x)
x = self.conv_pw(x)
x = self.bn_pw(x)
x = self.act_pw(x)
return x
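# Note (added): SeparableConv2d is the post-activation form used by the standard Xception
# blocks: depthwise conv -> BN -> act, then pointwise 1x1 conv -> BN -> act. PreSeparableConv2d
# below is the pre-activation variant (a single fused norm+act ahead of the depthwise conv, no
# intermediate BN/act), used by the pre-activation 'p' model configs (e.g. xception41p).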
class PreSeparableConv2d(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int = 3,
stride: int = 1,
dilation: int = 1,
padding: PadType = '',
act_layer: Type[nn.Module] = nn.ReLU,
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
first_act: bool = True,
):
super(PreSeparableConv2d, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer=act_layer)
self.kernel_size = kernel_size
self.dilation = dilation
self.norm = norm_act_layer(in_chs, inplace=True) if first_act else nn.Identity()
# depthwise convolution
self.conv_dw = create_conv2d(
in_chs, in_chs, kernel_size, stride=stride,
padding=padding, dilation=dilation, depthwise=True)
# pointwise convolution
self.conv_pw = create_conv2d(in_chs, out_chs, kernel_size=1)
def forward(self, x):
x = self.norm(x)
x = self.conv_dw(x)
x = self.conv_pw(x)
return x
class XceptionModule(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 1,
dilation: int = 1,
pad_type: PadType = '',
start_with_relu: bool = True,
no_skip: bool = False,
act_layer: Type[nn.Module] = nn.ReLU,
norm_layer: Optional[Type[nn.Module]] = None,
drop_path: Optional[nn.Module] = None
):
super(XceptionModule, self).__init__()
out_chs = to_3tuple(out_chs)
self.in_channels = in_chs
self.out_channels = out_chs[-1]
self.no_skip = no_skip
if not no_skip and (self.out_channels != self.in_channels or stride != 1):
self.shortcut = ConvNormAct(
in_chs, self.out_channels, 1, stride=stride, norm_layer=norm_layer, apply_act=False)
else:
self.shortcut = None
separable_act_layer = None if start_with_relu else act_layer
self.stack = nn.Sequential()
for i in range(3):
if start_with_relu:
self.stack.add_module(f'act{i + 1}', act_layer(inplace=i > 0))
self.stack.add_module(f'conv{i + 1}', SeparableConv2d(
in_chs, out_chs[i], 3, stride=stride if i == 2 else 1, dilation=dilation, padding=pad_type,
act_layer=separable_act_layer, norm_layer=norm_layer))
in_chs = out_chs[i]
self.drop_path = drop_path
def forward(self, x):
skip = x
x = self.stack(x)
if self.shortcut is not None:
skip = self.shortcut(skip)
if not self.no_skip:
if self.drop_path is not None:
x = self.drop_path(x)
x = x + skip
return x
class PreXceptionModule(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 1,
dilation: int = 1,
pad_type: PadType = '',
no_skip: bool = False,
act_layer: Type[nn.Module] = nn.ReLU,
norm_layer: Optional[Type[nn.Module]] = None,
drop_path: Optional[nn.Module] = None
):
super(PreXceptionModule, self).__init__()
out_chs = to_3tuple(out_chs)
self.in_channels = in_chs
self.out_channels = out_chs[-1]
self.no_skip = no_skip
if not no_skip and (self.out_channels != self.in_channels or stride != 1):
self.shortcut = create_conv2d(in_chs, self.out_channels, 1, stride=stride)
else:
self.shortcut = nn.Identity()
self.norm = get_norm_act_layer(norm_layer, act_layer=act_layer)(in_chs, inplace=True)
self.stack = nn.Sequential()
for i in range(3):
self.stack.add_module(f'conv{i + 1}', PreSeparableConv2d(
in_chs,
out_chs[i],
3,
stride=stride if i == 2 else 1,
dilation=dilation,
padding=pad_type,
act_layer=act_layer,
norm_layer=norm_layer,
first_act=i > 0,
))
in_chs = out_chs[i]
self.drop_path = drop_path
def forward(self, x):
x = self.norm(x)
skip = x
x = self.stack(x)
if not self.no_skip:
if self.drop_path is not None:
x = self.drop_path(x)
x = x + self.shortcut(skip)
return x
class XceptionAligned(nn.Module):
"""Modified Aligned Xception
"""
def __init__(
self,
block_cfg: List[Dict],
num_classes: int = 1000,
in_chans: int = 3,
output_stride: int = 32,
preact: bool = False,
act_layer: Type[nn.Module] = nn.ReLU,
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
global_pool: str = 'avg',
):
super(XceptionAligned, self).__init__()
assert output_stride in (8, 16, 32)
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
layer_args = dict(act_layer=act_layer, norm_layer=norm_layer)
self.stem = nn.Sequential(*[
ConvNormAct(in_chans, 32, kernel_size=3, stride=2, **layer_args),
create_conv2d(32, 64, kernel_size=3, stride=1) if preact else
ConvNormAct(32, 64, kernel_size=3, stride=1, **layer_args)
])
curr_dilation = 1
curr_stride = 2
self.feature_info = []
self.blocks = nn.Sequential()
module_fn = PreXceptionModule if preact else XceptionModule
net_num_blocks = len(block_cfg)
net_block_idx = 0
for i, b in enumerate(block_cfg):
block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule
b['drop_path'] = DropPath(block_dpr) if block_dpr > 0. else None
b['dilation'] = curr_dilation
if b['stride'] > 1:
name = f'blocks.{i}.stack.conv2' if preact else f'blocks.{i}.stack.act3'
self.feature_info += [dict(num_chs=to_3tuple(b['out_chs'])[-2], reduction=curr_stride, module=name)]
next_stride = curr_stride * b['stride']
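                # If applying this block's stride would exceed the requested output_stride,
                # fold the stride into dilation instead (the 'aligned'/atrous behaviour).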
if next_stride > output_stride:
curr_dilation *= b['stride']
b['stride'] = 1
else:
curr_stride = next_stride
self.blocks.add_module(str(i), module_fn(**b, **layer_args))
self.num_features = self.blocks[-1].out_channels
net_block_idx += 1
self.feature_info += [dict(
num_chs=self.num_features, reduction=curr_stride, module='blocks.' + str(len(self.blocks) - 1))]
self.act = act_layer(inplace=True) if preact else nn.Identity()
self.head_hidden_size = self.num_features
self.head = ClassifierHead(
in_features=self.num_features,
num_classes=num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^blocks\.(\d+)',
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
x = self.act(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _xception(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
XceptionAligned,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True, feature_cls='hook'),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (10, 10),
'crop_pct': 0.903, 'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'stem.0.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'xception65.ra3_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.94,
),
'xception41.tf_in1k': _cfg(hf_hub_id='timm/'),
'xception65.tf_in1k': _cfg(hf_hub_id='timm/'),
'xception71.tf_in1k': _cfg(hf_hub_id='timm/'),
'xception41p.ra3_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.94,
),
'xception65p.ra3_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.94,
),
})
@register_model
def xception41(pretrained=False, **kwargs) -> XceptionAligned:
""" Modified Aligned Xception-41
"""
block_cfg = [
# entry flow
dict(in_chs=64, out_chs=128, stride=2),
dict(in_chs=128, out_chs=256, stride=2),
dict(in_chs=256, out_chs=728, stride=2),
# middle flow
*([dict(in_chs=728, out_chs=728, stride=1)] * 8),
# exit flow
dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2),
dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False),
]
model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1))
return _xception('xception41', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def xception65(pretrained=False, **kwargs) -> XceptionAligned:
""" Modified Aligned Xception-65
"""
block_cfg = [
# entry flow
dict(in_chs=64, out_chs=128, stride=2),
dict(in_chs=128, out_chs=256, stride=2),
dict(in_chs=256, out_chs=728, stride=2),
# middle flow
*([dict(in_chs=728, out_chs=728, stride=1)] * 16),
# exit flow
dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2),
dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False),
]
model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1))
return _xception('xception65', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def xception71(pretrained=False, **kwargs) -> XceptionAligned:
""" Modified Aligned Xception-71
"""
block_cfg = [
# entry flow
dict(in_chs=64, out_chs=128, stride=2),
dict(in_chs=128, out_chs=256, stride=1),
dict(in_chs=256, out_chs=256, stride=2),
dict(in_chs=256, out_chs=728, stride=1),
dict(in_chs=728, out_chs=728, stride=2),
# middle flow
*([dict(in_chs=728, out_chs=728, stride=1)] * 16),
# exit flow
dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2),
dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False),
]
model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1))
return _xception('xception71', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def xception41p(pretrained=False, **kwargs) -> XceptionAligned:
""" Modified Aligned Xception-41 w/ Pre-Act
"""
block_cfg = [
# entry flow
dict(in_chs=64, out_chs=128, stride=2),
dict(in_chs=128, out_chs=256, stride=2),
dict(in_chs=256, out_chs=728, stride=2),
# middle flow
*([dict(in_chs=728, out_chs=728, stride=1)] * 8),
# exit flow
dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2),
dict(in_chs=1024, out_chs=(1536, 1536, 2048), no_skip=True, stride=1),
]
model_args = dict(block_cfg=block_cfg, preact=True, norm_layer=nn.BatchNorm2d)
return _xception('xception41p', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def xception65p(pretrained=False, **kwargs) -> XceptionAligned:
""" Modified Aligned Xception-65 w/ Pre-Act
"""
block_cfg = [
# entry flow
dict(in_chs=64, out_chs=128, stride=2),
dict(in_chs=128, out_chs=256, stride=2),
dict(in_chs=256, out_chs=728, stride=2),
# middle flow
*([dict(in_chs=728, out_chs=728, stride=1)] * 16),
# exit flow
dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2),
dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True),
]
model_args = dict(
block_cfg=block_cfg, preact=True, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1))
return _xception('xception65p', pretrained=pretrained, **dict(model_args, **kwargs))
| pytorch-image-models/timm/models/xception_aligned.py/0 | {
"file_path": "pytorch-image-models/timm/models/xception_aligned.py",
"repo_id": "pytorch-image-models",
"token_count": 7780
} |
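A quick usage sketch for the aligned Xception variants defined above (assuming `timm` is installed; the variant name comes from the `@register_model` functions in this file, and the 299x299 input size matches the default config):
```python
import torch
import timm

# Build a registered variant without pretrained weights (set pretrained=True to download them).
model = timm.create_model('xception41', pretrained=False, num_classes=10)
model.eval()

with torch.no_grad():
    logits = model(torch.randn(1, 3, 299, 299))
print(logits.shape)  # torch.Size([1, 10])
```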
""" PyTorch impl of LaProp optimizer
Code simplified from https://github.com/Z-T-WANG/LaProp-Optimizer, MIT License
Paper: LaProp: Separating Momentum and Adaptivity in Adam, https://arxiv.org/abs/2002.04839
@article{ziyin2020laprop,
title={LaProp: a Better Way to Combine Momentum with Adaptive Gradient},
author={Ziyin, Liu and Wang, Zhikang T and Ueda, Masahito},
journal={arXiv preprint arXiv:2002.04839},
year={2020}
}
"""
from typing import Tuple
from torch.optim import Optimizer
import torch
from ._types import ParamsT
class LaProp(Optimizer):
""" LaProp Optimizer
Paper: LaProp: Separating Momentum and Adaptivity in Adam, https://arxiv.org/abs/2002.04839
"""
def __init__(
self,
params: ParamsT,
lr: float = 4e-4,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-15,
weight_decay: float = 0.,
caution: bool = False,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
caution=caution,
)
super(LaProp, self).__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise RuntimeError('LaProp does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of learning rates
state['exp_avg_lr_1'] = 0.
state['exp_avg_lr_2'] = 0.
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
one_minus_beta2 = 1 - beta2
one_minus_beta1 = 1 - beta1
# Decay the first and second moment running average coefficient
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=one_minus_beta2)
state['exp_avg_lr_1'] = state['exp_avg_lr_1'] * beta1 + one_minus_beta1 * group['lr']
state['exp_avg_lr_2'] = state['exp_avg_lr_2'] * beta2 + one_minus_beta2
# 1 - beta1 ** state['step']
bias_correction1 = state['exp_avg_lr_1'] / group['lr'] if group['lr'] != 0. else 1.
bias_correction2 = state['exp_avg_lr_2']
step_size = 1 / bias_correction1
denom = exp_avg_sq.div(bias_correction2).sqrt_().add_(group['eps'])
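                # LaProp's key difference from Adam: the raw gradient is normalized by the
                # adaptive denominator *before* being accumulated into the momentum buffer,
                # decoupling momentum from adaptivity.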
step_of_this_grad = grad / denom
exp_avg.mul_(beta1).add_(step_of_this_grad, alpha=group['lr'] * one_minus_beta1)
if group['caution']:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
mask = (exp_avg * grad > 0).to(grad.dtype)
mask.div_(mask.mean().clamp_(min=1e-3))
exp_avg = exp_avg * mask
p.add_(exp_avg, alpha=-step_size)
if group['weight_decay'] != 0:
p.add_(p, alpha=-(group['lr'] * group['weight_decay']))
return loss | pytorch-image-models/timm/optim/laprop.py/0 | {
"file_path": "pytorch-image-models/timm/optim/laprop.py",
"repo_id": "pytorch-image-models",
"token_count": 2264
} |
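A minimal training-step sketch for the optimizer above (the toy model and data are placeholders; the import path simply mirrors this file's location):
```python
import torch
import torch.nn as nn
from timm.optim.laprop import LaProp

model = nn.Linear(16, 1)
optimizer = LaProp(model.parameters(), lr=4e-4, betas=(0.9, 0.999), weight_decay=0.01)

x, y = torch.randn(8, 16), torch.randn(8, 1)
loss = nn.functional.mse_loss(model(x), y)
loss.backward()
optimizer.step()      # decoupled weight decay is applied after the update, as in step() above
optimizer.zero_grad()
```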
""" Cosine Scheduler
Cosine LR schedule with warmup, cycle/restarts, noise, k-decay.
Hacked together by / Copyright 2021 Ross Wightman
"""
import logging
import math
import numpy as np
import torch
from typing import List
from .scheduler import Scheduler
_logger = logging.getLogger(__name__)
class CosineLRScheduler(Scheduler):
"""
Cosine decay with restarts.
This is described in the paper https://arxiv.org/abs/1608.03983.
Inspiration from
https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py
k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
t_initial: int,
lr_min: float = 0.,
cycle_mul: float = 1.,
cycle_decay: float = 1.,
cycle_limit: int = 1,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=False,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
k_decay=1.0,
initialize=True,
) -> None:
super().__init__(
optimizer,
param_group_field="lr",
t_in_epochs=t_in_epochs,
noise_range_t=noise_range_t,
noise_pct=noise_pct,
noise_std=noise_std,
noise_seed=noise_seed,
initialize=initialize,
)
assert t_initial > 0
assert lr_min >= 0
if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1:
_logger.warning(
"Cosine annealing scheduler will have no effect on the learning "
"rate since t_initial = t_mul = eta_mul = 1.")
self.t_initial = t_initial
self.lr_min = lr_min
self.cycle_mul = cycle_mul
self.cycle_decay = cycle_decay
self.cycle_limit = cycle_limit
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.warmup_prefix = warmup_prefix
self.k_decay = k_decay
if self.warmup_t:
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def _get_lr(self, t: int) -> List[float]:
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
if self.warmup_prefix:
t = t - self.warmup_t
if self.cycle_mul != 1:
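                # With cycle_mul != 1 cycle lengths grow geometrically, so recover the index
                # of the current cycle in closed form from the geometric series sum.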
i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul))
t_i = self.cycle_mul ** i * self.t_initial
t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial
else:
i = t // self.t_initial
t_i = self.t_initial
t_curr = t - (self.t_initial * i)
gamma = self.cycle_decay ** i
lr_max_values = [v * gamma for v in self.base_values]
k = self.k_decay
if i < self.cycle_limit:
lrs = [
self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 + math.cos(math.pi * t_curr ** k / t_i ** k))
for lr_max in lr_max_values
]
else:
lrs = [self.lr_min for _ in self.base_values]
return lrs
def get_cycle_length(self, cycles=0):
cycles = max(1, cycles or self.cycle_limit)
if self.cycle_mul == 1.0:
t = self.t_initial * cycles
else:
t = int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
return t + self.warmup_t if self.warmup_prefix else t | pytorch-image-models/timm/scheduler/cosine_lr.py/0 | {
"file_path": "pytorch-image-models/timm/scheduler/cosine_lr.py",
"repo_id": "pytorch-image-models",
"token_count": 2070
} |
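A minimal usage sketch for the scheduler above (epoch counts, optimizer, and learning rates are illustrative; the import path mirrors this file's location):
```python
import torch
from timm.scheduler.cosine_lr import CosineLRScheduler

optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
scheduler = CosineLRScheduler(
    optimizer,
    t_initial=100,       # length of the first cosine cycle, in epochs (t_in_epochs=True)
    lr_min=1e-5,
    warmup_t=5,          # 5 warmup epochs ramping from warmup_lr_init to the base lr
    warmup_lr_init=1e-6,
)

for epoch in range(100):
    # ... train one epoch ...
    scheduler.step(epoch + 1)  # schedulers in this package are stepped with the epoch index
```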
""" JIT scripting/tracing utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import os
import torch
def set_jit_legacy():
""" Set JIT executor to legacy w/ support for op fusion
This is hopefully a temporary need in 1.5/1.5.1/1.6 to restore performance due to changes
in the JIT executor. These API are not supported so could change.
"""
#
assert hasattr(torch._C, '_jit_set_profiling_executor'), "Old JIT behavior doesn't exist!"
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
torch._C._jit_override_can_fuse_on_gpu(True)
#torch._C._jit_set_texpr_fuser_enabled(True)
def set_jit_fuser(fuser):
if fuser == "te":
# default fuser should be == 'te'
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
try:
torch._C._jit_set_nvfuser_enabled(False)
except Exception:
pass
elif fuser == "old" or fuser == "legacy":
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(False)
try:
torch._C._jit_set_nvfuser_enabled(False)
except Exception:
pass
elif fuser == "nvfuser" or fuser == "nvf":
os.environ['PYTORCH_NVFUSER_DISABLE_FALLBACK'] = '1'
#os.environ['PYTORCH_NVFUSER_DISABLE_FMA'] = '1'
#os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_can_fuse_on_cpu()
torch._C._jit_can_fuse_on_gpu()
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_nvfuser_guard_mode(True)
torch._C._jit_set_nvfuser_enabled(True)
else:
assert False, f"Invalid jit fuser ({fuser})"
| pytorch-image-models/timm/utils/jit.py/0 | {
"file_path": "pytorch-image-models/timm/utils/jit.py",
"repo_id": "pytorch-image-models",
"token_count": 1035
} |
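A small sketch of how this helper is typically used before scripting a model (the model here is a placeholder):
```python
import torch
from timm.utils.jit import set_jit_fuser

set_jit_fuser("te")  # or "legacy" / "nvfuser", matching the branches above

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU())
scripted = torch.jit.script(model)
```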
# Web Browser Automation with Agents 🤖🌐
[[open-in-colab]]
In this notebook, we'll create an **agent-powered web browser automation system**! This system can navigate websites, interact with elements, and extract information automatically.
The agent will be able to:
- [x] Navigate to web pages
- [x] Click on elements
- [x] Search within pages
- [x] Handle popups and modals
- [x] Extract information
Let's set up this system step by step!
First, run these lines to install the required dependencies:
```bash
pip install smolagents selenium helium pillow -q
```
Let's import our required libraries and set up environment variables:
```python
from io import BytesIO
from time import sleep
import helium
from dotenv import load_dotenv
from PIL import Image
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from smolagents import CodeAgent, tool
from smolagents.agents import ActionStep
# Load environment variables
load_dotenv()
```
Now let's create our core browser interaction tools that will allow our agent to navigate and interact with web pages:
```python
@tool
def search_item_ctrl_f(text: str, nth_result: int = 1) -> str:
"""
Searches for text on the current page via Ctrl + F and jumps to the nth occurrence.
Args:
text: The text to search for
nth_result: Which occurrence to jump to (default: 1)
"""
elements = driver.find_elements(By.XPATH, f"//*[contains(text(), '{text}')]")
if nth_result > len(elements):
raise Exception(f"Match n°{nth_result} not found (only {len(elements)} matches found)")
result = f"Found {len(elements)} matches for '{text}'."
elem = elements[nth_result - 1]
driver.execute_script("arguments[0].scrollIntoView(true);", elem)
result += f"Focused on element {nth_result} of {len(elements)}"
return result
@tool
def go_back() -> None:
"""Goes back to previous page."""
driver.back()
@tool
def close_popups() -> str:
"""
Closes any visible modal or pop-up on the page. Use this to dismiss pop-up windows!
This does not work on cookie consent banners.
"""
webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()
```
Let's set up our browser with Chrome and configure screenshot capabilities:
```python
# Configure Chrome options
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--force-device-scale-factor=1")
chrome_options.add_argument("--window-size=1000,1350")
chrome_options.add_argument("--disable-pdf-viewer")
chrome_options.add_argument("--window-position=0,0")
# Initialize the browser
driver = helium.start_chrome(headless=False, options=chrome_options)
# Set up screenshot callback
def save_screenshot(memory_step: ActionStep, agent: CodeAgent) -> None:
sleep(1.0) # Let JavaScript animations happen before taking the screenshot
driver = helium.get_driver()
current_step = memory_step.step_number
if driver is not None:
for previous_memory_step in agent.memory.steps: # Remove previous screenshots for lean processing
if isinstance(previous_memory_step, ActionStep) and previous_memory_step.step_number <= current_step - 2:
previous_memory_step.observations_images = None
png_bytes = driver.get_screenshot_as_png()
image = Image.open(BytesIO(png_bytes))
print(f"Captured a browser screenshot: {image.size} pixels")
memory_step.observations_images = [image.copy()] # Create a copy to ensure it persists
# Update observations with current URL
url_info = f"Current url: {driver.current_url}"
memory_step.observations = (
url_info if memory_step.observations is None else memory_step.observations + "\n" + url_info
)
```
Now let's create our web automation agent:
```python
from smolagents import HfApiModel
# Initialize the model
model_id = "meta-llama/Llama-3.3-70B-Instruct" # You can change this to your preferred model
model = HfApiModel(model_id)
# Create the agent
agent = CodeAgent(
tools=[go_back, close_popups, search_item_ctrl_f],
model=model,
additional_authorized_imports=["helium"],
step_callbacks=[save_screenshot],
max_steps=20,
verbosity_level=2,
)
# Import helium for the agent
agent.python_executor("from helium import *", agent.state)
```
The agent needs instructions on how to use Helium for web automation. Here are the instructions we'll provide:
```python
helium_instructions = """
You can use helium to access websites. Don't worry about the helium driver, it's already managed.
We've already run "from helium import *"
Then you can go to pages!
Code:
```py
go_to('github.com/trending')
```<end_code>
You can directly click clickable elements by inputting the text that appears on them.
Code:
```py
click("Top products")
```<end_code>
If it's a link:
Code:
```py
click(Link("Top products"))
```<end_code>
If you try to interact with an element and it's not found, you'll get a LookupError.
In general stop your action after each button click to see what happens on your screenshot.
Never try to login in a page.
To scroll up or down, use scroll_down or scroll_up with the number of pixels to scroll as an argument.
Code:
```py
scroll_down(num_pixels=1200) # This will scroll one viewport down
```<end_code>
When you have pop-ups with a cross icon to close, don't try to click the close icon by finding its element or targeting an 'X' element (this most often fails).
Just use your built-in tool `close_popups` to close them:
Code:
```py
close_popups()
```<end_code>
You can use .exists() to check for the existence of an element. For example:
Code:
```py
if Text('Accept cookies?').exists():
click('I accept')
```<end_code>
"""
```
Now we can run our agent with a task! Let's try finding information on Wikipedia:
```python
search_request = """
Please navigate to https://en.wikipedia.org/wiki/Chicago and give me a sentence containing the word "1992" that mentions a construction accident.
"""
agent_output = agent.run(search_request + helium_instructions)
print("Final output:")
print(agent_output)
```
You can run different tasks by modifying the request. For example, here's one to help me find out whether I should work harder:
```python
github_request = """
I'm trying to find how hard I have to work to get a repo in github.com/trending.
Can you navigate to the profile for the top author of the top trending repo, and give me their total number of commits over the last year?
"""
agent_output = agent.run(github_request + helium_instructions)
print("Final output:")
print(agent_output)
```
The system is particularly effective for tasks like:
- Data extraction from websites
- Web research automation
- UI testing and verification
- Content monitoring | smolagents/docs/source/en/examples/web_browser.md/0 | {
"file_path": "smolagents/docs/source/en/examples/web_browser.md",
"repo_id": "smolagents",
"token_count": 2146
} |
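Building on the web-browser example above, other requests follow the same pattern of appending `helium_instructions`. For instance, a content-monitoring style task (the URL and wording here are only illustrative):
```python
monitoring_request = """
Please navigate to https://en.wikipedia.org/wiki/Main_Page and tell me the title of today's featured article.
"""

agent_output = agent.run(monitoring_request + helium_instructions)
print("Final output:")
print(agent_output)
```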
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Text-to-SQL
[[open-in-colab]]
In this tutorial, we'll see how to implement an agent that leverages SQL using `smolagents`.
> Let's start with the golden question: why not keep it simple and use a standard text-to-SQL pipeline?
A standard text-to-SQL pipeline is brittle, since the generated SQL query can be incorrect. Even worse, the query could be incorrect yet not raise an error, instead returning wrong or useless output without any alarm.
👉 Instead, an agent system can critically inspect the output and decide whether the query needs to be changed, which gives it a huge performance boost.
Let's build this agent! 💪
First, we set up the SQL environment:
```py
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
Float,
insert,
inspect,
text,
)
engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
# create city SQL table
table_name = "receipts"
receipts = Table(
table_name,
metadata_obj,
Column("receipt_id", Integer, primary_key=True),
Column("customer_name", String(16), primary_key=True),
Column("price", Float),
Column("tip", Float),
)
metadata_obj.create_all(engine)
rows = [
{"receipt_id": 1, "customer_name": "Alan Payne", "price": 12.06, "tip": 1.20},
{"receipt_id": 2, "customer_name": "Alex Mason", "price": 23.86, "tip": 0.24},
{"receipt_id": 3, "customer_name": "Woodrow Wilson", "price": 53.43, "tip": 5.43},
{"receipt_id": 4, "customer_name": "Margaret James", "price": 21.11, "tip": 1.00},
]
for row in rows:
stmt = insert(receipts).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
```
### Build the agent
Now let's make our SQL table retrievable by a tool.
The tool's description attribute will be embedded in the LLM's prompt by the agent system: it tells the LLM how to use the tool. This is where we want to describe the SQL table.
```py
inspector = inspect(engine)
columns_info = [(col["name"], col["type"]) for col in inspector.get_columns("receipts")]
table_description = "Columns:\n" + "\n".join([f" - {name}: {col_type}" for name, col_type in columns_info])
print(table_description)
```
```text
Columns:
- receipt_id: INTEGER
- customer_name: VARCHAR(16)
- price: FLOAT
- tip: FLOAT
```
Now let's build our tool. It needs the following (read the [tool doc](../tutorials/tools) for more detail):
- A docstring with an `Args:` section listing the arguments.
- Type hints on both inputs and output.
```py
from smolagents import tool
@tool
def sql_engine(query: str) -> str:
"""
Allows you to perform SQL queries on the table. Returns a string representation of the result.
The table is named 'receipts'. Its description is as follows:
Columns:
- receipt_id: INTEGER
- customer_name: VARCHAR(16)
- price: FLOAT
- tip: FLOAT
Args:
query: The query to perform. This should be correct SQL.
"""
output = ""
with engine.connect() as con:
rows = con.execute(text(query))
for row in rows:
output += "\n" + str(row)
return output
```
Now let's create an agent that leverages this tool.
We use the `CodeAgent`, smolagents' main agent class: an agent that writes its actions in code and can iterate on previous outputs following the ReAct framework.
The model is the LLM that powers the agent system. `HfApiModel` lets you call LLMs through HF's Inference API, either serverless or via a dedicated endpoint, but you could also use any proprietary API.
```py
from smolagents import CodeAgent, HfApiModel
agent = CodeAgent(
tools=[sql_engine],
model=HfApiModel("meta-llama/Meta-Llama-3.1-8B-Instruct"),
)
agent.run("Can you give me the name of the client who got the most expensive receipt?")
```
### Level 2: table joins
Now let's make it more challenging! We want our agent to handle joins across several tables.
So let's create a second table recording the waiter's name for each receipt_id!
```py
table_name = "waiters"
receipts = Table(
table_name,
metadata_obj,
Column("receipt_id", Integer, primary_key=True),
Column("waiter_name", String(16), primary_key=True),
)
metadata_obj.create_all(engine)
rows = [
{"receipt_id": 1, "waiter_name": "Corey Johnson"},
{"receipt_id": 2, "waiter_name": "Michael Watts"},
{"receipt_id": 3, "waiter_name": "Michael Watts"},
{"receipt_id": 4, "waiter_name": "Margaret James"},
]
for row in rows:
stmt = insert(receipts).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
```
Since we changed the tables, we update the description of our `sql_engine` tool with the new table's information so the LLM can make proper use of it.
```py
updated_description = """Allows you to perform SQL queries on the table. Beware that this tool's output is a string representation of the execution output.
It can use the following tables:"""
inspector = inspect(engine)
for table in ["receipts", "waiters"]:
columns_info = [(col["name"], col["type"]) for col in inspector.get_columns(table)]
table_description = f"Table '{table}':\n"
table_description += "Columns:\n" + "\n".join([f" - {name}: {col_type}" for name, col_type in columns_info])
updated_description += "\n\n" + table_description
print(updated_description)
```
Since this request is a bit harder than the previous one, we'll switch the LLM engine to the more powerful [Qwen/Qwen2.5-Coder-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct)!
```py
sql_engine.description = updated_description
agent = CodeAgent(
tools=[sql_engine],
model=HfApiModel("Qwen/Qwen2.5-Coder-32B-Instruct"),
)
agent.run("Which waiter got more total money from tips?")
```
It works straight away! The setup was surprisingly simple, wasn't it?
This example is done! We've touched on these concepts:
- Building new tools.
- Updating a tool's description.
- Switching to a stronger LLM to help the agent reason.
✅ Now you can go build the text-to-SQL system you've always dreamed of! ✨ | smolagents/docs/source/hi/examples/text_to_sql.md/0 | {
"file_path": "smolagents/docs/source/hi/examples/text_to_sql.md",
"repo_id": "smolagents",
"token_count": 5208
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Agents - Guided tour
[[open-in-colab]]
In this guided tour, you will learn how to build an agent, how to run it, and how to customize it to make it work better for your use case.
> [!TIP]
> Translator's note: the Chinese term of art for "agent" is 智能体; the original translation kept the English word "agent" for a smoother reading experience.
> [!TIP]
> The Chinese community has published an introduction and hands-on walkthrough video about smolagents (source: [Issue#80](https://github.com/huggingface/smolagents/issues/80)); you can watch it [here](https://www.youtube.com/watch?v=wwN3oAugc4c)!
### Building your agent
To initialize a minimal agent, you need at least these two arguments:
- `model`, a text-generation model to power your agent - because the agent is different from a simple LLM: it is a system that uses an LLM as its engine. You can use any of these options:
- [`TransformersModel`] uses a pre-initialized `transformers` pipeline to run inference on your local machine
- [`HfApiModel`] leverages a `huggingface_hub.InferenceClient` under the hood
- [`LiteLLMModel`] lets you call 100+ different models through [LiteLLM](https://docs.litellm.ai/)!
- `tools`, a list of `Tools` that the agent can use to solve the task. It can be an empty list. You can also add the default toolbox on top of your `tools` list by defining the optional argument `add_base_tools=True`.
Once you have these two arguments, `tools` and `model`, you can create an agent and run it. You can use any LLM you like, whether through the [Hugging Face API](https://huggingface.co/docs/api-inference/en/index), [transformers](https://github.com/huggingface/transformers/), [ollama](https://ollama.com/), or [LiteLLM](https://www.litellm.ai/).
<hfoptions id="Pick an LLM">
<hfoption id="Hugging Face API">
The Hugging Face API is free to use without a token, but it has a rate limit.
To access gated models or raise your rate limit with a PRO account, you need to set the environment variable `HF_TOKEN` or pass a `token` variable when initializing `HfApiModel`.
```python
from smolagents import CodeAgent, HfApiModel
model_id = "meta-llama/Llama-3.3-70B-Instruct"
model = HfApiModel(model_id=model_id, token="<YOUR_HUGGINGFACEHUB_API_TOKEN>")
agent = CodeAgent(tools=[], model=model, add_base_tools=True)
agent.run(
"Could you give me the 118th number in the Fibonacci sequence?",
)
```
</hfoption>
<hfoption id="本地Transformers模型">
```python
# !pip install smolagents[transformers]
from smolagents import CodeAgent, TransformersModel
model_id = "meta-llama/Llama-3.2-3B-Instruct"
model = TransformersModel(model_id=model_id)
agent = CodeAgent(tools=[], model=model, add_base_tools=True)
agent.run(
"Could you give me the 118th number in the Fibonacci sequence?",
)
```
</hfoption>
<hfoption id="OpenAI或Anthropic API">
要使用 `LiteLLMModel`,您需要设置环境变量 `ANTHROPIC_API_KEY` 或 `OPENAI_API_KEY`,或者在初始化时传递 `api_key` 变量。
```python
# !pip install smolagents[litellm]
from smolagents import CodeAgent, LiteLLMModel
model = LiteLLMModel(model_id="anthropic/claude-3-5-sonnet-latest", api_key="YOUR_ANTHROPIC_API_KEY") # 也可以使用 'gpt-4o'
agent = CodeAgent(tools=[], model=model, add_base_tools=True)
agent.run(
"Could you give me the 118th number in the Fibonacci sequence?",
)
```
</hfoption>
<hfoption id="Ollama">
```python
# !pip install smolagents[litellm]
from smolagents import CodeAgent, LiteLLMModel
model = LiteLLMModel(
model_id="ollama_chat/llama3.2", # 这个模型对于 agent 行为来说有点弱
api_base="http://localhost:11434", # 如果需要可以替换为远程 open-ai 兼容服务器
api_key="YOUR_API_KEY" # 如果需要可以替换为 API key
num_ctx=8192 # https://huggingface.co/spaces/NyxKrage/LLM-Model-VRAM-Calculator
)
agent = CodeAgent(tools=[], model=model, add_base_tools=True)
agent.run(
"Could you give me the 118th number in the Fibonacci sequence?",
)
```
</hfoption>
</hfoptions>
#### CodeAgent and ToolCallingAgent
[`CodeAgent`] is our default agent. It writes and executes Python code snippets at each step.
By default, execution happens in your local environment.
This should be safe because the only functions that can be called are the tools you provided (especially if they are only Hugging Face tools) and a set of predefined safe functions such as `print` or functions from the `math` module, so you're already limited in what can be executed.
The Python interpreter also doesn't allow imports outside of a safe list by default, so the most obvious attacks shouldn't be an issue.
You can authorize additional imports by passing the authorized modules as a list of strings to the `additional_authorized_imports` argument when initializing your [`CodeAgent`]:
```py
from smolagents import CodeAgent
agent = CodeAgent(tools=[], model=model, additional_authorized_imports=['requests', 'bs4'])
agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?")
```
> [!WARNING]
> The LLM can generate arbitrary code that will then be executed: do not add any unsafe imports!
Execution will stop if the generated code tries to perform an illegal operation or raises a regular Python error.
You can also use the [E2B code executor](https://e2b.dev/docs#what-is-e2-b) instead of a local Python interpreter: first [set the `E2B_API_KEY` environment variable](https://e2b.dev/dashboard?tab=keys), then pass `use_e2b_executor=True` when initializing the agent.
> [!TIP]
> Learn more about code execution [in this tutorial](tutorials/secure_code_execution).
We also support the widely used way of writing actions as JSON-like blobs: [`ToolCallingAgent`]. It works much like [`CodeAgent`], of course without `additional_authorized_imports` since it doesn't execute code:
```py
from smolagents import ToolCallingAgent
agent = ToolCallingAgent(tools=[], model=model)
agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?")
```
### Inspecting an agent run
Here are a few useful attributes to inspect what happened after a run:
- `agent.logs` stores the fine-grained logs of the agent. At every step of the agent's run, everything gets stored in a dictionary that is then appended to `agent.logs`.
- Running `agent.write_memory_to_messages()` creates an inner memory of the agent's logs for the LLM to view, as a list of chat messages. This method goes over each step of the log and only stores what it's interested in as messages: for instance, it saves the system prompt and task as separate messages, then for each step it stores the LLM output as one message and the tool call output as another. Use this if you want a higher-level view - but not every log will be transcribed by this method.
## Tools
A tool is an atomic function used by an agent. To be used by an LLM, it also needs a few attributes that constitute its API and will be used to describe to the LLM how to call this tool:
- A name
- A description
- Input types and descriptions
- An output type
You can check, for instance, the [`PythonInterpreterTool`]: it has a name, a description, input descriptions, an output type, and a `forward` method that performs the action.
When the agent is initialized, the tool attributes are used to generate a tool description that is embedded into the agent's system prompt. This lets the agent know which tools it can use and why.
### Default toolbox
Transformers comes with a default toolbox for empowering agents, which you can add to your agent upon initialization with the argument `add_base_tools = True`:
- **DuckDuckGo web search**: performs a web search using the DuckDuckGo browser.
- **Python code interpreter**: runs LLM-generated Python code in a secure environment. This tool is only added to [`ToolCallingAgent`] when it is initialized with `add_base_tools=True`, since code-based agents can already natively execute Python code
- **Transcriber**: a speech-to-text pipeline built on Whisper-Turbo that transcribes audio to text.
You can manually use a tool by calling the [`load_tool`] function and passing the task to perform.
```python
from smolagents import DuckDuckGoSearchTool
search_tool = DuckDuckGoSearchTool()
print(search_tool("Who's the current president of Russia?"))
```
### Create a new tool
You can create your own tool for use cases not covered by the default tools from Hugging Face.
For example, let's create a tool that returns the most downloaded model for a given task from the Hub.
You'll start with the code below.
```python
from huggingface_hub import list_models
task = "text-classification"
most_downloaded_model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
print(most_downloaded_model.id)
```
This code can quickly be converted into a tool, just by wrapping it in a function and adding the `tool` decorator:
This is not the only way to build a tool: you can directly define it as a subclass of [`Tool`], which gives you more flexibility, for instance the possibility to initialize heavy class attributes.
Let's see how both options work:
<hfoptions id="Build a tool">
<hfoption id="Decorate a function with @tool">
```py
from smolagents import tool
@tool
def model_download_tool(task: str) -> str:
"""
This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub.
It returns the name of the checkpoint.
Args:
task: The task for which to get the download count.
"""
most_downloaded_model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
return most_downloaded_model.id
```
The function needs:
- A clear name. The name should be descriptive enough of what this tool does to help the LLM that powers the agent. Since this tool returns the model with the most downloads for a task, let's name it `model_download_tool`.
- Type hints on both inputs and output
- A description that includes an 'Args:' part where each argument is described (without a type indication this time, it will be pulled from the type hint). Just like the tool name, this description is an instruction manual for the LLM powering your agent, so do not neglect it.
All these elements will be automatically embedded into the agent's system prompt upon initialization: so strive to make them as clear as possible!
> [!TIP]
> This definition format is the same as tool schemas used in `apply_chat_template`; the only difference is the added `tool` decorator: read more about our tool-use API [here](https://huggingface.co/blog/unified-tool-use#passing-tools-to-a-chat-template).
</hfoption>
<hfoption id="Subclass Tool">
```py
from smolagents import Tool
class ModelDownloadTool(Tool):
name = "model_download_tool"
description = "This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. It returns the name of the checkpoint."
inputs = {"task": {"type": "string", "description": "The task for which to get the download count."}}
output_type = "string"
def forward(self, task: str) -> str:
most_downloaded_model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
return most_downloaded_model.id
```
The subclass needs the following attributes:
- A clear `name`. The name should be descriptive enough of what this tool does to help the LLM that powers the agent. Since this tool returns the model with the most downloads for a task, let's name it `model_download_tool`.
- A `description`. Like the `name`, this description is an instruction manual for the LLM powering your agent, so do not neglect it.
- Input types and descriptions
- An output type
All these attributes will be automatically embedded into the agent's system prompt upon initialization: so strive to make them as clear as possible!
</hfoption>
</hfoptions>
Then you can directly initialize your agent:
```py
from smolagents import CodeAgent, HfApiModel
agent = CodeAgent(tools=[model_download_tool], model=HfApiModel())
agent.run(
"Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?"
)
```
You'll get the following logs:
```text
╭──────────────────────────────────────── New run ─────────────────────────────────────────╮
│ │
│ Can you give me the name of the model that has the most downloads in the 'text-to-video' │
│ task on the Hugging Face Hub? │
│ │
╰─ HfApiModel - Qwen/Qwen2.5-Coder-32B-Instruct ───────────────────────────────────────────╯
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ Step 0 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
╭─ Executing this code: ───────────────────────────────────────────────────────────────────╮
│ 1 model_name = model_download_tool(task="text-to-video") │
│ 2 print(model_name) │
╰──────────────────────────────────────────────────────────────────────────────────────────╯
Execution logs:
ByteDance/AnimateDiff-Lightning
Out: None
[Step 0: Duration 0.27 seconds| Input tokens: 2,069 | Output tokens: 60]
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ Step 1 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
╭─ Executing this code: ───────────────────────────────────────────────────────────────────╮
│ 1 final_answer("ByteDance/AnimateDiff-Lightning") │
╰──────────────────────────────────────────────────────────────────────────────────────────╯
Out - Final answer: ByteDance/AnimateDiff-Lightning
[Step 1: Duration 0.10 seconds| Input tokens: 4,288 | Output tokens: 148]
Out[20]: 'ByteDance/AnimateDiff-Lightning'
```
> [!TIP]
> Read more about tools in the [dedicated tutorial](./tutorials/tools#what-is-a-tool-and-how-to-build-one).
## Multi-agents
Multi-agent systems were introduced with Microsoft's framework [Autogen](https://huggingface.co/papers/2308.08155).
In this type of framework, several agents work together to solve your task instead of only one.
It empirically yields better performance on most benchmarks. The reason is conceptually simple: for many tasks, rather than using a do-it-all system, you would prefer to specialize units on sub-tasks. Here, having agents with separate tool sets and memories enables efficient specialization. For instance, why fill the memory of a code-generating agent with all the content of the webpages visited by a web-search agent? It's better to keep them separate.
You can easily build hierarchical multi-agent systems with `smolagents`.
To do so, encapsulate the agent in a [`ManagedAgent`] object. This object needs the arguments `agent`, `name`, and `description`, which will then be embedded in the manager agent's system prompt so it knows how to call this managed agent, just as we do for tools.
Here's an example of making an agent that manages a specific web-search agent using our [`DuckDuckGoSearchTool`]:
```py
from smolagents import CodeAgent, HfApiModel, DuckDuckGoSearchTool, ManagedAgent
model = HfApiModel()
web_agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=model)
managed_web_agent = ManagedAgent(
agent=web_agent,
name="web_search",
description="Runs web searches for you. Give it your query as an argument."
)
manager_agent = CodeAgent(
tools=[], model=model, managed_agents=[managed_web_agent]
)
manager_agent.run("Who is the CEO of Hugging Face?")
```
> [!TIP]
> For an in-depth example of an efficient multi-agent implementation, see [how we pushed our multi-agent system to the top of the GAIA leaderboard](https://huggingface.co/blog/beating-gaia).
## Talk with your agent and visualize its thoughts in a cool Gradio interface
You can use `GradioUI` to interactively submit tasks to your agent and observe its thought and execution process; here is an example:
```py
from smolagents import (
load_tool,
CodeAgent,
HfApiModel,
GradioUI
)
# Import tool from Hub
image_generation_tool = load_tool("m-ric/text-to-image")
model = HfApiModel(model_id)
# Initialize the agent with the image generation tool
agent = CodeAgent(tools=[image_generation_tool], model=model)
GradioUI(agent).launch()
```
Under the hood, when the user types a new answer, the agent is relaunched with `agent.run(user_request, reset=False)`.
The `reset=False` flag means the agent's memory is not flushed before this new task starts, which lets the conversation continue.
You can also use this `reset=False` argument to keep the conversation going in any other agentic application.
## Next steps
For more in-depth usage, you will want to check out our tutorials:
- [an explanation of how our code agents work](./tutorials/secure_code_execution)
- [this guide on how to build good agents](./tutorials/building_good_agents).
- [an in-depth guide on tool usage](./tutorials/tools).
| smolagents/docs/source/zh/guided_tour.md/0 | {
"file_path": "smolagents/docs/source/zh/guided_tour.md",
"repo_id": "smolagents",
"token_count": 9318
} |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "1.9.0.dev0"
from .agent_types import * # noqa: I001
from .agents import * # Above noqa avoids a circular dependency due to cli.py
from .default_tools import *
from .e2b_executor import *
from .gradio_ui import *
from .local_python_executor import *
from .memory import *
from .models import *
from .monitoring import *
from .tools import *
from .utils import *
from .cli import *
| smolagents/src/smolagents/__init__.py/0 | {
"file_path": "smolagents/src/smolagents/__init__.py",
"repo_id": "smolagents",
"token_count": 303
} |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import base64
import importlib.metadata
import importlib.util
import inspect
import json
import re
import textwrap
import types
from functools import lru_cache
from io import BytesIO
from typing import TYPE_CHECKING, Any, Dict, Tuple, Union
if TYPE_CHECKING:
from smolagents.memory import AgentLogger
__all__ = ["AgentError"]
@lru_cache
def _is_package_available(package_name: str) -> bool:
try:
importlib.metadata.version(package_name)
return True
except importlib.metadata.PackageNotFoundError:
return False
@lru_cache
def _is_pillow_available():
return importlib.util.find_spec("PIL") is not None
BASE_BUILTIN_MODULES = [
"collections",
"datetime",
"itertools",
"math",
"queue",
"random",
"re",
"stat",
"statistics",
"time",
"unicodedata",
]
class AgentError(Exception):
"""Base class for other agent-related exceptions"""
def __init__(self, message, logger: "AgentLogger"):
super().__init__(message)
self.message = message
logger.log(f"[bold red]{message}[/bold red]", level="ERROR")
def dict(self) -> Dict[str, str]:
return {"type": self.__class__.__name__, "message": str(self.message)}
class AgentParsingError(AgentError):
"""Exception raised for errors in parsing in the agent"""
pass
class AgentExecutionError(AgentError):
"""Exception raised for errors in execution in the agent"""
pass
class AgentMaxStepsError(AgentError):
"""Exception raised for errors in execution in the agent"""
pass
class AgentGenerationError(AgentError):
"""Exception raised for errors in generation in the agent"""
pass
def make_json_serializable(obj: Any) -> Any:
"""Recursive function to make objects JSON serializable"""
if obj is None:
return None
elif isinstance(obj, (str, int, float, bool)):
# Try to parse string as JSON if it looks like a JSON object/array
if isinstance(obj, str):
try:
if (obj.startswith("{") and obj.endswith("}")) or (obj.startswith("[") and obj.endswith("]")):
parsed = json.loads(obj)
return make_json_serializable(parsed)
except json.JSONDecodeError:
pass
return obj
elif isinstance(obj, (list, tuple)):
return [make_json_serializable(item) for item in obj]
elif isinstance(obj, dict):
return {str(k): make_json_serializable(v) for k, v in obj.items()}
elif hasattr(obj, "__dict__"):
# For custom objects, convert their __dict__ to a serializable format
return {"_type": obj.__class__.__name__, **{k: make_json_serializable(v) for k, v in obj.__dict__.items()}}
else:
# For any other type, convert to string
return str(obj)
def parse_json_blob(json_blob: str) -> Dict[str, str]:
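    # Keep only the outermost {...} span of the blob before parsing; escaped double quotes are
    # downgraded to single quotes to survive sloppy LLM output.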
try:
first_accolade_index = json_blob.find("{")
last_accolade_index = [a.start() for a in list(re.finditer("}", json_blob))][-1]
json_blob = json_blob[first_accolade_index : last_accolade_index + 1].replace('\\"', "'")
json_data = json.loads(json_blob, strict=False)
return json_data
except json.JSONDecodeError as e:
place = e.pos
if json_blob[place - 1 : place + 2] == "},\n":
raise ValueError(
"JSON is invalid: you probably tried to provide multiple tool calls in one action. PROVIDE ONLY ONE TOOL CALL."
)
raise ValueError(
f"The JSON blob you used is invalid due to the following error: {e}.\n"
f"JSON blob was: {json_blob}, decoding failed on that specific part of the blob:\n"
f"'{json_blob[place - 4 : place + 5]}'."
)
except Exception as e:
raise ValueError(f"Error in parsing the JSON blob: {e}")
def parse_code_blobs(code_blob: str) -> str:
"""Parses the LLM's output to get any code blob inside. Will return the code directly if it's code."""
pattern = r"```(?:py|python)?\n(.*?)\n```"
matches = re.findall(pattern, code_blob, re.DOTALL)
if len(matches) == 0:
try: # Maybe the LLM outputted a code blob directly
ast.parse(code_blob)
return code_blob
except SyntaxError:
pass
if "final" in code_blob and "answer" in code_blob:
raise ValueError(
f"""
Your code snippet is invalid, because the regex pattern {pattern} was not found in it.
Here is your code snippet:
{code_blob}
It seems like you're trying to return the final answer, you can do it as follows:
Code:
```py
final_answer("YOUR FINAL ANSWER HERE")
```<end_code>""".strip()
)
raise ValueError(
f"""
Your code snippet is invalid, because the regex pattern {pattern} was not found in it.
Here is your code snippet:
{code_blob}
Make sure to include code with the correct pattern, for instance:
Thoughts: Your thoughts
Code:
```py
# Your python code here
```<end_code>""".strip()
)
return "\n\n".join(match.strip() for match in matches)
def parse_json_tool_call(json_blob: str) -> Tuple[str, Union[str, None]]:
json_blob = json_blob.replace("```json", "").replace("```", "")
tool_call = parse_json_blob(json_blob)
tool_name_key, tool_arguments_key = None, None
for possible_tool_name_key in ["action", "tool_name", "tool", "name", "function"]:
if possible_tool_name_key in tool_call:
tool_name_key = possible_tool_name_key
for possible_tool_arguments_key in [
"action_input",
"tool_arguments",
"tool_args",
"parameters",
]:
if possible_tool_arguments_key in tool_call:
tool_arguments_key = possible_tool_arguments_key
if tool_name_key is not None:
if tool_arguments_key is not None:
return tool_call[tool_name_key], tool_call[tool_arguments_key]
else:
return tool_call[tool_name_key], None
error_msg = "No tool name key found in tool call!" + f" Tool call: {json_blob}"
raise AgentParsingError(error_msg)
MAX_LENGTH_TRUNCATE_CONTENT = 20000
def truncate_content(content: str, max_length: int = MAX_LENGTH_TRUNCATE_CONTENT) -> str:
if len(content) <= max_length:
return content
else:
return (
content[: max_length // 2]
+ f"\n..._This content has been truncated to stay below {max_length} characters_...\n"
+ content[-max_length // 2 :]
)
class ImportFinder(ast.NodeVisitor):
def __init__(self):
self.packages = set()
def visit_Import(self, node):
for alias in node.names:
# Get the base package name (before any dots)
base_package = alias.name.split(".")[0]
self.packages.add(base_package)
def visit_ImportFrom(self, node):
if node.module: # for "from x import y" statements
# Get the base package name (before any dots)
base_package = node.module.split(".")[0]
self.packages.add(base_package)
def get_method_source(method):
"""Get source code for a method, including bound methods."""
if isinstance(method, types.MethodType):
method = method.__func__
return get_source(method)
def is_same_method(method1, method2):
"""Compare two methods by their source code."""
try:
source1 = get_method_source(method1)
source2 = get_method_source(method2)
# Remove method decorators if any
source1 = "\n".join(line for line in source1.split("\n") if not line.strip().startswith("@"))
source2 = "\n".join(line for line in source2.split("\n") if not line.strip().startswith("@"))
return source1 == source2
except (TypeError, OSError):
return False
def is_same_item(item1, item2):
"""Compare two class items (methods or attributes) for equality."""
if callable(item1) and callable(item2):
return is_same_method(item1, item2)
else:
return item1 == item2
def instance_to_source(instance, base_cls=None):
"""Convert an instance to its class source code representation."""
cls = instance.__class__
class_name = cls.__name__
# Start building class lines
class_lines = []
if base_cls:
class_lines.append(f"class {class_name}({base_cls.__name__}):")
else:
class_lines.append(f"class {class_name}:")
# Add docstring if it exists and differs from base
if cls.__doc__ and (not base_cls or cls.__doc__ != base_cls.__doc__):
class_lines.append(f' """{cls.__doc__}"""')
# Add class-level attributes
class_attrs = {
name: value
for name, value in cls.__dict__.items()
if not name.startswith("__")
and not callable(value)
and not (base_cls and hasattr(base_cls, name) and getattr(base_cls, name) == value)
}
for name, value in class_attrs.items():
if isinstance(value, str):
if "\n" in value:
class_lines.append(f' {name} = """{value}"""')
else:
class_lines.append(f' {name} = "{value}"')
else:
class_lines.append(f" {name} = {repr(value)}")
if class_attrs:
class_lines.append("")
# Add methods
methods = {
name: func
for name, func in cls.__dict__.items()
if callable(func)
and not (
base_cls and hasattr(base_cls, name) and getattr(base_cls, name).__code__.co_code == func.__code__.co_code
)
}
for name, method in methods.items():
method_source = get_source(method)
# Clean up the indentation
method_lines = method_source.split("\n")
first_line = method_lines[0]
indent = len(first_line) - len(first_line.lstrip())
method_lines = [line[indent:] for line in method_lines]
method_source = "\n".join([" " + line if line.strip() else line for line in method_lines])
class_lines.append(method_source)
class_lines.append("")
# Find required imports using ImportFinder
import_finder = ImportFinder()
import_finder.visit(ast.parse("\n".join(class_lines)))
required_imports = import_finder.packages
# Build final code with imports
final_lines = []
# Add base class import if needed
if base_cls:
final_lines.append(f"from {base_cls.__module__} import {base_cls.__name__}")
# Add discovered imports
for package in required_imports:
final_lines.append(f"import {package}")
if final_lines: # Add empty line after imports
final_lines.append("")
# Add the class code
final_lines.extend(class_lines)
return "\n".join(final_lines)
def get_source(obj) -> str:
"""Get the source code of a class or callable object (e.g.: function, method).
First attempts to get the source code using `inspect.getsource`.
In a dynamic environment (e.g.: Jupyter, IPython), if this fails,
falls back to retrieving the source code from the current interactive shell session.
Args:
obj: A class or callable object (e.g.: function, method)
Returns:
str: The source code of the object, dedented and stripped
Raises:
TypeError: If object is not a class or callable
OSError: If source code cannot be retrieved from any source
ValueError: If source cannot be found in IPython history
Note:
TODO: handle Python standard REPL
"""
if not (isinstance(obj, type) or callable(obj)):
raise TypeError(f"Expected class or callable, got {type(obj)}")
inspect_error = None
try:
return textwrap.dedent(inspect.getsource(obj)).strip()
except OSError as e:
# let's keep track of the exception to raise it if all further methods fail
inspect_error = e
try:
import IPython
shell = IPython.get_ipython()
if not shell:
raise ImportError("No active IPython shell found")
all_cells = "\n".join(shell.user_ns.get("In", [])).strip()
if not all_cells:
raise ValueError("No code cells found in IPython session")
tree = ast.parse(all_cells)
for node in ast.walk(tree):
if isinstance(node, (ast.ClassDef, ast.FunctionDef)) and node.name == obj.__name__:
return textwrap.dedent("\n".join(all_cells.split("\n")[node.lineno - 1 : node.end_lineno])).strip()
raise ValueError(f"Could not find source code for {obj.__name__} in IPython history")
except ImportError:
# IPython is not available, let's just raise the original inspect error
raise inspect_error
except ValueError as e:
# IPython is available but we couldn't find the source code, let's raise the error
raise e from inspect_error
def encode_image_base64(image):
buffered = BytesIO()
image.save(buffered, format="PNG")
return base64.b64encode(buffered.getvalue()).decode("utf-8")
def make_image_url(base64_image):
return f"data:image/png;base64,{base64_image}"
| smolagents/src/smolagents/utils.py/0 | {
"file_path": "smolagents/src/smolagents/utils.py",
"repo_id": "smolagents",
"token_count": 5527
} |
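A small illustration of the parsing helpers defined above (the LLM output string is made up for the example):
```python
from smolagents.utils import parse_code_blobs, truncate_content

llm_output = "Thoughts: compute it.\nCode:\n```py\nresult = 21 * 2\nfinal_answer(result)\n```<end_code>"
print(parse_code_blobs(llm_output))  # -> "result = 21 * 2\nfinal_answer(result)"

print(len(truncate_content("x" * 50_000)))  # long content is truncated around the middle
```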
[package]
name = "grpc-metadata"
version = "0.1.0"
edition = "2021"
[dependencies]
opentelemetry = "^0.20"
tonic = "^0.10"
tracing = "^0.1"
tracing-opentelemetry = "^0.21"
| text-generation-inference/backends/grpc-metadata/Cargo.toml/0 | {
"file_path": "text-generation-inference/backends/grpc-metadata/Cargo.toml",
"repo_id": "text-generation-inference",
"token_count": 83
} |
use std::path::PathBuf;
use thiserror::Error;
use text_generation_router::server;
#[derive(Debug, Error)]
pub enum TensorRtLlmBackendError {
#[error("Provided engine folder {0} doesn't exist")]
EngineFolderDoesntExists(PathBuf),
#[error("Provided executorWorker binary path {0} doesn't exist")]
ExecutorWorkerNotFound(PathBuf),
#[error("TensorRT-LLM Runtime error: {0}")]
Runtime(String),
#[error("Tokenizer error: {0}")]
Tokenizer(String),
#[error("Argument validation error: {0}")]
ArgumentValidation(String),
#[error("WebServer error: {0}")]
WebServer(#[from] server::WebServerError),
#[error("Tokio runtime failed to start: {0}")]
Tokio(#[from] std::io::Error),
}
| text-generation-inference/backends/trtllm/src/errors.rs/0 | {
"file_path": "text-generation-inference/backends/trtllm/src/errors.rs",
"repo_id": "text-generation-inference",
"token_count": 285
} |
[package]
name = "text-generation-router-v3"
description = "Text Generation Webserver"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true
[lib]
path = "src/lib.rs"
[[bin]]
name = "text-generation-router"
path = "src/main.rs"
[dependencies]
async-trait = "0.1.74"
async-stream = "0.3.5"
axum = { version = "0.7", features = ["json"] }
axum-tracing-opentelemetry = "0.16"
text-generation-router = { path = "../../router" }
clap = { version = "4.4.5", features = ["derive", "env"] }
grpc-metadata = { path = "../grpc-metadata" }
futures = "0.3.28"
hf-hub = { workspace = true }
jsonschema = { version = "0.28.0" }
metrics = { workspace = true }
metrics-exporter-prometheus = { workspace = true }
nohash-hasher = "0.2.0"
opentelemetry = { version = "0.20.0", features = ["rt-tokio"] }
opentelemetry-otlp = "0.13.0"
rand = "0.8.5"
reqwest = { version = "0.11.20", features = [] }
serde = "1.0.188"
serde_json = "1.0.107"
slotmap = "1.0.7"
thiserror = "1.0.48"
tokenizers = { workspace = true }
tokio = { version = "1.32.0", features = [
"rt",
"rt-multi-thread",
"parking_lot",
"signal",
"sync",
] }
tokio-stream = "0.1.14"
tower-http = { version = "0.5.1", features = ["cors"] }
tracing = "0.1.37"
tracing-opentelemetry = "0.21.0"
tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] }
utoipa = { version = "4.2.0", features = ["axum_extras"] }
utoipa-swagger-ui = { version = "6.0.0", features = ["axum"] }
init-tracing-opentelemetry = { version = "0.14.1", features = [
"opentelemetry-otlp",
] }
minijinja = { workspace = true }
minijinja-contrib = { workspace = true }
futures-util = "0.3.30"
regex = "1.10.3"
once_cell = "1.19.0"
image = "0.25.1"
base64 = { workspace = true }
prost = "^0.12"
tonic = "^0.10"
tower = "^0.4"
[build-dependencies]
tonic-build = "0.10.1"
prost-build = "0.12.1"
[dev-dependencies]
criterion = "0.3"
itertools = "0.13"
[features]
default = ["ngrok"]
ngrok = ["text-generation-router/ngrok"]
google = ["text-generation-router/google"]
kserve = ["text-generation-router/kserve"]
[[bench]]
name = "prefix_cache"
harness = false
| text-generation-inference/backends/v3/Cargo.toml/0 | {
"file_path": "text-generation-inference/backends/v3/Cargo.toml",
"repo_id": "text-generation-inference",
"token_count": 911
} |
use std::time::{Duration, Instant};
use text_generation_client::v3::{
Batch, CachedBatch, NextTokenChooserParameters, Request, ShardedClient,
StoppingCriteriaParameters,
};
use text_generation_client::{Chunk, ClientError, Input};
use tokenizers::{Tokenizer, TruncationDirection};
use tokio::sync::{broadcast, mpsc};
const LOREM_IPSUM: &str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.";
#[derive(Debug, Clone)]
pub(crate) struct Prefill {
pub(crate) latency: Duration,
pub(crate) throughput: f64,
}
#[derive(Debug, Clone)]
pub(crate) struct Decode {
pub(crate) latency: Duration,
pub(crate) token_latency: Duration,
pub(crate) throughput: f64,
}
#[derive(Debug)]
pub(crate) enum Message {
Warmup,
Prefill(Prefill),
Decode(Decode),
EndRun,
EndBatch,
}
/// Benchmarking task
#[allow(clippy::too_many_arguments)]
pub(crate) async fn generation_task(
tokenizer: Tokenizer,
batch_size: Vec<u32>,
sequence_length: u32,
decode_length: u32,
top_n_tokens: Option<u32>,
n_runs: usize,
warmups: usize,
parameters: NextTokenChooserParameters,
client: ShardedClient,
run_sender: mpsc::Sender<Result<Message, ClientError>>,
mut shutdown_receiver: broadcast::Receiver<()>,
_shutdown_guard_sender: mpsc::Sender<()>,
) {
// End task if a message is received on shutdown_receiver
// _shutdown_guard_sender will be dropped once the task is finished
tokio::select! {
res = generate_runs(tokenizer, batch_size, sequence_length, decode_length, top_n_tokens, n_runs, warmups, parameters, client, run_sender.clone()) => {
if let Err(err) = res {
run_sender.send(Err(err)).await.unwrap_or(());
}
},
_ = shutdown_receiver.recv() => {}
}
}
/// Benchmark prefill/decode
#[allow(clippy::too_many_arguments)]
async fn generate_runs(
tokenizer: Tokenizer,
batch_size: Vec<u32>,
sequence_length: u32,
decode_length: u32,
top_n_tokens: Option<u32>,
n_runs: usize,
warmups: usize,
parameters: NextTokenChooserParameters,
mut client: ShardedClient,
run_sender: mpsc::Sender<Result<Message, ClientError>>,
) -> Result<(), ClientError> {
// Create a dummy sequence
let sequence = create_sequence(sequence_length, tokenizer);
for b in batch_size {
// Warmups on batch size
for _ in 0..warmups {
let (_, decode_batch) = prefill(
sequence.clone(),
sequence_length,
b,
decode_length,
parameters.clone(),
top_n_tokens,
&mut client,
)
.await?;
let _ = decode(decode_batch, &mut client).await?;
// Send warmup message
run_sender.send(Ok(Message::Warmup)).await.unwrap_or(());
}
for _ in 0..n_runs {
let (prefill, decode_batch) = prefill(
sequence.clone(),
sequence_length,
b,
decode_length,
parameters.clone(),
top_n_tokens,
&mut client,
)
.await?;
// Send prefill message
run_sender
.send(Ok(Message::Prefill(prefill)))
.await
.unwrap_or(());
let decode = decode(decode_batch, &mut client).await?;
// Send decode message
run_sender
.send(Ok(Message::Decode(decode)))
.await
.unwrap_or(());
// Send run ended message
run_sender.send(Ok(Message::EndRun)).await.unwrap_or(());
}
// Batch ended
run_sender.send(Ok(Message::EndBatch)).await.unwrap_or(());
}
Ok(())
}
// Run a prefill step
async fn prefill(
sequence: String,
sequence_length: u32,
batch_size: u32,
decode_length: u32,
parameters: NextTokenChooserParameters,
top_n_tokens: Option<u32>,
client: &mut ShardedClient,
) -> Result<(Prefill, CachedBatch), ClientError> {
// Create requests
let requests = (0..batch_size)
.map(|id| Request {
id: id.into(),
prefill_logprobs: false,
input_chunks: Some(Input {
chunks: vec![Chunk::Text(sequence.clone()).into()],
}),
inputs: sequence.clone(),
truncate: sequence_length,
add_special_tokens: true,
parameters: Some(parameters.clone()),
stopping_parameters: Some(StoppingCriteriaParameters {
max_new_tokens: decode_length,
stop_sequences: vec![],
                ignore_eos_token: true, // Will not stop even if an eos token is generated
}),
top_n_tokens: top_n_tokens.unwrap_or(0),
blocks: vec![],
slots: vec![],
cache_len: 0,
chunk_len: None,
adapter_id: None,
})
.collect();
let batch = Batch {
id: 0,
requests,
size: batch_size,
max_tokens: batch_size * (sequence_length + decode_length),
max_blocks: 0,
};
// Run prefill
let start_time = Instant::now();
let (_, decode_batch, _) = client.prefill(batch.clone(), None).await?;
// Get latency
let latency = start_time.elapsed();
// Compute throughput from latency and batch size
let throughput = (batch_size * sequence_length) as f64 / latency.as_secs_f64();
// Decode batch cannot be empty
let decode_batch = decode_batch.expect("decode_batch is None. This is a bug.");
let step = Prefill {
latency,
throughput,
};
Ok((step, decode_batch))
}
/// Run a full decode
async fn decode(batch: CachedBatch, client: &mut ShardedClient) -> Result<Decode, ClientError> {
let mut decode_length = 0;
let batch_size = batch.size;
let start_time = Instant::now();
// Full decode over decode length
let mut next_batch = Some(batch);
while let Some(batch) = next_batch {
let result = client.decode(vec![batch]).await?;
next_batch = result.1;
decode_length += 1;
}
// Get latency
let latency = start_time.elapsed();
let token_latency = latency / decode_length;
// Compute throughput from latency, batch size and decode length
let throughput = (batch_size * decode_length) as f64 / latency.as_secs_f64();
let step = Decode {
latency,
token_latency,
throughput,
};
Ok(step)
}
/// Create a dummy sequence of the correct length
fn create_sequence(sequence_length: u32, tokenizer: Tokenizer) -> String {
let lorem_ipsum_length = tokenizer.encode(LOREM_IPSUM, true).unwrap().len();
// Repeat lorem ipsum to cover sequence length
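    // Note: `(0..sequence_length).step_by(lorem_ipsum_length).len()` counts how many chunks of
    // `lorem_ipsum_length` tokens are needed to cover `sequence_length` (a ceiling division),
    // so the repeated text roughly covers `sequence_length` tokens before truncation.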
let string_sequence =
LOREM_IPSUM.repeat((0..sequence_length).step_by(lorem_ipsum_length).len());
// Encode sequence
let mut encoding = tokenizer.encode(string_sequence, true).unwrap();
// Truncate to sequence_length
encoding.truncate(sequence_length as usize, 0, TruncationDirection::Left);
// Decode
tokenizer.decode(encoding.get_ids(), false).unwrap()
}
| text-generation-inference/benchmark/src/generation.rs/0 | {
"file_path": "text-generation-inference/benchmark/src/generation.rs",
"repo_id": "text-generation-inference",
"token_count": 3420
} |
import json
import requests
import warnings
from aiohttp import ClientSession, ClientTimeout
from pydantic import ValidationError
from typing import Dict, Optional, List, AsyncIterator, Iterator, Union
from text_generation import DEPRECATION_WARNING
from text_generation.types import (
StreamResponse,
Response,
Request,
Parameters,
Grammar,
CompletionRequest,
Completion,
CompletionComplete,
ChatRequest,
ChatCompletionChunk,
ChatComplete,
Message,
Tool,
)
from text_generation.errors import parse_error
# emit deprecation warnings
warnings.simplefilter("always", DeprecationWarning)
class Client:
"""Client to make calls to a text-generation-inference instance
Example:
```python
>>> from text_generation import Client
>>> client = Client("https://api-inference.huggingface.co/models/bigscience/bloomz")
>>> client.generate("Why is the sky blue?").generated_text
' Rayleigh scattering'
>>> result = ""
>>> for response in client.generate_stream("Why is the sky blue?"):
>>> if not response.token.special:
>>> result += response.token.text
>>> result
' Rayleigh scattering'
```
"""
def __init__(
self,
base_url: str,
headers: Optional[Dict[str, str]] = None,
cookies: Optional[Dict[str, str]] = None,
timeout: int = 10,
):
"""
Args:
base_url (`str`):
text-generation-inference instance base url
headers (`Optional[Dict[str, str]]`):
Additional headers
cookies (`Optional[Dict[str, str]]`):
Cookies to include in the requests
timeout (`int`):
Timeout in seconds
"""
warnings.warn(DEPRECATION_WARNING, DeprecationWarning)
self.base_url = base_url
self.headers = headers
self.cookies = cookies
self.timeout = timeout
def completion(
self,
prompt: str,
frequency_penalty: Optional[float] = None,
max_tokens: Optional[int] = None,
repetition_penalty: Optional[float] = None,
seed: Optional[int] = None,
stream: bool = False,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
stop: Optional[List[str]] = None,
):
"""
Given a prompt, generate a response synchronously
Args:
prompt (`str`):
Prompt
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
max_tokens (`int`):
Maximum number of generated tokens
repetition_penalty (`float`):
                The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
seed (`int`):
Random sampling seed
stream (`bool`):
Stream the response
temperature (`float`):
                The value used to modulate the logits distribution.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation
stop (`List[str]`):
Stop generating tokens if a member of `stop` is generated
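        Example (a minimal usage sketch, not from the original documentation; it assumes
        a local TGI instance at http://localhost:8080 and OpenAI-style `Completion` fields):
        ```python
        >>> from text_generation import Client
        >>> # assumption: a TGI server is running locally; adjust the URL to your deployment
        >>> client = Client("http://localhost:8080")
        >>> complete = client.completion("def hello_world():", max_tokens=32)
        >>> complete.choices[0].text
        ```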
"""
request = CompletionRequest(
model="tgi",
prompt=prompt,
frequency_penalty=frequency_penalty,
max_tokens=max_tokens,
repetition_penalty=repetition_penalty,
seed=seed,
stream=stream,
temperature=temperature,
top_p=top_p,
stop=stop,
)
if not stream:
resp = requests.post(
f"{self.base_url}/v1/completions",
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
)
payload = resp.json()
if resp.status_code != 200:
raise parse_error(resp.status_code, payload)
return Completion(**payload)
else:
return self._completion_stream_response(request)
def _completion_stream_response(self, request):
resp = requests.post(
f"{self.base_url}/v1/completions",
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
stream=True,
)
        # iterate over the stream of Server-Sent Events
for byte_payload in resp.iter_lines():
if byte_payload == b"\n":
continue
payload = byte_payload.decode("utf-8")
if payload.startswith("data:"):
json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
try:
response = CompletionComplete(**json_payload)
yield response
except ValidationError:
                    raise parse_error(resp.status_code, json_payload)
def chat(
self,
messages: List[Message],
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
top_logprobs: Optional[int] = None,
max_tokens: Optional[int] = None,
n: Optional[int] = None,
presence_penalty: Optional[float] = None,
stream: bool = False,
seed: Optional[int] = None,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
tools: Optional[List[Tool]] = None,
tool_prompt: Optional[str] = None,
tool_choice: Optional[str] = None,
stop: Optional[List[str]] = None,
):
"""
        Given a list of messages, generate a response synchronously
Args:
messages (`List[Message]`):
List of messages
repetition_penalty (`float`):
                The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
logit_bias (`List[float]`):
Adjust the likelihood of specified tokens
logprobs (`bool`):
Include log probabilities in the response
top_logprobs (`int`):
Include the `n` most likely tokens at each step
max_tokens (`int`):
Maximum number of generated tokens
n (`int`):
Generate `n` completions
presence_penalty (`float`):
The parameter for presence penalty. 0.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
stream (`bool`):
Stream the response
seed (`int`):
Random sampling seed
temperature (`float`):
                The value used to modulate the logits distribution.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation
tools (`List[Tool]`):
List of tools to use
tool_prompt (`str`):
                A prompt to be added before the tools
tool_choice (`str`):
The tool to use
stop (`List[str]`):
Stop generating tokens if a member of `stop` is generated
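        Example (a minimal usage sketch, not from the original documentation; it assumes
        a local TGI instance at http://localhost:8080):
        ```python
        >>> from text_generation import Client
        >>> from text_generation.types import Message
        >>> # assumption: a TGI server is running locally; adjust the URL to your deployment
        >>> client = Client("http://localhost:8080")
        >>> complete = client.chat(
        >>>     messages=[Message(role="user", content="Tell me a joke")],
        >>>     max_tokens=64,
        >>> )
        >>> complete.choices[0].message.content
        ```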
"""
request = ChatRequest(
model="tgi",
messages=messages,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
logprobs=logprobs,
top_logprobs=top_logprobs,
max_tokens=max_tokens,
n=n,
presence_penalty=presence_penalty,
stream=stream,
seed=seed,
temperature=temperature,
top_p=top_p,
tools=tools,
tool_prompt=tool_prompt,
tool_choice=tool_choice,
stop=stop,
)
if not stream:
resp = requests.post(
f"{self.base_url}/v1/chat/completions",
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
)
payload = resp.json()
if resp.status_code != 200:
raise parse_error(resp.status_code, payload)
return ChatComplete(**payload)
else:
return self._chat_stream_response(request)
def _chat_stream_response(self, request):
resp = requests.post(
f"{self.base_url}/v1/chat/completions",
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
stream=True,
)
        # iterate over the stream of Server-Sent Events
for byte_payload in resp.iter_lines():
if byte_payload == b"\n":
continue
payload = byte_payload.decode("utf-8")
if payload.startswith("data:"):
json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
try:
response = ChatCompletionChunk(**json_payload)
yield response
except ValidationError:
                    raise parse_error(resp.status_code, json_payload)
def generate(
self,
prompt: str,
do_sample: bool = False,
max_new_tokens: int = 20,
best_of: Optional[int] = None,
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: bool = False,
decoder_input_details: bool = False,
top_n_tokens: Optional[int] = None,
grammar: Optional[Grammar] = None,
) -> Response:
"""
Given a prompt, generate the following text
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
best_of (`int`):
                Generate best_of sequences and return the one with the highest token logprobs
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
                The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
                The value used to modulate the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
decoder_input_details (`bool`):
Return the decoder input token logprobs and ids
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
grammar (`Grammar`):
Whether to use a grammar for the generation and the grammar to use. Grammars will constrain the generation
of the text to match a regular expression or JSON schema.
Returns:
Response: generated response
"""
# Validate parameters
parameters = Parameters(
best_of=best_of,
details=True,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
temperature=temperature,
top_k=top_k,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
decoder_input_details=decoder_input_details,
top_n_tokens=top_n_tokens,
grammar=grammar,
)
request = Request(inputs=prompt, stream=False, parameters=parameters)
resp = requests.post(
self.base_url,
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
)
payload = resp.json()
if resp.status_code != 200:
raise parse_error(resp.status_code, payload)
return Response(**payload[0])
def generate_stream(
self,
prompt: str,
do_sample: bool = False,
max_new_tokens: int = 20,
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: bool = False,
top_n_tokens: Optional[int] = None,
grammar: Optional[Grammar] = None,
) -> Iterator[StreamResponse]:
"""
Given a prompt, generate the following stream of tokens
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
                The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
                The value used to modulate the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
grammar (`Grammar`):
Whether to use a grammar for the generation and the grammar to use. Grammars will constrain the generation
of the text to match a regular expression or JSON schema.
Returns:
Iterator[StreamResponse]: stream of generated tokens
"""
# Validate parameters
parameters = Parameters(
best_of=None,
details=True,
decoder_input_details=False,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
temperature=temperature,
top_k=top_k,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
top_n_tokens=top_n_tokens,
grammar=grammar,
)
request = Request(inputs=prompt, stream=True, parameters=parameters)
resp = requests.post(
self.base_url,
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
stream=True,
)
if resp.status_code != 200:
raise parse_error(resp.status_code, resp.json())
# Parse ServerSentEvents
for byte_payload in resp.iter_lines():
# Skip line
if byte_payload == b"\n":
continue
payload = byte_payload.decode("utf-8")
# Event data
if payload.startswith("data:"):
# Decode payload
                json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
# Parse payload
try:
response = StreamResponse(**json_payload)
except ValidationError:
# If we failed to parse the payload, then it is an error payload
raise parse_error(resp.status_code, json_payload)
yield response
class AsyncClient:
"""Asynchronous Client to make calls to a text-generation-inference instance
Example:
```python
>>> from text_generation import AsyncClient
>>> client = AsyncClient("https://api-inference.huggingface.co/models/bigscience/bloomz")
>>> response = await client.generate("Why is the sky blue?")
>>> response.generated_text
' Rayleigh scattering'
>>> result = ""
>>> async for response in client.generate_stream("Why is the sky blue?"):
>>> if not response.token.special:
>>> result += response.token.text
>>> result
' Rayleigh scattering'
```
"""
def __init__(
self,
base_url: str,
headers: Optional[Dict[str, str]] = None,
cookies: Optional[Dict[str, str]] = None,
timeout: int = 10,
):
"""
Args:
base_url (`str`):
text-generation-inference instance base url
headers (`Optional[Dict[str, str]]`):
Additional headers
cookies (`Optional[Dict[str, str]]`):
Cookies to include in the requests
timeout (`int`):
Timeout in seconds
"""
warnings.warn(DEPRECATION_WARNING, DeprecationWarning)
self.base_url = base_url
self.headers = headers
self.cookies = cookies
self.timeout = ClientTimeout(timeout)
async def completion(
self,
prompt: str,
frequency_penalty: Optional[float] = None,
max_tokens: Optional[int] = None,
repetition_penalty: Optional[float] = None,
seed: Optional[int] = None,
stream: bool = False,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
stop: Optional[List[str]] = None,
) -> Union[Completion, AsyncIterator[CompletionComplete]]:
"""
Given a prompt, generate a response asynchronously
Args:
prompt (`str`):
Prompt
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
max_tokens (`int`):
Maximum number of generated tokens
repetition_penalty (`float`):
                The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
seed (`int`):
Random sampling seed
stream (`bool`):
Stream the response
temperature (`float`):
                The value used to modulate the logits distribution.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation
stop (`List[str]`):
Stop generating tokens if a member of `stop` is generated
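        Example (a minimal usage sketch, not from the original documentation; it assumes
        a local TGI instance at http://localhost:8080 and OpenAI-style `Completion` fields):
        ```python
        >>> from text_generation import AsyncClient
        >>> # assumption: a TGI server is running locally; adjust the URL to your deployment
        >>> client = AsyncClient("http://localhost:8080")
        >>> complete = await client.completion("def hello_world():", max_tokens=32)
        >>> complete.choices[0].text
        ```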
"""
request = CompletionRequest(
model="tgi",
prompt=prompt,
frequency_penalty=frequency_penalty,
max_tokens=max_tokens,
repetition_penalty=repetition_penalty,
seed=seed,
stream=stream,
temperature=temperature,
top_p=top_p,
stop=stop,
)
if not stream:
return await self._completion_single_response(request)
else:
return self._completion_stream_response(request)
async def _completion_single_response(self, request):
async with ClientSession(
headers=self.headers, cookies=self.cookies, timeout=self.timeout
) as session:
async with session.post(
f"{self.base_url}/v1/completions", json=request.dict()
) as resp:
payload = await resp.json()
if resp.status != 200:
raise parse_error(resp.status, payload)
return Completion(**payload)
async def _completion_stream_response(self, request):
async with ClientSession(
headers=self.headers, cookies=self.cookies, timeout=self.timeout
) as session:
async with session.post(
f"{self.base_url}/v1/completions", json=request.dict()
) as resp:
async for byte_payload in resp.content:
if byte_payload == b"\n":
continue
payload = byte_payload.decode("utf-8")
if payload.startswith("data:"):
json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
try:
response = CompletionComplete(**json_payload)
yield response
except ValidationError:
raise parse_error(resp.status, json_payload)
async def chat(
self,
messages: List[Message],
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
top_logprobs: Optional[int] = None,
max_tokens: Optional[int] = None,
n: Optional[int] = None,
presence_penalty: Optional[float] = None,
stream: bool = False,
seed: Optional[int] = None,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
tools: Optional[List[Tool]] = None,
tool_prompt: Optional[str] = None,
tool_choice: Optional[str] = None,
stop: Optional[List[str]] = None,
) -> Union[ChatComplete, AsyncIterator[ChatCompletionChunk]]:
"""
Given a list of messages, generate a response asynchronously
Args:
messages (`List[Message]`):
List of messages
repetition_penalty (`float`):
                The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
logit_bias (`List[float]`):
Adjust the likelihood of specified tokens
logprobs (`bool`):
Include log probabilities in the response
top_logprobs (`int`):
Include the `n` most likely tokens at each step
max_tokens (`int`):
Maximum number of generated tokens
n (`int`):
Generate `n` completions
presence_penalty (`float`):
The parameter for presence penalty. 0.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
stream (`bool`):
Stream the response
seed (`int`):
Random sampling seed
temperature (`float`):
                The value used to modulate the logits distribution.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation
tools (`List[Tool]`):
List of tools to use
tool_prompt (`str`):
                A prompt to be added before the tools
tool_choice (`str`):
The tool to use
stop (`List[str]`):
Stop generating tokens if a member of `stop` is generated
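        Example (a minimal streaming sketch, not from the original documentation; it assumes
        a local TGI instance at http://localhost:8080 and OpenAI-style chunk fields):
        ```python
        >>> from text_generation import AsyncClient
        >>> from text_generation.types import Message
        >>> # assumption: a TGI server is running locally; adjust the URL to your deployment
        >>> client = AsyncClient("http://localhost:8080")
        >>> chunks = await client.chat(
        >>>     messages=[Message(role="user", content="Tell me a joke")],
        >>>     max_tokens=64,
        >>>     stream=True,
        >>> )
        >>> async for chunk in chunks:
        >>>     if chunk.choices[0].delta.content is not None:
        >>>         print(chunk.choices[0].delta.content, end="")
        ```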
"""
request = ChatRequest(
model="tgi",
messages=messages,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
logprobs=logprobs,
top_logprobs=top_logprobs,
max_tokens=max_tokens,
n=n,
presence_penalty=presence_penalty,
stream=stream,
seed=seed,
temperature=temperature,
top_p=top_p,
tools=tools,
tool_prompt=tool_prompt,
tool_choice=tool_choice,
stop=stop,
)
if not stream:
return await self._chat_single_response(request)
else:
return self._chat_stream_response(request)
async def _chat_single_response(self, request):
async with ClientSession(
headers=self.headers, cookies=self.cookies, timeout=self.timeout
) as session:
async with session.post(
f"{self.base_url}/v1/chat/completions", json=request.dict()
) as resp:
payload = await resp.json()
if resp.status != 200:
raise parse_error(resp.status, payload)
return ChatComplete(**payload)
async def _chat_stream_response(self, request):
async with ClientSession(
headers=self.headers, cookies=self.cookies, timeout=self.timeout
) as session:
async with session.post(
f"{self.base_url}/v1/chat/completions", json=request.dict()
) as resp:
async for byte_payload in resp.content:
if byte_payload == b"\n":
continue
payload = byte_payload.decode("utf-8")
if payload.startswith("data:"):
payload_data = (
payload.lstrip("data:").rstrip("\n").removeprefix(" ")
)
if payload_data == "[DONE]":
break
json_payload = json.loads(payload_data)
try:
response = ChatCompletionChunk(**json_payload)
yield response
except ValidationError:
raise parse_error(resp.status, json_payload)
async def generate(
self,
prompt: str,
do_sample: bool = False,
max_new_tokens: int = 20,
best_of: Optional[int] = None,
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: bool = False,
decoder_input_details: bool = False,
top_n_tokens: Optional[int] = None,
grammar: Optional[Grammar] = None,
) -> Response:
"""
Given a prompt, generate the following text asynchronously
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
best_of (`int`):
                Generate best_of sequences and return the one with the highest token logprobs
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
                The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
                The value used to modulate the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
decoder_input_details (`bool`):
Return the decoder input token logprobs and ids
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
grammar (`Grammar`):
Whether to use a grammar for the generation and the grammar to use. Grammars will constrain the generation
of the text to match a regular expression or JSON schema.
Returns:
Response: generated response
"""
# Validate parameters
parameters = Parameters(
best_of=best_of,
details=True,
decoder_input_details=decoder_input_details,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
temperature=temperature,
top_k=top_k,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
top_n_tokens=top_n_tokens,
grammar=grammar,
)
request = Request(inputs=prompt, stream=False, parameters=parameters)
async with ClientSession(
headers=self.headers, cookies=self.cookies, timeout=self.timeout
) as session:
async with session.post(self.base_url, json=request.dict()) as resp:
payload = await resp.json()
if resp.status != 200:
raise parse_error(resp.status, payload)
return Response(**payload[0])
async def generate_stream(
self,
prompt: str,
do_sample: bool = False,
max_new_tokens: int = 20,
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: bool = False,
top_n_tokens: Optional[int] = None,
grammar: Optional[Grammar] = None,
) -> AsyncIterator[StreamResponse]:
"""
Given a prompt, generate the following stream of tokens asynchronously
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
                The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
                The value used to modulate the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
grammar (`Grammar`):
Whether to use a grammar for the generation and the grammar to use. Grammars will constrain the generation
of the text to match a regular expression or JSON schema.
Returns:
AsyncIterator[StreamResponse]: stream of generated tokens
"""
# Validate parameters
parameters = Parameters(
best_of=None,
details=True,
decoder_input_details=False,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
temperature=temperature,
top_k=top_k,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
top_n_tokens=top_n_tokens,
grammar=grammar,
)
request = Request(inputs=prompt, stream=True, parameters=parameters)
async with ClientSession(
headers=self.headers, cookies=self.cookies, timeout=self.timeout
) as session:
async with session.post(self.base_url, json=request.dict()) as resp:
if resp.status != 200:
raise parse_error(resp.status, await resp.json())
# Parse ServerSentEvents
async for byte_payload in resp.content:
# Skip line
if byte_payload == b"\n":
continue
payload = byte_payload.decode("utf-8")
# Event data
if payload.startswith("data:"):
# Decode payload
                        json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
# Parse payload
try:
response = StreamResponse(**json_payload)
except ValidationError:
# If we failed to parse the payload, then it is an error payload
raise parse_error(resp.status, json_payload)
yield response
| text-generation-inference/clients/python/text_generation/client.py/0 | {
"file_path": "text-generation-inference/clients/python/text_generation/client.py",
"repo_id": "text-generation-inference",
"token_count": 19241
} |
# Model safety.
[Pytorch uses pickle](https://pytorch.org/docs/master/generated/torch.load.html) by default, meaning that for quite a long while
*every* model using that format could potentially execute unintended code simply by being loaded.
There is a big red warning on Python's pickle documentation page ([link](https://docs.python.org/3/library/pickle.html)), but for quite a while
it was ignored by the community. Now that AI/ML is used far more widely, we need to switch away from this format.
HuggingFace is leading the effort here by creating a new format which contains pure data ([safetensors](https://github.com/huggingface/safetensors))
and by slowly but surely moving all the libraries to use it by default.
The move is intentionally slow so that breaking changes have as little impact on users as possible.
# TGI 2.0
With the release of TGI 2.0, we took the opportunity of this major version increase to break backward compatibility for these pytorch
models (since they are a huge security risk for anyone deploying them).
From now on, TGI will not automatically convert pickle files unless the `--trust-remote-code` flag is set or `TRUST_REMOTE_CODE=true` is present in the environment variables.
This flag is already used for community-defined inference code, and is therefore quite representative of the level of confidence you are giving the model providers.
If you want to use a model that uses pickle but still do not want to trust the authors entirely, we recommend converting it with the Space we made for that purpose.
https://huggingface.co/spaces/safetensors/convert
This Space will create a PR on the original model repository, which you can use directly regardless of whether the original authors merge it. Just use
```
docker run .... --revision refs/pr/#ID # Or use REVISION=refs/pr/#ID in the environment
```
| text-generation-inference/docs/source/basic_tutorials/safety.md/0 | {
"file_path": "text-generation-inference/docs/source/basic_tutorials/safety.md",
"repo_id": "text-generation-inference",
"token_count": 465
} |
# Text Generation Inference
Text Generation Inference (TGI) is a toolkit for deploying and serving Large Language Models (LLMs). TGI enables high-performance text generation for the most popular open-source LLMs, including Llama, Falcon, StarCoder, BLOOM, GPT-NeoX, and T5.

Text Generation Inference implements many optimizations and features, such as:
- Simple launcher to serve most popular LLMs
- Production ready (distributed tracing with Open Telemetry, Prometheus metrics)
- Tensor Parallelism for faster inference on multiple GPUs
- Token streaming using Server-Sent Events (SSE)
- Continuous batching of incoming requests for increased total throughput
- Optimized transformers code for inference using [Flash Attention](https://github.com/HazyResearch/flash-attention) and [Paged Attention](https://github.com/vllm-project/vllm) on the most popular architectures
- Quantization with [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) and [GPT-Q](https://arxiv.org/abs/2210.17323)
- [Safetensors](https://github.com/huggingface/safetensors) weight loading
- Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
- Logits warper (temperature scaling, top-p, top-k, repetition penalty)
- Stop sequences
- Log probabilities
- Fine-tuning Support: Utilize fine-tuned models for specific tasks to achieve higher accuracy and performance.
- [Guidance](conceptual/guidance): Enable function calling and tool-use by forcing the model to generate structured outputs based on your own predefined output schemas.
Text Generation Inference is used in production by multiple projects, such as:
- [Hugging Chat](https://github.com/huggingface/chat-ui), an open-source interface for open-access models, such as Open Assistant and Llama
- [OpenAssistant](https://open-assistant.io/), an open-source community effort to train LLMs in the open
- [nat.dev](http://nat.dev/), a playground to explore and compare LLMs.
| text-generation-inference/docs/source/index.md/0 | {
"file_path": "text-generation-inference/docs/source/index.md",
"repo_id": "text-generation-inference",
"token_count": 562
} |
{
inputs = {
crate2nix = {
url = "github:nix-community/crate2nix";
inputs.nixpkgs.follows = "tgi-nix/nixpkgs";
};
nix-filter.url = "github:numtide/nix-filter";
tgi-nix.url = "github:huggingface/text-generation-inference-nix";
nixpkgs.follows = "tgi-nix/nixpkgs";
flake-utils.url = "github:numtide/flake-utils";
rust-overlay = {
url = "github:oxalica/rust-overlay";
inputs.nixpkgs.follows = "tgi-nix/nixpkgs";
};
};
outputs =
{
self,
crate2nix,
nix-filter,
nixpkgs,
flake-utils,
rust-overlay,
tgi-nix,
}:
flake-utils.lib.eachDefaultSystem (
system:
let
cargoNix = crate2nix.tools.${system}.appliedCargoNix {
name = "tgi";
src = ./.;
additionalCargoNixArgs = [ "--all-features" ];
};
pkgs = import nixpkgs {
inherit system;
inherit (tgi-nix.lib) config;
overlays = [
rust-overlay.overlays.default
tgi-nix.overlays.default
(import nix/overlay.nix)
];
};
crateOverrides = import ./nix/crate-overrides.nix { inherit pkgs nix-filter; };
benchmark = cargoNix.workspaceMembers.text-generation-benchmark.build.override {
inherit crateOverrides;
};
launcher = cargoNix.workspaceMembers.text-generation-launcher.build.override {
inherit crateOverrides;
};
router =
let
routerUnwrapped = cargoNix.workspaceMembers.text-generation-router-v3.build.override {
inherit crateOverrides;
};
packagePath =
with pkgs.python3.pkgs;
makePythonPath [
protobuf
sentencepiece
torch
transformers
];
in
pkgs.writeShellApplication {
name = "text-generation-router";
text = ''
PYTHONPATH="${packagePath}" ${routerUnwrapped}/bin/text-generation-router "$@"
'';
};
server = pkgs.python3.pkgs.callPackage ./nix/server.nix { inherit nix-filter; };
client = pkgs.python3.pkgs.callPackage ./nix/client.nix { };
in
{
checks = {
rust =
with pkgs;
rustPlatform.buildRustPackage {
name = "rust-checks";
src = ./.;
cargoLock = {
lockFile = ./Cargo.lock;
};
buildInputs = [ openssl.dev ];
nativeBuildInputs = [
clippy
pkg-config
protobuf
python3
rustfmt
];
buildPhase = ''
cargo check
'';
checkPhase = ''
cargo fmt -- --check
cargo test -j $NIX_BUILD_CORES
cargo clippy
'';
installPhase = "touch $out";
};
};
formatter = pkgs.nixfmt-rfc-style;
devShells = with pkgs; rec {
default = pure;
pure = mkShell {
buildInputs = [
benchmark
launcher
router
server
];
};
test = mkShell {
buildInputs =
[
benchmark
launcher
router
server
client
openssl.dev
pkg-config
cargo
rustfmt
clippy
]
++ (with python3.pkgs; [
docker
pytest
pytest-asyncio
syrupy
pre-commit
ruff
]);
};
impure = callPackage ./nix/impure-shell.nix { inherit server; };
impureWithCuda = callPackage ./nix/impure-shell.nix {
inherit server;
withCuda = true;
};
impure-flash-attn-v1 = callPackage ./nix/impure-shell.nix {
server = server.override { flash-attn = python3.pkgs.flash-attn-v1; };
};
};
packages = rec {
inherit server;
default = pkgs.writeShellApplication {
name = "text-generation-inference";
runtimeInputs = [
server
router
];
text = ''
${launcher}/bin/text-generation-launcher "$@"
'';
};
dockerImage = pkgs.callPackage nix/docker.nix {
text-generation-inference = default;
};
dockerImageStreamed = pkgs.callPackage nix/docker.nix {
text-generation-inference = default;
stream = true;
};
};
}
);
}
| text-generation-inference/flake.nix/0 | {
"file_path": "text-generation-inference/flake.nix",
"repo_id": "text-generation-inference",
"token_count": 2853
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 5380,
"logprob": 0.0,
"special": false,
"text": "?\n"
},
{
"id": 34564,
"logprob": 0.0,
"special": false,
"text": "Deep"
},
{
"id": 6975,
"logprob": 0.0,
"special": false,
"text": " learning"
},
{
"id": 11,
"logprob": 0.0,
"special": false,
"text": ","
},
{
"id": 1101,
"logprob": -1.0136719,
"special": false,
"text": " also"
},
{
"id": 3967,
"logprob": 0.0,
"special": false,
"text": " known"
},
{
"id": 439,
"logprob": 0.0,
"special": false,
"text": " as"
},
{
"id": 30828,
"logprob": 0.0,
"special": false,
"text": " neural"
},
{
"id": 4009,
"logprob": -0.21923828,
"special": false,
"text": " network"
},
{
"id": 477,
"logprob": -1.4824219,
"special": false,
"text": " or"
}
],
"top_tokens": null
},
"generated_text": "What is deep learning?\nDeep learning, also known as neural network or"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_compressed_tensors_w8a8_int/test_compressed_tensors_w8a8_int_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_compressed_tensors_w8a8_int/test_compressed_tensors_w8a8_int_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 853
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.9306641,
"special": false,
"text": "\n"
},
{
"id": 5618,
"logprob": -2.4550781,
"special": false,
"text": "What"
},
{
"id": 338,
"logprob": -0.5732422,
"special": false,
"text": " is"
},
{
"id": 278,
"logprob": -1.5761719,
"special": false,
"text": " the"
},
{
"id": 4328,
"logprob": -1.5888672,
"special": false,
"text": " difference"
},
{
"id": 1546,
"logprob": -0.026504517,
"special": false,
"text": " between"
},
{
"id": 21784,
"logprob": -1.4287109,
"special": false,
"text": " Deep"
},
{
"id": 29257,
"logprob": -0.15856934,
"special": false,
"text": " Learning"
},
{
"id": 322,
"logprob": -0.17456055,
"special": false,
"text": " and"
},
{
"id": 6189,
"logprob": -0.62646484,
"special": false,
"text": " Machine"
}
],
"top_tokens": null
},
"generated_text": "\nWhat is the difference between Deep Learning and Machine"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq.json",
"repo_id": "text-generation-inference",
"token_count": 868
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 369,
"logprob": -2.1816406,
"special": false,
"text": " for"
},
{
"id": 279,
"logprob": -2.6992188,
"special": false,
"text": " the"
},
{
"id": 220,
"logprob": -3.6308594,
"special": false,
"text": " "
},
{
"id": 679,
"logprob": -1.7900391,
"special": false,
"text": "201"
},
{
"id": 24,
"logprob": -1.3554688,
"special": false,
"text": "9"
},
{
"id": 12,
"logprob": -2.0039062,
"special": false,
"text": "-"
},
{
"id": 2366,
"logprob": -0.4489746,
"special": false,
"text": "202"
},
{
"id": 15,
"logprob": -0.037109375,
"special": false,
"text": "0"
},
{
"id": 2978,
"logprob": -0.8100586,
"special": false,
"text": " school"
},
{
"id": 1060,
"logprob": -0.013015747,
"special": false,
"text": " year"
}
],
"top_tokens": null
},
"generated_text": " for the 2019-2020 school year"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_fp8/test_flash_llama_fp8.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_fp8/test_flash_llama_fp8.json",
"repo_id": "text-generation-inference",
"token_count": 862
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.88378906,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.94921875,
"special": false,
"text": "'m"
},
{
"id": 417,
"logprob": -2.2402344,
"special": false,
"text": " not"
},
{
"id": 2119,
"logprob": -0.3725586,
"special": false,
"text": " sure"
},
{
"id": 13,
"logprob": -1.078125,
"special": false,
"text": ","
},
{
"id": 534,
"logprob": -0.67822266,
"special": false,
"text": " which"
},
{
"id": 310,
"logprob": -1.3837891,
"special": false,
"text": " is"
},
{
"id": 253,
"logprob": -1.7050781,
"special": false,
"text": " the"
},
{
"id": 1682,
"logprob": -0.052001953,
"special": false,
"text": " best"
},
{
"id": 1039,
"logprob": -2.0390625,
"special": false,
"text": " way"
}
]
},
"generated_text": "I'm not sure, which is the best way"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox.json",
"repo_id": "text-generation-inference",
"token_count": 854
} |
{
"choices": [
{
"finish_reason": "stop",
"index": 0,
"logprobs": null,
"message": {
"content": "The image showcases a stunning cityscape, featuring the iconic Statue of Liberty in the foreground. The image displays Lady Liberty's imposing presence, with her towering base standing beside her. Behind the statue, the city's skyline extends across the horizon, adorned with numerous tall buildings, including the Empire State Building and other notable skyscrapers. The water reflecting the sun's rays creates a serene and picturesque scene, emphasizing the beauty and resilience of this global landmark. The sky is a clear, pale blue, adding to the overall tranquility of the scene.",
"name": null,
"role": "assistant",
"tool_calls": null
},
"usage": null
}
],
"created": 1738348090,
"id": "",
"model": "Qwen/Qwen2-VL-7B-Instruct",
"object": "chat.completion",
"system_fingerprint": "3.1.1-dev0-native",
"usage": {
"completion_tokens": 110,
"prompt_tokens": 8736,
"total_tokens": 8846
}
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2_vl/test_flash_qwen2_vl_bay.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2_vl/test_flash_qwen2_vl_bay.json",
"repo_id": "text-generation-inference",
"token_count": 376
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 2,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 284,
"logprob": -1.1679688,
"special": false,
"text": "\n "
},
{
"id": 0,
"logprob": null,
"special": true,
"text": "<|endoftext|>"
}
],
"top_tokens": null
},
"generated_text": "\n "
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq.json",
"repo_id": "text-generation-inference",
"token_count": 259
} |
{
"details": {
"finish_reason": "length",
"generated_tokens": 40,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -0.27416992,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.17016602,
"special": false,
"text": "\n"
},
{
"id": 28737,
"logprob": -2.7109375,
"special": false,
"text": "I"
},
{
"id": 28809,
"logprob": -1.5,
"special": false,
"text": "’"
},
{
"id": 28719,
"logprob": -0.34204102,
"special": false,
"text": "m"
},
{
"id": 459,
"logprob": -1.6914062,
"special": false,
"text": " not"
},
{
"id": 1864,
"logprob": -0.69140625,
"special": false,
"text": " sure"
},
{
"id": 513,
"logprob": -1.6171875,
"special": false,
"text": " if"
},
{
"id": 315,
"logprob": -1.3837891,
"special": false,
"text": " I"
},
{
"id": 541,
"logprob": -1.2226562,
"special": false,
"text": " can"
},
{
"id": 1567,
"logprob": -1.8652344,
"special": false,
"text": " come"
},
{
"id": 582,
"logprob": -0.0070228577,
"special": false,
"text": " up"
},
{
"id": 395,
"logprob": -0.0054092407,
"special": false,
"text": " with"
},
{
"id": 28705,
"logprob": -0.62597656,
"special": false,
"text": " "
},
{
"id": 28770,
"logprob": -0.0035572052,
"special": false,
"text": "3"
},
{
"id": 4842,
"logprob": -0.93603516,
"special": false,
"text": " unique"
},
{
"id": 3085,
"logprob": -0.028411865,
"special": false,
"text": " words"
},
{
"id": 369,
"logprob": -1.0400391,
"special": false,
"text": " that"
},
{
"id": 6685,
"logprob": -0.09710693,
"special": false,
"text": " describe"
},
{
"id": 528,
"logprob": -0.066467285,
"special": false,
"text": " me"
},
{
"id": 28725,
"logprob": -1.0722656,
"special": false,
"text": ","
},
{
"id": 562,
"logprob": -0.33422852,
"special": false,
"text": " but"
},
{
"id": 315,
"logprob": -0.5136719,
"special": false,
"text": " I"
},
{
"id": 28809,
"logprob": -0.8989258,
"special": false,
"text": "’"
},
{
"id": 584,
"logprob": -0.2076416,
"special": false,
"text": "ll"
},
{
"id": 1464,
"logprob": -0.8808594,
"special": false,
"text": " try"
},
{
"id": 28723,
"logprob": -0.88427734,
"special": false,
"text": "."
},
{
"id": 13,
"logprob": -0.91064453,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.08105469,
"special": false,
"text": "\n"
},
{
"id": 28740,
"logprob": -1.8486328,
"special": false,
"text": "1"
},
{
"id": 28723,
"logprob": -0.111572266,
"special": false,
"text": "."
},
{
"id": 23626,
"logprob": -3.15625,
"special": false,
"text": " Creative"
},
{
"id": 13,
"logprob": -0.9194336,
"special": false,
"text": "\n"
},
{
"id": 28750,
"logprob": -0.24841309,
"special": false,
"text": "2"
},
{
"id": 28723,
"logprob": -9.393692e-05,
"special": false,
"text": "."
},
{
"id": 6785,
"logprob": -3.1386719,
"special": false,
"text": " Fun"
},
{
"id": 1780,
"logprob": -0.53564453,
"special": false,
"text": "ny"
},
{
"id": 13,
"logprob": -0.09033203,
"special": false,
"text": "\n"
},
{
"id": 28770,
"logprob": -0.00466156,
"special": false,
"text": "3"
},
{
"id": 28723,
"logprob": -0.00016450882,
"special": false,
"text": "."
}
]
},
"generated_text": "\n\nI’m not sure if I can come up with 3 unique words that describe me, but I’ll try.\n\n1. Creative\n2. Funny\n3."
}
| text-generation-inference/integration-tests/models/__snapshots__/test_lora_mistral/test_lora_mistral_with_customer_support_adapter.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_lora_mistral/test_lora_mistral_with_customer_support_adapter.json",
"repo_id": "text-generation-inference",
"token_count": 3128
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 510,
"logprob": -0.5878906,
"special": false,
"text": "The"
},
{
"id": 3159,
"logprob": -0.5449219,
"special": false,
"text": " word"
},
{
"id": 346,
"logprob": -0.05038452,
"special": false,
"text": " \""
},
{
"id": 6441,
"logprob": -0.002292633,
"special": false,
"text": "mem"
},
{
"id": 70,
"logprob": -1.3828278e-05,
"special": false,
"text": "e"
},
{
"id": 3,
"logprob": -0.0010242462,
"special": false,
"text": "\""
},
{
"id": 369,
"logprob": -0.090270996,
"special": false,
"text": " was"
},
{
"id": 806,
"logprob": -0.12719727,
"special": false,
"text": " first"
},
{
"id": 908,
"logprob": -0.016571045,
"special": false,
"text": " used"
},
{
"id": 275,
"logprob": -0.43432617,
"special": false,
"text": " in"
}
]
},
"generated_text": "The word \"meme\" was first used in"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_neox_sharded/test_neox.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_neox_sharded/test_neox.json",
"repo_id": "text-generation-inference",
"token_count": 857
} |
{
"choices": [
{
"delta": {
"content": null,
"role": "assistant",
"tool_calls": {
"function": {
"arguments": "<|eot_id|>",
"name": null
},
"id": "",
"index": 0,
"type": "function"
}
},
"finish_reason": "stop",
"index": 0,
"logprobs": null
}
],
"created": 1732293246,
"id": "",
"model": "meta-llama/Llama-3.1-8B-Instruct",
"object": "chat.completion.chunk",
"system_fingerprint": "2.4.1-dev0-native",
"usage": null
}
| text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_sea_creatures_stream_required.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_sea_creatures_stream_required.json",
"repo_id": "text-generation-inference",
"token_count": 325
} |
import pytest
@pytest.fixture(scope="module")
def flash_gemma_handle(launcher):
with launcher("google/gemma-2b", num_shard=1) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_gemma(flash_gemma_handle):
await flash_gemma_handle.health(300)
return flash_gemma_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma_simple(flash_gemma, response_snapshot):
response = await flash_gemma.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma_all_params(flash_gemma, response_snapshot):
response = await flash_gemma.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma_load(flash_gemma, generate_load, response_snapshot):
responses = await generate_load(flash_gemma, "Test request", max_new_tokens=10, n=4)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_gemma.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_gemma.py",
"repo_id": "text-generation-inference",
"token_count": 678
} |
import pytest
@pytest.fixture(scope="module")
def flash_mixtral_handle(launcher):
with launcher("mistralai/Mixtral-8x7B-v0.1", num_shard=8) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_mixtral(flash_mixtral_handle):
await flash_mixtral_handle.health(300)
return flash_mixtral_handle.client
@pytest.mark.skip(reason="requires > 4 shards")
@pytest.mark.asyncio
async def test_flash_mixtral(flash_mixtral, response_snapshot):
response = await flash_mixtral.generate(
"What is gradient descent?\n\n", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert (
response.generated_text
== "Gradient descent is an optimization algorithm used to minimize"
)
assert response == response_snapshot
@pytest.mark.skip(reason="requires > 4 shards")
@pytest.mark.asyncio
async def test_flash_mixtral_all_params(flash_mixtral, response_snapshot):
response = await flash_mixtral.generate(
"What is gradient descent?\n\n",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert (
response.generated_text
== "What is gradient descent?\n\nIt seems to me, that if you're"
)
assert response == response_snapshot
@pytest.mark.skip(reason="requires > 4 shards")
@pytest.mark.asyncio
async def test_flash_mixtral_load(flash_mixtral, generate_load, response_snapshot):
responses = await generate_load(
flash_mixtral, "What is gradient descent?\n\n", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert responses[0].details.generated_tokens == 10
assert (
responses[0].generated_text
== "Gradient descent is an optimization algorithm used to minimize"
)
assert all(
[r.generated_text == responses[0].generated_text for r in responses]
), f"{[r.generated_text for r in responses]}"
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_mixtral.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_mixtral.py",
"repo_id": "text-generation-inference",
"token_count": 926
} |
import pytest
import json
from text_generation.types import GrammarType
@pytest.fixture(scope="module")
def non_flash_llama_grammar_handle(launcher):
with launcher(
"TinyLlama/TinyLlama-1.1B-Chat-v1.0",
num_shard=1,
disable_grammar_support=False,
use_flash_attention=False,
) as handle:
yield handle
@pytest.fixture(scope="module")
async def non_flash_llama_grammar(non_flash_llama_grammar_handle):
await non_flash_llama_grammar_handle.health(300)
return non_flash_llama_grammar_handle.client
@pytest.mark.release
@pytest.mark.skip
@pytest.mark.asyncio
async def test_non_flash_llama_grammar_json(non_flash_llama_grammar, response_snapshot):
response = await non_flash_llama_grammar.generate(
"info: david holtz like trees and has two cats. ",
max_new_tokens=100,
decoder_input_details=True,
seed=0,
grammar={
"type": GrammarType.Json,
"value": json.dumps(
{
"type": "object",
"$id": "https://example.com/person.schema.json",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"title": "Person",
"properties": {
"firstName": {
"type": "string",
"description": "The person'''s first name.",
},
"lastName": {
"type": "string",
"description": "The person'''s last name.",
},
"hobby": {
"description": "The person'''s hobby.",
"type": "string",
},
"numCats": {
"description": "The number of cats the person has.",
"type": "integer",
"minimum": 0,
},
},
"required": ["firstName", "lastName", "hobby", "numCats"],
}
),
},
)
assert response.details.generated_tokens == 30
assert (
response.generated_text
== '{"firstName":"David","hobby":"Trees","lastName":"Holtz","numCats":2}'
)
assert response == response_snapshot
| text-generation-inference/integration-tests/models/test_grammar_llama.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_grammar_llama.py",
"repo_id": "text-generation-inference",
"token_count": 1346
} |
import pytest
import requests
import json
@pytest.fixture(scope="module")
def flash_llama_grammar_tools_handle(launcher):
with launcher(
"meta-llama/Meta-Llama-3.1-8B-Instruct",
num_shard=2,
disable_grammar_support=False,
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_grammar_tools(flash_llama_grammar_tools_handle):
await flash_llama_grammar_tools_handle.health(300)
return flash_llama_grammar_tools_handle.client
# tools to be used in the following tests
tools = [
{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "Get the current weather",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"format": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
"description": "The temperature unit to use. Infer this from the users location.",
},
},
"required": ["location", "format"],
"additionalProperties": False,
},
},
},
{
"type": "function",
"function": {
"name": "get_n_day_weather_forecast",
"description": "Get an N-day weather forecast",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"format": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
"description": "The temperature unit to use. Infer this from the users location.",
},
"num_days": {
"type": "integer",
"description": "The number of days to forecast",
},
},
"required": ["location", "format", "num_days"],
"additionalProperties": False,
},
},
},
]
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools(flash_llama_grammar_tools, response_snapshot):
response = await flash_llama_grammar_tools.chat(
max_tokens=100,
seed=1,
tools=tools,
temperature=0.0,
messages=[
{
"role": "system",
"content": "Youre a helpful assistant! Answer the users question best you can.",
},
{
"role": "user",
"content": "What is the weather like in Brooklyn, New York?",
},
],
)
assert response.choices[0].message.content is None
assert response.choices[0].message.tool_calls == [
{
"id": "0",
"type": "function",
"function": {
"description": None,
"name": "get_current_weather",
"arguments": {"format": "celsius", "location": "Brooklyn, New York"},
},
}
]
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_auto(
flash_llama_grammar_tools, response_snapshot
):
response = await flash_llama_grammar_tools.chat(
max_tokens=100,
seed=1,
tools=tools,
temperature=0.0,
tool_choice="auto",
messages=[
{
"role": "system",
"content": "Youre a helpful assistant! Answer the users question best you can.",
},
{
"role": "user",
"content": "What is the weather like in Brooklyn, New York?",
},
],
)
assert response.choices[0].message.content is None
assert response.choices[0].message.tool_calls == [
{
"id": "0",
"type": "function",
"function": {
"description": None,
"name": "get_current_weather",
"arguments": {"format": "celsius", "location": "Brooklyn, New York"},
},
}
]
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_choice(
flash_llama_grammar_tools, response_snapshot
):
response = await flash_llama_grammar_tools.chat(
max_tokens=100,
seed=1,
tools=tools,
temperature=0.0,
tool_choice="get_current_weather",
messages=[
{
"role": "system",
"content": "Youre a helpful assistant! Answer the users question best you can.",
},
{
"role": "user",
"content": "What is the weather like in Brooklyn, New York?",
},
],
)
assert response.choices[0].message.content is None
assert response.choices[0].message.tool_calls == [
{
"id": "0",
"type": "function",
"function": {
"description": None,
"name": "get_current_weather",
"arguments": {"format": "celsius", "location": "Brooklyn, New York"},
},
}
]
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_stream(
flash_llama_grammar_tools, response_snapshot
):
responses = await flash_llama_grammar_tools.chat(
max_tokens=100,
seed=1,
tools=tools,
temperature=0.0,
tool_choice="get_current_weather",
messages=[
{
"role": "system",
"content": "Youre a helpful assistant! Answer the users question best you can.",
},
{
"role": "user",
"content": "What is the weather like in Paris, France?",
},
],
stream=True,
)
count = 0
tool_calls_generated = ""
last_response = None
async for response in responses:
count += 1
tool_calls_generated += response.choices[0].delta.tool_calls.function.arguments
last_response = response
assert response.choices[0].delta.content is None
assert (
tool_calls_generated
== '{"function": {"_name": "get_current_weather", "location": "Paris, France", "format": "celsius"}}<|eot_id|>'
)
assert count == 28
assert last_response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_insufficient_information(
flash_llama_grammar_tools, response_snapshot
):
responses = await flash_llama_grammar_tools.chat(
max_tokens=100,
seed=24,
tools=tools,
tool_choice="auto",
messages=[
{
"role": "system",
"content": "You're a helpful assistant! Answer the users question best you can.",
},
{
"role": "user",
"content": "Who are you?",
},
],
stream=False,
)
assert responses.choices[0].message.tool_calls is None
assert responses.choices[0].message.content == "I am an AI assistant"
assert responses == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_insufficient_information_stream(
flash_llama_grammar_tools, response_snapshot
):
responses = await flash_llama_grammar_tools.chat(
max_tokens=100,
seed=24,
tools=tools,
tool_choice="auto",
messages=[
{
"role": "system",
"content": "You're a helpful assistant! Answer the users question best you can.",
},
{
"role": "user",
"content": "Who are you?",
},
],
stream=True,
)
count = 0
content_generated = ""
last_response = None
async for response in responses:
count += 1
content_generated += response.choices[0].delta.content
last_response = response
assert response.choices[0].delta.tool_calls is None
assert count == 5
assert content_generated == "I am an AI assistant"
assert last_response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_sea_creatures_stream(
flash_llama_grammar_tools, response_snapshot
):
responses = await flash_llama_grammar_tools.chat(
max_tokens=100,
seed=24,
tools=tools,
tool_choice="auto",
messages=[
{
"role": "system",
"content": "You're a helpful assistant! Answer the users question best you can. If the question is not answerable by the tools, just generate a response.",
},
{
"role": "user",
"content": "Tell me a story about 3 sea creatures",
},
],
stream=True,
)
count = 0
content_generated = ""
last_response = None
async for response in responses:
count += 1
content_generated += response.choices[0].delta.content
last_response = response
assert response.choices[0].delta.tool_calls is None
assert count == 62
assert (
content_generated
== "Once upon a time, in the ocean, there lived three sea creatures. There was a wise old octopus named Bob, a mischievous seagull named Sam, and a gentle sea turtle named Luna. They all lived together in a beautiful coral reef, surrounded by colorful fish and swaying sea fans"
)
assert last_response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_sea_creatures_stream_required(
flash_llama_grammar_tools, response_snapshot
):
responses = await flash_llama_grammar_tools.chat(
max_tokens=100,
seed=24,
tools=tools,
tool_choice="required",
messages=[
{
"role": "system",
"content": "You're a helpful assistant! Answer the users question best you can. If the question is not answerable by the tools, just generate a response.",
},
{
"role": "user",
"content": "Tell me a story about 3 sea creatures",
},
],
stream=True,
)
count = 0
tool_calls_generated = ""
last_response = None
async for response in responses:
count += 1
assert response.choices[0].delta.content is None
tool_calls_generated += response.choices[0].delta.tool_calls.function.arguments
last_response = response
assert count == 29
assert (
tool_calls_generated
== '{"function": {"_name": "get_current_weather", "location": "San Francisco, CA", "format": "celsius"}}<|eot_id|>'
)
assert last_response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_sea_creatures_stream_none(
flash_llama_grammar_tools, response_snapshot
):
responses = await flash_llama_grammar_tools.chat(
max_tokens=100,
seed=24,
tools=tools,
tool_choice="none",
messages=[
{
"role": "system",
"content": "You're a helpful assistant! Answer the users question best you can. If the question is not answerable by the tools, just generate a response.",
},
{
"role": "user",
"content": "Tell me a story about 3 sea creatures",
},
],
stream=True,
)
count = 0
content_generated = ""
last_response = None
async for response in responses:
count += 1
content_generated += response.choices[0].delta.content
last_response = response
assert response.choices[0].delta.tool_calls is None
assert count == 100
print(content_generated)
assert (
content_generated
== "Once upon a time, in a vibrant ocean filled with coral reefs and schools of shimmering fish, lived three dear friends: Luna the sea turtle, Finley the friendly fish, and Crusty the wise crab.\n\nLuna was the oldest of the three. She had traveled the world, exploring hidden caves and shipwrecks, and collecting sparkling shells and shiny pebbles. Her shell was a beautiful mosaic of blues and greens, and her gentle eyes twinkled with the secrets of the deep"
)
assert last_response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_grammar_tools_sea_creatures_stream_function_object(
flash_llama_grammar_tools, response_snapshot
):
# using `requests` to send the request until the client library supports tool_choice as a function object
responses = requests.post(
f"{flash_llama_grammar_tools.base_url}/v1/chat/completions",
headers=flash_llama_grammar_tools.headers,
json={
"model": "tgi",
"messages": [
{
"role": "system",
"content": "You're a helpful assistant! Answer the users question best you can. If the question is not answerable by the tools, just generate a response.",
},
{
"role": "user",
"content": "Tell me a story about 3 sea creatures",
},
],
"tools": tools,
"tool_choice": {
"type": "function",
"function": {"name": "get_n_day_weather_forecast"},
},
"seed": 24,
"max_tokens": 100,
"stream": True,
},
stream=True,
)
# iterate over the response in chunks
count = 0
tool_calls_generated = ""
last_response = None
for chunk in responses.iter_content(chunk_size=1024):
if chunk:
count += 1
# remove the "data: " prefix, trailing newline, and split the chunk into individual lines
lines = chunk.decode("utf-8").replace("data: ", "").rstrip("\n").split("\n")
for line in lines:
if line == "[DONE]":
break
response = json.loads(line)
tool_calls_generated += response["choices"][0]["delta"]["tool_calls"][
"function"
]["arguments"]
last_response = response
assert count == 39
assert (
tool_calls_generated
== '{"function": {"_name": "get_n_day_weather_forecast", "location": "San Francisco, CA", "format": "celsius", "num_days":3}}<|eot_id|>'
)
assert last_response == response_snapshot
| text-generation-inference/integration-tests/models/test_tools_llama.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_tools_llama.py",
"repo_id": "text-generation-inference",
"token_count": 7205
} |
# https://www.gutenberg.org/cache/epub/103/pg103.txt
from openai import OpenAI
import os
import requests
if not os.path.exists("pg103.txt"):
response = requests.get("https://www.gutenberg.org/cache/epub/103/pg103.txt")
with open("pg103.txt", "w") as f:
f.write(response.text)
length = 130000
with open("pg103.txt", "r") as f:
data = f.read()
messages = [{"role": "user", "content": data[: length * 4]}]
client = OpenAI(base_url="http://localhost:8000/v1", api_key="w")
completion = client.chat.completions.create(
model="meta-llama/Llama-3.1-8B-Instruct", messages=messages, max_tokens=2
)
| text-generation-inference/load_tests/long_prompt2.py/0 | {
"file_path": "text-generation-inference/load_tests/long_prompt2.py",
"repo_id": "text-generation-inference",
"token_count": 250
} |
eetq_commit := 81e0b14d64088d58ef6acd2c8f3e788d59324407
eetq:
# Clone eetq
pip install packaging
git clone https://github.com/NetEase-FuXi/EETQ.git eetq
build-eetq: eetq
cd eetq && git fetch && git checkout $(eetq_commit) && git submodule update --init --recursive
cd eetq && python setup.py build
install-eetq: build-eetq
cd eetq && python setup.py install
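# Typical invocation (clarifying note, not part of the original Makefile):
# `make install-eetq` resolves the chain install-eetq -> build-eetq -> eetq,
# i.e. it clones the repo, checks out $(eetq_commit), builds, then installs.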
| text-generation-inference/server/Makefile-eetq/0 | {
"file_path": "text-generation-inference/server/Makefile-eetq",
"repo_id": "text-generation-inference",
"token_count": 156
} |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#include "column_remap.cuh"
#include "../util.cuh"
const int SHUF_BLOCKSIZE_X = 256;
const int SHUF_BLOCKSIZE_Y = 16;
__global__ void column_remap_kernel
(
const half* __restrict__ x,
half* __restrict__ x_new,
const int x_width,
const int x_height,
const uint32_t* x_map
)
{
int x_column = SHUF_BLOCKSIZE_X * blockIdx.x + threadIdx.x;
int x_row = SHUF_BLOCKSIZE_Y * blockIdx.y;
int x_stride = x_width;
int x_idx = x_row * x_stride + x_column;
int x_row_end = min(x_row + SHUF_BLOCKSIZE_Y, x_height);
int x_idx_end = x_row_end * x_stride + x_column;
int s_column = x_map[x_column];
int s_idx = x_row * x_stride + s_column;
while (x_idx < x_idx_end)
{
x_new[x_idx] = x[s_idx];
x_idx += x_stride;
s_idx += x_stride;
}
}
// Remap columns in x to correspond to sequential group index before matmul
//
// perform x -> seq_x such that seq_x @ seq_w == x @ w
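//
// Illustrative example (not from the original source): with x_map = {2, 0, 1},
// each output column j of x_new is filled from input column x_map[j], so
// x_new[:, 0] = x[:, 2], x_new[:, 1] = x[:, 0] and x_new[:, 2] = x[:, 1].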
void column_remap_cuda
(
const half* x,
half* x_new,
const int x_height,
const int x_width,
const uint32_t* x_map
)
{
dim3 threads(SHUF_BLOCKSIZE_X, 1, 1);
dim3 blocks
(
(x_width + SHUF_BLOCKSIZE_X - 1) / SHUF_BLOCKSIZE_X,
(x_height + SHUF_BLOCKSIZE_Y - 1) / SHUF_BLOCKSIZE_Y,
1
);
column_remap_kernel<<<blocks, threads>>>(x, x_new, x_width, x_height, x_map);
}
| text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/column_remap.cu/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/column_remap.cu",
"repo_id": "text-generation-inference",
"token_count": 696
} |
#include "q_gemm.cuh"
#include "util.cuh"
#include "matrix_view.cuh"
#include "../config.h"
#include "quant/qdq_2.cuh"
#include "quant/qdq_3.cuh"
#include "quant/qdq_4.cuh"
#include "quant/qdq_5.cuh"
#include "quant/qdq_6.cuh"
#include "quant/qdq_8.cuh"
#define GPTQ_BLOCK_KN_SIZE 128
#define GPTQ_BLOCK_M_SIZE_MAX 8
#define GPTQ_MAX_GROUPS_IN_BLOCK (GPTQ_BLOCK_KN_SIZE / 32)
#define EXL2_BLOCK_KN_SIZE 64
#define EXL2_BLOCK_M_SIZE_MAX 8
#define EXL2_MAX_GROUPS_IN_BLOCK (EXL2_BLOCK_KN_SIZE / 32)
#define CLEAR_N_SIZE 256
#include "q_gemm_kernel.cuh"
#include "q_gemm_kernel_gptq.cuh"
void gemm_half_q_half_cuda_part
(
const half* a,
QMatrix* b,
half* c,
int size_m,
int size_n,
int size_k,
int m_count,
bool clear,
const half* r_weights,
int r_weights_stride,
bool mul_r_weights
)
{
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (!b->is_gptq)
{
dim3 blockDim, gridDim;
blockDim.x = EXL2_BLOCK_KN_SIZE;
blockDim.y = 1;
blockDim.z = 1;
gridDim.x = DIVIDE(size_n, EXL2_BLOCK_KN_SIZE * 4);
gridDim.y = DIVIDE(size_m, m_count);
gridDim.z = DIVIDE(size_k, EXL2_BLOCK_KN_SIZE);
fp_gemm_half_q_half_kernel kernel = pick_gemm_half_q_half_kernel(m_count, r_weights != NULL, mul_r_weights);
kernel<<<gridDim, blockDim, 0, stream>>>
(
a,
b->cuda_q_weight,
b->cuda_q_scale,
b->cuda_q_scale_max,
c,
size_m,
size_n,
size_k,
b->groups,
b->cuda_q_group_map,
b->cuda_q_perm,
b->rows_8,
b->rows_6,
b->rows_5,
b->rows_4,
b->rows_3,
b->rows_2,
clear,
r_weights,
r_weights_stride
);
}
else
{
dim3 blockDim, gridDim;
blockDim.x = GPTQ_BLOCK_KN_SIZE;
blockDim.y = 1;
blockDim.z = 1;
gridDim.x = DIVIDE(size_n, GPTQ_BLOCK_KN_SIZE * 4);
gridDim.y = DIVIDE(size_m, m_count);
gridDim.z = DIVIDE(size_k, GPTQ_BLOCK_KN_SIZE);
fp_gemm_half_q_half_gptq_kernel kernel = pick_gemm_half_q_half_gptq_kernel(m_count, r_weights != NULL, mul_r_weights);
// DBGX((uint64_t) r_weights);
// if (r_weights)
// print_global_mem(r_weights, 1, 1, 1);
// DBGI(r_weights_stride);
kernel<<<gridDim, blockDim, 0, stream>>>
(
a,
b->cuda_q_weight,
b->cuda_gptq_qzeros,
b->cuda_gptq_scales,
c,
size_m,
size_n,
size_k,
b->groups,
b->gptq_groupsize,
b->cuda_q_perm,
b->rows_4,
clear,
r_weights,
r_weights_stride
);
}
}
void gemm_half_q_half_cuda
(
cublasHandle_t cublas_handle,
const half* a,
QMatrix* b,
half* c,
int size_m,
int size_n,
int size_k,
bool clear,
half* temp_dq,
bool force_cuda,
const half* r_weights,
const int r_weights_stride,
bool mul_r_weights
)
{
if (size_m > MAX_Q_GEMM_ROWS && !force_cuda)
{
// Reconstruct FP16 matrix, then cuBLAS
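// (Clarifying note, an assumption not stated in the original: for row counts
// above MAX_Q_GEMM_ROWS the dequantize-then-cuBLAS path tends to amortize
// better than the fused quantized kernels used in the else-branch below.)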
if (!temp_dq) temp_dq = b->temp_dq;
b->reconstruct(temp_dq);
//cublasSetMathMode(cublas_handle, CUBLAS_TENSOR_OP_MATH);
const half alpha = __float2half(1.0f);
const half beta = clear ? __float2half(0.0f) : __float2half(1.0f);
cublasHgemm(cublas_handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
size_n, size_m, size_k,
&alpha, temp_dq, size_n,
a, size_k,
&beta, c, size_n);
//const float alpha = 1.0f;
//const float beta = clear ? 0.0f : 1.0f;
//cublasSgemmEx(cublas_handle,
// CUBLAS_OP_N,
// CUBLAS_OP_N,
// size_n, size_m, size_k,
// &alpha, temp_dq, CUDA_R_16F, size_n,
// a, CUDA_R_16F, size_k,
// &beta, c, CUDA_R_16F, size_n);
//const float alpha = 1.0f;
//const float beta = clear ? 0.0f : 1.0f;
//cublasGemmEx(cublas_handle,
// CUBLAS_OP_N, CUBLAS_OP_N,
// size_n, size_m, size_k,
// &alpha, temp_dq, CUDA_R_16F, size_n,
// a, CUDA_R_16F, size_k,
// &beta, c, CUDA_R_16F, size_n,
// CUDA_R_16F, CUBLAS_GEMM_DFALT_TENSOR_OP);
}
else
{
// Quantized matmul
int block_m_size_max = b->is_gptq ? GPTQ_BLOCK_M_SIZE_MAX : EXL2_BLOCK_M_SIZE_MAX;
int max_chunks = size_m / block_m_size_max;
int last_chunk = max_chunks * block_m_size_max;
int last_chunk_size = size_m - last_chunk;
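// Worked example (illustrative, not from the original source): with
// size_m = 19 and block_m_size_max = 8, max_chunks = 2, last_chunk = 16 and
// last_chunk_size = 3, so rows 0..15 go through the first call below and the
// remaining 3 rows through the second, offset call.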
if (max_chunks)
{
gemm_half_q_half_cuda_part(a, b, c, last_chunk, size_n, size_k, block_m_size_max, clear, r_weights, r_weights_stride, mul_r_weights);
}
if (last_chunk_size)
{
gemm_half_q_half_cuda_part(a + last_chunk * size_k, b, c + last_chunk * size_n, last_chunk_size, size_n, size_k, last_chunk_size, clear, r_weights, r_weights_stride, mul_r_weights);
}
}
}
__global__ void clear_kernel
(
half* __restrict__ c,
const int size_m,
const int size_n
)
{
int m = blockIdx.y;
int n = (blockIdx.x * CLEAR_N_SIZE + threadIdx.x) * 8;
if (n >= size_n) return;
int4* c_ptr = (int4*)(c + m * size_n + n);
*c_ptr = {};
}
void clear_tensor_cuda
(
half* c,
int size_m,
int size_n
)
{
// dim3 blockDim, gridDim;
// blockDim.x = CLEAR_N_SIZE;
// blockDim.y = 1;
// gridDim.x = DIVIDE(size_n / 8, CLEAR_N_SIZE);
// gridDim.y = size_m;
// clear_kernel<<<gridDim, blockDim>>>(c, size_m, size_n);
}
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu",
"repo_id": "text-generation-inference",
"token_count": 3563
} |
[project]
name = "text-generation-server"
version = "2.0.5-dev0"
description = "Text Generation Inference Python gRPC Server"
readme = "README.md"
requires-python = ">=3.9"
authors = [
{name = "Olivier Dehaene", email = "[email protected]"},
{name = "Nicolas Patry", email = "[email protected]"},
]
dependencies = [
"einops>=0.8.0",
"grpc-interceptor>=0.15.4",
"grpcio>=1.67.0",
"grpcio-reflection>=1.67.0",
"grpcio-status>=1.67.0",
"hf-transfer>=0.1.8",
"loguru>=0.7.3",
"numpy>=1.26,<3",
"opentelemetry-api>=1.27.0",
"opentelemetry-exporter-otlp>=1.27.0",
"opentelemetry-instrumentation-grpc>=0.50b0",
"pillow>=11.1.0",
"prometheus-client>=0.21.0",
"protobuf>=5.28.3",
"py-cpuinfo>=9.0.0",
"rich>=13.8.1",
"safetensors>=0.4.5",
"scipy>=1.13.1",
"sentencepiece>=0.2.0",
"tokenizers>=0.20.3",
"typer>=0.15.1",
"transformers>=4.48.0"
]
[project.scripts]
text-generation-server = "text_generation_server.cli:app"
[project.optional-dependencies]
accelerate = [
"accelerate>=1.2.1,<2",
]
bnb = [
"bitsandbytes>=0.45.0",
]
compressed-tensors = [
"compressed-tensors>=0.9.0",
]
peft = [
"peft>=0.14.0",
]
outlines = [
"outlines>=0.1.13",
]
dev = [
"grpcio-tools>=1.51.1,<2.0",
"pytest>=7.3.0,<8"
]
quantize = [
"texttable>=1.6.7,<2",
"datasets>=2.21,<3",
]
moe = [ "moe-kernels" ]
attention = [ "attention-kernels" ]
marlin = [ "marlin-kernels" ]
gen = [
"grpcio-tools>=1.69.0",
"mypy-protobuf>=3.6.0",
]
[tool.uv.sources]
attention-kernels.url = "https://github.com/danieldk/attention-kernels/releases/download/v0.2.0.post2/attention_kernels-0.2.0.post2+cu123torch2.5-cp39-abi3-linux_x86_64.whl"
marlin-kernels = [
{ url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.7/marlin_kernels-0.3.7+cu123torch2.5-cp39-cp39-linux_x86_64.whl", marker = "python_version == '3.9'" },
{ url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.7/marlin_kernels-0.3.7+cu123torch2.5-cp310-cp310-linux_x86_64.whl", marker = "python_version == '3.10'" },
{ url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.7/marlin_kernels-0.3.7+cu123torch2.5-cp311-cp311-linux_x86_64.whl", marker = "python_version == '3.11'" },
{ url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.7/marlin_kernels-0.3.7+cu123torch2.5-cp312-cp312-linux_x86_64.whl", marker = "python_version == '3.12'" },
]
moe-kernels.url = "https://github.com/danieldk/moe-kernels/releases/download/v0.8.2/moe_kernels-0.8.2+cu123torch2.5-cp39-abi3-linux_x86_64.whl"
[tool.pytest.ini_options]
markers = ["private: marks tests as requiring an admin hf token (deselect with '-m \"not private\"')"]
[tool.isort]
profile = "black"
[tool.uv]
package = true
[tool.setuptools.packages.find]
include = ["text_generation_server*"]
| text-generation-inference/server/pyproject.toml/0 | {
"file_path": "text-generation-inference/server/pyproject.toml",
"repo_id": "text-generation-inference",
"token_count": 1427
} |
import torch
from text_generation_server.utils.tokens import (
StopSequenceCriteria,
StoppingCriteria,
FinishReason,
batch_top_tokens,
)
def test_stop_sequence_criteria():
criteria = StopSequenceCriteria("/test;")
assert not criteria("/")
assert not criteria("/test")
assert criteria("/test;")
assert not criteria("/test; ")
def test_stop_sequence_criteria_escape():
criteria = StopSequenceCriteria("<|stop|>")
assert not criteria("<")
assert not criteria("<|stop")
assert criteria("<|stop|>")
assert not criteria("<|stop|> ")
def test_stopping_criteria():
criteria = StoppingCriteria(0, [StopSequenceCriteria("/test;")], max_new_tokens=5)
assert criteria(65827, "/test") == (False, None)
assert criteria(30, ";") == (True, FinishReason.FINISH_REASON_STOP_SEQUENCE)
def test_stopping_criteria_eos():
criteria = StoppingCriteria(0, [StopSequenceCriteria("/test;")], max_new_tokens=5)
assert criteria(1, "") == (False, None)
assert criteria(0, "") == (True, FinishReason.FINISH_REASON_EOS_TOKEN)
def test_stopping_criteria_max():
criteria = StoppingCriteria(0, [StopSequenceCriteria("/test;")], max_new_tokens=5)
assert criteria(1, "") == (False, None)
assert criteria(1, "") == (False, None)
assert criteria(1, "") == (False, None)
assert criteria(1, "") == (False, None)
assert criteria(1, "") == (True, FinishReason.FINISH_REASON_LENGTH)
def test_batch_top_tokens():
top_n_tokens = [0, 2, 3, 4, 5]
top_n_tokens_tensor = torch.tensor(top_n_tokens)
inp_logprobs = torch.tensor([[-1.0, -3.0, -4.0, -2.0, -3.0]] * 5)
accepted_ids = torch.ones_like(top_n_tokens_tensor)
topn_tok_ids, topn_tok_logprobs = batch_top_tokens(
top_n_tokens, top_n_tokens_tensor, inp_logprobs, accepted_ids
)
assert topn_tok_ids[0] == [[]]
assert topn_tok_ids[1] == [[0, 3]]
assert topn_tok_ids[2] == [[0, 3, 1, 4]]
assert topn_tok_ids[3] == [[0, 3, 1, 4]]
assert topn_tok_ids[4] == [[0, 3, 1, 4, 2]]
assert topn_tok_logprobs[0] == [[]]
assert topn_tok_logprobs[1] == [[-1, -2]]
assert topn_tok_logprobs[2] == [[-1, -2, -3, -3]]
assert topn_tok_logprobs[3] == [[-1, -2, -3, -3]]
assert topn_tok_logprobs[4] == [[-1, -2, -3, -3, -4]]
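# Sanity check of the expectations above (illustrative comment): with logprobs
# [-1, -3, -4, -2, -3], the two largest values sit at index 0 (-1) and index 3 (-2),
# which is why topn_tok_ids[1] == [[0, 3]] and topn_tok_logprobs[1] == [[-1, -2]].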
# Now let's make the second member of the batch be speculated (it accepts two tokens)
inp_logprobs = torch.tensor([[-1.0, -3.0, -4.0, -2.0, -3.0]] * 5 * 2)
accepted_ids[1] = 2
topn_tok_ids, topn_tok_logprobs = batch_top_tokens(
top_n_tokens, top_n_tokens_tensor, inp_logprobs, accepted_ids
)
assert topn_tok_ids[0] == [[]]
assert topn_tok_ids[1] == [[0, 3], [0, 3]]
assert topn_tok_ids[2] == [[0, 3, 1, 4]]
assert topn_tok_ids[3] == [[0, 3, 1, 4]]
assert topn_tok_ids[4] == [[0, 3, 1, 4, 2]]
assert topn_tok_logprobs[0] == [[]]
assert topn_tok_logprobs[1] == [[-1, -2], [-1, -2]]
assert topn_tok_logprobs[2] == [[-1, -2, -3, -3]]
assert topn_tok_logprobs[3] == [[-1, -2, -3, -3]]
assert topn_tok_logprobs[4] == [[-1, -2, -3, -3, -4]]
| text-generation-inference/server/tests/utils/test_tokens.py/0 | {
"file_path": "text-generation-inference/server/tests/utils/test_tokens.py",
"repo_id": "text-generation-inference",
"token_count": 1427
} |
from typing import Optional
from contextvars import ContextVar
from contextlib import contextmanager
import flashinfer
import torch
prefill_state: ContextVar[flashinfer.BatchPrefillWithRaggedKVCacheWrapper] = ContextVar(
"prefill_state"
)
prefill_with_paged_kv_state: ContextVar[
flashinfer.BatchPrefillWithPagedKVCacheWrapper
] = ContextVar("prefill_with_paged_kv_state")
decode_state: ContextVar[flashinfer.BatchDecodeWithPagedKVCacheWrapper] = ContextVar(
"decode_state"
)
workspace: Optional[torch.Tensor] = None
def get_workspace(device):
"""Get shared flashinfer workspace."""
global workspace
if workspace is None:
workspace = torch.empty(128 * 1024 * 1024, dtype=torch.uint8, device=device)
return workspace
def create_prefill_with_paged_kv_state(
*,
device: torch.device,
):
"""Create a prefill state that uses the KV cache."""
workspace_buffer = get_workspace(device)
return flashinfer.BatchPrefillWithPagedKVCacheWrapper(
workspace_buffer, kv_layout="NHD", use_cuda_graph=False
)
@contextmanager
def use_prefill_with_paged_kv_state(
*,
state: flashinfer.BatchPrefillWithPagedKVCacheWrapper,
block_tables: torch.Tensor,
cu_seqlens: torch.Tensor,
input_lengths: torch.Tensor,
num_heads: int,
num_kv_heads: int,
head_size: int,
page_size: int,
kv_dtype: torch.dtype,
q_dtype: torch.dtype,
window_left: int,
):
"""
Context manager to set the active flashinfer prefill state to the given
`state` and parameters. This state will be used by all calls to the
`attention` function while the context manager is active.
"""
indptr = torch.zeros(
input_lengths.shape[0] + 1, device=input_lengths.device, dtype=torch.int32
)
# Round up to page size and then calculate the cumulative sum to get
# the indices into the block table.
torch.add(input_lengths, page_size - 1, out=indptr[1:])
indptr[1:].div_(page_size, rounding_mode="floor")
indptr[1:].cumsum_(-1)
# Get the lengths of the last page in a block.
if page_size == 1:
last_page_len = torch.ones(
input_lengths.shape[0], dtype=torch.int32, device=input_lengths.device
)
else:
last_page_len = torch.empty(
input_lengths.shape[0], dtype=torch.int32, device=input_lengths.device
)
torch.sub(input_lengths, 1, out=last_page_len)
last_page_len.remainder_(page_size)
last_page_len += 1
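# Worked example (illustrative, not part of the original source): with
# page_size=16 and input_lengths=[20, 5], the addition above gives [35, 20],
# floor-division by 16 gives [2, 1] pages per sequence and the cumulative sum
# yields indptr=[0, 2, 3]; last_page_len becomes [4, 5], since a 20-token
# sequence fills its last page with 4 tokens and a 5-token sequence fits in one page.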
token = prefill_with_paged_kv_state.set(state)
try:
state.plan(
qo_indptr=cu_seqlens,
paged_kv_indptr=indptr,
paged_kv_indices=block_tables,
paged_kv_last_page_len=last_page_len,
num_qo_heads=num_heads,
num_kv_heads=num_kv_heads,
head_dim=head_size,
kv_data_type=kv_dtype,
q_data_type=q_dtype,
page_size=page_size,
window_left=-1 if window_left is None else window_left,
)
yield
finally:
if token is not None:
prefill_with_paged_kv_state.reset(token)
def create_prefill_state(
*,
device: torch.device,
):
"""Create a prefill state."""
workspace_buffer = get_workspace(device)
return flashinfer.BatchPrefillWithRaggedKVCacheWrapper(
workspace_buffer, kv_layout="NHD", use_cuda_graph=False
)
def create_decode_state(
*,
device: torch.device,
num_heads: int,
num_kv_heads: int,
):
"""Create a decode state."""
workspace_buffer = get_workspace(device)
num_groups = num_heads // num_kv_heads
return flashinfer.BatchDecodeWithPagedKVCacheWrapper(
workspace_buffer,
kv_layout="NHD",
use_cuda_graph=False,
# Taken from https://github.com/flashinfer-ai/flashinfer/blob/33ef95700981ba70f4cab63b8931e562bc795b21/python/flashinfer/decode.py#L57-L60
use_tensor_cores=num_groups not in [1, 2, 4, 8],
)
def create_decode_state_cuda_graphs(
*,
device: torch.device,
block_tables: torch.Tensor,
block_tables_ptr: torch.Tensor,
last_page_len: torch.Tensor,
num_heads: int,
num_kv_heads: int,
):
"""
Create a decode state for use with CUDA Graphs. `block_tables`,
`block_tables_ptr`, and `last_page_len` are used in CUDA Graphs and are
therefore stored as part of the state.
"""
workspace_buffer = get_workspace(device)
num_groups = num_heads // num_kv_heads
return flashinfer.BatchDecodeWithPagedKVCacheWrapper(
workspace_buffer,
kv_layout="NHD",
use_cuda_graph=True,
paged_kv_indices_buffer=block_tables,
paged_kv_indptr_buffer=block_tables_ptr,
paged_kv_last_page_len_buffer=last_page_len,
# Taken from https://github.com/flashinfer-ai/flashinfer/blob/33ef95700981ba70f4cab63b8931e562bc795b21/python/flashinfer/decode.py#L57-L60
use_tensor_cores=num_groups not in [1, 2, 4, 8],
)
@contextmanager
def use_decode_state(
*,
state: flashinfer.BatchDecodeWithPagedKVCacheWrapper,
input_lengths: torch.Tensor,
block_tables: torch.Tensor,
num_heads: int,
num_kv_heads: int,
head_size: int,
page_size: int,
kv_cache_dtype: torch.dtype,
q_dtype: torch.dtype,
window_left: int,
):
"""
Context manager to set the active flashinfer decoding state to the given
`state` and parameters. This state will be used by all calls to the
`paged_attention` function while the context manager is active.
"""
indptr = torch.zeros(
input_lengths.shape[0] + 1, device=input_lengths.device, dtype=torch.int32
)
# Round up to page size and then calculate the cumulative sum to get
# the indices into the block table.
torch.add(input_lengths, page_size - 1, out=indptr[1:])
indptr[1:].div_(page_size, rounding_mode="floor")
indptr[1:].cumsum_(-1)
# Get the lengths of the last page in a block.
last_page_len = torch.empty(
input_lengths.shape[0], dtype=torch.int32, device=input_lengths.device
)
torch.sub(input_lengths, 1, out=last_page_len)
last_page_len.remainder_(page_size)
last_page_len += 1
token = decode_state.set(state)
try:
state.plan(
indptr=indptr,
indices=block_tables,
last_page_len=last_page_len,
num_qo_heads=num_heads,
num_kv_heads=num_kv_heads,
head_dim=head_size,
page_size=page_size,
data_type=kv_cache_dtype,
q_data_type=q_dtype,
window_left=-1 if window_left is None else window_left,
)
yield
finally:
if token is not None:
decode_state.reset(token)
| text-generation-inference/server/text_generation_server/layers/attention/flashinfer.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/attention/flashinfer.py",
"repo_id": "text-generation-inference",
"token_count": 3030
} |
from dataclasses import dataclass
import torch
from EETQ import quant_weights, w8_a16_gemm
from text_generation_server.utils.weights import UnquantizedWeight
@dataclass
class EETQWeight(UnquantizedWeight):
weight: torch.Tensor
def get_linear(self, bias: torch.Tensor):
try:
from text_generation_server.layers.eetq import EETQLinear
return EETQLinear(self.weight, bias)
except ImportError:
raise ImportError(
"Please install EETQ from https://github.com/NetEase-FuXi/EETQ"
)
class EETQLinear(torch.nn.Module):
def __init__(
self,
weight,
bias,
) -> None:
super().__init__()
device = weight.device
if weight.dtype != torch.float16:
weight = weight.to(dtype=torch.float16)
weight = torch.t(weight).contiguous().cpu()
weight, scale = quant_weights(weight, torch.int8, False)
self.weight = weight.cuda(device)
self.scale = scale.cuda(device)
self.bias = bias.cuda(device) if bias is not None else None
def forward(self, input: torch.Tensor) -> torch.Tensor:
output = w8_a16_gemm(input, self.weight, self.scale)
output = output + self.bias if self.bias is not None else output
return output
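# Minimal usage sketch (an assumption for illustration, not part of the original file):
#   w = torch.randn(out_features, in_features, dtype=torch.float16, device="cuda")
#   layer = EETQLinear(w, bias=None)
#   y = layer(x)  # x: [batch, in_features] fp16 -> y: [batch, out_features]
# The weight is transposed and quantized to int8 once at construction time;
# w8_a16_gemm then runs the int8-weight / fp16-activation matmul on each forward call.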
| text-generation-inference/server/text_generation_server/layers/eetq.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/eetq.py",
"repo_id": "text-generation-inference",
"token_count": 574
} |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy
import torch
import torch.nn as nn
from loguru import logger
from text_generation_server.layers.marlin.util import (
_check_marlin_kernels,
marlin_zero_points,
permute_scales,
unpack_cols,
)
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.utils.log import log_once
from text_generation_server.utils.weights import Weight, Weights, WeightsLoader
try:
import marlin_kernels
except ImportError:
marlin_kernels = None
try:
major, _minor = torch.cuda.get_device_capability()
has_sm_8_0 = major >= 8
except Exception:
has_sm_8_0 = False
GPTQ_MARLIN_BITS = [4, 8]
GPTQ_MARLIN_GROUP_SIZES = [-1, 32, 64, 128]
MARLIN_TILE_SIZE = 16
def can_use_gptq_marlin(
*, bits: int, groupsize: int, quant_method: str, quantize: str, sym: bool
) -> bool:
return (
SYSTEM == "cuda"
and marlin_kernels is not None
and has_sm_8_0
and quantize in {"awq", "gptq"}
and quant_method in {"awq", "gptq"}
and bits in GPTQ_MARLIN_BITS
and groupsize in GPTQ_MARLIN_GROUP_SIZES
# We only support asymmetric quantization for AWQ.
and (sym or quant_method == "awq")
)
class GPTQMarlinWeightsLoader(WeightsLoader):
"""
Loader for using GPTQ- and AWQ-quantized weights with Marlin kernels.
"""
def __init__(
self,
*,
bits: int,
desc_act: bool,
groupsize: int,
quant_method: str,
quantize: str,
sym: bool,
):
self.bits = bits
self.desc_act = desc_act
self.groupsize = groupsize
self.quant_method = quant_method
self.quantize = quantize
self.sym = sym
def get_weights(self, weights: Weights, prefix: str):
log_once(logger.info, "Using GPTQ-Marlin kernels")
try:
qweight = weights.get_tensor(f"{prefix}.qweight")
except RuntimeError:
raise RuntimeError(
f"Cannot load `{self.quantize}` weight for GPTQ -> Marlin repacking, make sure the model is already quantized"
)
if not self.sym:
qzeros = weights.get_tensor(f"{prefix}.qzeros")
else:
qzeros = None
if self.quant_method == "awq":
g_idx = None
else:
g_idx = weights.get_tensor(f"{prefix}.g_idx")
scales = weights.get_tensor(f"{prefix}.scales")
return repack_gptq_for_marlin(
qweight=qweight,
scales=scales,
qzeros=qzeros,
g_idx=g_idx,
bits=self.bits,
desc_act=self.desc_act,
groupsize=self.groupsize,
quant_method=self.quant_method,
sym=self.sym,
sharded_infeatures=False,
)
def get_weights_col_packed(
self,
weights: Weights,
prefix: str,
block_sizes: Union[int, List[int]],
):
try:
qweight = weights.get_packed_sharded(
f"{prefix}.qweight", dim=1, block_sizes=block_sizes
)
except RuntimeError:
raise RuntimeError(
f"Cannot load `{self.quantize}` weight, make sure the model is already quantized."
)
scales = weights.get_packed_sharded(
f"{prefix}.scales", dim=1, block_sizes=block_sizes
)
scales = scales.to(dtype=weights.dtype)
if not self.sym:
qzeros = weights.get_packed_sharded(
f"{prefix}.qzeros", dim=1, block_sizes=block_sizes
)
else:
qzeros = None
if self.quant_method == "awq":
g_idx = None
else:
g_idx = weights.get_tensor(f"{prefix}.g_idx")
return repack_gptq_for_marlin(
qweight=qweight,
scales=scales,
qzeros=qzeros,
g_idx=g_idx,
bits=self.bits,
desc_act=self.desc_act,
groupsize=self.groupsize,
quant_method=self.quant_method,
sym=self.sym,
sharded_infeatures=False,
)
def get_multi_weights_col(self, weights: Weights, prefixes: List[str], dim: int):
try:
qweight = torch.cat(
[weights.get_sharded(f"{p}.qweight", dim=1) for p in prefixes], dim=1
)
except RuntimeError:
raise RuntimeError(
f"Cannot load `{self.quantize}` weight, make sure the model is already quantized"
)
scales = torch.cat(
[weights.get_sharded(f"{p}.scales", dim=1) for p in prefixes], dim=1
)
if not self.sym:
qzeros = torch.cat(
[weights.get_sharded(f"{p}.qzeros", dim=1) for p in prefixes], dim=1
)
else:
qzeros = None
if self.quant_method == "awq":
g_idx = None
else:
w = [weights.get_tensor(f"{p}.g_idx") for p in prefixes]
for w2 in w[1:]:
torch.testing.assert_close(w2, w[0])
g_idx = w[0]
return repack_gptq_for_marlin(
qweight=qweight,
scales=scales,
qzeros=qzeros,
g_idx=g_idx,
bits=self.bits,
desc_act=self.desc_act,
groupsize=self.groupsize,
quant_method=self.quant_method,
sym=self.sym,
sharded_infeatures=False,
)
def get_weights_row(self, weights: Weights, prefix: str):
log_once(logger.info, "Using GPTQ-Marlin kernels")
try:
qweight = weights.get_sharded(f"{prefix}.qweight", dim=0)
except RuntimeError:
raise RuntimeError(
f"Cannot load `{self.quantize}` weight for GPTQ -> Marlin repacking, make sure the model is already quantized"
)
if not self.sym:
if self.desc_act or self.groupsize == -1:
qzeros = weights.get_tensor(f"{prefix}.qzeros")
else:
qzeros = weights.get_sharded(f"{prefix}.qzeros", dim=0)
else:
qzeros = None
if self.quant_method == "awq":
g_idx = None
else:
g_idx = weights.get_sharded(f"{prefix}.g_idx", dim=0)
if self.desc_act or self.groupsize == -1:
scales = weights.get_tensor(f"{prefix}.scales")
else:
scales = weights.get_sharded(f"{prefix}.scales", dim=0)
sharded_in_features = weights.process_group.size() > 1
return repack_gptq_for_marlin(
qweight=qweight,
scales=scales,
qzeros=qzeros,
g_idx=g_idx,
bits=self.bits,
desc_act=self.desc_act,
groupsize=self.groupsize,
quant_method=self.quant_method,
sym=self.sym,
sharded_infeatures=sharded_in_features,
)
def _get_gptq_params(self, weights: Weights):
if weights.has_tensor("gptq_bits") and weights.has_tensor("gptq_groupsize"):
self.bits = weights.get_tensor("gptq_bits").item()
self.groupsize = weights.get_tensor("gptq_groupsize").item()
self.desc_act = False
# `server quantize` used asymmetric quantization unconditionally
# before the `gptq_sym` setting tensor was added.
self.sym = (
weights.get_tensor("gptq_sym").item()
if weights.has_tensor("gptq_sym")
else False
)
self.quant_method = "gptq"
@dataclass
class GPTQMarlinWeight(Weight):
"""
Repacked GPTQ Marlin weights.
"""
qweight: torch.Tensor
qzeros: torch.Tensor
scales: torch.Tensor
g_idx: torch.Tensor
perm: torch.Tensor
bits: int
is_full_k: bool
def __post_init__(self):
assert self.qweight.dtype == torch.int32
assert self.scales.dtype in (torch.float16, torch.bfloat16)
assert self.g_idx.dtype == torch.int32
assert self.perm.dtype == torch.int32
def get_linear(self, bias: torch.Tensor):
return GPTQMarlinLinear(
weight=self,
bias=bias,
)
def repack_gptq_for_marlin(
*,
qweight: torch.Tensor,
qzeros: Optional[torch.Tensor],
scales: torch.Tensor,
g_idx: Optional[torch.Tensor],
bits: int,
desc_act: bool,
groupsize: int,
quant_method: str,
sym: bool,
sharded_infeatures: bool,
) -> GPTQMarlinWeight:
"""Convert GPTQ weights to a layout that's compatible with GPTQ-Marlin kernels."""
_check_marlin_kernels()
assert marlin_kernels is not None
if bits not in GPTQ_MARLIN_BITS:
supported_bits = ", ".join(str(b) for b in GPTQ_MARLIN_BITS)
raise RuntimeError(
f"Repacking {bits}-bit GPTQ weights as Marlin is not supported, must be one of: {supported_bits}"
)
if groupsize not in GPTQ_MARLIN_GROUP_SIZES:
supported_sizes = ", ".join(str(b) for b in GPTQ_MARLIN_GROUP_SIZES)
raise RuntimeError(
f"Repacking GPTQ weights with group size {groupsize} as Marlin is not supported, must be one of: {supported_sizes}"
)
if not (sym or quant_method == "awq" or quant_method == "compressed-tensors"):
raise RuntimeError(
"Repacking GPTQ weights with asymmetric quantization as Marlin is not supported."
)
log_once(logger.info, f"Converting {quant_method} model to Marlin packing format.")
weights_per_int = 32 // bits
in_features = qweight.shape[0]
out_features = qweight.shape[1]
# AWQ uses column packing, GPTQ uses row packing
if quant_method == "awq":
out_features *= weights_per_int
else:
in_features *= weights_per_int
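# Illustrative example (not stated in the original source): with bits=4 there are
# 8 packed values per int32, so a GPTQ qweight of shape [K // 8, N] yields
# in_features = K, while an AWQ qweight of shape [K, N // 8] yields out_features = N.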
if in_features % groupsize != 0:
raise ValueError(
f"Number of input features ({in_features}) not divisible by group size ({groupsize})"
)
if g_idx is not None and desc_act and groupsize != -1:
perm = torch.argsort(g_idx).to(torch.int)
g_idx = g_idx[perm]
else:
perm = torch.empty(0, dtype=torch.int, device=qweight.device)
g_idx = torch.empty(0, dtype=torch.int, device=qweight.device)
if quant_method == "awq":
repacked = marlin_kernels.awq_marlin_repack(
qweight, in_features, out_features, bits
)
if qzeros is not None:
qzeros = awq_to_marlin_zero_points(
qzeros,
in_features // groupsize,
out_features,
bits,
)
else:
repacked = marlin_kernels.gptq_marlin_repack(
qweight, perm, in_features, out_features, bits
)
if qzeros is None:
qzeros = torch.empty(0, dtype=torch.int, device=qweight.device)
scales = permute_scales(scales)
is_full_k = not (desc_act and groupsize != -1 and sharded_infeatures)
return GPTQMarlinWeight(
qweight=repacked,
qzeros=qzeros,
scales=scales,
g_idx=g_idx,
perm=perm,
bits=bits,
is_full_k=is_full_k,
)
class GPTQMarlinLinear(nn.Module):
"""
Linear layer for GPTQ weights that were converted for the GPTQ-Marlin
kernels.
"""
def __init__(
self,
*,
weight: GPTQMarlinWeight,
bias: Optional[torch.Tensor],
):
super().__init__()
_check_marlin_kernels()
assert marlin_kernels is not None
in_features = weight.qweight.shape[0] * MARLIN_TILE_SIZE
out_features = weight.scales.shape[1]
_check_valid_shape(in_features=in_features, out_features=out_features)
self.bits = weight.bits
self.is_full_k = weight.is_full_k
self.qweight = weight.qweight
self.qzeros = weight.qzeros
self.scales = weight.scales
self.g_idx = weight.g_idx
self.perm = weight.perm
if bias is not None:
self.bias = bias
else:
self.bias = None
self.workspace = torch.zeros(
out_features // 64 * 16, dtype=torch.int, device=weight.qweight.device
)
def forward(self, A: torch.Tensor) -> torch.Tensor:
assert marlin_kernels is not None
A_flat = A.view(-1, A.shape[-1])
C = marlin_kernels.gptq_marlin_gemm(
A_flat,
self.qweight,
self.scales,
self.qzeros,
self.g_idx,
self.perm,
self.workspace,
self.bits,
A_flat.shape[0],
self.scales.shape[1],
A_flat.shape[1],
self.is_full_k,
self.qzeros.numel() > 0,
True,
)
C = C.reshape(A.shape[:-1] + (self.scales.shape[1],))
if self.bias is not None:
C += self.bias
return C
def awq_to_marlin_zero_points(
q_zp_packed: torch.Tensor, size_k: int, size_n: int, num_bits: int
) -> torch.Tensor:
# AWQ zero-points are quantized and packed on the column dim.
# In addition, the values are permuted based on dequantizer.
# Here we undo both of these, and then apply marlin permutation
# and pack it back.
q_zp = unpack_cols(q_zp_packed, num_bits, size_k, size_n)
# Undo interleaving (use argsort(..) to get inverse perm)
if num_bits == 4:
undo_interleave = numpy.argsort(numpy.array([0, 2, 4, 6, 1, 3, 5, 7]))
elif num_bits == 8:
undo_interleave = numpy.argsort(numpy.array([0, 2, 1, 3]))
else:
raise Exception("num_bits must be 4 or 8, got {}".format(num_bits))
q_zp = q_zp.reshape((-1, len(undo_interleave)))[:, undo_interleave].ravel()
q_zp = q_zp.reshape((-1, size_n)).contiguous()
marlin_zp = marlin_zero_points(q_zp, size_k, size_n, num_bits)
return marlin_zp
def _check_valid_shape(in_features: int, out_features: int):
if (in_features % 128 != 0 or out_features % 64 != 0) and (
in_features % 64 != 0 or out_features % 128 != 0
):
raise ValueError(
f"The GPTQ Marlin kernel does not have a valid thread configuration for weight matrix with shape ({out_features}, {in_features})."
" The shape elements must be divisible by (128, 64) or (64, 128)."
)
| text-generation-inference/server/text_generation_server/layers/marlin/gptq.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/marlin/gptq.py",
"repo_id": "text-generation-inference",
"token_count": 7162
} |
import torch
import torch.distributed
from torch import nn
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from typing import Optional, List, Tuple
from text_generation_server.layers.attention import (
paged_attention,
attention,
Seqlen,
)
from text_generation_server.layers import (
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
SpeculativeHead,
get_linear,
)
from text_generation_server.layers.attention.kv_cache import get_kv_scales
from text_generation_server.layers.layernorm import (
FastLayerNorm,
)
from text_generation_server.layers.rotary import (
PositionRotaryEmbedding,
)
class PhiConfig(PretrainedConfig):
def __init__(
self,
vocab_size=51200,
hidden_size=2560,
num_hidden_layers=32,
num_attention_heads=32,
num_key_value_heads=32,
hidden_act="gelu_fast", # llama uses silu
layer_norm_eps=1e-05, # rms in llama,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
tie_word_embeddings=False,
rope_theta=10000.0,
resid_pdrop=0.1, # llama doesn't have this
partial_rotary_factor=0.5, # important difference between llama and phi
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.rope_theta = rope_theta
self.resid_pdrop = resid_pdrop
self.partial_rotary_factor = partial_rotary_factor
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
# this is the same as llama, except that Phi uses bias=True
def load_attention(config, prefix, weights):
if config.num_attention_heads != config.num_key_value_heads:
return _load_gqa(config, prefix, weights)
else:
return TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=True,
)
def _load_gqa(config, prefix: str, weights):
assert config.hidden_size % config.num_attention_heads == 0
assert config.num_attention_heads % weights.process_group.size() == 0
weight = weights.get_multi_weights_col(
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
)
if config.quantize not in ["gptq", "awq", "marlin"]:
weight = weight.to(dtype=weights.dtype).to(device=weights.device)
head_size = config.hidden_size // config.num_attention_heads
num_heads = config.num_attention_heads // weights.process_group.size()
num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
assert list(weight.shape) == [
(num_heads + 2 * num_key_value_heads) * head_size,
config.hidden_size,
], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
# this is the same as llama, except that Phi uses bias=True
return TensorParallelColumnLinear(get_linear(weight, bias=True))
class FlashPhiAttention(torch.nn.Module):
def __init__(
self,
prefix: str,
config,
weights,
):
super().__init__()
self.num_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.head_size = self.hidden_size // self.num_heads
self.softmax_scale = self.head_size**-0.5
self.rotary_dim = int(config.partial_rotary_factor * self.head_size)
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.rotary_dim,
base=config.rope_theta,
device=weights.device,
)
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.query_key_value = load_attention(config, prefix, weights)
self.kv_scales = get_kv_scales(weights, f"{prefix}")
# in llama the dense layer is called "o_proj" and has bias=False
self.dense = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.dense",
weights=weights,
bias=True,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
# Compute query, key, value and split
qkv = self.query_key_value(hidden_states)
query, kv = qkv.split(
[
self.head_size * self.num_heads,
2 * self.head_size * self.num_key_value_heads,
],
dim=1,
)
# Reshape query and key for rotary embeddings
query = query.view(-1, self.num_heads, self.head_size)
kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)
# NOTE: this is the main difference between Llama and Phi
# in llama the rotary embeddings are applied to the whole query and key.
# Phi uses PARTIAL rotary embeddings, which are applied only to the first
# `rotary_dim` dimensions of each head (e.g. the first 32 when head_size=64 and partial_rotary_factor=0.5)
#
# Apply partial positional embeddings in place
self.rotary_emb(
query[:, :, : self.rotary_dim], kv[:, 0, :, : self.rotary_dim], cos, sin
)
# Reshape key and value and cache
kv_cache.store(
key=kv[:, 0],
value=kv[:, 1],
slots=slots,
kv_scales=self.kv_scales,
)
# Prefill
if cu_seqlen_prefill is not None:
attn_output = attention(
query=query,
key=kv[:, 0],
value=kv[:, 1],
kv_scales=self.kv_scales,
kv_cache=kv_cache,
seqlen=seqlen,
block_tables=block_tables,
softmax_scale=self.softmax_scale,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache,
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
kv_scales=self.kv_scales,
)
return self.dense(attn_output.view(-1, self.num_heads * self.head_size))
class PhiMLP(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
act = config.hidden_act
self.act = (
ACT2FN[act]
if "gelu" not in act
else lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
)
# llama weights are up_proj and down_proj and bias=False
self.up_proj = TensorParallelColumnLinear.load(
config,
prefix=f"{prefix}.fc1",
weights=weights,
bias=True,
)
self.down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.fc2",
weights=weights,
bias=True,
)
def forward(self, hidden_states):
# NOTE: Llama's MLP fuses the gate and up projections and has to `view`/split them at the intermediate size;
# Phi has no gating, so we can avoid that `view` operation
return self.down_proj(self.act(self.up_proj(hidden_states)))
class FlashPhiLayer(nn.Module):
def __init__(self, prefix: str, layer_id, config, weights):
super().__init__()
prefix = f"{prefix}.layers.{layer_id}"
self.self_attn = FlashPhiAttention(
prefix=f"{prefix}.self_attn", config=config, weights=weights
)
self.mlp = PhiMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
self.input_layernorm = FastLayerNorm.load(
prefix=f"{prefix}.input_layernorm",
weights=weights,
eps=config.layer_norm_eps,
)
self.resid_dropout = torch.nn.Dropout(config.resid_pdrop)
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
hidden_states, res = self.input_layernorm(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
hidden_states = self.resid_dropout(attn_output).add(
self.resid_dropout(self.mlp(hidden_states))
)
return hidden_states, res
class FlashPhiModel(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
process_group = weights.process_group
self.tp_rank = process_group.rank()
self.tp_world_size = process_group.size()
self.embed_tokens = TensorParallelEmbedding(
prefix=f"{prefix}.embed_tokens", weights=weights
)
self.layers = nn.ModuleList(
[
FlashPhiLayer(
prefix,
layer_id,
config,
weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.gradient_checkpointing = False
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
self.norm = FastLayerNorm.load(
prefix="model.final_layernorm",
weights=weights,
eps=config.layer_norm_eps,
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
# Get rotary cos and sin for this forward
# Avoid indexing in each layer
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids, max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashPhiForCausalLM(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
if not prefix:
prefix = "model"
else:
prefix = f"{prefix}.model"
self.model = FlashPhiModel(prefix, config, weights)
self.lm_head = SpeculativeHead.load(
config,
prefix="lm_head",
weights=weights,
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> torch.Tensor:
hidden_states = self.model(
input_ids,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
return self.lm_head(hidden_states)
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 6794
} |
# coding=utf-8
# Copyright 2024 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Mllama model."""
from typing import Optional, Tuple, List
import torch
import torch.utils.checkpoint
from torch import nn
from text_generation_server.utils.import_utils import SYSTEM
if SYSTEM == "ipex":
import intel_extension_for_pytorch as ipex
else:
import flash_attn_2_cuda
from transformers.activations import ACT2FN
import torch.nn.functional as F
from text_generation_server.layers import (
TensorParallelColumnLinear,
TensorParallelEmbedding,
TensorParallelRowLinear,
FastLinear,
)
from text_generation_server.layers.attention import (
Seqlen,
)
from text_generation_server.models.custom_modeling.flash_llama_modeling import (
FlashLlamaForCausalLM,
)
def _prepare_aspect_ratio_attention_mask(
aspect_ratio_mask: torch.Tensor,
num_patches: int,
target_length: int,
dtype: torch.dtype,
) -> torch.Tensor:
# Expand aspect ratio mask to target_length
batch_size, max_num_tiles = aspect_ratio_mask.shape
attention_mask = aspect_ratio_mask.view(batch_size, max_num_tiles, 1, 1).to(dtype)
attention_mask = attention_mask.repeat(1, 1, target_length, 1)
# Mask padding patches
pad_patches = target_length - num_patches
attention_mask[:, :, -pad_patches:] = 0
# Invert the mask (0 -> 1, 1 -> 0)
attention_mask = 1 - attention_mask
# Reshape to 2D and create 4D attention mask
# (batch_size, 1, max_num_tiles * target_length, max_num_tiles * target_length)
attention_mask = attention_mask.reshape(
batch_size, max_num_tiles * target_length, 1
)
attention_mask = (
attention_mask @ attention_mask.transpose(-1, -2) * torch.finfo(dtype).min
)
attention_mask = attention_mask.unsqueeze(1)
return attention_mask
# Copied from transformers.models.llama.modeling_llama._prepare_4d_causal_attention_mask_with_cache_position
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
device: torch.device,
min_dtype: float,
cache_position: torch.Tensor,
batch_size: int,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with a static cache, the mask should be as long as the static cache to account for the 0 padding (the part of the cache that is not filled yet).
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
device (`torch.device`):
The device to place the 4D attention mask on.
min_dtype (`float`):
The minimum value representable with the dtype `dtype`.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
causal_mask = torch.full(
(sequence_length, target_length),
fill_value=min_dtype,
dtype=dtype,
device=device,
)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(
target_length, device=device
) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = (
causal_mask.clone()
) # copy to contiguous memory for in-place edit
mask_length = attention_mask.shape[-1]
padding_mask = (
causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[
:, :, :, :mask_length
].masked_fill(padding_mask, min_dtype)
return causal_mask
def _prepare_cross_attention_mask(
cross_attention_mask: torch.Tensor,
num_vision_tokens: int,
dtype: str,
) -> Tuple[torch.Tensor, torch.Tensor]:
# reshape so it can be used by attn module
batch_size, text_total_length, *_ = cross_attention_mask.shape
cross_attention_mask = cross_attention_mask.repeat_interleave(
num_vision_tokens, dim=3
)
cross_attention_mask = cross_attention_mask.view(batch_size, text_total_length, -1)
cross_attention_mask = cross_attention_mask.unsqueeze(1)
# invert the mask
inverted_cross_attn_mask = (1.0 - cross_attention_mask).to(dtype)
cross_attention_mask = inverted_cross_attn_mask.masked_fill(
inverted_cross_attn_mask.to(torch.bool), torch.finfo(dtype).min
)
# apply full-row bias, which returns a 4D tensor of shape [B, H, S1, 1] whose value is 0 if a full row in the cross attn mask's
# last dimension contains negative infinity values, and 1 otherwise
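# A row that is entirely -inf (a text position attending to no vision tokens) therefore gets
# a 0 in this mask, which lets the caller zero out that position's cross-attention output.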
negative_inf_value = torch.finfo(dtype).min
full_text_row_masked_out_mask = (
(cross_attention_mask != negative_inf_value)
.any(dim=-1)
.type_as(cross_attention_mask)[..., None]
)
cross_attention_mask *= full_text_row_masked_out_mask
return cross_attention_mask, full_text_row_masked_out_mask
# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->MllamaVision
class MllamaVisionMLP(nn.Module):
def __init__(self, *, prefix, config, weights):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = TensorParallelColumnLinear.load(
prefix=f"{prefix}.fc1", weights=weights, config=config, bias=True
)
self.fc2 = TensorParallelRowLinear.load(
prefix=f"{prefix}.fc2", weights=weights, config=config, bias=True
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
class MllamaVisionSdpaAttention(nn.Module):
def __init__(self, *, prefix, config, weights):
super().__init__()
self.embed_dim = config.hidden_size
self.head_dim = config.hidden_size // config.attention_heads
self.num_heads = config.attention_heads // weights.process_group.size()
self.qkv_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=False,
)
self.o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=False,
)
def forward(
self,
hidden_state: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
qkv = self.qkv_proj(hidden_state)
query, key, value = qkv.split(
[
self.head_dim * self.num_heads,
self.head_dim * self.num_heads,
self.head_dim * self.num_heads,
],
dim=2,
)
batch_size, q_seq_len, _ = query.shape
_, kv_seq_len, _ = key.shape
query = query.view(batch_size, q_seq_len, self.num_heads, self.head_dim)
key = key.view(batch_size, kv_seq_len, self.num_heads, self.head_dim)
value = value.view(batch_size, kv_seq_len, self.num_heads, self.head_dim)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
attn_output = F.scaled_dot_product_attention(
query, key, value, attn_mask=attention_mask
)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(batch_size, q_seq_len, -1)
output = self.o_proj(attn_output)
return output
class MllamaVisionEncoderLayer(nn.Module):
def __init__(self, *, prefix, config, weights, is_gated: bool):
super().__init__()
self.hidden_size = config.hidden_size
self.num_attention_heads = config.attention_heads
self.is_gated = is_gated
self.intermediate_size = config.intermediate_size
self.self_attn = MllamaVisionSdpaAttention(
prefix=f"{prefix}.self_attn", config=config, weights=weights
)
self.mlp = MllamaVisionMLP(
prefix=f"{prefix}.mlp", config=config, weights=weights
)
self.input_layernorm = nn.LayerNorm.load(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=1e-05
)
self.post_attention_layernorm = nn.LayerNorm.load(
prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=1e-05
)
# There used to be an if/else here, but only this code path is used.
if is_gated:
self.gate_attn = nn.Parameter(
weights.get_tensor(f"{prefix}.gate_attn"), requires_grad=False
)
self.gate_ffn = nn.Parameter(
weights.get_tensor(f"{prefix}.gate_ffn"), requires_grad=False
)
def forward(
self,
hidden_state: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
):
# Self Attention
residual = hidden_state
hidden_state = self.input_layernorm(hidden_state)
hidden_state = self.self_attn(hidden_state, attention_mask=attention_mask)
gate_attn = 1 if not self.is_gated else self.gate_attn.tanh()
hidden_state = residual + gate_attn * hidden_state
# Feed forward
residual = hidden_state
hidden_state = self.post_attention_layernorm(hidden_state)
hidden_state = self.mlp(hidden_state)
gate_ffn = 1 if not self.is_gated else self.gate_ffn.tanh()
hidden_state = residual + gate_ffn * hidden_state
return hidden_state
class MllamaVisionEncoder(nn.Module):
def __init__(self, *, prefix, config, weights, is_gated: bool, num_layers: int):
super().__init__()
self.config = config
self.layers = [
MllamaVisionEncoderLayer(
prefix=f"{prefix}.layers.{i}",
config=config,
weights=weights,
is_gated=is_gated,
)
for i in range(num_layers)
]
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
):
encoder_states = [hidden_states]
for encoder_layer in self.layers:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
)
hidden_states = layer_outputs
encoder_states.append(hidden_states)
return hidden_states, encoder_states
class MllamaPrecomputedAspectRatioEmbedding(nn.Module):
def __init__(self, *, prefix, config, weights):
super().__init__()
self.max_num_tiles = config.max_num_tiles
self.hidden_size = config.hidden_size
self.max_aspect_ratio_id = config.max_aspect_ratio_id
self.embedding = TensorParallelEmbedding(
prefix=f"{prefix}.embedding", weights=weights
)
self.gate = nn.Parameter(
weights.get_tensor(f"{prefix}.gate"), requires_grad=False
)
def forward(
self, hidden_state: torch.Tensor, aspect_ratio_ids: torch.Tensor
) -> torch.Tensor:
embeddings = self.embedding(aspect_ratio_ids)
embeddings = embeddings.reshape(-1, self.max_num_tiles, 1, self.hidden_size)
# Always gated.
embeddings = embeddings * self.gate.tanh()
hidden_state = hidden_state + embeddings
return hidden_state
class MllamaPrecomputedPositionEmbedding(nn.Module):
def __init__(self, *, prefix, config, weights):
super().__init__()
self.max_num_tiles = config.max_num_tiles
self.max_aspect_ratio_id = config.max_aspect_ratio_id
self.num_patches = (config.image_size // config.patch_size) ** 2 + 1
self.hidden_size = config.hidden_size
self.scale = config.hidden_size**-0.5
self.gate = nn.Parameter(
weights.get_tensor(f"{prefix}.gate"), requires_grad=False
)
# position embedding
embedding = nn.Parameter(
weights.get_tensor(f"{prefix}.embedding"), requires_grad=False
)
self.gated_position_embedding = (1 - self.gate.tanh()) * embedding
self.tile_embedding = TensorParallelEmbedding(
prefix=f"{prefix}.tile_embedding", weights=weights
)
def forward(
self, hidden_state: torch.Tensor, aspect_ratio_ids: torch.Tensor
) -> torch.Tensor:
# position embeddings
hidden_state = hidden_state + self.gated_position_embedding.view(
1, 1, self.num_patches, self.hidden_size
)
# precomputed tile position embeddings
tile_position_embedding = self.tile_embedding(aspect_ratio_ids)
batch_size = hidden_state.shape[0]
tile_position_embedding = tile_position_embedding.reshape(
batch_size, self.max_num_tiles, self.num_patches, self.hidden_size
)
gated_tile_position_embedding = self.gate.tanh() * tile_position_embedding
hidden_state = hidden_state + gated_tile_position_embedding
return hidden_state
class MllamaVisionModel(nn.Module):
def __init__(self, *, prefix, config, weights):
super().__init__()
self.image_size = config.image_size
self.patch_size = config.patch_size
self.max_num_tiles = config.max_num_tiles
self.hidden_size = config.hidden_size
self.num_channels = config.num_channels
self.intermediate_layers_indices = config.intermediate_layers_indices
self.num_patches = (self.image_size // self.patch_size) ** 2 + 1
self.scale = config.hidden_size**-0.5
self.dtype = weights.dtype
self.patch_embedding = nn.Conv2d(
in_channels=config.num_channels,
out_channels=self.hidden_size,
kernel_size=self.patch_size,
stride=self.patch_size,
padding="valid",
bias=False,
)
self.patch_embedding.weight = nn.Parameter(
weights.get_tensor(f"{prefix}.patch_embedding.weight"), requires_grad=False
)
self.class_embedding = nn.Parameter(
weights.get_tensor(f"{prefix}.class_embedding"), requires_grad=False
)
self.gated_positional_embedding = MllamaPrecomputedPositionEmbedding(
prefix=f"{prefix}.gated_positional_embedding",
config=config,
weights=weights,
)
self.pre_tile_positional_embedding = MllamaPrecomputedAspectRatioEmbedding(
prefix=f"{prefix}.pre_tile_positional_embedding",
config=config,
weights=weights,
)
self.post_tile_positional_embedding = MllamaPrecomputedAspectRatioEmbedding(
prefix=f"{prefix}.post_tile_positional_embedding",
config=config,
weights=weights,
)
## layer norms
self.layernorm_pre = nn.LayerNorm.load(
prefix=f"{prefix}.layernorm_pre",
weights=weights,
# torch default
eps=1e-05,
)
self.layernorm_post = nn.LayerNorm.load(
prefix=f"{prefix}.layernorm_post",
weights=weights,
# torch default
eps=1e-05,
)
## encoders
self.transformer = MllamaVisionEncoder(
prefix=f"{prefix}.transformer",
config=config,
weights=weights,
is_gated=False,
num_layers=config.num_hidden_layers,
)
self.global_transformer = MllamaVisionEncoder(
prefix=f"{prefix}.global_transformer",
config=config,
weights=weights,
is_gated=True,
num_layers=config.num_global_layers,
)
def apply_class_embedding(self, hidden_state: torch.Tensor) -> torch.Tensor:
batch_size, _, hidden_size = hidden_state.shape
class_embedding = self.class_embedding.expand(batch_size, 1, hidden_size)
hidden_state = torch.cat([class_embedding, hidden_state], dim=1)
return hidden_state
def forward(
self,
pixel_values: torch.Tensor,
aspect_ratio_ids: torch.Tensor,
attention_mask: torch.Tensor,
) -> torch.Tensor:
(
batch_size,
num_concurrent_media,
num_tiles,
num_channels,
height,
width,
) = pixel_values.shape
pixel_values = pixel_values.reshape(
batch_size * num_concurrent_media * num_tiles, num_channels, height, width
)
aspect_ratio_ids = aspect_ratio_ids.reshape(
batch_size * num_concurrent_media, -1
)
# patch embedding
patch_embeds = self.patch_embedding(pixel_values)
hidden_state = patch_embeds.flatten(2).transpose(1, 2)
# tile embeddings
_, num_patches, dim = hidden_state.shape
hidden_state = hidden_state.reshape(
batch_size * num_concurrent_media, num_tiles, -1, dim
)
hidden_state = self.pre_tile_positional_embedding(
hidden_state, aspect_ratio_ids
)
# apply cls token
hidden_state = hidden_state.reshape(
batch_size * num_concurrent_media * num_tiles, num_patches, dim
)
hidden_state = self.apply_class_embedding(hidden_state)
num_patches += 1
# apply position embeddings
hidden_state = hidden_state.reshape(
batch_size * num_concurrent_media, num_tiles, num_patches, dim
)
hidden_state = self.gated_positional_embedding(hidden_state, aspect_ratio_ids)
# apply encoder
hidden_state = self.layernorm_pre(hidden_state)
# Compute the number of tokens to pad
num_padding_patches = (8 - (hidden_state.shape[-2] % 8)) % 8
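# e.g. with the typical 1601 patches per tile (560x560 images, patch size 14), 7 padding
# patches are added so that the patch dimension becomes a multiple of 8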
# Compute padding tuple for pad function
padding = (
0,
0,
0,
num_padding_patches,
) # (pad_left, pad_right, pad_left for dim -2, pad_right for dim -2)
# Pad the tensor
hidden_state = F.pad(hidden_state, padding, mode="constant", value=0)
slice_index = -num_padding_patches if num_padding_patches > 0 else None
if attention_mask is not None:
attention_mask = attention_mask.reshape(
batch_size * num_concurrent_media, -1
)
attention_mask = _prepare_aspect_ratio_attention_mask(
aspect_ratio_mask=attention_mask,
num_patches=self.num_patches,
target_length=hidden_state.shape[2],
dtype=self.dtype,
)
hidden_state = hidden_state.view(batch_size * num_concurrent_media, -1, dim)
hidden_state, all_intermediate_hidden_states = self.transformer(
hidden_state,
attention_mask=attention_mask,
)
intermediate_hidden_states = [
hidden_state
for idx, hidden_state in enumerate(all_intermediate_hidden_states)
if idx in self.intermediate_layers_indices
]
intermediate_hidden_states = torch.stack(intermediate_hidden_states, dim=-1)
# apply global encoder
hidden_state = self.layernorm_post(hidden_state)
hidden_state = hidden_state.reshape(
batch_size * num_concurrent_media,
num_tiles,
num_patches + num_padding_patches,
dim,
)
hidden_state = self.post_tile_positional_embedding(
hidden_state, aspect_ratio_ids
)
hidden_state = hidden_state.reshape(
batch_size * num_concurrent_media,
num_tiles * (num_patches + num_padding_patches),
dim,
)
hidden_state, _ = self.global_transformer(
hidden_state, attention_mask=attention_mask
)
hidden_state = hidden_state.reshape(
batch_size * num_concurrent_media,
num_tiles,
num_patches + num_padding_patches,
dim,
)
hidden_state = hidden_state[:, :, :slice_index]
# adding intermediate layer outputs
hidden_state = hidden_state.reshape(
batch_size, num_concurrent_media, num_tiles, num_patches, dim
)
intermediate_hidden_states = intermediate_hidden_states.reshape(
batch_size * num_concurrent_media,
num_tiles,
num_patches + num_padding_patches,
-1,
)
intermediate_hidden_states = intermediate_hidden_states[:, :, :slice_index]
intermediate_hidden_states = intermediate_hidden_states.reshape(
batch_size, num_concurrent_media, num_tiles, num_patches, -1
)
hidden_state = torch.cat([hidden_state, intermediate_hidden_states], dim=-1)
return hidden_state
class MllamaTextCrossAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, *, prefix, config, weights, layer_idx):
super().__init__()
self.config = config
self.num_heads = self.config.num_attention_heads
self.num_key_value_heads = self.config.num_key_value_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.head_size = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.layer_idx = layer_idx
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
self.num_key_value_heads // weights.process_group.size()
)
self.q_proj = TensorParallelColumnLinear.load(
config,
prefix=f"{prefix}.q_proj",
weights=weights,
bias=False,
)
self.k_proj = TensorParallelColumnLinear.load(
config,
prefix=f"{prefix}.k_proj",
weights=weights,
bias=False,
)
self.v_proj = TensorParallelColumnLinear.load(
config,
prefix=f"{prefix}.v_proj",
weights=weights,
bias=False,
)
self.o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=False,
)
self.q_norm = MllamaTextRMSNorm.load(
prefix=f"{prefix}.q_norm", weights=weights, eps=config.rms_norm_eps
)
self.k_norm = MllamaTextRMSNorm.load(
prefix=f"{prefix}.k_norm", weights=weights, eps=config.rms_norm_eps
)
self.softmax_scale = self.head_size**-0.5
def forward(
self,
hidden_states: torch.Tensor,
cross_attention_states: Optional[torch.Tensor] = None,
# past_key_value=None,
# attention_mask: Optional[torch.Tensor] = None,
# cache_position: Optional[torch.LongTensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# hidden_states = hidden_states.unsqueeze(0)
# bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
query_states = query_states.view(-1, self.num_heads, self.head_size)
query_states = self.q_norm(query_states)
(
cross_attention_states,
cu_seqlen_q,
cu_seqlen_k,
max_q,
max_k,
indices,
) = cross_attention_states
key_states = self.k_proj(cross_attention_states)
value_states = self.v_proj(cross_attention_states)
key_states = key_states.view(-1, self.num_key_value_heads, self.head_size)
value_states = value_states.view(-1, self.num_key_value_heads, self.head_size)
key_states = self.k_norm(key_states)
# key_states = key_states.repeat(1, self.num_key_value_groups, 1)
# value_states = value_states.repeat(1, self.num_key_value_groups, 1)
causal = False
# logger.info(
# f"Q: {query_states.shape} -K {key_states.shape} - V{value_states.shape}"
# )
if SYSTEM == "ipex":
attn_output = torch.empty_like(query_states)
ipex.llm.functional.varlen_attention(
(
query_states.contiguous()
if query_states.device.type == "xpu"
else query_states
),
(
key_states.contiguous()
if key_states.device.type == "xpu"
else key_states
),
(
value_states.contiguous()
if value_states.device.type == "xpu"
else value_states
),
attn_output,
cu_seqlen_q,
cu_seqlen_k,
max_q,
max_k,
0.0,
self.softmax_scale,
False,
causal,
False,
None,
)
else:
attn_output = flash_attn_2_cuda.varlen_fwd(
query_states,
key_states,
value_states,
None,
cu_seqlen_q,
cu_seqlen_k,
None,
None,
None, # block_tables
None,
max_q,
max_k,
0.0,
self.softmax_scale,
False,
causal, # Causal
-1, # window_size_left,
-1,
0.0, # softcap
False,
None,
)[0]
attn_output = self.o_proj(attn_output.view(-1, self.num_heads * self.head_size))
return attn_output
# Copied from transformers.models.gemma2.modeling_gemma2.Gemma2MLP with Gemma2->MllamaText
class MllamaTextMLP(nn.Module):
def __init__(self, *, prefix, config, weights):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = (
config.intermediate_size // weights.process_group.size()
)
self.gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
weights=weights,
dim=0,
bias=False,
)
self.down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=False,
)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
shape = x.shape
gate_up_states = self.gate_up_proj(x)
gate_up_states = gate_up_states.view(*shape[:-1], 2, self.intermediate_size)
result = self.down_proj(
self.act_fn(gate_up_states[:, 0]) * gate_up_states[:, 1]
)
return result
class FlashLlamaCrossLayer(torch.nn.Module):
"""Cross-attention transformer block with tanh-gated attention and feedforward."""
def __init__(self, *, prefix, config, weights, index) -> None:
layer_idx = index
super().__init__()
self.cross_attn = MllamaTextCrossAttention(
prefix=f"{prefix}.cross_attn",
config=config,
weights=weights,
layer_idx=layer_idx,
)
self.input_layernorm = MllamaTextRMSNorm.load(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.cross_attn_attn_gate = torch.nn.Parameter(
weights.get_tensor(f"{prefix}.cross_attn_attn_gate"), requires_grad=False
)
self.mlp = MllamaTextMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
self.post_attention_layernorm = MllamaTextRMSNorm.load(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
self.cross_attn_mlp_gate = torch.nn.Parameter(
weights.get_tensor(f"{prefix}.cross_attn_mlp_gate"), requires_grad=False
)
self.layer_idx = layer_idx
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
adapter_data,
cross_attention_states, # [ IB, ...]
) -> Tuple[torch.Tensor, torch.Tensor]:
if cross_attention_states is None:
return hidden_states, residual
if residual is not None:
hidden_states += residual
indices = cross_attention_states[-1]
out_hidden_states = hidden_states[:]
if len(indices) > 0:
assert max(indices) < hidden_states.shape[0]
hidden_states = hidden_states[indices]
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states = self.cross_attn(
hidden_states=hidden_states,
# attention_mask=cross_attention_mask,
cross_attention_states=cross_attention_states,
)
hidden_states = residual + self.cross_attn_attn_gate.tanh() * hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.cross_attn_mlp_gate.tanh() * hidden_states
out_hidden_states[indices] = hidden_states
hidden_states = out_hidden_states
return hidden_states, None
# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->MllamaText
class MllamaTextRMSNorm(nn.Module):
def __init__(self, weight, eps):
super().__init__()
self.weight = weight
self.variance_epsilon = eps
@classmethod
def load(cls, *, prefix, weights, eps):
weight = nn.Parameter(
weights.get_tensor(f"{prefix}.weight"), requires_grad=False
)
return cls(weight=weight, eps=eps)
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class MllamaForConditionalGeneration(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
config.vision_config.quantize = None
config.vision_config.speculator = config.speculator
config.text_config.quantize = config.quantize
config.text_config.speculator = config.speculator
config.text_config._attn_implementation = "sdpa"
self.hidden_size = config.text_config.hidden_size
self.vision_model = MllamaVisionModel(
prefix="vision_model", config=config.vision_config, weights=weights
)
self.multi_modal_projector = FastLinear.load(
prefix="multi_modal_projector", config=config, weights=weights, bias=True
)
self.text_model = FlashLlamaForCausalLM(
prefix="language_model", config=config.text_config, weights=weights
)
self.config = config
self.dtype = weights.dtype
self.device = weights.device
def vision_forward(self, pixel_values, aspect_ratio_ids, aspect_ratio_mask):
if aspect_ratio_ids is None:
raise ValueError(
"`aspect_ratio_ids` must be provided if `pixel_values` is provided"
)
# logger.info(f"PIxel values {pixel_values.shape}")
batch_size = pixel_values.shape[0]
vision_states = self.vision_model(
pixel_values, aspect_ratio_ids, aspect_ratio_mask
)
cross_attention_states = self.multi_modal_projector(vision_states).reshape(
-1, vision_states.shape[-2], self.hidden_size
)
_, _, h = cross_attention_states.shape
cross_attention_states = cross_attention_states.view(batch_size, -1, h)
# logger.info(f"cross {cross_attention_states.shape}")
return cross_attention_states
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor],
adapter_data: Optional[torch.Tensor] = None,
# XXX: Putting these as optional so that the cuda warmup calls can go through.
cross_attention_states: Optional[torch.Tensor] = None,
image_indices=None,
):
if cross_attention_states is not None:
seqlen_q = len(image_indices)
n_images = cross_attention_states.shape[0]
seqlen_k = cross_attention_states.shape[1]
device = cross_attention_states.device
if cu_seqlen_prefill is not None:
offset = 0
cu_q = []
indices = []
for index in image_indices:
cu_q.append(offset)
length = seqlen.input_lengths[index].item()
assert index < seqlen.cu_seqlen_q.shape[0]
input_ids_offset = seqlen.cu_seqlen_q[index]
indices.extend(range(input_ids_offset, input_ids_offset + length))
offset += length
cu_q.append(offset)
cu_seqlen_q = torch.Tensor(cu_q).to(device=device, dtype=torch.int32)
assert max(indices) < input_ids.shape[0]
cu_seqlen_k = (
torch.arange(
n_images + 1,
device=device,
dtype=torch.int32,
)
* seqlen_k
)
max_q = cu_seqlen_q[-1].item()
max_k = seqlen_k
else:
cu_seqlen_q = torch.arange(
seqlen_q + 1, device=device, dtype=torch.int32
)
seqlen_k = cross_attention_states.shape[1]
n_images = cross_attention_states.shape[0]
cu_seqlen_k = (
torch.arange(
n_images + 1,
device=device,
dtype=torch.int32,
)
* seqlen_k
)
max_q = seqlen_q
max_k = seqlen_k
indices = image_indices[:]
cross_attention_states = (
cross_attention_states,
cu_seqlen_q,
cu_seqlen_k,
max_q,
max_k,
indices,
)
outputs = self.text_model(
input_ids=input_ids,
position_ids=position_ids,
cu_seqlen_prefill=cu_seqlen_prefill,
kv_cache=kv_cache,
block_tables=block_tables,
slots=slots,
seqlen=seqlen,
max_s=max_s,
prefill_cache_indices=prefill_cache_indices,
lm_head_indices=lm_head_indices,
adapter_data=adapter_data,
cross_attention_states=cross_attention_states,
)
return outputs
| text-generation-inference/server/text_generation_server/models/custom_modeling/mllama.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/mllama.py",
"repo_id": "text-generation-inference",
"token_count": 18144
} |
import inspect
import torch
from abc import ABC, abstractmethod
from typing import List, Tuple, Optional, TypeVar, Type, Dict
from collections import defaultdict
from transformers import PreTrainedTokenizerBase
from loguru import logger
from text_generation_server.models.globals import (
ATTENTION,
PREFIX_CACHING,
BLOCK_SIZE,
PREFILL_CHUNKING,
)
from text_generation_server.models.types import Batch, Generation
from text_generation_server.utils.log import log_master
from text_generation_server.utils.prefill_chunking import set_support_chunking
from text_generation_server.utils.speculate import get_speculate
from text_generation_server.pb.generate_pb2 import InfoResponse
from text_generation_server.adapters.weights import LayerAdapterWeights
BASE_MODEL_ADAPTER_ID = "__base_model__"
B = TypeVar("B", bound=Batch)
class Model(ABC):
def __init__(
self,
model_id: str,
model: torch.nn.Module,
tokenizer: PreTrainedTokenizerBase,
requires_padding: bool,
dtype: torch.dtype,
device: torch.device,
rank: int = 0,
world_size: int = 1,
sliding_window: Optional[int] = None,
speculate: Optional[int] = None,
adapter_id: str = BASE_MODEL_ADAPTER_ID,
support_chunking: bool = False,
):
self.model_id = model_id
self.model = model.eval()
self.tokenizer = tokenizer
# all_special_ids is not set correctly if the rust tokenizer is unpacked
# TODO report this to transformers.
other_special_ids = {
id for id, token in tokenizer.added_tokens_decoder.items() if token.special
}
self.all_special_ids = set(tokenizer.all_special_ids)
self.all_special_ids.update(other_special_ids)
self.requires_padding = requires_padding
self.dtype = dtype
self.device = device
self.rank = rank
self.world_size = world_size
self.sliding_window = sliding_window if sliding_window != -1 else None
self.layer_to_adapter_weights: Dict[str, LayerAdapterWeights] = defaultdict(
LayerAdapterWeights
)
self.loaded_adapters = set()
self.static_adapter_id = adapter_id
if speculate is None:
speculate = get_speculate()
self.speculate = speculate
support_chunking = support_chunking and PREFILL_CHUNKING
if speculate != 0 and support_chunking:
log_master(
logger.warning,
"Prefill chunking does not support speculation yet. "
"Prefill chunking will be turned off",
)
support_chunking = False
if (
ATTENTION not in ["flashinfer", "flashdecoding", "flashdecoding-ipex"]
and support_chunking
):
log_master(
logger.warning,
"Prefill chunking is only supported with `flashinfer` or `flashdecoding` or `flashdecoding-ipex` attention types.",
)
support_chunking = False
log_master(logger.info, f"Using prefill chunking = {support_chunking}")
self.support_chunking = support_chunking
set_support_chunking(support_chunking)
self.has_position_ids = (
inspect.signature(model.forward).parameters.get("position_ids", None)
is not None
)
self.check_initialized()
@property
def info(self) -> InfoResponse:
if self.requires_padding and self.sliding_window is not None:
raise NotImplementedError("sliding_window is not implemented with padding")
return InfoResponse(
requires_padding=self.requires_padding,
dtype=str(self.dtype),
device_type=self.device.type,
window_size=self.sliding_window,
speculate=self.speculate,
support_chunking=self.support_chunking,
use_prefix_caching=PREFIX_CACHING,
attention_impl=ATTENTION,
block_size=BLOCK_SIZE,
)
@property
@abstractmethod
def batch_type(self) -> Type[B]:
raise NotImplementedError
@abstractmethod
def generate_token(
self, batch: B
) -> Tuple[List[Generation], Optional[B], Tuple[int, int]]:
raise NotImplementedError
def warmup(
self, batch: B, max_input_tokens: Optional[int], max_total_tokens: Optional[int]
) -> Tuple[Optional[int], int, int]:
self.generate_token(batch)
total = sum(len(i) for i in batch.input_ids)
if max_total_tokens is None:
max_total_tokens = total
if max_input_tokens is None:
max_input_tokens = max_total_tokens - 1
return None, max_input_tokens, max_total_tokens
def decode_token(
self,
all_input_ids: List[int],
prefix_offset: int = 0,
read_offset: int = 0,
skip_special_tokens: bool = False,
) -> Tuple[str, int, int]:
"""Hack to hopefully support generate_stream for the maximum number of tokenizers"""
# The prefix text is necessary only to defeat cleanup algorithms in the decode
# which decide to add a space or not depending on the surrounding ids.
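# For example, with a SentencePiece-style tokenizer the id for "▁Hello" typically decodes to
# "Hello" on its own but to "Hi Hello" together with the id for "▁Hi"; diffing against the
# decoded prefix recovers the incremental text " Hello" with its leading space intact.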
prefix_text = self.tokenizer.decode(
all_input_ids[prefix_offset:read_offset],
skip_special_tokens=skip_special_tokens,
)
new_text = self.tokenizer.decode(
all_input_ids[prefix_offset:], skip_special_tokens=skip_special_tokens
)
if len(new_text) > len(prefix_text) and not new_text.endswith("�"):
# utf-8 char at the end means it's a potential unfinished byte sequence
# from byte fallback tokenization.
# If it's in the middle, it's probably a real invalid id generated
# by the model
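# For example, an emoji encoded with byte-fallback tokens decodes to a trailing "�" until all
# of its bytes have been generated, so we hold off instead of emitting the replacement char.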
new_text = new_text[len(prefix_text) :]
return new_text, read_offset, len(all_input_ids)
else:
return "", prefix_offset, read_offset
def check_initialized(self):
uninitialized_parameters = []
for n, p in self.model.named_parameters():
if p.data.device == torch.device("meta"):
uninitialized_parameters.append(n)
if uninitialized_parameters:
raise RuntimeError(
f"found uninitialized parameters in model {self.__class__.__name__}: {uninitialized_parameters}"
)
| text-generation-inference/server/text_generation_server/models/model.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/model.py",
"repo_id": "text-generation-inference",
"token_count": 2809
} |
from functools import lru_cache
from text_generation_server.utils.dist import RANK
@lru_cache(10)
def log_once(log, msg: str, master=True):
if master:
log_master(log, msg)
else:
log(msg)
def log_master(log, msg: str):
if RANK == 0:
log(msg)
| text-generation-inference/server/text_generation_server/utils/log.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/log.py",
"repo_id": "text-generation-inference",
"token_count": 126
} |
# This CITATION.cff file was generated with cffinit.
# Visit https://bit.ly/cffinit to generate yours today!
cff-version: 1.2.0
title: HuggingFace's Tokenizers
message: >-
Fast State-of-the-Art Tokenizers optimized for Research
and Production.
type: software
authors:
- given-names: Anthony
family-names: Moi
email: [email protected]
affiliation: HuggingFace
- given-names: Nicolas
family-names: Patry
affiliation: HuggingFace
repository-code: 'https://github.com/huggingface/tokenizers'
url: 'https://github.com/huggingface/tokenizers'
repository: 'https://huggingface.co'
abstract: >-
Fast State-of-the-Art Tokenizers optimized for Research
and Production.
keywords:
- Rust
- Tokenizer
- NLP
license: Apache-2.0
commit: 37372b6
version: 0.13.4
date-released: '2023-04-05'
| tokenizers/CITATION.cff/0 | {
"file_path": "tokenizers/CITATION.cff",
"repo_id": "tokenizers",
"token_count": 293
} |
<p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
<p>
<p align="center">
<a href="https://badge.fury.io/js/tokenizers">
<img alt="Build" src="https://badge.fury.io/js/tokenizers.svg">
</a>
<a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue">
</a>
</p>
<br>
NodeJS implementation of today's most used tokenizers, with a focus on performance and
versatility. These are bindings over the [Rust](https://github.com/huggingface/tokenizers/tree/master/tokenizers) implementation.
If you are interested in the high-level design, you can check it out there.
## Main features
- Train new vocabularies and tokenize using 4 pre-made tokenizers (Bert WordPiece and the 3
most common BPE versions).
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for research and production.
- Normalization comes with alignments tracking. It's always possible to get the part of the
original sentence that corresponds to a given token.
- Does all the pre-processing: truncates, pads, and adds the special tokens your model needs.
## Installation
```bash
npm install tokenizers@latest
```
## Basic example
```ts
import { Tokenizer } from "tokenizers";
const tokenizer = await Tokenizer.fromFile("tokenizer.json");
const wpEncoded = await tokenizer.encode("Who is John?");
console.log(wpEncoded.getLength());
console.log(wpEncoded.getTokens());
console.log(wpEncoded.getIds());
console.log(wpEncoded.getAttentionMask());
console.log(wpEncoded.getOffsets());
console.log(wpEncoded.getOverflowing());
console.log(wpEncoded.getSpecialTokensMask());
console.log(wpEncoded.getTypeIds());
console.log(wpEncoded.getWordIds());
```
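## Truncation and padding

The same `Tokenizer` instance can also truncate and pad for you. Below is a minimal sketch reusing the `tokenizer.json` file from the example above; the length of 128 is just an illustration:

```ts
import { Tokenizer } from "tokenizers";

const tokenizer = await Tokenizer.fromFile("tokenizer.json");

// Truncate every sequence to at most 128 tokens, then pad shorter ones up to that length.
tokenizer.setTruncation(128);
tokenizer.setPadding({ maxLength: 128 });

const encoded = await tokenizer.encode("Who is John?", "He is a character in a story.");
console.log(encoded.getTokens()); // tokens, truncated/padded to exactly 128 entries
console.log(encoded.getAttentionMask()); // 1 for real tokens, 0 for padding
```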
## License
[Apache License 2.0](../../LICENSE)
| tokenizers/bindings/node/README.md/0 | {
"file_path": "tokenizers/bindings/node/README.md",
"repo_id": "tokenizers",
"token_count": 651
} |
/* eslint-disable @typescript-eslint/no-explicit-any */
/* eslint-disable @typescript-eslint/no-empty-function */
import { TruncationStrategy, BPE, Encoding, AddedToken, Tokenizer } from '../../'
// jest.mock('../../bindings/tokenizer');
// jest.mock('../../bindings/models', () => ({
// __esModule: true,
// Model: jest.fn()
// }));
// Or:
// jest.mock('../../bindings/models', () => {
// return require('../../bindings/__mocks__/models');
// });
// const TokenizerMock = mocked(Tokenizer);
describe('AddedToken', () => {
it('instantiates with only content', () => {
const addToken = new AddedToken('test', false)
expect(addToken.constructor.name).toEqual('AddedToken')
})
it('instantiates with empty options', () => {
const addToken = new AddedToken('test', false, {})
expect(addToken.constructor.name).toEqual('AddedToken')
})
it('instantiates with options', () => {
const addToken = new AddedToken('test', false, {
leftStrip: true,
rightStrip: true,
singleWord: true,
})
expect(addToken.constructor.name).toEqual('AddedToken')
})
describe('getContent', () => {
it('returns the string content of AddedToken', () => {
const addedToken = new AddedToken('test', false)
expect(addedToken.getContent()).toEqual('test')
})
})
})
describe('Tokenizer', () => {
it('has expected methods', () => {
const model = BPE.empty()
const tokenizer = new Tokenizer(model)
expect(typeof Tokenizer.fromFile).toBe('function')
expect(typeof Tokenizer.fromString).toBe('function')
// expect(typeof Tokenizer.fromPretrained).toBe('function')
expect(typeof tokenizer.addSpecialTokens).toBe('function')
expect(typeof tokenizer.addTokens).toBe('function')
expect(typeof tokenizer.decode).toBe('function')
expect(typeof tokenizer.decodeBatch).toBe('function')
expect(typeof tokenizer.disablePadding).toBe('function')
expect(typeof tokenizer.disableTruncation).toBe('function')
expect(typeof tokenizer.encode).toBe('function')
expect(typeof tokenizer.encodeBatch).toBe('function')
expect(typeof tokenizer.getDecoder).toBe('function')
expect(typeof tokenizer.getNormalizer).toBe('function')
expect(typeof tokenizer.getPostProcessor).toBe('function')
expect(typeof tokenizer.getPreTokenizer).toBe('function')
expect(typeof tokenizer.getVocab).toBe('function')
expect(typeof tokenizer.getVocabSize).toBe('function')
expect(typeof tokenizer.idToToken).toBe('function')
expect(typeof tokenizer.runningTasks).toBe('function')
expect(typeof tokenizer.save).toBe('function')
expect(typeof tokenizer.setDecoder).toBe('function')
expect(typeof tokenizer.setModel).toBe('function')
expect(typeof tokenizer.setNormalizer).toBe('function')
expect(typeof tokenizer.setPadding).toBe('function')
expect(typeof tokenizer.setPostProcessor).toBe('function')
expect(typeof tokenizer.setPreTokenizer).toBe('function')
expect(typeof tokenizer.setTruncation).toBe('function')
expect(typeof tokenizer.tokenToId).toBe('function')
expect(typeof tokenizer.toString).toBe('function')
expect(typeof tokenizer.train).toBe('function')
})
// it('can be instantiated from the hub', async () => {
// let tokenizer: Tokenizer
// let output: Encoding
// tokenizer = Tokenizer.fromPretrained('bert-base-cased')
// output = await tokenizer.encode('Hey there dear friend!', null, { addSpecialTokens: false })
// expect(output.getTokens()).toEqual(['Hey', 'there', 'dear', 'friend', '!'])
// tokenizer = Tokenizer.fromPretrained('anthony/tokenizers-test')
// output = await tokenizer.encode('Hey there dear friend!', null, { addSpecialTokens: false })
// expect(output.getTokens()).toEqual(['hey', 'there', 'dear', 'friend', '!'])
// tokenizer = Tokenizer.fromPretrained('anthony/tokenizers-test', {
// revision: 'gpt-2',
// })
// output = await tokenizer.encode('Hey there dear friend!', null, { addSpecialTokens: false })
// expect(output.getTokens()).toEqual(['Hey', 'Ġthere', 'Ġdear', 'Ġfriend', '!'])
// }, 10000)
describe('addTokens', () => {
it('accepts a list of string as new tokens when initial model is empty', () => {
const model = BPE.empty()
const tokenizer = new Tokenizer(model)
const nbAdd = tokenizer.addTokens(['my', 'name', 'is', 'john', 'pair'])
expect(nbAdd).toBe(5)
})
it('accepts a list of AddedToken as new tokens when initial model is empty', () => {
const model = BPE.empty()
const tokenizer = new Tokenizer(model)
const addedToken = new AddedToken('test', false)
const nbAdd = tokenizer.addAddedTokens([addedToken])
expect(nbAdd).toBe(1)
})
})
describe('encode', () => {
let tokenizer: Tokenizer
beforeEach(() => {
// Clear all instances and calls to constructor and all methods:
// TokenizerMock.mockClear();
const model = BPE.empty()
tokenizer = new Tokenizer(model)
tokenizer.addTokens(['my', 'name', 'is', 'john', 'pair'])
})
it('accepts a pair of strings as parameters', async () => {
const encoding = await tokenizer.encode('my name is john', 'pair')
expect(encoding).toBeDefined()
})
it('accepts a string with a null pair', async () => {
const encoding = await tokenizer.encode('my name is john', null)
expect(encoding).toBeDefined()
})
// TODO
// it("throws if we try to encode a pre-tokenized string without isPretokenized=true", async () => {
// await expect((encode as any)(["my", "name", "is", "john"], null)).rejects.toThrow(
// "encode with isPreTokenized=false expect string"
// );
// });
// it("accepts a pre-tokenized string as parameter", async () => {
// const encoding = await tokenizer.encode(["my", "name", "is", "john"], undefined, {
// isPretokenized: true,
// });
// expect(encoding).toBeDefined();
// });
// it("throws if we try to encodeBatch pre-tokenized strings without isPretokenized=true", async () => {
// await expect((encodeBatch as any)([["my", "name", "is", "john"]])).rejects.toThrow(
// "encodeBatch with isPretokenized=false expects input to be `EncodeInput[]` " +
// "with `EncodeInput = string | [string, string]`"
// );
// });
// it("accepts a pre-tokenized input in encodeBatch", async () => {
// const encoding = await tokenizer.encodeBatch([["my", "name", "is", "john"]], {
// isPretokenized: true,
// });
// expect(encoding).toBeDefined();
// });
it('Encodes correctly if called with only one argument', async () => {
const encoded = await tokenizer.encode('my name is john')
expect(encoded.getIds()).toEqual([0, 1, 2, 3])
})
it('returns an Encoding', async () => {
const encoding = await tokenizer.encode('my name is john', 'pair')
expect(encoding.getAttentionMask()).toEqual([1, 1, 1, 1, 1])
const ids = encoding.getIds()
expect(Array.isArray(ids)).toBe(true)
expect(ids).toHaveLength(5)
for (const id of ids) {
expect(typeof id).toBe('number')
}
expect(encoding.getOffsets()).toEqual([
[0, 2],
[3, 7],
[8, 10],
[11, 15],
[0, 4],
])
expect(encoding.getOverflowing()).toEqual([])
expect(encoding.getSpecialTokensMask()).toEqual([0, 0, 0, 0, 0])
expect(encoding.getTokens()).toEqual(['my', 'name', 'is', 'john', 'pair'])
expect(encoding.getTypeIds()).toEqual([0, 0, 0, 0, 1])
})
describe('when truncation is enabled', () => {
it('truncates with default if no truncation options provided', async () => {
tokenizer.setTruncation(2)
const singleEncoding = await tokenizer.encode('my name is john', null)
expect(singleEncoding.getTokens()).toEqual(['my', 'name'])
const pairEncoding = await tokenizer.encode('my name is john', 'pair')
expect(pairEncoding.getTokens()).toEqual(['my', 'pair'])
})
it('throws an error with strategy `only_second` and no pair is encoded', async () => {
tokenizer.setTruncation(2, { strategy: TruncationStrategy.OnlySecond })
await expect(tokenizer.encode('my name is john', null)).rejects.toThrow(
'Truncation error: Second sequence not provided',
)
})
})
describe('when padding is enabled', () => {
it('does not pad anything with default options', async () => {
tokenizer.setPadding()
const singleEncoding = await tokenizer.encode('my name', null)
expect(singleEncoding.getTokens()).toEqual(['my', 'name'])
const pairEncoding = await tokenizer.encode('my name', 'pair')
expect(pairEncoding.getTokens()).toEqual(['my', 'name', 'pair'])
})
it('pads to the right by default', async () => {
tokenizer.setPadding({ maxLength: 5 })
const singleEncoding = await tokenizer.encode('my name', null)
expect(singleEncoding.getTokens()).toEqual(['my', 'name', '[PAD]', '[PAD]', '[PAD]'])
const pairEncoding = await tokenizer.encode('my name', 'pair')
expect(pairEncoding.getTokens()).toEqual(['my', 'name', 'pair', '[PAD]', '[PAD]'])
})
it('pads to multiple of the given value', async () => {
tokenizer.setPadding({ padToMultipleOf: 8 })
const singleEncoding = await tokenizer.encode('my name', null)
expect(singleEncoding.getTokens()).toHaveLength(8)
const pairEncoding = await tokenizer.encode('my name', 'pair')
expect(pairEncoding.getTokens()).toHaveLength(8)
})
})
})
describe('decode', () => {
let tokenizer: Tokenizer
beforeEach(() => {
const model = BPE.empty()
tokenizer = new Tokenizer(model)
tokenizer.addTokens(['my', 'name', 'is', 'john', 'pair'])
})
it('has its callback called with the decoded string', async () => {
const decode = tokenizer.decode.bind(tokenizer)
expect(await decode([0, 1, 2, 3], true)).toEqual('my name is john')
})
})
describe('decodeBatch', () => {
let tokenizer: Tokenizer
beforeEach(() => {
const model = BPE.empty()
tokenizer = new Tokenizer(model)
tokenizer.addTokens(['my', 'name', 'is', 'john', 'pair'])
})
it('has its callback called with the decoded string', async () => {
const decodeBatch = tokenizer.decodeBatch.bind(tokenizer)
expect(await decodeBatch([[0, 1, 2, 3], [4]], true)).toEqual(['my name is john', 'pair'])
})
})
describe('getVocab', () => {
it('accepts `undefined` as parameter', () => {
const model = BPE.empty()
const tokenizer = new Tokenizer(model)
expect(tokenizer.getVocab(undefined)).toBeDefined()
})
it('returns the vocabulary', () => {
const model = BPE.empty()
const tokenizer = new Tokenizer(model)
tokenizer.addTokens(['my', 'name', 'is', 'john'])
expect(tokenizer.getVocab(true)).toEqual({
my: 0,
name: 1,
is: 2,
john: 3,
})
})
})
describe('getVocabSize', () => {
it('accepts `undefined` as parameter', () => {
const model = BPE.empty()
const tokenizer = new Tokenizer(model)
expect(tokenizer.getVocabSize(undefined)).toBeDefined()
})
})
describe('setTruncation', () => {
it('returns the full truncation configuration', () => {
const model = BPE.empty()
const tokenizer = new Tokenizer(model)
tokenizer.setTruncation(2)
// TODO Return type is weird
// const expectedConfig: TruncationOptions = {
// maxLength: 2,
// strategy: TruncationStrategy.LongestFirst,
// stride: 0,
// direction: TruncationDirection.Right,
// };
// expect(truncation).toEqual(expectedConfig);
})
})
describe('setPadding', () => {
it('returns the full padding params', () => {
const model = BPE.empty()
const tokenizer = new Tokenizer(model)
tokenizer.setPadding()
// TODO Return type is weird
// const expectedConfig: PaddingOptions = {
// direction: PaddingDirection.Right,
// padId: 0,
// padToken: "[PAD]",
// padTypeId: 0,
// };
// expect(padding).toEqual(expectedConfig);
})
})
describe('postProcess', () => {
let tokenizer: Tokenizer
let firstEncoding: Encoding
let secondEncoding: Encoding
beforeAll(() => {
const model = BPE.empty()
tokenizer = new Tokenizer(model)
tokenizer.addTokens(['my', 'name', 'is', 'john', 'pair'])
})
beforeEach(async () => {
firstEncoding = await tokenizer.encode('my name is john', null)
secondEncoding = await tokenizer.encode('pair', null)
tokenizer.setTruncation(2)
tokenizer.setPadding({ maxLength: 5 })
})
it('returns correctly with a single Encoding param', () => {
const encoding = tokenizer.postProcess(firstEncoding)
expect(encoding.getTokens()).toEqual(['my', 'name', '[PAD]', '[PAD]', '[PAD]'])
})
it('returns correctly with `undefined` as second and third parameters', () => {
const encoding = tokenizer.postProcess(firstEncoding, undefined, undefined)
expect(encoding.getTokens()).toEqual(['my', 'name', '[PAD]', '[PAD]', '[PAD]'])
})
it('returns correctly with 2 encodings', () => {
const encoding = tokenizer.postProcess(firstEncoding, secondEncoding)
expect(encoding.getTokens()).toEqual(['my', 'pair', '[PAD]', '[PAD]', '[PAD]'])
})
})
})
| tokenizers/bindings/node/lib/bindings/tokenizer.test.ts/0 | {
"file_path": "tokenizers/bindings/node/lib/bindings/tokenizer.test.ts",
"repo_id": "tokenizers",
"token_count": 5268
} |
use crate::tokenizer::PaddingOptions;
use napi::bindgen_prelude::*;
use napi_derive::napi;
use tokenizers::utils::truncation::TruncationDirection;
use tokenizers::Encoding;
#[napi(js_name = "Encoding")]
#[derive(Clone, Default)]
pub struct JsEncoding {
pub(crate) encoding: Option<Encoding>,
}
impl From<Encoding> for JsEncoding {
fn from(value: Encoding) -> Self {
Self {
encoding: Some(value),
}
}
}
impl TryFrom<JsEncoding> for Encoding {
type Error = Error;
fn try_from(value: JsEncoding) -> Result<Self> {
value
.encoding
.ok_or(Error::from_reason("Uninitialized encoding".to_string()))
}
}
#[napi(string_enum, js_name = "TruncationDirection")]
pub enum JsTruncationDirection {
Left,
Right,
}
impl From<JsTruncationDirection> for TruncationDirection {
fn from(value: JsTruncationDirection) -> Self {
match value {
JsTruncationDirection::Left => TruncationDirection::Left,
JsTruncationDirection::Right => TruncationDirection::Right,
}
}
}
impl TryFrom<String> for JsTruncationDirection {
type Error = Error;
fn try_from(value: String) -> Result<JsTruncationDirection> {
match value.as_str() {
"left" => Ok(JsTruncationDirection::Left),
"right" => Ok(JsTruncationDirection::Right),
s => Err(Error::from_reason(format!(
"{s:?} is not a valid direction"
))),
}
}
}
#[napi(string_enum, js_name = "TruncationStrategy")]
pub enum JsTruncationStrategy {
LongestFirst,
OnlyFirst,
OnlySecond,
}
impl From<JsTruncationStrategy> for tokenizers::TruncationStrategy {
fn from(value: JsTruncationStrategy) -> Self {
match value {
JsTruncationStrategy::LongestFirst => tokenizers::TruncationStrategy::LongestFirst,
JsTruncationStrategy::OnlyFirst => tokenizers::TruncationStrategy::OnlyFirst,
JsTruncationStrategy::OnlySecond => tokenizers::TruncationStrategy::OnlySecond,
}
}
}
#[napi]
impl JsEncoding {
#[napi(constructor)]
pub fn new() -> Self {
Self { encoding: None }
}
#[napi]
pub fn get_length(&self) -> u32 {
self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.get_ids()
.len() as u32
}
#[napi]
pub fn get_n_sequences(&self) -> u32 {
self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.n_sequences() as u32
}
#[napi]
pub fn get_ids(&self) -> Vec<u32> {
self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.get_ids()
.to_vec()
}
#[napi]
pub fn get_type_ids(&self) -> Vec<u32> {
self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.get_type_ids()
.to_vec()
}
#[napi]
pub fn get_attention_mask(&self) -> Vec<u32> {
self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.get_attention_mask()
.to_vec()
}
#[napi]
pub fn get_special_tokens_mask(&self) -> Vec<u32> {
self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.get_special_tokens_mask()
.to_vec()
}
#[napi]
pub fn get_tokens(&self) -> Vec<String> {
self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.get_tokens()
.to_vec()
}
#[napi]
pub fn get_offsets(&self) -> Vec<Vec<u32>> {
self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.get_offsets()
.iter()
.map(|(a, b)| vec![*a as u32, *b as u32])
.collect()
}
#[napi]
pub fn get_word_ids(&self) -> Vec<Option<u32>> {
self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.get_word_ids()
.to_vec()
}
#[napi]
pub fn char_to_token(&self, pos: u32, seq_id: Option<u32>) -> Option<u32> {
let seq_id = seq_id.unwrap_or(0);
self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.char_to_token(pos as usize, seq_id as usize)
.map(|i| i as u32)
}
#[napi]
pub fn char_to_word(&self, pos: u32, seq_id: Option<u32>) -> Option<u32> {
let seq_id = seq_id.unwrap_or(0);
self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.char_to_word(pos as usize, seq_id as usize)
}
#[napi]
pub fn pad(&mut self, length: u32, options: Option<PaddingOptions>) -> Result<()> {
let params: tokenizers::PaddingParams = options.unwrap_or_default().try_into()?;
self.encoding.as_mut().expect("Uninitialized Encoding").pad(
length as usize,
params.pad_id,
params.pad_type_id,
&params.pad_token,
params.direction,
);
Ok(())
}
#[napi]
pub fn truncate(
&mut self,
length: u32,
stride: Option<u32>,
direction: Option<Either<String, JsTruncationDirection>>,
) -> Result<()> {
let stride = stride.unwrap_or_default();
let direction = match direction {
None => TruncationDirection::Left,
Some(Either::A(s)) => match s.as_str() {
"left" => TruncationDirection::Left,
"right" => TruncationDirection::Right,
d => {
return Err(Error::from_reason(format!(
"{d} is not a valid truncation direction"
)));
}
},
Some(Either::B(t)) => t.into(),
};
self
.encoding
.as_mut()
.expect("Uninitialized Encoding")
.truncate(length as usize, stride as usize, direction);
Ok(())
}
#[napi(ts_return_type = "[number, number] | null | undefined")]
pub fn word_to_tokens(&self, env: Env, word: u32, seq_id: Option<u32>) -> Result<Option<Array>> {
let seq_id = seq_id.unwrap_or(0);
if let Some((a, b)) = self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.word_to_tokens(word, seq_id as usize)
{
let mut arr = env.create_array(2)?;
arr.set(0, env.create_uint32(a as u32)?)?;
arr.set(1, env.create_uint32(b as u32)?)?;
Ok(Some(arr))
} else {
Ok(None)
}
}
#[napi(ts_return_type = "[number, number] | null | undefined")]
pub fn word_to_chars(&self, env: Env, word: u32, seq_id: Option<u32>) -> Result<Option<Array>> {
let seq_id = seq_id.unwrap_or(0);
if let Some((a, b)) = self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.word_to_chars(word, seq_id as usize)
{
let mut arr = env.create_array(2)?;
arr.set(0, env.create_uint32(a as u32)?)?;
arr.set(1, env.create_uint32(b as u32)?)?;
Ok(Some(arr))
} else {
Ok(None)
}
}
#[napi(ts_return_type = "[number, [number, number]] | null | undefined")]
pub fn token_to_chars(&self, env: Env, token: u32) -> Result<Option<Array>> {
if let Some((_, (start, stop))) = self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.token_to_chars(token as usize)
{
let mut offsets = env.create_array(2)?;
offsets.set(0, env.create_uint32(start as u32)?)?;
offsets.set(1, env.create_uint32(stop as u32)?)?;
Ok(Some(offsets))
} else {
Ok(None)
}
}
#[napi]
pub fn token_to_word(&self, token: u32) -> Result<Option<u32>> {
if let Some((_, index)) = self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.token_to_word(token as usize)
{
Ok(Some(index))
} else {
Ok(None)
}
}
#[napi]
pub fn get_overflowing(&self) -> Vec<JsEncoding> {
self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.get_overflowing()
.clone()
.into_iter()
.map(|enc| JsEncoding {
encoding: Some(enc),
})
.collect()
}
#[napi]
pub fn get_sequence_ids(&self) -> Vec<Option<u32>> {
self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.get_sequence_ids()
.into_iter()
.map(|s| s.map(|id| id as u32))
.collect()
}
#[napi]
pub fn token_to_sequence(&self, token: u32) -> Option<u32> {
self
.encoding
.as_ref()
.expect("Uninitialized Encoding")
.token_to_sequence(token as usize)
.map(|s| s as u32)
}
}
| tokenizers/bindings/node/src/encoding.rs/0 | {
"file_path": "tokenizers/bindings/node/src/encoding.rs",
"repo_id": "tokenizers",
"token_count": 3778
} |
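The N-API wrapper above mirrors the Encoding object that the other bindings expose. As a rough Python-side sketch of the same surface (the model name is only an assumption; any tokenizer works):

from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("bert-base-uncased")  # assumed; any tokenizer works
encoding = tokenizer.encode("Hello there!")
print(encoding.ids, encoding.tokens, encoding.offsets)
print(encoding.char_to_token(1))   # index of the token covering character 1
print(encoding.word_to_tokens(0))  # (start, end) token span of word 0
encoding.truncate(8)               # truncate in place, keeping at most 8 tokens
encoding.pad(16)                   # pad in place up to length 16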
from .. import decoders
Decoder = decoders.Decoder
ByteLevel = decoders.ByteLevel
Replace = decoders.Replace
WordPiece = decoders.WordPiece
ByteFallback = decoders.ByteFallback
Fuse = decoders.Fuse
Strip = decoders.Strip
Metaspace = decoders.Metaspace
BPEDecoder = decoders.BPEDecoder
CTC = decoders.CTC
Sequence = decoders.Sequence
DecodeStream = decoders.DecodeStream
| tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.py",
"repo_id": "tokenizers",
"token_count": 140
} |
# Generated content DO NOT EDIT
class PostProcessor:
"""
Base class for all post-processors
This class is not supposed to be instantiated directly. Instead, any implementation of
a PostProcessor will return an instance of this class when instantiated.
"""
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
class BertProcessing(PostProcessor):
"""
This post-processor takes care of adding the special tokens needed by
a Bert model:
- a SEP token
- a CLS token
Args:
sep (:obj:`Tuple[str, int]`):
A tuple with the string representation of the SEP token, and its id
cls (:obj:`Tuple[str, int]`):
A tuple with the string representation of the CLS token, and its id
"""
def __init__(self, sep, cls):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
class ByteLevel(PostProcessor):
"""
This post-processor takes care of trimming the offsets.
By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
want the offsets to include these whitespaces, then this PostProcessor must be used.
Args:
trim_offsets (:obj:`bool`):
Whether to trim the whitespaces from the produced offsets.
"""
def __init__(self, trim_offsets=True):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
class RobertaProcessing(PostProcessor):
"""
This post-processor takes care of adding the special tokens needed by
a Roberta model:
- a SEP token
- a CLS token
It also takes care of trimming the offsets.
By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
want the offsets to include these whitespaces, then this PostProcessor should be initialized
with :obj:`trim_offsets=True`
Args:
sep (:obj:`Tuple[str, int]`):
A tuple with the string representation of the SEP token, and its id
cls (:obj:`Tuple[str, int]`):
A tuple with the string representation of the CLS token, and its id
trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to trim the whitespaces from the produced offsets.
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether the add_prefix_space option was enabled during pre-tokenization. This
is relevant because it defines the way the offsets are trimmed out.
"""
def __init__(self, sep, cls, trim_offsets=True, add_prefix_space=True):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
class Sequence(PostProcessor):
"""
Sequence Processor
Args:
processors (:obj:`List[PostProcessor]`)
The processors that need to be chained
"""
def __init__(self, processors):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
class TemplateProcessing(PostProcessor):
"""
Provides a way to specify templates in order to add the special tokens to each
input sequence as relevant.
Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to
delimitate each sequence. :obj:`[CLS]` is always used at the beginning of the first
sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair
sequences. The final result looks like this:
- Single sequence: :obj:`[CLS] Hello there [SEP]`
- Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]`
With the type ids as following::
[CLS] ... [SEP] ... [SEP]
0 0 0 1 1
You can achieve such behavior using a TemplateProcessing::
TemplateProcessing(
single="[CLS] $0 [SEP]",
pair="[CLS] $A [SEP] $B:1 [SEP]:1",
special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
)
In this example, each input sequence is identified using a ``$`` construct. This identifier
lets us specify each input sequence, and the type_id to use. When nothing is specified,
it uses the default values. Here are the different ways to specify it:
- Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B``
- Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ...
- Specifying both: ``$A:0``, ``$B:1``, ...
The same construct is used for special tokens: ``<identifier>(:<type_id>)?``.
**Warning**: You must ensure that you are giving the correct tokens/ids as these
will be added to the Encoding without any further check. If the given ids correspond
to something totally different in a `Tokenizer` using this `PostProcessor`, it
might lead to unexpected results.
Args:
single (:obj:`Template`):
The template used for single sequences
pair (:obj:`Template`):
The template used when both sequences are specified
special_tokens (:obj:`Tokens`):
The list of special tokens used in each sequences
Types:
Template (:obj:`str` or :obj:`List`):
- If a :obj:`str` is provided, the whitespace is used as delimiter between tokens
- If a :obj:`List[str]` is provided, a list of tokens
Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`):
- A :obj:`Tuple` with both a token and its associated ID, in any order
- A :obj:`dict` with the following keys:
- "id": :obj:`str` => The special token id, as specified in the Template
- "ids": :obj:`List[int]` => The associated IDs
- "tokens": :obj:`List[str]` => The associated tokens
The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have
the same length.
"""
def __init__(self, single, pair, special_tokens):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
| tokenizers/bindings/python/py_src/tokenizers/processors/__init__.pyi/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/processors/__init__.pyi",
"repo_id": "tokenizers",
"token_count": 4779
} |
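To make the TemplateProcessing stub above concrete, here is a minimal Python sketch. The special-token ids 1 and 2 are assumptions and must match the ids of "[CLS]" and "[SEP]" in the actual vocabulary:

from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.processors import TemplateProcessing

tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)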
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};
use crate::token::PyToken;
use crate::trainers::PyTrainer;
use pyo3::exceptions;
use pyo3::prelude::*;
use pyo3::types::*;
use serde::{Deserialize, Serialize};
use tk::models::bpe::{BpeBuilder, Merges, Vocab, BPE};
use tk::models::unigram::Unigram;
use tk::models::wordlevel::WordLevel;
use tk::models::wordpiece::{WordPiece, WordPieceBuilder};
use tk::models::ModelWrapper;
use tk::{Model, Token};
use tokenizers as tk;
use super::error::{deprecation_warning, ToPyResult};
/// Base class for all models
///
/// The model represents the actual tokenization algorithm. This is the part that
/// will contain and manage the learned vocabulary.
///
/// This class cannot be constructed directly. Please use one of the concrete models.
#[pyclass(module = "tokenizers.models", name = "Model", subclass)]
#[derive(Clone, Serialize, Deserialize)]
#[serde(transparent)]
pub struct PyModel {
pub model: Arc<RwLock<ModelWrapper>>,
}
impl PyModel {
pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> {
let base = self.clone();
Ok(match *self.model.as_ref().read().unwrap() {
ModelWrapper::BPE(_) => Py::new(py, (PyBPE {}, base))?
.into_pyobject(py)?
.into_any()
.into(),
ModelWrapper::WordPiece(_) => Py::new(py, (PyWordPiece {}, base))?
.into_pyobject(py)?
.into_any()
.into(),
ModelWrapper::WordLevel(_) => Py::new(py, (PyWordLevel {}, base))?
.into_pyobject(py)?
.into_any()
.into(),
ModelWrapper::Unigram(_) => Py::new(py, (PyUnigram {}, base))?
.into_pyobject(py)?
.into_any()
.into(),
})
}
}
impl Model for PyModel {
type Trainer = PyTrainer;
fn tokenize(&self, tokens: &str) -> tk::Result<Vec<Token>> {
self.model.read().unwrap().tokenize(tokens)
}
fn token_to_id(&self, token: &str) -> Option<u32> {
self.model.read().unwrap().token_to_id(token)
}
fn id_to_token(&self, id: u32) -> Option<String> {
self.model.read().unwrap().id_to_token(id)
}
fn get_vocab(&self) -> HashMap<String, u32> {
self.model.read().unwrap().get_vocab()
}
fn get_vocab_size(&self) -> usize {
self.model.read().unwrap().get_vocab_size()
}
fn save(&self, folder: &Path, name: Option<&str>) -> tk::Result<Vec<PathBuf>> {
self.model.read().unwrap().save(folder, name)
}
fn get_trainer(&self) -> Self::Trainer {
self.model.read().unwrap().get_trainer().into()
}
}
impl<I> From<I> for PyModel
where
I: Into<ModelWrapper>,
{
fn from(model: I) -> Self {
Self {
model: Arc::new(RwLock::new(model.into())),
}
}
}
#[pymethods]
impl PyModel {
#[new]
#[pyo3(text_signature = None)]
fn __new__() -> Self {
// Instantiate a default empty model. This doesn't really make sense, but we need
// to be able to instantiate an empty model for pickle capabilities.
PyModel {
model: Arc::new(RwLock::new(BPE::default().into())),
}
}
fn __getstate__(&self, py: Python) -> PyResult<PyObject> {
let data = serde_json::to_string(&self.model).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to pickle Model: {}",
e
))
})?;
Ok(PyBytes::new(py, data.as_bytes()).into())
}
fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> {
match state.extract::<&[u8]>(py) {
Ok(s) => {
self.model = serde_json::from_slice(s).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to unpickle Model: {}",
e
))
})?;
Ok(())
}
Err(e) => Err(e),
}
}
/// Tokenize a sequence
///
/// Args:
/// sequence (:obj:`str`):
/// A sequence to tokenize
///
/// Returns:
/// A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
#[pyo3(text_signature = "(self, sequence)")]
fn tokenize(&self, sequence: &str) -> PyResult<Vec<PyToken>> {
Ok(ToPyResult(self.model.read().unwrap().tokenize(sequence))
.into_py()?
.into_iter()
.map(|t| t.into())
.collect())
}
/// Get the ID associated to a token
///
/// Args:
/// token (:obj:`str`):
/// A token to convert to an ID
///
/// Returns:
/// :obj:`int`: The ID associated to the token
#[pyo3(text_signature = "(self, tokens)")]
fn token_to_id(&self, token: &str) -> Option<u32> {
self.model.read().unwrap().token_to_id(token)
}
/// Get the token associated to an ID
///
/// Args:
/// id (:obj:`int`):
/// An ID to convert to a token
///
/// Returns:
/// :obj:`str`: The token associated to the ID
#[pyo3(text_signature = "(self, id)")]
fn id_to_token(&self, id: u32) -> Option<String> {
self.model.read().unwrap().id_to_token(id)
}
/// Save the current model
///
/// Save the current model in the given folder, using the given prefix for the various
/// files that will get created.
/// Any file with the same name that already exists in this folder will be overwritten.
///
/// Args:
/// folder (:obj:`str`):
/// The path to the target folder in which to save the various files
///
/// prefix (:obj:`str`, `optional`):
/// An optional prefix, used to prefix each file name
///
/// Returns:
/// :obj:`List[str]`: The list of saved files
#[pyo3(signature = (folder, prefix=None, name=None), text_signature = "(self, folder, prefix)")]
fn save<'a>(
&self,
py: Python<'_>,
folder: &str,
mut prefix: Option<&'a str>,
name: Option<&'a str>,
) -> PyResult<Vec<String>> {
if name.is_some() {
deprecation_warning(
py,
"0.10.0",
"Parameter `name` of Model.save has been renamed `prefix`",
)?;
if prefix.is_none() {
prefix = name;
}
}
let saved: PyResult<Vec<_>> =
ToPyResult(self.model.read().unwrap().save(Path::new(folder), prefix)).into();
Ok(saved?
.into_iter()
.map(|path| path.to_string_lossy().into_owned())
.collect())
}
/// Get the associated :class:`~tokenizers.trainers.Trainer`
///
/// Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
/// :class:`~tokenizers.models.Model`.
///
/// Returns:
/// :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
#[pyo3(text_signature = "(self)")]
fn get_trainer(&self, py: Python<'_>) -> PyResult<PyObject> {
PyTrainer::from(self.model.read().unwrap().get_trainer()).get_as_subtype(py)
}
fn __repr__(&self) -> PyResult<String> {
crate::utils::serde_pyo3::repr(self)
.map_err(|e| exceptions::PyException::new_err(e.to_string()))
}
fn __str__(&self) -> PyResult<String> {
crate::utils::serde_pyo3::to_string(self)
.map_err(|e| exceptions::PyException::new_err(e.to_string()))
}
}
/// An implementation of the BPE (Byte-Pair Encoding) algorithm
///
/// Args:
/// vocab (:obj:`Dict[str, int]`, `optional`):
/// A dictionary of string keys and their ids :obj:`{"am": 0,...}`
///
/// merges (:obj:`List[Tuple[str, str]]`, `optional`):
/// A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]`
///
/// cache_capacity (:obj:`int`, `optional`):
/// The number of words that the BPE cache can contain. The cache speeds up
/// the process by keeping the results of merge operations for a number of
/// words.
///
/// dropout (:obj:`float`, `optional`):
/// A float between 0 and 1 that represents the BPE dropout to use.
///
/// unk_token (:obj:`str`, `optional`):
/// The unknown token to be used by the model.
///
/// continuing_subword_prefix (:obj:`str`, `optional`):
/// The prefix to attach to subword units that don't represent a beginning of word.
///
/// end_of_word_suffix (:obj:`str`, `optional`):
/// The suffix to attach to subword units that represent an end of word.
///
/// fuse_unk (:obj:`bool`, `optional`):
/// Whether to fuse any subsequent unknown tokens into a single one
///
/// byte_fallback (:obj:`bool`, `optional`):
/// Whether to use spm byte-fallback trick (defaults to False)
///
/// ignore_merges (:obj:`bool`, `optional`):
/// Whether or not to match tokens with the vocab before using merges.
#[pyclass(extends=PyModel, module = "tokenizers.models", name = "BPE")]
pub struct PyBPE {}
impl PyBPE {
fn with_builder(
mut builder: BpeBuilder,
kwargs: Option<&Bound<'_, PyDict>>,
) -> PyResult<(Self, PyModel)> {
if let Some(kwargs) = kwargs {
for (key, value) in kwargs {
let key: String = key.extract()?;
match key.as_ref() {
"cache_capacity" => builder = builder.cache_capacity(value.extract()?),
"dropout" => {
if let Some(dropout) = value.extract()? {
builder = builder.dropout(dropout);
}
}
"unk_token" => {
if let Some(unk) = value.extract()? {
builder = builder.unk_token(unk);
}
}
"continuing_subword_prefix" => {
builder = builder.continuing_subword_prefix(value.extract()?)
}
"end_of_word_suffix" => builder = builder.end_of_word_suffix(value.extract()?),
"fuse_unk" => builder = builder.fuse_unk(value.extract()?),
"byte_fallback" => builder = builder.byte_fallback(value.extract()?),
"ignore_merges" => builder = builder.ignore_merges(value.extract()?),
_ => println!("Ignored unknown kwarg option {}", key),
};
}
}
match builder.build() {
Err(e) => Err(exceptions::PyException::new_err(format!(
"Error while initializing BPE: {}",
e
))),
Ok(bpe) => Ok((PyBPE {}, bpe.into())),
}
}
}
macro_rules! getter {
($self: ident, $variant: ident, $($name: tt)+) => {{
let super_ = $self.as_ref();
let model = super_.model.read().unwrap();
if let ModelWrapper::$variant(ref mo) = *model {
mo.$($name)+
} else {
unreachable!()
}
}};
}
macro_rules! setter {
($self: ident, $variant: ident, $name: ident, $value: expr) => {{
let super_ = $self.as_ref();
let mut model = super_.model.write().unwrap();
if let ModelWrapper::$variant(ref mut mo) = *model {
mo.$name = $value;
}
}};
}
#[derive(FromPyObject)]
enum PyVocab {
Vocab(Vocab),
Filename(String),
}
#[derive(FromPyObject)]
enum PyMerges {
Merges(Merges),
Filename(String),
}
#[pymethods]
impl PyBPE {
#[getter]
fn get_dropout(self_: PyRef<Self>) -> Option<f32> {
getter!(self_, BPE, dropout)
}
#[setter]
fn set_dropout(self_: PyRef<Self>, dropout: Option<f32>) {
setter!(self_, BPE, dropout, dropout);
}
#[getter]
fn get_unk_token(self_: PyRef<Self>) -> Option<String> {
getter!(self_, BPE, unk_token.clone())
}
#[setter]
fn set_unk_token(self_: PyRef<Self>, unk_token: Option<String>) {
setter!(self_, BPE, unk_token, unk_token);
}
#[getter]
fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> {
getter!(self_, BPE, continuing_subword_prefix.clone())
}
#[setter]
fn set_continuing_subword_prefix(
self_: PyRef<Self>,
continuing_subword_prefix: Option<String>,
) {
setter!(
self_,
BPE,
continuing_subword_prefix,
continuing_subword_prefix
);
}
#[getter]
fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> {
getter!(self_, BPE, end_of_word_suffix.clone())
}
#[setter]
fn set_end_of_word_suffix(self_: PyRef<Self>, end_of_word_suffix: Option<String>) {
setter!(self_, BPE, end_of_word_suffix, end_of_word_suffix);
}
#[getter]
fn get_fuse_unk(self_: PyRef<Self>) -> bool {
getter!(self_, BPE, fuse_unk)
}
#[setter]
fn set_fuse_unk(self_: PyRef<Self>, fuse_unk: bool) {
setter!(self_, BPE, fuse_unk, fuse_unk);
}
#[getter]
fn get_byte_fallback(self_: PyRef<Self>) -> bool {
getter!(self_, BPE, byte_fallback)
}
#[setter]
fn set_byte_fallback(self_: PyRef<Self>, byte_fallback: bool) {
setter!(self_, BPE, byte_fallback, byte_fallback);
}
#[getter]
fn get_ignore_merges(self_: PyRef<Self>) -> bool {
getter!(self_, BPE, ignore_merges)
}
#[setter]
fn set_ignore_merges(self_: PyRef<Self>, ignore_merges: bool) {
setter!(self_, BPE, ignore_merges, ignore_merges);
}
#[new]
#[pyo3(
signature = (vocab=None, merges=None, **kwargs),
text_signature = "(self, vocab=None, merges=None, cache_capacity=None, dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=None, byte_fallback=False, ignore_merges=False)")]
fn new(
py: Python<'_>,
vocab: Option<PyVocab>,
merges: Option<PyMerges>,
kwargs: Option<&Bound<'_, PyDict>>,
) -> PyResult<(Self, PyModel)> {
if (vocab.is_some() && merges.is_none()) || (vocab.is_none() && merges.is_some()) {
return Err(exceptions::PyValueError::new_err(
"`vocab` and `merges` must both be specified",
));
}
let mut builder = BPE::builder();
if let (Some(vocab), Some(merges)) = (vocab, merges) {
match (vocab, merges) {
(PyVocab::Vocab(vocab), PyMerges::Merges(merges)) => {
builder = builder.vocab_and_merges(vocab, merges);
}
(PyVocab::Filename(vocab_filename), PyMerges::Filename(merges_filename)) => {
deprecation_warning(
py,
"0.9.0",
"BPE.__init__ will not create from files anymore, try `BPE.from_file` instead",
)?;
builder =
builder.files(vocab_filename.to_string(), merges_filename.to_string());
}
_ => {
return Err(exceptions::PyValueError::new_err(
"`vocab` and `merges` must both be from memory or both be filenames",
));
}
}
}
PyBPE::with_builder(builder, kwargs)
}
/// Read a :obj:`vocab.json` and a :obj:`merges.txt` files
///
/// This method provides a way to read and parse the content of these files,
/// returning the relevant data structures. If you want to instantiate some BPE models
/// from memory, this method gives you the expected input from the standard files.
///
/// Args:
/// vocab (:obj:`str`):
/// The path to a :obj:`vocab.json` file
///
/// merges (:obj:`str`):
/// The path to a :obj:`merges.txt` file
///
/// Returns:
/// A :obj:`Tuple` with the vocab and the merges:
/// The vocabulary and merges loaded into memory
#[staticmethod]
#[pyo3(text_signature = "(self, vocab, merges)")]
fn read_file(vocab: &str, merges: &str) -> PyResult<(Vocab, Merges)> {
BPE::read_file(vocab, merges).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while reading vocab & merges files: {}",
e
))
})
}
/// Instantiate a BPE model from the given files.
///
/// This method is roughly equivalent to doing::
///
/// vocab, merges = BPE.read_file(vocab_filename, merges_filename)
/// bpe = BPE(vocab, merges)
///
/// If you don't need to keep the :obj:`vocab, merges` values lying around,
/// this method is more optimized than manually calling
/// :meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE`
///
/// Args:
/// vocab (:obj:`str`):
/// The path to a :obj:`vocab.json` file
///
/// merges (:obj:`str`):
/// The path to a :obj:`merges.txt` file
///
/// Returns:
/// :class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files
#[classmethod]
#[pyo3(signature = (vocab, merges, **kwargs))]
#[pyo3(text_signature = "(cls, vocab, merges, **kwargs)")]
fn from_file(
_cls: &Bound<'_, PyType>,
py: Python,
vocab: &str,
merges: &str,
kwargs: Option<&Bound<'_, PyDict>>,
) -> PyResult<Py<Self>> {
let (vocab, merges) = BPE::read_file(vocab, merges).map_err(|e| {
exceptions::PyException::new_err(format!("Error while reading BPE files: {}", e))
})?;
Py::new(
py,
PyBPE::new(
py,
Some(PyVocab::Vocab(vocab)),
Some(PyMerges::Merges(merges)),
kwargs,
)?,
)
}
/// Clears the internal cache
#[pyo3(signature = ())]
#[pyo3(text_signature = "(self)")]
fn _clear_cache(self_: PyRef<Self>) -> PyResult<()> {
let super_ = self_.as_ref();
let mut model = super_.model.write().map_err(|e| {
exceptions::PyException::new_err(format!("Error while clearing BPE cache: {}", e))
})?;
model.clear_cache();
Ok(())
}
/// Resize the internal cache
#[pyo3(signature = (capacity))]
#[pyo3(text_signature = "(self, capacity)")]
fn _resize_cache(self_: PyRef<Self>, capacity: usize) -> PyResult<()> {
let super_ = self_.as_ref();
let mut model = super_.model.write().map_err(|e| {
exceptions::PyException::new_err(format!("Error while resizing BPE cache: {}", e))
})?;
model.resize_cache(capacity);
Ok(())
}
}
/// An implementation of the WordPiece algorithm
///
/// Args:
/// vocab (:obj:`Dict[str, int]`, `optional`):
/// A dictionary of string keys and their ids :obj:`{"am": 0,...}`
///
/// unk_token (:obj:`str`, `optional`):
/// The unknown token to be used by the model.
///
/// max_input_chars_per_word (:obj:`int`, `optional`):
/// The maximum number of characters to authorize in a single word.
#[pyclass(extends=PyModel, module = "tokenizers.models", name = "WordPiece")]
pub struct PyWordPiece {}
impl PyWordPiece {
fn with_builder(
mut builder: WordPieceBuilder,
kwargs: Option<&Bound<'_, PyDict>>,
) -> PyResult<(Self, PyModel)> {
if let Some(kwargs) = kwargs {
for (key, val) in kwargs {
let key: String = key.extract()?;
match key.as_ref() {
"unk_token" => {
builder = builder.unk_token(val.extract()?);
}
"max_input_chars_per_word" => {
builder = builder.max_input_chars_per_word(val.extract()?);
}
"continuing_subword_prefix" => {
builder = builder.continuing_subword_prefix(val.extract()?);
}
_ => println!("Ignored unknown kwargs option {}", key),
}
}
}
match builder.build() {
Err(e) => Err(exceptions::PyException::new_err(format!(
"Error while initializing WordPiece: {}",
e
))),
Ok(wordpiece) => Ok((PyWordPiece {}, wordpiece.into())),
}
}
}
#[pymethods]
impl PyWordPiece {
#[getter]
fn get_unk_token(self_: PyRef<Self>) -> String {
getter!(self_, WordPiece, unk_token.clone())
}
#[setter]
fn set_unk_token(self_: PyRef<Self>, unk_token: String) {
setter!(self_, WordPiece, unk_token, unk_token);
}
#[getter]
fn get_continuing_subword_prefix(self_: PyRef<Self>) -> String {
getter!(self_, WordPiece, continuing_subword_prefix.clone())
}
#[setter]
fn set_continuing_subword_prefix(self_: PyRef<Self>, continuing_subword_prefix: String) {
setter!(
self_,
WordPiece,
continuing_subword_prefix,
continuing_subword_prefix
);
}
#[getter]
fn get_max_input_chars_per_word(self_: PyRef<Self>) -> usize {
getter!(self_, WordPiece, max_input_chars_per_word)
}
#[setter]
fn set_max_input_chars_per_word(self_: PyRef<Self>, max: usize) {
setter!(self_, WordPiece, max_input_chars_per_word, max);
}
#[new]
#[pyo3(signature = (vocab=None, **kwargs), text_signature = "(self, vocab, unk_token, max_input_chars_per_word)")]
fn new(
py: Python<'_>,
vocab: Option<PyVocab>,
kwargs: Option<&Bound<'_, PyDict>>,
) -> PyResult<(Self, PyModel)> {
let mut builder = WordPiece::builder();
if let Some(vocab) = vocab {
match vocab {
PyVocab::Vocab(vocab) => {
builder = builder.vocab(vocab);
}
PyVocab::Filename(vocab_filename) => {
deprecation_warning(
py,
"0.9.0",
"WordPiece.__init__ will not create from files anymore, try `WordPiece.from_file` instead",
)?;
builder = builder.files(vocab_filename.to_string());
}
}
}
PyWordPiece::with_builder(builder, kwargs)
}
/// Read a :obj:`vocab.txt` file
///
/// This method provides a way to read and parse the content of a standard `vocab.txt`
/// file as used by the WordPiece Model, returning the relevant data structures. If you
/// want to instantiate some WordPiece models from memory, this method gives you the
/// expected input from the standard files.
///
/// Args:
/// vocab (:obj:`str`):
/// The path to a :obj:`vocab.txt` file
///
/// Returns:
/// :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
#[staticmethod]
#[pyo3(text_signature = "(vocab)")]
fn read_file(vocab: &str) -> PyResult<Vocab> {
WordPiece::read_file(vocab).map_err(|e| {
exceptions::PyException::new_err(format!("Error while reading WordPiece file: {}", e))
})
}
/// Instantiate a WordPiece model from the given file
///
/// This method is roughly equivalent to doing::
///
/// vocab = WordPiece.read_file(vocab_filename)
/// wordpiece = WordPiece(vocab)
///
/// If you don't need to keep the :obj:`vocab` values lying around, this method is
/// more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to
/// initialize a :class:`~tokenizers.models.WordPiece`
///
/// Args:
/// vocab (:obj:`str`):
/// The path to a :obj:`vocab.txt` file
///
/// Returns:
/// :class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file
#[classmethod]
#[pyo3(signature = (vocab, **kwargs))]
#[pyo3(text_signature = "(vocab, **kwargs)")]
fn from_file(
_cls: &Bound<'_, PyType>,
py: Python,
vocab: &str,
kwargs: Option<&Bound<'_, PyDict>>,
) -> PyResult<Py<Self>> {
let vocab = WordPiece::read_file(vocab).map_err(|e| {
exceptions::PyException::new_err(format!("Error while reading WordPiece file: {}", e))
})?;
Py::new(
py,
PyWordPiece::new(py, Some(PyVocab::Vocab(vocab)), kwargs)?,
)
}
}
/// An implementation of the WordLevel algorithm
///
/// Most simple tokenizer model based on mapping tokens to their corresponding id.
///
/// Args:
/// vocab (:obj:`Dict[str, int]`, `optional`):
/// A dictionary of string keys and their ids :obj:`{"am": 0,...}`
///
/// unk_token (:obj:`str`, `optional`):
/// The unknown token to be used by the model.
#[pyclass(extends=PyModel, module = "tokenizers.models", name = "WordLevel")]
pub struct PyWordLevel {}
#[pymethods]
impl PyWordLevel {
#[getter]
fn get_unk_token(self_: PyRef<Self>) -> String {
getter!(self_, WordLevel, unk_token.clone())
}
#[setter]
fn set_unk_token(self_: PyRef<Self>, unk_token: String) {
setter!(self_, WordLevel, unk_token, unk_token);
}
#[new]
#[pyo3(signature = (vocab=None, unk_token = None), text_signature = "(self, vocab, unk_token)")]
fn new(
py: Python<'_>,
vocab: Option<PyVocab>,
unk_token: Option<String>,
) -> PyResult<(Self, PyModel)> {
let mut builder = WordLevel::builder();
if let Some(vocab) = vocab {
match vocab {
PyVocab::Vocab(vocab) => {
builder = builder.vocab(vocab);
}
PyVocab::Filename(vocab_filename) => {
deprecation_warning(
py,
"0.9.0",
"WordLevel.__init__ will not create from files anymore, \
try `WordLevel.from_file` instead",
)?;
builder = builder.files(vocab_filename.to_string());
}
};
}
if let Some(unk_token) = unk_token {
builder = builder.unk_token(unk_token);
}
Ok((
PyWordLevel {},
builder
.build()
.map_err(|e| exceptions::PyException::new_err(e.to_string()))?
.into(),
))
}
/// Read a :obj:`vocab.json`
///
/// This method provides a way to read and parse the content of a vocabulary file,
/// returning the relevant data structures. If you want to instantiate some WordLevel models
/// from memory, this method gives you the expected input from the standard files.
///
/// Args:
/// vocab (:obj:`str`):
/// The path to a :obj:`vocab.json` file
///
/// Returns:
/// :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
#[staticmethod]
#[pyo3(text_signature = "(vocab)")]
fn read_file(vocab: &str) -> PyResult<Vocab> {
WordLevel::read_file(vocab).map_err(|e| {
exceptions::PyException::new_err(format!("Error while reading WordLevel file: {}", e))
})
}
/// Instantiate a WordLevel model from the given file
///
/// This method is roughly equivalent to doing::
///
/// vocab = WordLevel.read_file(vocab_filename)
/// wordlevel = WordLevel(vocab)
///
/// If you don't need to keep the :obj:`vocab` values lying around, this method is
/// more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to
/// initialize a :class:`~tokenizers.models.WordLevel`
///
/// Args:
/// vocab (:obj:`str`):
/// The path to a :obj:`vocab.json` file
///
/// Returns:
/// :class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file
#[classmethod]
#[pyo3(signature = (vocab, unk_token = None))]
#[pyo3(text_signature = "(vocab, unk_token)")]
fn from_file(
_cls: &Bound<'_, PyType>,
py: Python,
vocab: &str,
unk_token: Option<String>,
) -> PyResult<Py<Self>> {
let vocab = WordLevel::read_file(vocab).map_err(|e| {
exceptions::PyException::new_err(format!("Error while reading WordLevel file: {}", e))
})?;
Py::new(
py,
PyWordLevel::new(py, Some(PyVocab::Vocab(vocab)), unk_token)?,
)
}
}
/// An implementation of the Unigram algorithm
///
/// Args:
/// vocab (:obj:`List[Tuple[str, float]]`, `optional`):
/// A list of vocabulary items and their relative score [("am", -0.2442),...]
#[pyclass(extends=PyModel, module = "tokenizers.models", name = "Unigram")]
pub struct PyUnigram {}
#[pymethods]
impl PyUnigram {
#[new]
#[pyo3(signature = (vocab=None, unk_id=None, byte_fallback=None), text_signature = "(self, vocab, unk_id, byte_fallback)")]
fn new(
vocab: Option<Vec<(String, f64)>>,
unk_id: Option<usize>,
byte_fallback: Option<bool>,
) -> PyResult<(Self, PyModel)> {
match (vocab, unk_id, byte_fallback) {
(Some(vocab), unk_id, byte_fallback) => {
let model =
Unigram::from(vocab, unk_id, byte_fallback.unwrap_or(false)).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while loading Unigram: {}",
e
))
})?;
Ok((PyUnigram {}, model.into()))
}
(None, None, _) => Ok((PyUnigram {}, Unigram::default().into())),
_ => Err(exceptions::PyValueError::new_err(
"`vocab` and `unk_id` must both be specified",
)),
}
}
/// Clears the internal cache
#[pyo3(signature = ())]
#[pyo3(text_signature = "(self)")]
fn _clear_cache(self_: PyRef<Self>) -> PyResult<()> {
let super_ = self_.as_ref();
let mut model = super_.model.write().map_err(|e| {
exceptions::PyException::new_err(format!("Error while clearing Unigram cache: {}", e))
})?;
model.clear_cache();
Ok(())
}
/// Resize the internal cache
#[pyo3(signature = (capacity))]
#[pyo3(text_signature = "(self, capacity)")]
fn _resize_cache(self_: PyRef<Self>, capacity: usize) -> PyResult<()> {
let super_ = self_.as_ref();
let mut model = super_.model.write().map_err(|e| {
exceptions::PyException::new_err(format!("Error while resizing Unigram cache: {}", e))
})?;
model.resize_cache(capacity);
Ok(())
}
}
/// Models Module
#[pymodule]
pub fn models(m: &Bound<'_, PyModule>) -> PyResult<()> {
m.add_class::<PyModel>()?;
m.add_class::<PyBPE>()?;
m.add_class::<PyWordPiece>()?;
m.add_class::<PyWordLevel>()?;
m.add_class::<PyUnigram>()?;
Ok(())
}
#[cfg(test)]
mod test {
use crate::models::PyModel;
use pyo3::prelude::*;
use tk::models::bpe::BPE;
use tk::models::ModelWrapper;
#[test]
fn get_subtype() {
Python::with_gil(|py| {
let py_model = PyModel::from(BPE::default());
let py_bpe = py_model.get_as_subtype(py).unwrap();
assert_eq!("BPE", py_bpe.bind(py).get_type().qualname().unwrap());
})
}
#[test]
fn serialize() {
let rs_bpe = BPE::default();
let rs_bpe_ser = serde_json::to_string(&rs_bpe).unwrap();
let rs_wrapper: ModelWrapper = rs_bpe.into();
let rs_wrapper_ser = serde_json::to_string(&rs_wrapper).unwrap();
let py_model = PyModel::from(rs_wrapper);
let py_ser = serde_json::to_string(&py_model).unwrap();
assert_eq!(py_ser, rs_bpe_ser);
assert_eq!(py_ser, rs_wrapper_ser);
let py_model: PyModel = serde_json::from_str(&rs_bpe_ser).unwrap();
match *py_model.model.as_ref().read().unwrap() {
ModelWrapper::BPE(_) => (),
_ => panic!("Expected BPE model."),
};
let py_model: PyModel = serde_json::from_str(&rs_wrapper_ser).unwrap();
match *py_model.model.as_ref().read().unwrap() {
ModelWrapper::BPE(_) => (),
_ => panic!("Expected BPE model."),
};
}
}
| tokenizers/bindings/python/src/models.rs/0 | {
"file_path": "tokenizers/bindings/python/src/models.rs",
"repo_id": "tokenizers",
"token_count": 15947
} |
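As a usage sketch for the BPE docstrings above (the file names are placeholders for a standard vocab.json/merges.txt pair):

from tokenizers import Tokenizer
from tokenizers.models import BPE

# One-step construction from files...
bpe = BPE.from_file("vocab.json", "merges.txt", unk_token="<unk>")
# ...or the equivalent two-step version, if you want to keep the raw structures around:
vocab, merges = BPE.read_file("vocab.json", "merges.txt")
bpe = BPE(vocab, merges, unk_token="<unk>")
tokenizer = Tokenizer(bpe)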
from tokenizers import ByteLevelBPETokenizer
from ..utils import data_dir, multiprocessing_with_parallelism, roberta_files
class TestByteLevelBPE:
def test_basic_encode(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"])
output = tokenizer.encode("The quick brown fox jumps over the lazy dog")
assert output.ids == [133, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
assert output.tokens == [
"The",
"Ġquick",
"Ġbrown",
"Ġfox",
"Ġjumps",
"Ġover",
"Ġthe",
"Ġlazy",
"Ġdog",
]
assert output.offsets == [
(0, 3),
(3, 9),
(9, 15),
(15, 19),
(19, 25),
(25, 30),
(30, 34),
(34, 39),
(39, 43),
]
def test_add_prefix_space(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_file(
roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True
)
output = tokenizer.encode("The quick brown fox jumps over the lazy dog")
assert output.ids == [20, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
assert output.tokens == [
"ĠThe",
"Ġquick",
"Ġbrown",
"Ġfox",
"Ġjumps",
"Ġover",
"Ġthe",
"Ġlazy",
"Ġdog",
]
assert output.offsets == [
(0, 3),
(3, 9),
(9, 15),
(15, 19),
(19, 25),
(25, 30),
(30, 34),
(34, 39),
(39, 43),
]
def test_lowerspace(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_file(
roberta_files["vocab"],
roberta_files["merges"],
add_prefix_space=True,
lowercase=True,
)
output = tokenizer.encode("The Quick Brown Fox Jumps Over The Lazy Dog")
assert output.ids == [5, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
assert output.tokens == [
"Ġthe",
"Ġquick",
"Ġbrown",
"Ġfox",
"Ġjumps",
"Ġover",
"Ġthe",
"Ġlazy",
"Ġdog",
]
def test_multiprocessing_with_parallelism(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"])
multiprocessing_with_parallelism(tokenizer, False)
multiprocessing_with_parallelism(tokenizer, True)
def test_train_from_iterator(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = ByteLevelBPETokenizer()
tokenizer.train_from_iterator(text, show_progress=False)
output = tokenizer.encode("A sentence")
assert output.tokens == ["A", "Ġsentence"]
| tokenizers/bindings/python/tests/implementations/test_byte_level_bpe.py/0 | {
"file_path": "tokenizers/bindings/python/tests/implementations/test_byte_level_bpe.py",
"repo_id": "tokenizers",
"token_count": 1653
} |
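The last test above trains from an in-memory iterator; here is a standalone sketch of the same flow with an added save step (the output path is arbitrary):

from tokenizers import ByteLevelBPETokenizer

texts = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = ByteLevelBPETokenizer()
tokenizer.train_from_iterator(texts, show_progress=False)
print(tokenizer.encode("A sentence").tokens)  # ['A', 'Ġsentence']
tokenizer.save("byte-level-bpe.tokenizer.json")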
# Pre-tokenizers
<tokenizerslangcontent>
<python>
## BertPreTokenizer
[[autodoc]] tokenizers.pre_tokenizers.BertPreTokenizer
## ByteLevel
[[autodoc]] tokenizers.pre_tokenizers.ByteLevel
## CharDelimiterSplit
[[autodoc]] tokenizers.pre_tokenizers.CharDelimiterSplit
## Digits
[[autodoc]] tokenizers.pre_tokenizers.Digits
## Metaspace
[[autodoc]] tokenizers.pre_tokenizers.Metaspace
## PreTokenizer
[[autodoc]] tokenizers.pre_tokenizers.PreTokenizer
## Punctuation
[[autodoc]] tokenizers.pre_tokenizers.Punctuation
## Sequence
[[autodoc]] tokenizers.pre_tokenizers.Sequence
## Split
[[autodoc]] tokenizers.pre_tokenizers.Split
## UnicodeScripts
[[autodoc]] tokenizers.pre_tokenizers.UnicodeScripts
## Whitespace
[[autodoc]] tokenizers.pre_tokenizers.Whitespace
## WhitespaceSplit
[[autodoc]] tokenizers.pre_tokenizers.WhitespaceSplit
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | tokenizers/docs/source-doc-builder/api/pre-tokenizers.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/api/pre-tokenizers.mdx",
"repo_id": "tokenizers",
"token_count": 371
} |
The tokenization pipeline
====================================================================================================
When calling :entity:`Tokenizer.encode` or :entity:`Tokenizer.encode_batch`, the input text(s) go
through the following pipeline:
- :ref:`normalization`
- :ref:`pre-tokenization`
- :ref:`model`
- :ref:`post-processing`
We'll see what happens during each of those steps in detail, as well as when you want to
:ref:`decode <decoding>` some token ids, and how the 🤗 Tokenizers library allows you to customize
each of those steps to your needs. If you're already familiar with those steps and want to learn by
seeing some code, jump to :ref:`our BERT from scratch example <example>`.
For the examples that require a :entity:`Tokenizer`, we will use the tokenizer we trained
in the :doc:`quicktour`, which you can load with:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_reload_tokenizer
:end-before: END pipeline_reload_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 8
.. _normalization:
Normalization
----------------------------------------------------------------------------------------------------
Normalization is, in a nutshell, a set of operations you apply to a raw string to make it less
random or "cleaner". Common operations include stripping whitespace, removing accented characters
or lowercasing all text. If you're familiar with `Unicode normalization
<https://unicode.org/reports/tr15>`__, it is also a very common normalization operation applied
in most tokenizers.
Each normalization operation is represented in the 🤗 Tokenizers library by a
:entity:`Normalizer`, and you can combine several of those by using a
:entity:`normalizers.Sequence`. Here is a normalizer applying NFD Unicode normalization
and removing accents as an example:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START setup_normalizer
:end-before: END setup_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_setup_normalizer
:end-before: END pipeline_setup_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START setup_normalizer
:end-before: END setup_normalizer
:dedent: 8
You can manually test that normalizer by applying it to any string:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START test_normalizer
:end-before: END test_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_test_normalizer
:end-before: END pipeline_test_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START test_normalizer
:end-before: END test_normalizer
:dedent: 8
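.. only:: python

   As a rough inline sketch of the same idea, for readers who just want something
   to copy (the sample string is arbitrary):

   .. code-block:: python

      from tokenizers import normalizers
      from tokenizers.normalizers import NFD, StripAccents

      normalizer = normalizers.Sequence([NFD(), StripAccents()])
      print(normalizer.normalize_str("Héllò hôw are ü?"))
      # "Hello how are u?"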
When building a :entity:`Tokenizer`, you can customize its normalizer by just changing
the corresponding attribute:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START replace_normalizer
:end-before: END replace_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_replace_normalizer
:end-before: END pipeline_replace_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START replace_normalizer
:end-before: END replace_normalizer
:dedent: 8
Of course, if you change the way a tokenizer applies normalization, you should probably retrain it
from scratch afterward.
.. _pre-tokenization:
Pre-Tokenization
----------------------------------------------------------------------------------------------------
Pre-tokenization is the act of splitting a text into smaller objects that give an upper bound to
what your tokens will be at the end of training. A good way to think of this is that the
pre-tokenizer will split your text into "words" and then, your final tokens will be parts of those
words.
An easy way to pre-tokenize inputs is to split on spaces and punctuations, which is done by the
:entity:`pre_tokenizers.Whitespace` pre-tokenizer:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START setup_pre_tokenizer
:end-before: END setup_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_setup_pre_tokenizer
:end-before: END pipeline_setup_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START setup_pre_tokenizer
:end-before: END setup_pre_tokenizer
:dedent: 8
The output is a list of tuples, with each tuple containing one word and its span in the original
sentence (which is used to determine the final :obj:`offsets` of our :entity:`Encoding`).
Note that splitting on punctuation will split contractions like :obj:`"I'm"` in this example.
You can combine any :entity:`PreTokenizer` together. For
instance, here is a pre-tokenizer that will split on space, punctuation and digits, separating
numbers in their individual digits:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START combine_pre_tokenizer
:end-before: END combine_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_combine_pre_tokenizer
:end-before: END pipeline_combine_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START combine_pre_tokenizer
:end-before: END combine_pre_tokenizer
:dedent: 8
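.. only:: python

   A rough inline version of that combined pre-tokenizer (the input string is just
   an illustration):

   .. code-block:: python

      from tokenizers import pre_tokenizers
      from tokenizers.pre_tokenizers import Whitespace, Digits

      pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
      print(pre_tokenizer.pre_tokenize_str("Call 911!"))
      # [('Call', (0, 4)), ('9', (5, 6)), ('1', (6, 7)), ('1', (7, 8)), ('!', (8, 9))]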
As we saw in the :doc:`quicktour`, you can customize the pre-tokenizer of a
:entity:`Tokenizer` by just changing the corresponding attribute:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START replace_pre_tokenizer
:end-before: END replace_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_replace_pre_tokenizer
:end-before: END pipeline_replace_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START replace_pre_tokenizer
:end-before: END replace_pre_tokenizer
:dedent: 8
Of course, if you change the pre-tokenizer, you should probably retrain your tokenizer from
scratch afterward.
.. _model:
The Model
----------------------------------------------------------------------------------------------------
Once the input texts are normalized and pre-tokenized, the :entity:`Tokenizer` applies the model on
the pre-tokens. This is the part of the pipeline that needs training on your corpus (or that has
been trained if you are using a pretrained tokenizer).
The role of the model is to split your "words" into tokens, using the rules it has learned. It's
also responsible for mapping those tokens to their corresponding IDs in the vocabulary of the model.
This model is passed along when initializing the :entity:`Tokenizer` so you already know
how to customize this part. Currently, the 🤗 Tokenizers library supports:
- :entity:`models.BPE`
- :entity:`models.Unigram`
- :entity:`models.WordLevel`
- :entity:`models.WordPiece`
For more details about each model and its behavior, you can check `here <components#models>`__
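.. only:: python

   As a minimal sketch, swapping models is just a matter of what you pass when
   building the :entity:`Tokenizer` (the ``[UNK]`` token here is an assumption):

   .. code-block:: python

      from tokenizers import Tokenizer
      from tokenizers.models import WordPiece

      tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))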
.. _post-processing:
Post-Processing
----------------------------------------------------------------------------------------------------
Post-processing is the last step of the tokenization pipeline, to perform any additional
transformation to the :entity:`Encoding` before it's returned, like adding potential
special tokens.
As we saw in the quick tour, we can customize the post processor of a :entity:`Tokenizer`
by setting the corresponding attribute. For instance, here is how we can post-process to make the
inputs suitable for the BERT model:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START setup_processor
:end-before: END setup_processor
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_setup_processor
:end-before: END pipeline_setup_processor
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START setup_processor
:end-before: END setup_processor
:dedent: 8
Note that, unlike the pre-tokenizer or the normalizer, you don't need to retrain a tokenizer
after changing its post-processor.
.. _example:
All together: a BERT tokenizer from scratch
----------------------------------------------------------------------------------------------------
Let's put all those pieces together to build a BERT tokenizer. First, BERT relies on WordPiece, so
we instantiate a new :entity:`Tokenizer` with this model:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_tokenizer
:end-before: END bert_setup_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_tokenizer
:end-before: END bert_setup_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_tokenizer
:end-before: END bert_setup_tokenizer
:dedent: 8
Then we know that BERT preprocesses texts by removing accents and lowercasing. We also use a unicode
normalizer:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_normalizer
:end-before: END bert_setup_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_normalizer
:end-before: END bert_setup_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_normalizer
:end-before: END bert_setup_normalizer
:dedent: 8
The pre-tokenizer is just splitting on whitespace and punctuation:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_pre_tokenizer
:end-before: END bert_setup_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_pre_tokenizer
:end-before: END bert_setup_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_pre_tokenizer
:end-before: END bert_setup_pre_tokenizer
:dedent: 8
And the post-processing uses the template we saw in the previous section:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_processor
:end-before: END bert_setup_processor
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_processor
:end-before: END bert_setup_processor
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_processor
:end-before: END bert_setup_processor
:dedent: 8
We can use this tokenizer and train it on wikitext like in the :doc:`quicktour`:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_train_tokenizer
:end-before: END bert_train_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_train_tokenizer
:end-before: END bert_train_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_train_tokenizer
:end-before: END bert_train_tokenizer
:dedent: 8
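.. only:: python

   For reference, here is a condensed, untested sketch of the whole assembly in
   Python (the wikitext file names and the special-token ids are placeholders;
   the ids assume the trainer assigns ``[UNK]=0``, ``[CLS]=1``, ``[SEP]=2``):

   .. code-block:: python

      from tokenizers import Tokenizer, normalizers, pre_tokenizers, processors, trainers
      from tokenizers.models import WordPiece

      bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
      bert_tokenizer.normalizer = normalizers.Sequence(
          [normalizers.NFD(), normalizers.Lowercase(), normalizers.StripAccents()]
      )
      bert_tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
      bert_tokenizer.post_processor = processors.TemplateProcessing(
          single="[CLS] $A [SEP]",
          pair="[CLS] $A [SEP] $B:1 [SEP]:1",
          special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
      )
      trainer = trainers.WordPieceTrainer(
          vocab_size=30522,
          special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"],
      )
      bert_tokenizer.train(["wiki.train.raw", "wiki.valid.raw", "wiki.test.raw"], trainer)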
.. _decoding:
Decoding
----------------------------------------------------------------------------------------------------
.. entities:: python
bert_tokenizer
:obj:`bert_tokenizer`
.. entities:: rust
bert_tokenizer
:obj:`bert_tokenizer`
.. entities:: node
bert_tokenizer
:obj:`bertTokenizer`
On top of encoding the input texts, a :entity:`Tokenizer` also has an API for decoding,
that is converting IDs generated by your model back to a text. This is done by the methods
:entity:`Tokenizer.decode` (for one predicted text) and :entity:`Tokenizer.decode_batch` (for a
batch of predictions).
The `decoder` will first convert the IDs back to tokens (using the tokenizer's vocabulary) and
remove all special tokens, then join those tokens with spaces:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START test_decoding
:end-before: END test_decoding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_test_decoding
:end-before: END pipeline_test_decoding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START test_decoding
:end-before: END test_decoding
:dedent: 8
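Stripped to its core, and assuming a trained ``tokenizer`` from the sections above, the round trip
looks roughly like this in Rust (the exact ``decode`` argument type has varied slightly between
releases):

.. code-block:: rust

    let encoding = tokenizer.encode("Hello, y'all! How are you?", false).unwrap();
    // `true` asks the decoder to skip special tokens.
    let decoded = tokenizer.decode(encoding.get_ids(), true).unwrap();
    println!("{decoded}");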
If you used a model that added special characters to represent subtokens of a given "word" (like
the :obj:`"##"` in WordPiece), you will need to customize the `decoder` to treat them properly. If we
take our previous :entity:`bert_tokenizer` for instance, the default decoding will give:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_test_decoding
:end-before: END bert_test_decoding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_test_decoding
:end-before: END bert_test_decoding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_test_decoding
:end-before: END bert_test_decoding
:dedent: 8
But by changing it to a proper decoder, we get:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_proper_decoding
:end-before: END bert_proper_decoding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_proper_decoding
:end-before: END bert_proper_decoding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_proper_decoding
:end-before: END bert_proper_decoding
:dedent: 8
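A rough Rust equivalent of that change (assuming the crate's WordPiece decoder and its default
``##`` prefix) would be:

.. code-block:: rust

    use tokenizers::decoders::wordpiece::WordPiece as WordPieceDecoder;

    // Merge "##" continuation pieces back into full words when decoding.
    bert_tokenizer.with_decoder(Some(WordPieceDecoder::default()));

    let encoding = bert_tokenizer.encode("Welcome to the 🤗 Tokenizers library.", false).unwrap();
    let decoded = bert_tokenizer.decode(encoding.get_ids(), true).unwrap();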
| tokenizers/docs/source/pipeline.rst/0 | {
"file_path": "tokenizers/docs/source/pipeline.rst",
"repo_id": "tokenizers",
"token_count": 6322
} |
use tokenizers::models::wordpiece::WordPiece;
use tokenizers::{AddedToken, Tokenizer};
fn main() {
let start = std::time::Instant::now();
let mut tokenizer = Tokenizer::new(WordPiece::default());
// Mix special and not special
    // You can check that the ids are assigned in order and that the special status is correct.
let tokens: Vec<_> = (0..120_000)
.map(|i| AddedToken::from(format!("[SPECIAL_{i}]"), i % 2 == 0))
.collect();
tokenizer.add_tokens(&tokens);
tokenizer.save("_tok.json", true).unwrap();
println!("Save took {:?}", start.elapsed());
let start = std::time::Instant::now();
let _tok = Tokenizer::from_file("_tok.json").unwrap();
println!("Took {:?}", start.elapsed());
std::fs::remove_file("_tok.json").unwrap();
}
| tokenizers/tokenizers/examples/serialization.rs/0 | {
"file_path": "tokenizers/tokenizers/examples/serialization.rs",
"repo_id": "tokenizers",
"token_count": 299
} |
#![allow(clippy::map_entry)]
use super::{Pair, WithFirstLastIterator, Word, BPE};
use crate::parallelism::*;
use crate::tokenizer::{AddedToken, Result, Trainer};
use crate::utils::progress::{ProgressBar, ProgressStyle};
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
use std::collections::{BinaryHeap, HashMap, HashSet};
#[derive(Debug, Eq)]
struct Merge {
pair: Pair,
count: u64,
pos: HashSet<usize>,
}
impl PartialEq for Merge {
fn eq(&self, other: &Self) -> bool {
self.count == other.count && self.pair == other.pair
}
}
impl PartialOrd for Merge {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Merge {
fn cmp(&self, other: &Self) -> Ordering {
if self.count != other.count {
self.count.cmp(&other.count)
} else {
// Here we want ascending order
other.pair.cmp(&self.pair)
}
}
}
struct Config {
min_frequency: u64,
vocab_size: usize,
show_progress: bool,
special_tokens: Vec<AddedToken>,
limit_alphabet: Option<usize>,
initial_alphabet: HashSet<char>,
continuing_subword_prefix: Option<String>,
end_of_word_suffix: Option<String>,
max_token_length: Option<usize>,
}
/// A `BpeTrainerBuilder` can be used to create a `BpeTrainer` with a custom
/// configuration.
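///
/// # Examples
///
/// A minimal sketch using only the builder methods defined below:
///
/// ```
/// use tokenizers::models::bpe::BpeTrainer;
///
/// let trainer: BpeTrainer = BpeTrainer::builder()
///     .vocab_size(10_000)
///     .min_frequency(2)
///     .show_progress(false)
///     .build();
/// ```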
pub struct BpeTrainerBuilder {
config: Config,
}
impl Default for BpeTrainerBuilder {
fn default() -> Self {
Self {
config: Config {
min_frequency: 0,
vocab_size: 30000,
show_progress: true,
special_tokens: vec![],
limit_alphabet: None,
initial_alphabet: HashSet::new(),
continuing_subword_prefix: None,
end_of_word_suffix: None,
max_token_length: None,
},
}
}
}
impl BpeTrainerBuilder {
/// Constructs a new `BpeTrainerBuilder`
pub fn new() -> Self {
Self::default()
}
/// Set the expected minimum frequency
#[must_use]
pub fn min_frequency(mut self, frequency: u64) -> Self {
self.config.min_frequency = frequency;
self
}
/// Set the vocabulary size
#[must_use]
pub fn vocab_size(mut self, size: usize) -> Self {
self.config.vocab_size = size;
self
}
/// Set whether to show progress
#[must_use]
pub fn show_progress(mut self, show: bool) -> Self {
self.config.show_progress = show;
self
}
/// Set the special tokens
#[must_use]
pub fn special_tokens(mut self, tokens: Vec<AddedToken>) -> Self {
self.config.special_tokens = tokens;
self
}
/// Set whether to limit the alphabet
#[must_use]
pub fn limit_alphabet(mut self, limit: usize) -> Self {
self.config.limit_alphabet = Some(limit);
self
}
/// Set the initial alphabet
#[must_use]
pub fn initial_alphabet(mut self, alphabet: HashSet<char>) -> Self {
self.config.initial_alphabet = alphabet;
self
}
/// Set the continuing_subword_prefix
#[must_use]
pub fn continuing_subword_prefix(mut self, prefix: String) -> Self {
self.config.continuing_subword_prefix = Some(prefix);
self
}
/// Set the end_of_word_suffix
#[must_use]
pub fn end_of_word_suffix(mut self, suffix: String) -> Self {
self.config.end_of_word_suffix = Some(suffix);
self
}
/// Set max_token_length
#[must_use]
pub fn max_token_length(mut self, max_token_length: Option<usize>) -> Self {
self.config.max_token_length = max_token_length;
self
}
/// Constructs the final BpeTrainer
pub fn build(self) -> BpeTrainer {
BpeTrainer {
min_frequency: self.config.min_frequency,
vocab_size: self.config.vocab_size,
show_progress: self.config.show_progress,
special_tokens: self.config.special_tokens,
limit_alphabet: self.config.limit_alphabet,
initial_alphabet: self.config.initial_alphabet,
continuing_subword_prefix: self.config.continuing_subword_prefix,
end_of_word_suffix: self.config.end_of_word_suffix,
max_token_length: self.config.max_token_length,
words: HashMap::new(),
}
}
}
/// In charge of training a `BPE` model
///
/// # Examples
///
/// ```
/// use tokenizers::tokenizer::Trainer;
/// use tokenizers::models::bpe::{BPE, BpeTrainer};
///
/// let sequences = vec![ "Hello", "World" ];
///
/// let mut trainer = BpeTrainer::default();
/// trainer.feed(sequences.iter(), |s| Ok(vec![s.to_owned()]));
///
/// let mut model = BPE::default();
/// let special_tokens = trainer.train(&mut model).unwrap();
/// ```
#[non_exhaustive]
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)]
pub struct BpeTrainer {
/// The minimum frequency a pair must have to produce a merge operation
pub min_frequency: u64,
/// The target vocabulary size
pub vocab_size: usize,
/// Whether to show progress while training
pub show_progress: bool,
/// A list of special tokens that the model should know of
pub special_tokens: Vec<AddedToken>,
/// Whether to limit the number of initial tokens that can be kept before computing merges
pub limit_alphabet: Option<usize>,
    /// The initial alphabet we absolutely want to include. This lets us cover
    /// some characters that are not necessarily present in the training set
pub initial_alphabet: HashSet<char>,
    /// An optional prefix to use on any subword that exists only behind another one
pub continuing_subword_prefix: Option<String>,
    /// An optional suffix to characterize an end-of-word subword
pub end_of_word_suffix: Option<String>,
/// An optional parameter to limit the max length of any single token
pub max_token_length: Option<usize>,
words: HashMap<String, u64>,
}
impl Default for BpeTrainer {
fn default() -> Self {
Self::builder().build()
}
}
impl BpeTrainer {
pub fn new(min_frequency: u64, vocab_size: usize) -> Self {
Self {
min_frequency,
vocab_size,
..Default::default()
}
}
pub fn builder() -> BpeTrainerBuilder {
BpeTrainerBuilder::new()
}
/// Setup a progress bar if asked to show progress
fn setup_progress(&self) -> Option<ProgressBar> {
if self.show_progress {
let p = ProgressBar::new(0);
p.set_style(
ProgressStyle::default_bar()
.template("[{elapsed_precise}] {msg:<30!} {wide_bar} {pos:<9!}/{len:>9!}")
.expect("Invalid progress template"),
);
Some(p)
} else {
None
}
}
/// Set the progress bar in the finish state
fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) {
if let Some(p) = p {
p.set_length(final_len as u64);
p.finish();
println!();
}
}
/// Update the progress bar with the new provided length and message
fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &'static str) {
if let Some(p) = p {
p.set_message(message);
p.set_length(len as u64);
p.reset();
}
}
/// Add the provided special tokens to the initial vocabulary
fn add_special_tokens(&self, w2id: &mut HashMap<String, u32>, id2w: &mut Vec<String>) {
for token in &self.special_tokens {
if !w2id.contains_key(&token.content) {
id2w.push(token.content.to_owned());
w2id.insert(token.content.to_owned(), (id2w.len() - 1) as u32);
}
}
}
/// Compute the initial alphabet and limit it if relevant
fn compute_alphabet(
&self,
wc: &HashMap<String, u64>,
w2id: &mut HashMap<String, u32>,
id2w: &mut Vec<String>,
) {
// Compute the alphabet from seen words
let mut alphabet: HashMap<char, usize> = HashMap::new();
for (word, count) in wc {
for c in word.chars() {
alphabet
.entry(c)
.and_modify(|cnt| *cnt += *count as usize)
.or_insert(*count as usize);
}
}
// Also include anything from the provided initial alphabet
for c in &self.initial_alphabet {
alphabet
.entry(*c)
.and_modify(|cnt| *cnt = usize::MAX)
.or_insert(usize::MAX);
}
let mut kept = alphabet.iter().collect::<Vec<_>>();
// Compute the number of chars to remove from the alphabet
// If `limit_alphabet < initial_alphabet.len()`, some of these initial characters
// will be removed
let to_remove = self
.limit_alphabet
.map(|limit| {
if alphabet.len() > limit {
alphabet.len() - limit
} else {
0
}
})
.unwrap_or(0);
// Remove the unwanted chars
if to_remove > 0 {
kept.sort_unstable_by_key(|k| *k.1);
kept.drain(..to_remove);
}
// Keep the initial alphabet (sorted for determinism)
kept.sort_unstable_by_key(|k| (*k.0) as u32);
kept.into_iter().for_each(|(c, _)| {
let s = c.to_string();
if !w2id.contains_key(&s) {
id2w.push(s.clone());
w2id.insert(s, (id2w.len() - 1) as u32);
}
});
}
/// Tokenize words and add subwords to the vocabulary when relevant
fn tokenize_words(
&self,
wc: &HashMap<String, u64>,
w2id: &mut HashMap<String, u32>,
id2w: &mut Vec<String>,
p: &Option<ProgressBar>,
) -> (Vec<Word>, Vec<u64>) {
let mut words: Vec<Word> = Vec::with_capacity(wc.len());
let mut counts: Vec<u64> = Vec::with_capacity(wc.len());
for (word, count) in wc {
let mut current_word = Word::new();
counts.push(*count);
for (is_first, is_last, c) in word.chars().with_first_and_last() {
let mut s = c.to_string();
if w2id.contains_key(&s) {
// Found the initial char in the authorized alphabet
// Add the `continuing_subword_prefix` if relevant
if !is_first {
if let Some(prefix) = &self.continuing_subword_prefix {
s = format!("{prefix}{s}");
}
}
// Add the `end_of_word_suffix` if relevant
if is_last {
if let Some(suffix) = &self.end_of_word_suffix {
s = format!("{s}{suffix}");
}
}
// Insert the new formed string if necessary
if !w2id.contains_key(&s) {
id2w.push(s.clone());
w2id.insert(s.clone(), (id2w.len() - 1) as u32);
}
current_word.add(w2id[&s], 1); // We do not care about the len here
}
}
words.push(current_word);
if let Some(p) = p {
p.inc(1);
}
}
(words, counts)
}
fn count_pairs(
&self,
words: &[Word],
counts: &[u64],
p: &Option<ProgressBar>,
) -> (HashMap<Pair, i32>, HashMap<Pair, HashSet<usize>>) {
words
.maybe_par_iter()
.enumerate()
.map(|(i, word)| {
let mut pair_counts = HashMap::new();
let mut where_to_update: HashMap<Pair, HashSet<usize>> = HashMap::new();
for window in word.get_chars().windows(2) {
let cur_pair: Pair = (window[0], window[1]);
// Initialize pair_counts and where_to_update for this pair if we just saw it
if !pair_counts.contains_key(&cur_pair) {
pair_counts.insert(cur_pair, 0);
}
// Then update counts
let count = counts[i];
where_to_update
.entry(cur_pair)
.and_modify(|h| {
h.insert(i);
})
.or_insert_with(|| {
let mut h = HashSet::new();
h.insert(i);
h
});
*pair_counts.get_mut(&cur_pair).unwrap() += count as i32;
}
if let Some(p) = &p {
p.inc(1);
}
(pair_counts, where_to_update)
})
.reduce(
|| (HashMap::new(), HashMap::new()),
|(mut pair_counts, mut where_to_update), (pc, wtu)| {
for (k, v) in pc {
pair_counts.entry(k).and_modify(|c| *c += v).or_insert(v);
}
for (k, v) in wtu {
where_to_update
.entry(k)
.and_modify(|set| *set = set.union(&v).copied().collect())
.or_insert(v);
}
(pair_counts, where_to_update)
},
)
}
pub fn do_train(
&self,
word_counts: &HashMap<String, u64>,
model: &mut BPE,
) -> Result<Vec<AddedToken>> {
let mut word_to_id: HashMap<String, u32> = HashMap::with_capacity(self.vocab_size);
let mut id_to_word: Vec<String> = Vec::with_capacity(self.vocab_size);
let max_token_length: usize = self.max_token_length.unwrap_or(usize::MAX);
let progress = self.setup_progress();
//
// 1. Add all special tokens to the vocabulary
//
self.add_special_tokens(&mut word_to_id, &mut id_to_word);
//
// 2. Compute the initial alphabet
//
self.compute_alphabet(word_counts, &mut word_to_id, &mut id_to_word);
//
// 3. Tokenize words
//
self.update_progress(&progress, word_counts.len(), "Tokenize words");
let (mut words, counts) =
self.tokenize_words(word_counts, &mut word_to_id, &mut id_to_word, &progress);
self.finalize_progress(&progress, words.len());
//
// 4. Count pairs in words
//
self.update_progress(&progress, words.len(), "Count pairs");
let (mut pair_counts, mut where_to_update) = self.count_pairs(&words, &counts, &progress);
// Insert them in the queue
let mut queue = BinaryHeap::with_capacity(pair_counts.len());
where_to_update.drain().for_each(|(pair, pos)| {
let count = pair_counts[&pair];
if count > 0 {
queue.push(Merge {
pair,
count: count as u64,
pos,
});
}
});
self.finalize_progress(&progress, words.len());
//
// 5. Do merges
//
self.update_progress(&progress, self.vocab_size, "Compute merges");
let mut merges: Vec<(Pair, u32)> = vec![];
loop {
// Stop as soon as we have a big enough vocabulary
if word_to_id.len() >= self.vocab_size {
break;
}
if queue.is_empty() {
break;
}
let mut top = queue.pop().unwrap();
if top.count != pair_counts[&top.pair] as u64 {
top.count = pair_counts[&top.pair] as u64;
queue.push(top);
continue;
}
if top.count < 1 || self.min_frequency > top.count {
break;
}
let part_a = &id_to_word[top.pair.0 as usize];
let mut part_b = id_to_word[top.pair.1 as usize].to_owned();
// Build new token
if let Some(prefix) = &self.continuing_subword_prefix {
if part_b.starts_with(prefix) {
let prefix_byte_len = prefix.chars().map(|c| c.len_utf8()).sum();
part_b = part_b[prefix_byte_len..].to_string();
}
}
let new_token = format!("{part_a}{part_b}");
// implement sentencepiece-like merge.
// if this code were to be merged, integrate a way in the python bindings to communicate this variable
// default should be 0/None to maintain previous behavior. 16 is the spm default.
// Insert new token if it does not already exist
let new_token_id = word_to_id
.get(&new_token)
.copied()
.unwrap_or(id_to_word.len() as u32);
if !word_to_id.contains_key(&new_token) {
id_to_word.push(new_token.clone());
word_to_id.insert(new_token.clone(), new_token_id);
}
merges.push((top.pair, new_token_id));
            // Merge the new pair in every word where it appears
// Safety: This is just a type assertion, the code below may no longer be safe
// if the type of `pos` changes
let pos: &HashSet<usize> = &top.pos;
let words_len = words.len();
struct WordPtr(*mut Word);
// Safety: We do not actually use this for concurrent access to the same memory,
// only to different chunks within the same allocation.
unsafe impl Sync for WordPtr {}
let word_start = WordPtr(words.as_mut_ptr());
let changes = pos
.maybe_par_iter()
.flat_map(|&i| {
// Safety:
// We are producing a valid pointer since we are indexing in bounds
//
// We can access each `word` here in parallel because each position
// can be there only once (pos is a HashSet).
unsafe {
assert!(i < words_len);
// This is words[i], but avoids needing to go through &T (which triggers UB)
let word = word_start.0.add(i);
// let word: &mut Word = &mut (*word);
(*word)
.merge(top.pair.0, top.pair.1, new_token_id, max_token_length)
.into_iter()
.map(|c| (c, i))
.collect::<Vec<_>>()
}
})
.collect::<Vec<_>>();
// Introduce new formed pairs
for ((pair, change), iw) in changes {
let count = change * counts[iw] as i32;
pair_counts
.entry(pair)
.and_modify(|c| *c += count)
.or_insert(count);
if change > 0 {
where_to_update
.entry(pair)
.and_modify(|h| {
h.insert(iw);
})
.or_insert_with(|| {
let mut h = HashSet::new();
h.insert(iw);
h
});
}
}
where_to_update.drain().for_each(|(pair, pos)| {
let count = pair_counts[&pair];
if count > 0 {
queue.push(Merge {
pair,
count: count as u64,
pos,
});
}
});
if let Some(p) = &progress {
p.inc(1);
}
}
self.finalize_progress(&progress, merges.len());
// Transfer new vocab & options to model
model.vocab = word_to_id;
model.vocab_r = model
.vocab
.iter()
.map(|(key, val)| (*val, key.to_owned()))
.collect();
model.merges = merges
.into_iter()
.enumerate()
.map(|(i, (pair, new_token_id))| (pair, (i as u32, new_token_id)))
.collect();
if let Some(prefix) = &self.continuing_subword_prefix {
model.continuing_subword_prefix = Some(prefix.to_owned());
} else {
model.continuing_subword_prefix = None;
}
if let Some(suffix) = &self.end_of_word_suffix {
model.end_of_word_suffix = Some(suffix.to_owned());
} else {
model.end_of_word_suffix = None;
}
Ok(self.special_tokens.clone())
}
}
impl Trainer for BpeTrainer {
type Model = BPE;
/// Train a BPE model
fn train(&self, model: &mut BPE) -> Result<Vec<AddedToken>> {
self.do_train(&self.words, model)
}
/// Whether we should show progress
fn should_show_progress(&self) -> bool {
self.show_progress
}
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()>
where
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
F: Fn(&str) -> Result<Vec<String>> + Sync,
{
let words: Result<HashMap<String, u64>> = iterator
.maybe_par_bridge()
.map(|sequence| {
let words = process(sequence.as_ref())?;
let mut map = HashMap::new();
for word in words {
map.entry(word).and_modify(|c| *c += 1).or_insert(1);
}
Ok(map)
})
.reduce(
|| Ok(HashMap::new()),
|acc, ws| {
let mut acc = acc?;
for (k, v) in ws? {
acc.entry(k).and_modify(|c| *c += v).or_insert(v);
}
Ok(acc)
},
);
self.words = words?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::{BpeTrainer, Pair, BPE};
use std::collections::HashMap;
#[test]
fn test_train() {
let word_counts: HashMap<String, u64> = [
("roses".into(), 1),
("are".into(), 2),
("red".into(), 1),
("voilets".into(), 1),
("blue".into(), 1),
("BERT".into(), 1),
("is".into(), 2),
("big".into(), 1),
("and".into(), 1),
("so".into(), 1),
("GPT-2".into(), 1),
]
.iter()
.cloned()
.collect();
let trainer = BpeTrainer::builder()
.show_progress(false)
.min_frequency(2)
.build();
let mut model = BPE::default();
trainer.do_train(&word_counts, &mut model).unwrap();
// Vocab should contain all of the characters from the `word_counts` mapping
// as well as three merges: 're', 'are', and 'is'.
let expected_vocab: HashMap<String, u32> = [
("-".into(), 0),
("2".into(), 1),
("B".into(), 2),
("E".into(), 3),
("G".into(), 4),
("P".into(), 5),
("R".into(), 6),
("T".into(), 7),
("a".into(), 8),
("b".into(), 9),
("d".into(), 10),
("e".into(), 11),
("g".into(), 12),
("i".into(), 13),
("l".into(), 14),
("n".into(), 15),
("o".into(), 16),
("r".into(), 17),
("s".into(), 18),
("t".into(), 19),
("u".into(), 20),
("v".into(), 21),
("re".into(), 22),
("are".into(), 23),
("is".into(), 24),
]
.iter()
.cloned()
.collect();
assert_eq!(model.vocab, expected_vocab);
// The keys in `merges` are pairs of symbols, the values are tuples of (rank, id),
// where 'rank' determines the order in which this merge will be applied during
// tokenization, and 'id' is the vocab id of the symbol resulting from merging
// the pair of symbols in the corresponding key.
let expected_merges: HashMap<Pair, (u32, u32)> = [
((17, 11), (0, 22)), // 'r' + 'e' -> 're'
((8, 22), (1, 23)), // 'a' + 're' -> 'are'
((13, 18), (2, 24)), // 'i' + 's' -> 'is'
]
.iter()
.cloned()
.collect();
assert_eq!(model.merges, expected_merges);
}
#[test]
fn bpe_test_max_token_length_16() {
        /* The bpe_test_max_token_length series of tests exercises the max_token_length flag of BpeTrainer.
        // This is the more robust version that only checks the maximum length of learned tokens;
        // (pre-)tokenizer settings or the vocab can easily be modified when necessary.
        */
let max_token_length = 16;
let long_word_counts: HashMap<String, u64> = [
("singlelongtokenwithoutcasechange", 2),
("singleLongTokenWithCamelCaseChange", 2),
("Longsingletokenwithpunctu@t!onwithin", 2),
("Anotherlongsingletokenwithnumberw1th1n", 2),
("짧은한글문자열짧은한", 2), // korean 10 char
("긴한글문자열긴한글문자열긴한글문", 2), // korean 16 char
("短字符串短字符串短字", 2), //simplified chinese 10 char
("长字符串长字符串长字符串长字符串", 2), // simp. chinese 16 char
("短い文字列短い文字列", 2), // japanese 10 char
("長い文字列長い文字列長い文字列長", 2), // japanese 16 char
("so", 2),
("GPT-2", 2),
]
.iter()
.map(|(key, value)| (key.to_string(), *value))
.collect();
let trainer = BpeTrainer::builder()
.max_token_length(Some(max_token_length))
.show_progress(false)
.min_frequency(0)
.build();
let mut model = BPE::default();
trainer.do_train(&long_word_counts, &mut model).unwrap();
let vocab = model.get_vocab();
for token in vocab.keys() {
assert!(
token.chars().count() <= max_token_length,
"token too long : {} , chars().count() = {}",
token,
token.chars().count()
)
}
}
#[test]
fn bpe_test_max_token_length_direct_assert() {
/* more direct version of bpe_test_max_token_length test
// directly compares tokens with known expected values.
// maybe unstable depending on specific settings or changes.
*/
let long_word_counts: HashMap<String, u64> = [
("sin", 2),
("Sin", 2),
("Lon", 2),
("Ano", 2),
("짧은한", 2),
("긴한글", 2),
("短字符", 2),
("长字符", 2),
("短い文", 2),
("長い文", 2),
("so", 2),
("GP", 2),
]
.iter()
.map(|(key, value)| (key.to_string(), *value))
.collect();
let trainer = BpeTrainer::builder()
.max_token_length(Some(2))
.show_progress(false)
.min_frequency(0)
.build();
let mut model = BPE::default();
trainer.do_train(&long_word_counts, &mut model).unwrap();
let trained_vocab: HashMap<String, u32> = model.get_vocab();
let expected_vocab: HashMap<String, u32> = [
("短", 12),
("n", 6),
("i", 5),
("s", 8),
("字符", 23),
("長", 14),
("긴", 17),
("い文", 22),
("L", 2),
("in", 21),
("o", 7),
("은한", 29),
("S", 4),
("P", 3),
("so", 27),
("符", 13),
("文", 11),
("字", 10),
("짧", 19),
("GP", 25),
("글", 16),
("G", 1),
("An", 24),
("长", 15),
("A", 0),
("Lo", 26),
("긴한", 28),
("い", 9),
("한", 20),
("은", 18),
]
.iter()
.cloned()
.map(|(k, v)| (k.to_string(), v))
.collect();
assert_eq!(trained_vocab, expected_vocab)
}
}
| tokenizers/tokenizers/src/models/bpe/trainer.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/bpe/trainer.rs",
"repo_id": "tokenizers",
"token_count": 15462
} |
use crate::processors::byte_level::bytes_char;
use crate::tokenizer::{NormalizedString, Normalizer, Result};
use crate::utils::macro_rules_attribute;
use std::collections::{HashMap, HashSet};
#[derive(Clone, Debug)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct ByteLevel;
lazy_static! {
static ref BYTES_CHAR: HashMap<u8, char> = bytes_char();
static ref CHAR_BYTES: HashMap<char, u8> =
bytes_char().into_iter().map(|(c, b)| (b, c)).collect();
}
impl Default for ByteLevel {
fn default() -> Self {
Self::new()
}
}
impl ByteLevel {
pub fn new() -> Self {
Self {}
}
pub fn alphabet() -> HashSet<char> {
BYTES_CHAR.values().copied().collect()
}
}
impl Normalizer for ByteLevel {
    /// Map every byte of the normalized string to its byte-level unicode character, in place
fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
if !normalized.is_empty() {
let s = normalized.get();
let mut transformations: Vec<(char, isize)> = Vec::with_capacity(s.len());
let mut i = 0;
for cur_char in s.chars() {
let size = cur_char.len_utf8();
let bytes = s[i..i + size].as_bytes();
i += size;
transformations.extend(
bytes
.iter()
.enumerate()
.map(|(i, b)| (BYTES_CHAR[b], isize::from(i > 0))),
);
}
normalized.transform(transformations, 0);
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_byte_level_normalize() {
let original = "Hello 我今天能为你做什么";
let normalized = "HelloĠæĪijä»Ĭ天èĥ½ä¸ºä½łåģļä»Ģä¹Ī";
assert_ne!(original, normalized);
let mut n = NormalizedString::from(original);
let byte_level = ByteLevel::new();
byte_level.normalize(&mut n).unwrap();
assert_eq!(&n.get(), &normalized);
assert_eq!(
n,
NormalizedString::new(
original.to_string(),
normalized.to_string(),
vec![
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(5, 6),
(6, 9),
(6, 9),
(6, 9),
(6, 9),
(6, 9),
(6, 9),
(9, 12),
(9, 12),
(9, 12),
(9, 12),
(9, 12),
(9, 12),
(12, 15),
(12, 15),
(12, 15),
(12, 15),
(12, 15),
(12, 15),
(15, 18),
(15, 18),
(15, 18),
(15, 18),
(15, 18),
(15, 18),
(18, 21),
(18, 21),
(18, 21),
(18, 21),
(18, 21),
(18, 21),
(21, 24),
(21, 24),
(21, 24),
(21, 24),
(21, 24),
(21, 24),
(24, 27),
(24, 27),
(24, 27),
(24, 27),
(24, 27),
(24, 27),
(27, 30),
(27, 30),
(27, 30),
(27, 30),
(27, 30),
(27, 30),
(30, 33),
(30, 33),
(30, 33),
(30, 33),
(30, 33),
(30, 33)
],
0
)
);
assert_eq!(
n.alignments_original(),
vec![
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 7),
(7, 13),
(7, 13),
(7, 13),
(13, 19),
(13, 19),
(13, 19),
(19, 25),
(19, 25),
(19, 25),
(25, 31),
(25, 31),
(25, 31),
(31, 37),
(31, 37),
(31, 37),
(37, 43),
(37, 43),
(37, 43),
(43, 49),
(43, 49),
(43, 49),
(49, 55),
(49, 55),
(49, 55),
(55, 61),
(55, 61),
(55, 61)
]
);
}
}
| tokenizers/tokenizers/src/normalizers/byte_level.rs/0 | {
"file_path": "tokenizers/tokenizers/src/normalizers/byte_level.rs",
"repo_id": "tokenizers",
"token_count": 3445
} |
use crate::utils::SysRegex;
use serde::{Deserialize, Deserializer, Serialize};
use crate::tokenizer::{
pattern::Invert, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior,
};
/// Represents the different patterns that `Split` can use
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)]
pub enum SplitPattern {
String(String),
Regex(String),
}
impl From<String> for SplitPattern {
fn from(v: String) -> Self {
Self::String(v)
}
}
impl From<&str> for SplitPattern {
fn from(v: &str) -> Self {
Self::String(v.to_owned())
}
}
#[derive(Debug, Serialize)]
#[serde(tag = "type")]
pub struct Split {
pub pattern: SplitPattern,
#[serde(skip)]
pub regex: SysRegex,
pub behavior: SplitDelimiterBehavior,
pub invert: bool,
}
impl<'de> Deserialize<'de> for Split {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
enum Type {
Split,
}
#[derive(Deserialize)]
pub struct SplitHelper {
#[serde(rename = "type")]
_type: Type,
pattern: SplitPattern,
behavior: SplitDelimiterBehavior,
invert: bool,
}
let helper = SplitHelper::deserialize(deserializer)?;
Self::new(helper.pattern, helper.behavior, helper.invert).map_err(serde::de::Error::custom)
}
}
impl Clone for Split {
fn clone(&self) -> Self {
Self::new(self.pattern.clone(), self.behavior, self.invert).unwrap()
}
}
impl PartialEq for Split {
fn eq(&self, other: &Self) -> bool {
self.pattern == other.pattern
&& self.behavior == other.behavior
&& self.invert == other.invert
}
}
impl Split {
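    /// Create a new `Split` pre-tokenizer from a string or regex pattern.
    ///
    /// # Examples
    ///
    /// A minimal sketch splitting on a literal space and dropping the delimiter:
    ///
    /// ```
    /// use tokenizers::pre_tokenizers::split::Split;
    /// use tokenizers::SplitDelimiterBehavior;
    ///
    /// let pretok = Split::new(" ", SplitDelimiterBehavior::Removed, false).unwrap();
    /// ```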
pub fn new<I: Into<SplitPattern>>(
pattern: I,
behavior: SplitDelimiterBehavior,
invert: bool,
) -> Result<Self> {
let pattern: SplitPattern = pattern.into();
let regex = match &pattern {
SplitPattern::String(s) => SysRegex::new(®ex::escape(s))?,
SplitPattern::Regex(r) => SysRegex::new(r)?,
};
Ok(Self {
pattern,
regex,
behavior,
invert,
})
}
}
impl PreTokenizer for Split {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
if self.invert {
pretokenized.split(|_, normalized| normalized.split(Invert(&self.regex), self.behavior))
} else {
pretokenized.split(|_, normalized| normalized.split(&self.regex, self.behavior))
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType, PreTokenizer};
use SplitDelimiterBehavior::*;
#[test]
fn basic() {
let tests = vec![
(
Removed,
"How are you doing?",
vec![
("How", (0, 3)),
("are", (4, 7)),
("you", (8, 11)),
("doing", (12, 17)),
("?", (17, 18)),
],
),
(
Isolated,
"How are you doing?",
vec![
("How", (0, 3)),
(" ", (3, 4)),
("are", (4, 7)),
(" ", (7, 8)),
("you", (8, 11)),
(" ", (11, 12)),
("doing", (12, 17)),
("?", (17, 18)),
],
),
(
MergedWithPrevious,
"How are you doing?",
vec![
("How ", (0, 4)),
("are ", (4, 8)),
("you ", (8, 12)),
("doing", (12, 17)),
("?", (17, 18)),
],
),
(
MergedWithNext,
"How are you doing?",
vec![
("How", (0, 3)),
(" are", (3, 7)),
(" you", (7, 11)),
(" doing", (11, 17)),
("?", (17, 18)),
],
),
(
Contiguous,
"How are you doing?",
vec![
("How", (0, 3)),
(" ", (3, 4)),
("are", (4, 7)),
(" ", (7, 8)),
("you", (8, 11)),
(" ", (11, 12)),
("doing?", (12, 18)),
],
),
];
// use whitespace regex
let regex = SplitPattern::Regex(r"\w+|[^\w\s]+".into());
for (behavior, s, res) in tests {
let mut pretokenized = PreTokenizedString::from(s);
let pretok = Split::new(regex.clone(), behavior, true).unwrap();
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
res
);
}
}
#[test]
fn regex_string() {
let mut pretok_str_for_regex = PreTokenizedString::from("Hey, man!");
let mut pretok_str_for_string = pretok_str_for_regex.clone();
// pre-tokenizer splits on " " - one from Regex, one from string
let pretokenizer_regex = Split::new(
SplitPattern::Regex(r"\s+".into()),
SplitDelimiterBehavior::Removed,
false,
)
.unwrap();
let pretokenizer_string = Split::new(" ", SplitDelimiterBehavior::Removed, false).unwrap();
pretokenizer_regex
.pre_tokenize(&mut pretok_str_for_regex)
.unwrap();
pretokenizer_string
.pre_tokenize(&mut pretok_str_for_string)
.unwrap();
assert_eq!(pretok_str_for_regex, pretok_str_for_string);
}
#[test]
fn invert() {
let mut pretok_str = PreTokenizedString::from("Hello Hello Hello");
let mut pretok_str_for_invert = pretok_str.clone();
// one pre-tokenizer splits on " " - one splits inverted on "Hello"
let pretokenizer = Split::new(" ", SplitDelimiterBehavior::Removed, false).unwrap();
let pretokenizer_invert =
Split::new("Hello", SplitDelimiterBehavior::Removed, true).unwrap();
pretokenizer.pre_tokenize(&mut pretok_str).unwrap();
pretokenizer_invert
.pre_tokenize(&mut pretok_str_for_invert)
.unwrap();
assert_eq!(pretok_str, pretok_str_for_invert);
}
#[test]
fn serialization() {
use SplitDelimiterBehavior::*;
let split = Split::new("Hello", Removed, true).unwrap();
let split_s =
r#"{"type":"Split","pattern":{"String":"Hello"},"behavior":"Removed","invert":true}"#;
assert_eq!(serde_json::to_string(&split).unwrap(), split_s);
assert_eq!(serde_json::from_str::<Split>(split_s).unwrap(), split);
let split = Split::new(SplitPattern::Regex(r"\s+".into()), Isolated, false).unwrap();
let split_s =
r#"{"type":"Split","pattern":{"Regex":"\\s+"},"behavior":"Isolated","invert":false}"#;
assert_eq!(serde_json::to_string(&split).unwrap(), split_s);
assert_eq!(serde_json::from_str::<Split>(split_s).unwrap(), split);
}
}
| tokenizers/tokenizers/src/pre_tokenizers/split.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/split.rs",
"repo_id": "tokenizers",
"token_count": 4042
} |
use std::marker::PhantomData;
use serde::{
self,
de::{Error, MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
use super::{added_vocabulary::AddedTokenWithId, TokenizerImpl};
use crate::{Decoder, Model, Normalizer, PostProcessor, PreTokenizer, TokenizerBuilder};
static SERIALIZATION_VERSION: &str = "1.0";
impl<M, N, PT, PP, D> Serialize for TokenizerImpl<M, N, PT, PP, D>
where
M: Serialize,
N: Serialize,
PT: Serialize,
PP: Serialize,
D: Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut tokenizer = serializer.serialize_struct("Tokenizer", 9)?;
// Start by adding the current version
tokenizer.serialize_field("version", SERIALIZATION_VERSION)?;
// Params
tokenizer.serialize_field("truncation", &self.truncation)?;
tokenizer.serialize_field("padding", &self.padding)?;
// Added tokens
tokenizer.serialize_field("added_tokens", &self.added_vocabulary)?;
// Then add our parts
tokenizer.serialize_field("normalizer", &self.normalizer)?;
tokenizer.serialize_field("pre_tokenizer", &self.pre_tokenizer)?;
tokenizer.serialize_field("post_processor", &self.post_processor)?;
tokenizer.serialize_field("decoder", &self.decoder)?;
tokenizer.serialize_field("model", &self.model)?;
tokenizer.end()
}
}
impl<'de, M, N, PT, PP, D> Deserialize<'de> for TokenizerImpl<M, N, PT, PP, D>
where
M: Deserialize<'de> + Model,
N: Deserialize<'de> + Normalizer,
PT: Deserialize<'de> + PreTokenizer,
PP: Deserialize<'de> + PostProcessor,
D: Deserialize<'de> + Decoder,
{
fn deserialize<De>(deserializer: De) -> Result<Self, De::Error>
where
De: Deserializer<'de>,
{
deserializer.deserialize_struct(
"Tokenizer",
&[
"version",
"truncation",
"padding",
"added_tokens",
"normalizer",
"pre_tokenizer",
"post_processor",
"decoder",
"model",
],
TokenizerVisitor(
PhantomData,
PhantomData,
PhantomData,
PhantomData,
PhantomData,
),
)
}
}
struct TokenizerVisitor<M, N, PT, PP, D>(
PhantomData<M>,
PhantomData<N>,
PhantomData<PT>,
PhantomData<PP>,
PhantomData<D>,
);
impl<'de, M, N, PT, PP, D> Visitor<'de> for TokenizerVisitor<M, N, PT, PP, D>
where
M: Deserialize<'de> + Model,
N: Deserialize<'de> + Normalizer,
PT: Deserialize<'de> + PreTokenizer,
PP: Deserialize<'de> + PostProcessor,
D: Deserialize<'de> + Decoder,
{
type Value = TokenizerImpl<M, N, PT, PP, D>;
fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "struct Tokenizer")
}
fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error>
where
V: MapAccess<'de>,
{
let mut builder = TokenizerBuilder::new();
let mut tokens: Vec<AddedTokenWithId> = vec![];
while let Some(key) = map.next_key::<String>()? {
match key.as_ref() {
"version" => {
let v: String = map.next_value()?;
if &v != "1.0" {
return Err(Error::custom(format!("Unknown tokenizer version '{v}'")));
}
}
"truncation" => {
builder = builder.with_truncation(map.next_value()?);
}
"padding" => {
builder = builder.with_padding(map.next_value()?);
}
"added_tokens" => {
tokens = map.next_value()?;
}
"normalizer" => {
builder = builder.with_normalizer(map.next_value()?);
}
"pre_tokenizer" => {
builder = builder.with_pre_tokenizer(map.next_value()?);
}
"model" => {
builder = builder.with_model(map.next_value()?);
}
"decoder" => {
builder = builder.with_decoder(map.next_value()?);
}
"post_processor" => {
builder = builder.with_post_processor(map.next_value()?);
}
_ => {}
};
}
let mut tokenizer = builder
.build()
.map_err(|e| V::Error::custom(e.to_string()))?;
        // We take care of deserializing the added_tokens ourselves (instead of `AddedVocabulary` directly)
        // because it lets us check that the associated IDs are still correct, and warn the user otherwise
for token in &tokens {
// Warn the user if the id is different than expected
let received_id = tokenizer.token_to_id(&token.token.content);
if let Some(rid) = received_id {
if rid != token.id {
warn!(
"Warning: Token '{}' was expected to have ID '{}' but was given ID '{}'",
token.token.content,
token.id,
rid.to_string()
);
}
}
}
let added_tokens: Vec<_> = tokens.into_iter().map(|token| token.token).collect();
tokenizer.add_tokens(&added_tokens[..]);
Ok(tokenizer)
}
}
#[cfg(test)]
mod tests {
use crate::tokenizer::Tokenizer;
use std::str::FromStr;
#[test]
fn test_deserialization_serialization_invariant() {
let tok_json = r#"{
"version": "1.0",
"truncation": null,
"padding": null,
"added_tokens": [
{
"id": 0,
"content": "[SPECIAL_0]",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 1,
"content": "[SPECIAL_1]",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": true,
"special": false
},
{
"id": 2,
"content": "[SPECIAL_2]",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
}
],
"normalizer": null,
"pre_tokenizer": null,
"post_processor": null,
"decoder": null,
"model": {
"type": "WordPiece",
"unk_token": "[UNK]",
"continuing_subword_prefix": "",
"max_input_chars_per_word": 100,
"vocab": {}
}
}"#;
let tokenizer = Tokenizer::from_str(tok_json).unwrap();
let tok_str = serde_json::to_string_pretty(&tokenizer).unwrap();
// It should be exactly the same as above
assert_eq!(tok_str, tok_json);
}
#[cfg(feature = "http")]
#[test]
fn test_from_pretrained() {
tracing_subscriber::fmt()
.with_max_level(tracing::Level::DEBUG)
.with_target(false)
.init();
let _ = Tokenizer::from_pretrained("Qwen/Qwen2-7B-Instruct", None);
warn!("This should be the first warning");
}
}
| tokenizers/tokenizers/src/tokenizer/serialization.rs/0 | {
"file_path": "tokenizers/tokenizers/src/tokenizer/serialization.rs",
"repo_id": "tokenizers",
"token_count": 3738
} |
mod common;
use common::*;
use tokenizers::decoders::byte_level::ByteLevel;
use tokenizers::decoders::DecoderWrapper;
use tokenizers::models::bpe::BPE;
use tokenizers::models::wordlevel::WordLevel;
use tokenizers::models::wordpiece::WordPiece;
use tokenizers::models::ModelWrapper;
use tokenizers::normalizers::bert::BertNormalizer;
use tokenizers::normalizers::unicode::{NFC, NFKC};
use tokenizers::normalizers::NormalizerWrapper;
use tokenizers::pre_tokenizers::bert::BertPreTokenizer;
use tokenizers::pre_tokenizers::delimiter::CharDelimiterSplit;
use tokenizers::pre_tokenizers::split::{Split, SplitPattern};
use tokenizers::pre_tokenizers::whitespace::Whitespace;
use tokenizers::pre_tokenizers::PreTokenizerWrapper;
use tokenizers::processors::bert::BertProcessing;
use tokenizers::processors::PostProcessorWrapper;
use tokenizers::{SplitDelimiterBehavior, Tokenizer, TokenizerImpl};
#[test]
fn bpe_serde() {
let bpe = get_byte_level_bpe();
let ser = serde_json::to_string(&bpe).unwrap();
let de = serde_json::from_str(&ser).unwrap();
assert_eq!(bpe, de);
}
#[test]
fn wordpiece_serde() {
let wordpiece = get_bert_wordpiece();
let ser = serde_json::to_string(&wordpiece).unwrap();
let de = serde_json::from_str(&ser).unwrap();
assert_eq!(wordpiece, de);
}
#[test]
fn wordlevel_serde() {
let wordlevel = WordLevel::from_file("data/gpt2-vocab.json", "<unk>".into()).unwrap();
let ser = serde_json::to_string(&wordlevel).unwrap();
let de = serde_json::from_str(&ser).unwrap();
assert_eq!(wordlevel, de);
}
#[test]
fn normalizers() {
// Test unit struct
let nfc = NFC;
let nfc_ser = serde_json::to_string(&nfc).unwrap();
assert_eq!(nfc_ser, r#"{"type":"NFC"}"#);
// empty struct can deserialize from self
serde_json::from_str::<NFC>(&nfc_ser).unwrap();
let err: Result<NFKC, _> = serde_json::from_str(&nfc_ser);
assert!(err.is_err(), "NFKC shouldn't be deserializable from NFC");
    // wrapper can deserialize from inner
let nfc_wrapped: NormalizerWrapper = serde_json::from_str(&nfc_ser).unwrap();
match &nfc_wrapped {
NormalizerWrapper::NFC(_) => (),
_ => panic!("NFC wrapped with incorrect variant"),
}
let ser_wrapped = serde_json::to_string(&nfc_wrapped).unwrap();
assert_eq!(ser_wrapped, nfc_ser);
// Test non-empty roundtrip
let bert = BertNormalizer::default();
let bert_ser = serde_json::to_string(&bert).unwrap();
assert_eq!(
bert_ser,
r#"{"type":"BertNormalizer","clean_text":true,"handle_chinese_chars":true,"strip_accents":null,"lowercase":true}"#
);
// make sure we can deserialize to self
serde_json::from_str::<BertNormalizer>(&bert_ser).unwrap();
// wrapper can deserialize from inner serialization
let bert_wrapped: NormalizerWrapper = serde_json::from_str(&bert_ser).unwrap();
match &bert_wrapped {
NormalizerWrapper::BertNormalizer(_) => (),
_ => panic!("BertNormalizer wrapped with incorrect variant"),
}
// wrapped serializes same way as inner
let ser_wrapped = serde_json::to_string(&bert_wrapped).unwrap();
assert_eq!(ser_wrapped, bert_ser);
}
#[test]
fn processors() {
let bert = BertProcessing::new(("SEP".into(), 0), ("CLS".into(), 0));
let bert_ser = serde_json::to_string(&bert).unwrap();
assert_eq!(
bert_ser,
r#"{"type":"BertProcessing","sep":["SEP",0],"cls":["CLS",0]}"#
);
serde_json::from_str::<BertProcessing>(&bert_ser).unwrap();
let bert_wrapped: PostProcessorWrapper = serde_json::from_str(&bert_ser).unwrap();
match &bert_wrapped {
PostProcessorWrapper::Bert(_) => (),
_ => panic!("Bert wrapped with incorrect variant"),
}
let ser_wrapped = serde_json::to_string(&bert_wrapped).unwrap();
assert_eq!(ser_wrapped, bert_ser);
}
#[test]
fn pretoks() {
// Test unit struct
let bert = BertPreTokenizer;
let bert_ser = serde_json::to_string(&bert).unwrap();
assert_eq!(bert_ser, r#"{"type":"BertPreTokenizer"}"#);
// empty struct can deserialize from self
serde_json::from_str::<BertPreTokenizer>(&bert_ser).unwrap();
let err: Result<Whitespace, _> = serde_json::from_str(&bert_ser);
assert!(
err.is_err(),
"Whitespace shouldn't be deserializable from BertPreTokenizer"
);
    // wrapper can deserialize from inner
let bert_wrapped: PreTokenizerWrapper = serde_json::from_str(&bert_ser).unwrap();
match &bert_wrapped {
PreTokenizerWrapper::BertPreTokenizer(_) => (),
_ => panic!("Bert wrapped with incorrect variant"),
}
let ser_wrapped = serde_json::to_string(&bert_wrapped).unwrap();
assert_eq!(ser_wrapped, bert_ser);
// Test non-empty roundtrip
let ch = CharDelimiterSplit::new(' ');
let ch_ser = serde_json::to_string(&ch).unwrap();
assert_eq!(ch_ser, r#"{"type":"CharDelimiterSplit","delimiter":" "}"#);
// make sure we can deserialize to self
serde_json::from_str::<CharDelimiterSplit>(&ch_ser).unwrap();
// wrapper can deserialize from inner serialization
let ch_wrapped: PreTokenizerWrapper = serde_json::from_str(&ch_ser).unwrap();
match &ch_wrapped {
PreTokenizerWrapper::Delimiter(_) => (),
_ => panic!("CharDelimiterSplit wrapped with incorrect variant"),
}
// wrapped serializes same way as inner
let ser_wrapped = serde_json::to_string(&ch_wrapped).unwrap();
assert_eq!(ser_wrapped, ch_ser);
let wsp = Whitespace {};
let wsp_ser = serde_json::to_string(&wsp).unwrap();
assert_eq!(wsp_ser, r#"{"type":"Whitespace"}"#);
serde_json::from_str::<Whitespace>(&wsp_ser).unwrap();
let err: Result<BertPreTokenizer, _> = serde_json::from_str(&wsp_ser);
assert!(
err.is_err(),
"BertPreTokenizer shouldn't be deserializable from Whitespace"
);
let pattern: SplitPattern = "[SEP]".into();
let pretok = Split::new(pattern, SplitDelimiterBehavior::Isolated, false).unwrap();
let pretok_str = serde_json::to_string(&pretok).unwrap();
assert_eq!(
pretok_str,
r#"{"type":"Split","pattern":{"String":"[SEP]"},"behavior":"Isolated","invert":false}"#
);
assert_eq!(serde_json::from_str::<Split>(&pretok_str).unwrap(), pretok);
let pattern = SplitPattern::Regex("[SEP]".to_string());
let pretok = Split::new(pattern, SplitDelimiterBehavior::Isolated, false).unwrap();
let pretok_str = serde_json::to_string(&pretok).unwrap();
assert_eq!(
pretok_str,
r#"{"type":"Split","pattern":{"Regex":"[SEP]"},"behavior":"Isolated","invert":false}"#
);
assert_eq!(serde_json::from_str::<Split>(&pretok_str).unwrap(), pretok);
}
#[test]
fn decoders() {
let byte_level = ByteLevel::default();
let byte_level_ser = serde_json::to_string(&byte_level).unwrap();
assert_eq!(
byte_level_ser,
r#"{"type":"ByteLevel","add_prefix_space":true,"trim_offsets":true,"use_regex":true}"#
);
serde_json::from_str::<ByteLevel>(&byte_level_ser).unwrap();
let byte_level_wrapper: DecoderWrapper = serde_json::from_str(&byte_level_ser).unwrap();
match &byte_level_wrapper {
DecoderWrapper::ByteLevel(_) => (),
_ => panic!("ByteLevel wrapped with incorrect variant"),
}
let ser_wrapped = serde_json::to_string(&byte_level_wrapper).unwrap();
assert_eq!(ser_wrapped, byte_level_ser);
}
#[test]
fn models() {
let bpe = BPE::default();
let bpe_ser = serde_json::to_string(&bpe).unwrap();
serde_json::from_str::<BPE>(&bpe_ser).unwrap();
let bpe_wrapper: ModelWrapper = serde_json::from_str(&bpe_ser).unwrap();
match &bpe_wrapper {
ModelWrapper::BPE(_) => (),
_ => panic!("BPE wrapped with incorrect variant"),
}
let ser_wrapped = serde_json::to_string(&bpe_wrapper).unwrap();
assert_eq!(ser_wrapped, bpe_ser);
}
#[test]
fn tokenizer() {
let wordpiece = WordPiece::default();
let mut tokenizer = Tokenizer::new(wordpiece);
tokenizer.with_normalizer(Some(NFC));
let ser = serde_json::to_string(&tokenizer).unwrap();
let _: Tokenizer = serde_json::from_str(&ser).unwrap();
let unwrapped_nfc_tok: TokenizerImpl<
WordPiece,
NFC,
PreTokenizerWrapper,
PostProcessorWrapper,
DecoderWrapper,
> = serde_json::from_str(&ser).unwrap();
assert_eq!(serde_json::to_string(&unwrapped_nfc_tok).unwrap(), ser);
let err: Result<
TokenizerImpl<WordPiece, NFKC, PreTokenizerWrapper, PostProcessorWrapper, DecoderWrapper>,
_,
> = serde_json::from_str(&ser);
assert!(err.is_err(), "NFKC shouldn't be deserializable from NFC");
let de: TokenizerImpl<
WordPiece,
NormalizerWrapper,
PreTokenizerWrapper,
PostProcessorWrapper,
DecoderWrapper,
> = serde_json::from_str(&ser).unwrap();
assert_eq!(serde_json::to_string(&de).unwrap(), ser);
}
#[test]
fn bpe_with_dropout_serde() {
let mut bpe = BPE::default();
bpe.dropout = Some(0.1);
let ser = serde_json::to_string(&bpe).unwrap();
let de = serde_json::from_str(&ser).unwrap();
assert_eq!(bpe, de);
// set dropout to 0.0 (which is analogous to None) and reserialize
bpe.dropout = Some(0.0);
let ser = serde_json::to_string(&bpe).unwrap();
let de = serde_json::from_str(&ser).unwrap();
assert_eq!(bpe, de);
}
#[test]
fn test_deserialize_long_file() {
let _tokenizer = Tokenizer::from_file("data/albert-base-v1-tokenizer.json").unwrap();
}
| tokenizers/tokenizers/tests/serialization.rs/0 | {
"file_path": "tokenizers/tokenizers/tests/serialization.rs",
"repo_id": "tokenizers",
"token_count": 3890
} |
# Using quantized models (dtypes)
Before Transformers.js v3, we used the `quantized` option to specify whether to use a quantized (q8) or full-precision (fp32) variant of the model by setting `quantized` to `true` or `false`, respectively. Now, we've added the ability to select from a much larger list with the `dtype` parameter.
The list of available quantizations depends on the model, but some common ones are: full-precision (`"fp32"`), half-precision (`"fp16"`), 8-bit (`"q8"`, `"int8"`, `"uint8"`), and 4-bit (`"q4"`, `"bnb4"`, `"q4f16"`).
<p align="center">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/transformersjs-v3/dtypes-dark.jpg" style="max-width: 100%;">
<source media="(prefers-color-scheme: light)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/transformersjs-v3/dtypes-light.jpg" style="max-width: 100%;">
<img alt="Available dtypes for mixedbread-ai/mxbai-embed-xsmall-v1" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/transformersjs-v3/dtypes-dark.jpg" style="max-width: 100%;">
</picture>
<a href="https://huggingface.co/mixedbread-ai/mxbai-embed-xsmall-v1/tree/main/onnx">(e.g., mixedbread-ai/mxbai-embed-xsmall-v1)</a>
</p>
## Basic usage
**Example:** Run Qwen2.5-0.5B-Instruct in 4-bit quantization ([demo](https://v2.scrimba.com/s0dlcpv0ci))
```js
import { pipeline } from "@huggingface/transformers";
// Create a text generation pipeline
const generator = await pipeline(
"text-generation",
"onnx-community/Qwen2.5-0.5B-Instruct",
{ dtype: "q4", device: "webgpu" },
);
// Define the list of messages
const messages = [
{ role: "system", content: "You are a helpful assistant." },
{ role: "user", content: "Tell me a funny joke." },
];
// Generate a response
const output = await generator(messages, { max_new_tokens: 128 });
console.log(output[0].generated_text.at(-1).content);
```
## Per-module dtypes
Some encoder-decoder models, like Whisper or Florence-2, are extremely sensitive to quantization settings, especially for the encoder. For this reason, we added the ability to select per-module dtypes, which can be done by providing a mapping from module name to dtype.
**Example:** Run Florence-2 on WebGPU ([demo](https://v2.scrimba.com/s0pdm485fo))
```js
import { Florence2ForConditionalGeneration } from "@huggingface/transformers";
const model = await Florence2ForConditionalGeneration.from_pretrained(
"onnx-community/Florence-2-base-ft",
{
dtype: {
embed_tokens: "fp16",
vision_encoder: "fp16",
encoder_model: "q4",
decoder_model_merged: "q4",
},
device: "webgpu",
},
);
```
<p align="middle">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/transformersjs-v3/florence-2-webgpu.gif" alt="Florence-2 running on WebGPU" />
</p>
<details>
<summary>
See full code example
</summary>
```js
import {
Florence2ForConditionalGeneration,
AutoProcessor,
AutoTokenizer,
RawImage,
} from "@huggingface/transformers";
// Load model, processor, and tokenizer
const model_id = "onnx-community/Florence-2-base-ft";
const model = await Florence2ForConditionalGeneration.from_pretrained(
model_id,
{
dtype: {
embed_tokens: "fp16",
vision_encoder: "fp16",
encoder_model: "q4",
decoder_model_merged: "q4",
},
device: "webgpu",
},
);
const processor = await AutoProcessor.from_pretrained(model_id);
const tokenizer = await AutoTokenizer.from_pretrained(model_id);
// Load image and prepare vision inputs
const url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg";
const image = await RawImage.fromURL(url);
const vision_inputs = await processor(image);
// Specify task and prepare text inputs
const task = "<MORE_DETAILED_CAPTION>";
const prompts = processor.construct_prompts(task);
const text_inputs = tokenizer(prompts);
// Generate text
const generated_ids = await model.generate({
...text_inputs,
...vision_inputs,
max_new_tokens: 100,
});
// Decode generated text
const generated_text = tokenizer.batch_decode(generated_ids, {
skip_special_tokens: false,
})[0];
// Post-process the generated text
const result = processor.post_process_generation(
generated_text,
task,
image.size,
);
console.log(result);
// { '<MORE_DETAILED_CAPTION>': 'A green car is parked in front of a tan building. The building has a brown door and two brown windows. The car is a two door and the door is closed. The green car has black tires.' }
```
</details>
| transformers.js/docs/source/guides/dtypes.md/0 | {
"file_path": "transformers.js/docs/source/guides/dtypes.md",
"repo_id": "transformers.js",
"token_count": 1698
} |
.sidebar {
background-color: #181818;
color: #CCCCCC;
}
body{
background-color: #1F1F1F;
color: white;
}
.progress-container {
position: relative;
font-size: 16px;
color: white;
/* background-color: #e9ecef; */
border-radius: 8px;
text-align: left;
overflow: hidden;
}
.progress-bar {
padding: 2px 4px;
z-index: 0;
top: 0;
width: 1%;
height: 100%;
overflow: hidden;
background-color: #007bff;
white-space: nowrap;
}
.progress-text {
z-index: 2;
}
| transformers.js/examples/code-completion/src/App.css/0 | {
"file_path": "transformers.js/examples/code-completion/src/App.css",
"repo_id": "transformers.js",
"token_count": 208
} |
import { useState, useRef, useEffect, useCallback } from 'react'
import './App.css'
const PLACEHOLDER_TEXTS = [
"'To Kill a Mockingbird' is a novel by Harper Lee published in 1960. It was immediately successful, winning the Pulitzer Prize, and has become a classic of modern American literature.",
"The novel 'Moby-Dick' was written by Herman Melville and first published in 1851. It is considered a masterpiece of American literature and deals with complex themes of obsession, revenge, and the conflict between good and evil.",
"Harper Lee, an American novelist widely known for her novel 'To Kill a Mockingbird', was born in 1926 in Monroeville, Alabama. She received the Pulitzer Prize for Fiction in 1961.",
"Jane Austen was an English novelist known primarily for her six major novels, which interpret, critique and comment upon the British landed gentry at the end of the 18th century.",
"The 'Harry Potter' series, which consists of seven fantasy novels written by British author J.K. Rowling, is among the most popular and critically acclaimed books of the modern era.",
"'The Great Gatsby', a novel written by American author F. Scott Fitzgerald, was published in 1925. The story is set in the Jazz Age and follows the life of millionaire Jay Gatsby and his pursuit of Daisy Buchanan."
].sort(() => Math.random() - 0.5);
function App() {
const [status, setStatus] = useState('idle');
const [query, setQuery] = useState(`Who wrote 'To Kill a Mockingbird'?`);
const [documents, setDocuments] = useState(PLACEHOLDER_TEXTS.join('\n'));
const [results, setResults] = useState([]);
// Create a reference to the worker object.
const worker = useRef(null);
  // We use the `useEffect` hook to set up the worker as soon as the `App` component is mounted.
useEffect(() => {
if (!worker.current) {
// Create the worker if it does not yet exist.
worker.current = new Worker(new URL('./worker.js', import.meta.url), {
type: 'module'
});
}
// Create a callback function for messages from the worker thread.
const onMessageReceived = (e) => {
const status = e.data.status;
if (e.data.file?.endsWith('.onnx')) {
if (status === 'initiate') {
setStatus('loading');
} else if (status === 'done') {
setStatus('ready');
}
} else if (status === 'complete') {
setResults(e.data.output);
setStatus('idle');
}
};
// Attach the callback function as an event listener.
worker.current.addEventListener('message', onMessageReceived);
// Define a cleanup function for when the component is unmounted.
return () => worker.current.removeEventListener('message', onMessageReceived);
}, []);
const run = useCallback(() => {
setStatus('processing');
worker.current.postMessage({
query,
documents,
});
}, [query, documents])
const busy = status !== 'idle';
return (
<div className='flex flex-col h-full'>
<h1 className='text-2xl md:text-4xl font-bold text-center mb-1'>Reranking w/ The Crispy mixedbread Rerank Models</h1>
<p className='text-lg md:text-xl font-medium text-center mb-2'>Powered by <a href='https://huggingface.co/mixedbread-ai/mxbai-rerank-xsmall-v1' target='_blank' rel='noreferrer'>mxbai-rerank-xsmall-v1</a> and <a href='http://huggingface.co/docs/transformers.js' target='_blank' rel='noreferrer'>🤗 Transformers.js</a></p>
<div className='flex-grow flex flex-wrap p-4'>
<div className='flex flex-col items-center gap-y-1 w-full md:w-1/2'>
<label className='text-lg font-medium'>Query</label>
<textarea
placeholder='Enter query.'
className='border w-full p-1 resize-none overflow-hidden h-10'
value={query}
onChange={e => {
setQuery(e.target.value);
setResults([]);
}}
></textarea>
<label className='text-lg font-medium mt-1'>Documents</label>
<textarea
placeholder='Enter documents to compare with the query. One sentence per line.'
className='border w-full p-1 h-full resize-none'
value={documents}
onChange={e => {
setDocuments(e.target.value);
setResults([]);
}}
></textarea>
<button
className='border py-1 px-2 bg-green-400 rounded text-white text-lg font-medium disabled:opacity-50 disabled:cursor-not-allowed'
disabled={busy}
onClick={run}>{
!busy
? 'Rerank'
: status === 'loading'
? 'Model loading...'
: 'Processing'
}</button>
</div>
<div className='flex flex-col items-center w-full md:w-1/2 gap-y-1'>
{results.length > 0 && (<>
<div className='w-full flex flex-col gap-y-1'>
<label className='text-lg font-medium text-center'>Results</label>
<div className='flex flex-col gap-y-1'>
{results.map((result, i) => (
<div key={i} className='flex gap-x-2 border mx-2 p-1'>
<span className='font-bold'>{result.score.toFixed(3)}</span>
<span>{result.text}</span>
</div>
))}
</div>
</div>
</>)
}
</div>
</div>
</div>
)
}
export default App
| transformers.js/examples/cross-encoder/src/App.jsx/0 | {
"file_path": "transformers.js/examples/cross-encoder/src/App.jsx",
"repo_id": "transformers.js",
"token_count": 2232
} |
# Transformers.js - Sample browser extension
An example project showing how to run 🤗 Transformers.js in a browser extension. Although we only provide instructions for Chrome, the process should be similar for other browsers.
## Getting Started
1. Clone the repo and enter the project directory:
```bash
git clone https://github.com/huggingface/transformers.js.git
cd transformers.js/examples/extension/
```
1. Install the necessary dependencies:
```bash
npm install
```
1. Build the project:
```bash
npm run build
```
1. Add the extension to your browser. To do this, go to `chrome://extensions/`, enable developer mode (top right), and click "Load unpacked". Select the `build` directory from the dialog which appears and click "Select Folder".
1. That's it! You should now be able to open the extension's popup and use the model in your browser!
## Editing the template
We recommend running `npm run dev` while editing the template as it will rebuild the project when changes are made.
All source code can be found in the `./src/` directory:
- `background.js` ([service worker](https://developer.chrome.com/docs/extensions/mv3/service_workers/)) - handles all the requests from the UI, does processing in the background, then returns the result. You will need to reload the extension (by visiting `chrome://extensions/` and clicking the refresh button) after editing this file for changes to be visible in the extension.
- `content.js` ([content script](https://developer.chrome.com/docs/extensions/mv3/content_scripts/)) - contains the code that is injected into every page the user visits. You can use the `sendMessage` API to make requests to the background script (see the sketch after this list). As with the background script, you will need to reload the extension after editing this file for changes to be visible in the extension.
- `popup.html`, `popup.css`, `popup.js` ([toolbar action](https://developer.chrome.com/docs/extensions/reference/action/)) - contains the code for the popup which is visible to the user when they click the extension's icon from the extensions bar. For development, we recommend opening the `popup.html` file in its own tab by visiting `chrome-extension://<ext_id>/popup.html` (remember to replace `<ext_id>` with the extension's ID). You will need to refresh the page while you develop to see the changes you make.
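For reference, here is a minimal sketch of how a content script and the background script can talk to each other. The message shape (`action`/`text`) and the `runModel` helper are hypothetical placeholders — the actual example may use different names — but the `chrome.runtime` messaging pattern is the same:

```js
// content.js (sketch): ask the background service worker to process some text
chrome.runtime.sendMessage({ action: 'classify', text: 'Some selected text' }, (response) => {
  console.log('Result from background script:', response);
});

// background.js (sketch): run inference and reply asynchronously
chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
  if (message.action !== 'classify') return;
  runModel(message.text).then(sendResponse); // `runModel` wraps your Transformers.js pipeline call
  return true; // keep the message channel open for the asynchronous response
});
```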
| transformers.js/examples/extension/README.md/0 | {
"file_path": "transformers.js/examples/extension/README.md",
"repo_id": "transformers.js",
"token_count": 624
} |
import { useEffect, useState, useRef } from 'react';
import { AutoTokenizer, MusicgenForConditionalGeneration, BaseStreamer } from '@xenova/transformers';
import { encodeWAV, share } from './utils.js';
import './App.css';
const MODEL_ID = 'Xenova/musicgen-small';
// Adapted from https://huggingface.co/spaces/facebook/MusicGen
const EXAMPLES = [
'80s pop track with bassy drums and synth',
'90s rock song with loud guitars and heavy drums',
'a light and cheerful EDM track, with syncopated drums, airy pads, and strong emotions bpm: 130',
'A cheerful country song with acoustic guitars',
'lofi slow bpm electro chill with organic samples',
];
// Enable sharing if running on Hugging Face Spaces
const SHARING_ENABLED = window.location.host.endsWith('.hf.space');
// Streamer to update progress
class CallbackStreamer extends BaseStreamer {
constructor(callback_fn) {
super();
this.callback_fn = callback_fn;
}
put(value) {
return this.callback_fn(value);
}
end() {
return this.callback_fn();
}
}
// Main App component
const App = () => {
// Input/output state
const [textInput, setTextInput] = useState(EXAMPLES[0]);
const [progress, setProgress] = useState(0);
const [loadProgress, setLoadProgress] = useState({});
const [statusText, setStatusText] = useState('Loading model (656MB)...');
const [result, setResult] = useState(null);
const audioRef = useRef(null);
// Model and tokenizer references
const modelPromise = useRef(null);
const tokenizerPromise = useRef(null);
// Generation parameters
const [guidance_scale, setGuidanceScale] = useState(3);
const [temperature, setTemperature] = useState(1);
const [duration, setDuration] = useState(10);
// Load model and tokenizer on first render
useEffect(() => {
modelPromise.current ??= MusicgenForConditionalGeneration.from_pretrained(MODEL_ID, {
progress_callback: (data) => {
if (data.status !== 'progress') return;
setLoadProgress(prev => ({ ...prev, [data.file]: data }))
},
dtype: {
text_encoder: 'q8',
decoder_model_merged: 'q8',
encodec_decode: 'fp32',
},
device: 'wasm',
});
tokenizerPromise.current ??= AutoTokenizer.from_pretrained(MODEL_ID);
}, []);
// Update progress bar based on load progress
useEffect(() => {
const items = Object.values(loadProgress);
if (items.length !== 5) return; // 5 files to load
let loaded = 0;
let total = 0;
for (const data of Object.values(loadProgress)) {
loaded += data.loaded;
total += data.total;
}
const progress = loaded / total;
setProgress(progress);
setStatusText(progress === 1
? 'Ready!'
: `Loading model (${(progress * 100).toFixed()}% of 656MB)...`
);
}, [loadProgress]);
// Function to handle generating music
const generateMusic = async () => {
// Reset audio player and result
audioRef.current.src = '';
setResult(null);
// Get model and tokenizer
const tokenizer = await tokenizerPromise.current;
const model = await modelPromise.current;
// Get number of tokens to match user-specified duration (more intuitive for user)
// 503 tokens -> 10 seconds generated => ~50 tokens per second
// https://huggingface.co/docs/transformers/model_doc/musicgen#generation
const max_length = Math.min(
Math.max(Math.floor(duration * 50), 1) + 4,
model.generation_config.max_length ?? 1500,
);
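// Worked example: with duration = 10 seconds, max_length = floor(10 * 50) + 4 = 504 tokens,
// clamped to the model's generation_config.max_length (with 1500 as the fallback above).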
// Create a streamer to update progress
let num_tokens = 0;
const streamer = new CallbackStreamer((value) => {
const percent = value === undefined ? 1 : ++num_tokens / max_length;
setStatusText(`Generating (${(percent * 100).toFixed()}%)...`);
setProgress(percent);
});
// Tokenize input text
const inputs = tokenizer(textInput);
// Generate music
const audio_values = await model.generate({
// Inputs
...inputs,
// Generation parameters
max_length,
guidance_scale,
temperature,
// Outputs
streamer,
});
setStatusText('Encoding audio...');
// Encode audio values to WAV
const sampling_rate = model.config.audio_encoder.sampling_rate;
const wav = encodeWAV(audio_values.data, sampling_rate);
const blob = new Blob([wav], { type: 'audio/wav' });
setResult(blob);
audioRef.current.src = URL.createObjectURL(blob);
setStatusText('Done!');
};
return (
<div className="container mx-auto p-8">
<h1 className="text-5xl font-bold mb-2">MusicGen Web</h1>
<h2 className="text-2xl font-semibold mb-4">In-browser text-to-music w/ <a className="underline" href="https://github.com/huggingface/transformers.js">🤗 Transformers.js!</a>
</h2>
{/* Text input for user */}
<input
type="text"
placeholder="Describe the music to generate..."
value={textInput}
onChange={(e) => setTextInput(e.target.value)}
className="border border-gray-300 p-2 mb-4 w-full rounded"
/>
{/* Example buttons */}
<div className="mb-4 flex gap-2 justify-center text-sm">
{EXAMPLES.map((example, i) => (
<button key={i} className="bg-blue-500 hover:bg-blue-400 transition-colors duration-100 text-white px-2 py-2 rounded" onClick={(e) => setTextInput(e.target.innerText)}>{example}</button>
))}
</div>
{/* Generation parameters */}
<div className="flex mb-4 justify-center gap-2">
{/* Duration */}
<div>
<label className="block text-sm font-semibold mb-1">Duration</label>
<input type="range" min={1} max={30} value={duration} onChange={(e) => setDuration(e.target.value)} />
<p className="text-sm text-center">{`${duration} second${duration > 1 ? 's' : ''}`}</p>
</div>
{/* Guidance Scale */}
<div className="mr-4">
<label className="block text-sm font-semibold mb-1">Guidance Scale</label>
<input type="range" min={1} max={10} value={guidance_scale} onChange={(e) => setGuidanceScale(e.target.value)} />
<p className="text-sm text-center">{guidance_scale}</p>
</div>
{/* Temperature */}
<div>
<label className="block text-sm font-semibold mb-1">Temperature</label>
<input type="range" min={0.1} max={2} step={0.1} value={temperature} onChange={(e) => setTemperature(e.target.value)} />
<p className="text-sm text-center">{temperature}</p>
</div>
</div>
{/* Button to generate music */}
<button className="mb-4 bg-green-500 hover:bg-green-400 transition-colors duration-100 text-white px-4 py-3 rounded-lg font-semibold" onClick={generateMusic}>Generate Music</button>
{/* Progress bar */}
<div className="mb-4">
<div className="bg-gray-200 h-4 w-full rounded-full">
<div className="bg-blue-500 h-4 rounded-full" style={{ width: `${100 * progress}%` }}></div>
</div>
<p className="text-sm text-center mt-1">{statusText}</p>
</div>
{/* Audio player */}
{<div className="flex justify-center flex-col items-center">
<audio ref={audioRef} controls type="audio/wav" />
{SHARING_ENABLED && result &&
<button
className="bg-red-500 hover:bg-red-400 transition-colors duration-100 text-white px-2 py-1 my-2 rounded-lg text-sm"
onClick={async (e) => {
e.target.disabled = true;
e.target.innerText = 'Uploading...';
await share(result, {
prompt: textInput,
duration,
guidance_scale,
temperature,
});
e.target.disabled = false;
e.target.innerText = 'Share';
}
}>Share</button>
}
</div>}
</div>
);
};
export default App;
| transformers.js/examples/musicgen-web/src/App.jsx/0 | {
"file_path": "transformers.js/examples/musicgen-web/src/App.jsx",
"repo_id": "transformers.js",
"token_count": 3165
} |
'use client'
import { useState, useEffect, useCallback, useRef } from 'react'
import { Modal } from './components/Modal';
import { SearchBar } from './components/SearchBar';
import { ImageGrid } from './components/ImageGrid';
export default function Home() {
// Application state
const [ready, setReady] = useState(null);
const [images, setImages] = useState(null);
const [currentImage, setCurrentImage] = useState(null);
// Create a reference to the worker object.
const worker = useRef(null);
// We use the `useEffect` hook to set up the worker as soon as the `App` component is mounted.
useEffect(() => {
if (!worker.current) {
// Create the worker if it does not yet exist.
worker.current = new Worker(new URL('./worker.js', import.meta.url), {
type: 'module'
});
}
const onMessageReceived = (e) => {
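// Worker messages: 'initiate' (model download started), 'ready' (model and embedding database loaded),
// 'complete' (search results for the latest query).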
switch (e.data.status) {
case 'initiate':
setReady(false);
break;
case 'ready':
setReady(true);
break;
case 'complete':
setImages(e.data.output);
break;
}
};
// Attach the callback function as an event listener.
worker.current.addEventListener('message', onMessageReceived);
// Define a cleanup function for when the component is unmounted.
return () => worker.current.removeEventListener('message', onMessageReceived);
}, []);
const search = useCallback((text) => {
if (worker.current) {
worker.current.postMessage({ text });
}
}, []);
return (
<main className="mx-auto max-w-[1960px] p-4 relative">
<Modal currentImage={currentImage} setCurrentImage={setCurrentImage} />
<SearchBar search={search} />
{ready === false && (
<div className="z-10 fixed top-0 left-0 w-full h-full bg-black bg-opacity-50 flex items-center justify-center">
<div className="text-white text-2xl font-bold">Loading model and database...</div>
</div>
)}
<ImageGrid images={images} setCurrentImage={setCurrentImage} />
</main>
)
}
| transformers.js/examples/semantic-image-search-client/src/app/page.js/0 | {
"file_path": "transformers.js/examples/semantic-image-search-client/src/app/page.js",
"repo_id": "transformers.js",
"token_count": 772
} |
// Helper script to update the database with image embeddings
import { AutoProcessor, RawImage, CLIPVisionModelWithProjection } from '@xenova/transformers';
import { createClient } from '@supabase/supabase-js'
if (!process.env.SUPABASE_SECRET_KEY) {
throw new Error('Missing `SUPABASE_SECRET_KEY` environment variable.')
}
// Create a single supabase client for interacting with your database
const supabase = createClient(
process.env.SUPABASE_URL,
process.env.SUPABASE_SECRET_KEY,
)
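// Note: this script assumes an `images` table containing (at least) the columns used below:
// `photo_id`, `photo_image_url`, a boolean `ignore` flag, and a pgvector `image_embedding` column
// (512-dimensional for clip-vit-base-patch16). A hypothetical definition might look like:
//
//   create table images (
//     photo_id text primary key,
//     photo_image_url text,
//     ignore boolean default false,
//     image_embedding vector(512)
//   );
//
// The actual schema may differ; see the Supabase vector-columns guide linked further down.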
let { data, error } = await supabase
.from('images')
.select('*')
.neq('ignore', true)
.is('image_embedding', null);
if (error) {
throw error;
}
// Load processor and vision model
const model_id = 'Xenova/clip-vit-base-patch16';
const processor = await AutoProcessor.from_pretrained(model_id);
const vision_model = await CLIPVisionModelWithProjection.from_pretrained(model_id, {
quantized: false,
});
for (const image_data of data) {
let image;
try {
image = await RawImage.read(image_data.photo_image_url);
} catch (e) {
// Unable to load image, so we ignore it
console.warn('Ignoring image due to error', e)
await supabase
.from('images')
.update({ ignore: true })
.eq('photo_id', image_data.photo_id)
.select()
continue;
}
// Read image and run processor
let image_inputs = await processor(image);
// Compute embeddings
const { image_embeds } = await vision_model(image_inputs);
const embed_as_list = image_embeds.tolist()[0];
// https://supabase.com/docs/guides/ai/vector-columns#storing-a-vector--embedding
const { data, error } = await supabase
.from('images')
.update({ image_embedding: embed_as_list })
.eq('photo_id', image_data.photo_id)
.select()
if (error) {
console.error('error', error)
} else {
console.log('success', image_data.photo_id)
}
}
| transformers.js/examples/semantic-image-search/scripts/update-database.mjs/0 | {
"file_path": "transformers.js/examples/semantic-image-search/scripts/update-database.mjs",
"repo_id": "transformers.js",
"token_count": 778
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Transformers.js | Real-time object detection</title>
</head>
<body>
<h1>
Real-time object detection w/
<a href="https://github.com/huggingface/transformers.js" target="_blank">🤗 Transformers.js</a>
</h1>
<h4>
Runs locally in your browser, powered by
<a href="https://github.com/WongKinYiu/yolov9" target="_blank">YOLOv9</a>
</h4>
<div id="container">
<video id="video" autoplay muted playsinline></video>
<canvas id="canvas" width="360" height="240"></canvas>
<div id="overlay"></div>
</div>
<div id="controls">
<div>
<label>Image size</label>
(<label id="size-value">128</label>)
<br>
<input id="size" type="range" min="64" max="256" step="32" value="128" disabled>
</div>
<div>
<label>Threshold</label>
(<label id="threshold-value">0.25</label>)
<br>
<input id="threshold" type="range" min="0.01" max="1" step="0.01" value="0.25" disabled>
</div>
<div>
<label>Image scale</label>
(<label id="scale-value">0.5</label>)
<br>
<input id="scale" type="range" min="0" max="1" step="0.01" value="0.5" disabled>
</div>
</div>
<label id="status"></label>
<script type="module" src="/main.js"></script>
</body>
</html>
| transformers.js/examples/video-object-detection/index.html/0 | {
"file_path": "transformers.js/examples/video-object-detection/index.html",
"repo_id": "transformers.js",
"token_count": 608
} |
* {
box-sizing: border-box;
padding: 0;
margin: 0;
font-family: sans-serif;
}
html,
body {
height: 100%;
}
body {
padding: 16px 32px;
}
body,
#container {
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
}
#controls {
display: flex;
padding: 1rem;
gap: 1rem;
}
#controls>div {
text-align: center;
}
h1,
h3 {
text-align: center;
}
h3 {
margin-top: 0.5rem;
}
#container {
display: flex;
flex-direction: row;
position: relative;
max-width: 100%;
max-height: 100%;
border: 2px dashed #D1D5DB;
border-radius: 0.75rem;
overflow: hidden;
margin-top: 1rem;
background-size: 100% 100%;
background-position: center;
background-repeat: no-repeat;
}
#video, #output-canvas {
width: 504px;
height: 504px;
}
canvas {
width: 100%;
height: 100%;
}
#status {
min-height: 16px;
margin: 8px 0;
}
| transformers.js/examples/webgpu-video-depth-estimation/style.css/0 | {
"file_path": "transformers.js/examples/webgpu-video-depth-estimation/style.css",
"repo_id": "transformers.js",
"token_count": 372
} |
export default function CrossIcon(props) {
return (
<svg
{...props}
xmlns="http://www.w3.org/2000/svg"
width="24"
height="24"
viewBox="0 0 24 24"
fill="none"
stroke="currentColor"
strokeWidth="2"
strokeLinecap="round"
strokeLinejoin="round"
>
<path d="m9.75 9.75 4.5 4.5m0-4.5-4.5 4.5M21 12a9 9 0 1 1-18 0 9 9 0 0 1 18 0Z" />
</svg>
)
}
| transformers.js/examples/webgpu-vlm/src/components/icons/CrossIcon.jsx/0 | {
"file_path": "transformers.js/examples/webgpu-vlm/src/components/icons/CrossIcon.jsx",
"repo_id": "transformers.js",
"token_count": 304
} |
import { useEffect, useState, useRef, useCallback } from 'react';
import Progress from './components/Progress';
import MediaInput from './components/MediaInput';
import Transcript from './components/Transcript';
import LanguageSelector from './components/LanguageSelector';
async function hasWebGPU() {
if (!navigator.gpu) {
return false;
}
try {
const adapter = await navigator.gpu.requestAdapter();
return !!adapter;
} catch (e) {
return false;
}
}
function App() {
// Create a reference to the worker object.
const worker = useRef(null);
// Model loading and progress
const [status, setStatus] = useState(null);
const [loadingMessage, setLoadingMessage] = useState('');
const [progressItems, setProgressItems] = useState([]);
const mediaInputRef = useRef(null);
const [audio, setAudio] = useState(null);
const [language, setLanguage] = useState('en');
const [result, setResult] = useState(null);
const [time, setTime] = useState(null);
const [currentTime, setCurrentTime] = useState(0);
const [device, setDevice] = useState('webgpu'); // Try use WebGPU first
const [modelSize, setModelSize] = useState('gpu' in navigator ? 196 : 77); // WebGPU=196MB, WebAssembly=77MB
useEffect(() => {
hasWebGPU().then((result) => {
setModelSize(result ? 196 : 77);
setDevice(result ? 'webgpu' : 'wasm');
});
}, []);
// We use the `useEffect` hook to set up the worker as soon as the `App` component is mounted.
useEffect(() => {
if (!worker.current) {
// Create the worker if it does not yet exist.
worker.current = new Worker(new URL('./worker.js', import.meta.url), {
type: 'module'
});
}
// Create a callback function for messages from the worker thread.
const onMessageReceived = (e) => {
switch (e.data.status) {
case 'loading':
// Model file start load: add a new progress item to the list.
setStatus('loading');
setLoadingMessage(e.data.data);
break;
case 'initiate':
setProgressItems(prev => [...prev, e.data]);
break;
case 'progress':
// Model file progress: update one of the progress items.
setProgressItems(
prev => prev.map(item => {
if (item.file === e.data.file) {
return { ...item, ...e.data }
}
return item;
})
);
break;
case 'done':
// Model file loaded: remove the progress item from the list.
setProgressItems(
prev => prev.filter(item => item.file !== e.data.file)
);
break;
case 'ready':
// Pipeline ready: the worker is ready to accept messages.
setStatus('ready');
break;
case 'complete':
setResult(e.data.result);
setTime(e.data.time);
setStatus('ready');
break;
}
};
// Attach the callback function as an event listener.
worker.current.addEventListener('message', onMessageReceived);
// Define a cleanup function for when the component is unmounted.
return () => {
worker.current.removeEventListener('message', onMessageReceived);
};
}, []);
const handleClick = useCallback(() => {
setResult(null);
setTime(null);
if (status === null) {
setStatus('loading');
worker.current.postMessage({ type: 'load', data: { device } });
} else {
setStatus('running');
worker.current.postMessage({
type: 'run', data: { audio, language }
});
}
}, [status, audio, language, device]);
return (
<div className="flex flex-col h-screen mx-auto items justify-end text-gray-800 dark:text-gray-200 bg-white dark:bg-gray-900 max-w-[560px]">
{status === 'loading' && (
<div className="flex justify-center items-center fixed w-screen h-screen bg-black z-10 bg-opacity-[92%] top-0 left-0">
<div className="w-[500px]">
<p className="text-center mb-1 text-white text-md">{loadingMessage}</p>
{progressItems.map(({ file, progress, total }, i) => (
<Progress key={i} text={file} percentage={progress} total={total} />
))}
</div>
</div>
)}
<div className="h-full flex justify-center items-center flex-col relative">
<div className="flex flex-col items-center mb-1 text-center">
<h1 className="text-5xl font-bold mb-2">Whisper Timestamped</h1>
<h2 className="text-xl font-semibold">In-browser speech recognition w/ word-level timestamps</h2>
</div>
<div className="w-full min-h-[220px] flex flex-col justify-center items-center p-2">
{
!audio && (
<p className="mb-2">
You are about to download <a href="https://huggingface.co/onnx-community/whisper-base_timestamped" target="_blank" rel="noreferrer" className="font-medium underline">whisper-base (timestamped)</a>,
a 73 million parameter speech recognition model with the ability to generate word-level timestamps across 100 different languages.
Once loaded, the model ({modelSize} MB) will be cached and reused when you revisit the page.<br />
<br />
Everything runs locally in your browser using <a href="https://huggingface.co/docs/transformers.js" target="_blank" rel="noreferrer" className="underline">🤗 Transformers.js</a> and ONNX Runtime Web,
meaning no API calls are made to a server for inference. You can even disconnect from the internet after the model has loaded!
</p>
)
}
<div className="flex flex-col w-full m-3">
<span className="text-sm mb-0.5">Input audio/video</span>
<MediaInput
ref={mediaInputRef}
className="flex items-center border rounded-md cursor-pointer min-h-[100px] max-h-[500px] overflow-hidden"
onInputChange={(result) => setAudio(result)}
onTimeUpdate={(time) => setCurrentTime(time)}
/>
</div>
<div className="relative w-full flex justify-center items-center">
<button
className="border px-4 py-2 rounded-lg bg-blue-400 text-white hover:bg-blue-500 disabled:bg-blue-100 disabled:cursor-not-allowed select-none"
onClick={handleClick}
disabled={status === 'running' || (status !== null && audio === null)}
>
{status === null ? 'Load model' :
status === 'running'
? 'Running...'
: 'Run model'
}
</button>
{status !== null &&
<div className='absolute right-0 bottom-0'>
<span className="text-xs">Language:</span>
<br />
<LanguageSelector className="border rounded-lg p-1 max-w-[100px]" language={language} setLanguage={setLanguage} />
</div>
}
</div>
{
result && time && (
<>
<div className="w-full mt-4 border rounded-md">
<Transcript
className="p-2 max-h-[200px] overflow-y-auto scrollbar-thin select-none"
transcript={result}
currentTime={currentTime}
setCurrentTime={(time) => {
setCurrentTime(time);
mediaInputRef.current.setMediaTime(time);
}}
/>
</div>
<p className="text-sm text-gray-600 text-end p-1">Generation time: <span className="text-gray-800 font-semibold">{time.toFixed(2)}ms</span></p>
</>
)
}
</div>
</div>
</div >
)
}
export default App
| transformers.js/examples/whisper-word-timestamps/src/App.jsx/0 | {
"file_path": "transformers.js/examples/whisper-word-timestamps/src/App.jsx",
"repo_id": "transformers.js",
"token_count": 3492
} |
# Support exporting vision and text models separately:
# Adapted from https://github.com/huggingface/optimum/issues/1186#issuecomment-1637641760
from optimum.exporters.onnx.model_configs import SiglipTextOnnxConfig, ViTOnnxConfig
from typing import Dict
class SiglipVisionOnnxConfig(ViTOnnxConfig):
pass
class SiglipTextModelOnnxConfig(SiglipTextOnnxConfig):
@property
def outputs(self) -> Dict[str, Dict[int, str]]:
return {
"last_hidden_state": {0: "batch_size", 1: "sequence_length"},
"pooler_output": {0: "batch_size"},
}
def generate_dummy_inputs(self, framework: str = "pt", **kwargs):
dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)
if framework == "pt":
import torch
dummy_inputs["input_ids"] = dummy_inputs["input_ids"].to(dtype=torch.int64)
return dummy_inputs
class SiglipVisionModelOnnxConfig(SiglipVisionOnnxConfig):
@property
def outputs(self) -> Dict[str, Dict[int, str]]:
return {
"last_hidden_state": {0: "batch_size"},
"pooler_output": {0: "batch_size"},
}
| transformers.js/scripts/extra/siglip.py/0 | {
"file_path": "transformers.js/scripts/extra/siglip.py",
"repo_id": "transformers.js",
"token_count": 496
} |