# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
"""
Cleans the table of contents of the model documentation by removing duplicates and sorting models alphabetically.
"""
counts = defaultdict(int)
overview_doc = []
new_doc_list = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"local": doc["local"], "title": doc["title"]})
else:
new_doc_list.append(doc)
doc_list = new_doc_list
duplicates = [key for key, value in counts.items() if value > 1]
new_doc = []
for duplicate_key in duplicates:
titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
if len(titles) > 1:
raise ValueError(
f"{duplicate_key} is present several times in the documentation table of contents at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others."
)
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]})
# Add non-duplicate keys
new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
new_doc = sorted(new_doc, key=lambda s: s["title"].lower())
# "overview" gets special treatment and is always first
if len(overview_doc) > 1:
raise ValueError(f"{doc_list} has two 'overview' docs, which is not allowed.")
overview_doc.extend(new_doc)
# Sort
return overview_doc
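# Illustrative example (not part of the original script; the `local` paths below are made up):
# given a doc list with a duplicated entry and an "Overview" page, `clean_doc_toc` deduplicates,
# sorts alphabetically by title, and keeps the overview first:
#
#   >>> clean_doc_toc([
#   ...     {"local": "api/schedulers/ddim", "title": "DDIM"},
#   ...     {"local": "api/schedulers/overview", "title": "Overview"},
#   ...     {"local": "api/schedulers/ddpm", "title": "DDPM"},
#   ...     {"local": "api/schedulers/ddim", "title": "DDIM"},
#   ... ])
#   [{'local': 'api/schedulers/overview', 'title': 'Overview'},
#    {'local': 'api/schedulers/ddim', 'title': 'DDIM'},
#    {'local': 'api/schedulers/ddpm', 'title': 'DDPM'}]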
def check_scheduler_doc(overwrite=False):
with open(PATH_TO_TOC, encoding="utf-8") as f:
content = yaml.safe_load(f.read())
# Get to the API doc
api_idx = 0
while content[api_idx]["title"] != "API":
api_idx += 1
api_doc = content[api_idx]["sections"]
# Then to the scheduler doc
scheduler_idx = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
scheduler_doc = api_doc[scheduler_idx]["sections"]
new_scheduler_doc = clean_doc_toc(scheduler_doc)
diff = False
if new_scheduler_doc != scheduler_doc:
diff = True
if overwrite:
api_doc[scheduler_idx]["sections"] = new_scheduler_doc
if diff:
if overwrite:
content[api_idx]["sections"] = api_doc
with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
f.write(yaml.dump(content, allow_unicode=True))
else:
raise ValueError(
"The scheduler doc part of the table of contents is not properly sorted. Run `make style` to fix this."
)
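# For reference, the traversal above assumes `docs/source/en/_toctree.yml` is shaped roughly like the
# following (the entry names are only illustrative):
#
#   - title: API
#     sections:
#       - title: Schedulers
#         sections:
#           - local: api/schedulers/overview
#             title: Overview
#           - local: api/schedulers/ddim
#             title: DDIM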
def check_pipeline_doc(overwrite=False):
with open(PATH_TO_TOC, encoding="utf-8") as f:
content = yaml.safe_load(f.read())
# Get to the API doc
api_idx = 0
while content[api_idx]["title"] != "API":
api_idx += 1
api_doc = content[api_idx]["sections"]
# Then to the pipeline doc
pipeline_idx = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
diff = False
pipeline_docs = api_doc[pipeline_idx]["sections"]
new_pipeline_docs = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
sub_pipeline_doc = pipeline_doc["section"]
new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
if overwrite:
pipeline_doc["section"] = new_sub_pipeline_doc
new_pipeline_docs.append(pipeline_doc)
# sort overall pipeline doc
new_pipeline_docs = clean_doc_toc(new_pipeline_docs)
if new_pipeline_docs != pipeline_docs:
diff = True
if overwrite:
api_doc[pipeline_idx]["sections"] = new_pipeline_docs
if diff:
if overwrite:
content[api_idx]["sections"] = api_doc
with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
f.write(yaml.dump(content, allow_unicode=True))
else:
raise ValueError(
"The pipeline doc part of the table of contents is not properly sorted. Run `make style` to fix this."
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| diffusers/utils/check_doc_toc.py/0 | {
"file_path": "diffusers/utils/check_doc_toc.py",
"repo_id": "diffusers",
"token_count": 2177
} |
# Copyright 2024 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to close stale issues. Taken in part from the AllenNLP repository.
https://github.com/allenai/allennlp.
"""
import os
from datetime import datetime as dt
from datetime import timezone
from github import Github
LABELS_TO_EXEMPT = [
"close-to-merge",
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
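# Illustrative usage (assumes a GitHub token with permission to edit issues, exported as GITHUB_TOKEN):
#
#   GITHUB_TOKEN=<your-token> python utils/stale.py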
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("huggingface/diffusers")
open_issues = repo.get_issues(state="open")
for issue in open_issues:
labels = [label.name.lower() for label in issue.get_labels()]
if "stale" in labels:
comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
last_comment = comments[0] if len(comments) > 0 else None
if last_comment is not None and last_comment.user.login != "github-actions[bot]":
# Keep the issue open and remove the stale label if the last comment is not from Stalebot.
issue.edit(state="open")
issue.remove_from_labels("stale")
elif (
(dt.now(timezone.utc) - issue.updated_at).days > 23
and (dt.now(timezone.utc) - issue.created_at).days >= 30
and not any(label in LABELS_TO_EXEMPT for label in labels)
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored."
)
issue.add_to_labels("stale")
if __name__ == "__main__":
main()
| diffusers/utils/stale.py/0 | {
"file_path": "diffusers/utils/stale.py",
"repo_id": "diffusers",
"token_count": 1008
} |
This tutorial explains how to use [Stretch 3](https://hello-robot.com/stretch-3-product) with LeRobot.
## Setup
Familiarize yourself with Stretch by following its [tutorials](https://docs.hello-robot.com/0.3/getting_started/hello_robot/) (recommended).
To use LeRobot on Stretch, 3 options are available:
- [tethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup)
- [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup)
- ssh directly into Stretch (you will first need to install and configure openssh-server on Stretch using one of the two setups above)
## Install LeRobot
On Stretch's CLI, follow these steps:
1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
```bash
mkdir -p ~/miniconda3
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
rm ~/miniconda3/miniconda.sh
~/miniconda3/bin/conda init bash
```
2. Comment out these lines in `~/.profile` (they can interfere with the paths used by conda, and `~/.local/bin` should already be in your PATH anyway):
```
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/.local/bin" ] ; then
PATH="$HOME/.local/bin:$PATH"
fi
```
3. Restart shell or `source ~/.bashrc`
4. Create and activate a fresh conda environment for lerobot
```bash
conda create -y -n lerobot python=3.10 && conda activate lerobot
```
5. Clone LeRobot:
```bash
git clone https://github.com/huggingface/lerobot.git ~/lerobot
```
6. Install LeRobot with stretch dependencies:
```bash
cd ~/lerobot && pip install -e ".[stretch]"
```
> **Note:** If you get this message, you can ignore it: `ERROR: pip's dependency resolver does not currently take into account all the packages that are installed.`
For Linux only (not Mac), install extra dependencies for recording datasets:
```bash
conda install -y -c conda-forge ffmpeg
pip uninstall -y opencv-python
conda install -y -c conda-forge "opencv>=4.10.0"
```
7. Run a [system check](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#system-check) to make sure your robot is ready:
```bash
stretch_system_check.py
```
> **Note:** You may need to free the "robot process" after booting Stretch by running `stretch_free_robot_process.py`. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#turning-off-gamepad-teleoperation).
You should get something like this:
```bash
For use with S T R E T C H (R) from Hello Robot Inc.
---------------------------------------------------------------------
Model = Stretch 3
Tool = DexWrist 3 w/ Gripper
Serial Number = stretch-se3-3054
---- Checking Hardware ----
[Pass] Comms are ready
[Pass] Actuators are ready
[Warn] Sensors not ready (IMU AZ = -10.19 out of range -10.1 to -9.5)
[Pass] Battery voltage is 13.6 V
---- Checking Software ----
[Pass] Ubuntu 22.04 is ready
[Pass] All APT pkgs are setup correctly
[Pass] Firmware is up-to-date
[Pass] Python pkgs are up-to-date
[Pass] ROS2 Humble is ready
```
## Teleoperate, record a dataset and run a policy
**Calibrate (Optional)**
Before operating Stretch, you need to [home](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#homing) it first. Be mindful about giving Stretch some space as this procedure will move the robot's arm and gripper. Now run this command:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=stretch \
--control.type=calibrate
```
This is equivalent to running `stretch_robot_home.py`.
> **Note:** If you run any of the LeRobot scripts below and Stretch is not properly homed, it will automatically home/calibrate first.
**Teleoperate**
Before trying teleoperation, you need to activate the gamepad controller by pressing the middle button. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/hello_robot/#gamepad-teleoperation).
Now try out teleoperation (see above documentation to learn about the gamepad controls):
```bash
python lerobot/scripts/control_robot.py \
--robot.type=stretch \
--control.type=teleoperate
```
This is essentially the same as running `stretch_gamepad_teleop.py`.
**Record a dataset**
Once you're familiar with the gamepad controls and after a bit of practice, you can try to record your first dataset with Stretch.
If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```
Store your Hugging Face repository name in a variable to run these commands:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```
Record one episode:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=stretch \
--control.type=record \
--control.fps=30 \
--control.single_task="Grasp a lego block and put it in the bin." \
--control.repo_id=${HF_USER}/stretch_test \
--control.tags='["tutorial"]' \
--control.warmup_time_s=5 \
--control.episode_time_s=30 \
--control.reset_time_s=30 \
--control.num_episodes=2 \
--control.push_to_hub=true
```
> **Note:** If you're using ssh to connect to Stretch and run this script, you won't be able to visualize its camera feeds (though they will still be recorded). To see the camera streams, use the [tethered](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup) or [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup).
**Replay an episode**
Now try to replay this episode (make sure the robot's initial position is the same):
```bash
python lerobot/scripts/control_robot.py \
--robot.type=stretch \
--control.type=replay \
--control.fps=30 \
--control.repo_id=${HF_USER}/stretch_test \
--control.episode=0
```
Follow the [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) to train a policy on your data and run inference on your robot. You will need to adapt the code for Stretch.
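For orientation, below is a minimal sketch of what policy inference looks like in Python once training is done. It is not Stretch-specific: the policy class, checkpoint repo id, observation keys, and shapes are placeholders and must be adapted to your trained model and to the observations produced by Stretch.
```python
import torch

from lerobot.common.policies.pi0.modeling_pi0 import PI0Policy  # placeholder policy class

# Placeholder checkpoint; use your own ${HF_USER}/<model> repo id.
policy = PI0Policy.from_pretrained("lerobot/pi0")
policy.reset()

# Build one observation batch. The keys and shapes must match the policy's configured
# input features (camera images, robot state, and a task string for pi0-style policies).
batch = {
    "observation.state": torch.zeros(1, 14),                     # placeholder robot state
    "observation.images.cam_head": torch.zeros(1, 3, 480, 640),  # placeholder camera frame
    "task": ["Grasp a lego block and put it in the bin."],
}

with torch.no_grad():
    action = policy.select_action(batch)  # tensor of shape (batch_size, action_dim)
```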
> TODO(rcadene, aliberts): Add already setup environment and policy yaml configuration files
If you need help, please reach out on Discord in the channel `#stretch3-mobile-arm`.
| lerobot/examples/8_use_stretch.md/0 | {
"file_path": "lerobot/examples/8_use_stretch.md",
"repo_id": "lerobot",
"token_count": 2135
} |
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Process zarr files formatted like in: https://github.com/real-stanford/diffusion_policy"""
import shutil
from pathlib import Path
import numpy as np
import torch
import tqdm
import zarr
from datasets import Dataset, Features, Image, Sequence, Value
from PIL import Image as PILImage
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub.utils import (
calculate_episode_data_index,
concatenate_episodes,
get_default_encoding,
save_images_concurrently,
)
from lerobot.common.datasets.utils import (
hf_transform_to_torch,
)
from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames
def check_format(raw_dir):
zarr_path = raw_dir / "pusht_cchi_v7_replay.zarr"
zarr_data = zarr.open(zarr_path, mode="r")
required_datasets = {
"data/action",
"data/img",
"data/keypoint",
"data/n_contacts",
"data/state",
"meta/episode_ends",
}
for dataset in required_datasets:
assert dataset in zarr_data
nb_frames = zarr_data["data/img"].shape[0]
required_datasets.remove("meta/episode_ends")
assert all(nb_frames == zarr_data[dataset].shape[0] for dataset in required_datasets)
def load_from_raw(
raw_dir: Path,
videos_dir: Path,
fps: int,
video: bool,
episodes: list[int] | None = None,
keypoints_instead_of_image: bool = False,
encoding: dict | None = None,
):
try:
import pymunk
from gym_pusht.envs.pusht import PushTEnv, pymunk_to_shapely
from lerobot.common.datasets.push_dataset_to_hub._diffusion_policy_replay_buffer import (
ReplayBuffer as DiffusionPolicyReplayBuffer,
)
except ModuleNotFoundError as e:
print("`gym_pusht` is not installed. Please install it with `pip install 'lerobot[gym_pusht]'`")
raise e
# as defined in the gym-pusht env: https://github.com/huggingface/gym-pusht/blob/e0684ff988d223808c0a9dcfaba9dc4991791370/gym_pusht/envs/pusht.py#L174
success_threshold = 0.95 # 95% coverage
zarr_path = raw_dir / "pusht_cchi_v7_replay.zarr"
zarr_data = DiffusionPolicyReplayBuffer.copy_from_path(zarr_path)
episode_ids = torch.from_numpy(zarr_data.get_episode_idxs())
assert (
len({zarr_data[key].shape[0] for key in zarr_data.keys()}) == 1 # noqa: SIM118
), "Some data types don't have the same number of total frames."
# TODO(rcadene): verify that goal pose is expected to be fixed
goal_pos_angle = np.array([256, 256, np.pi / 4]) # x, y, theta (in radians)
goal_body = PushTEnv.get_goal_pose_body(goal_pos_angle)
imgs = torch.from_numpy(zarr_data["img"]) # b h w c
states = torch.from_numpy(zarr_data["state"])
actions = torch.from_numpy(zarr_data["action"])
# load data indices from which each episode starts and ends
from_ids, to_ids = [], []
from_idx = 0
for to_idx in zarr_data.meta["episode_ends"]:
from_ids.append(from_idx)
to_ids.append(to_idx)
from_idx = to_idx
num_episodes = len(from_ids)
ep_dicts = []
ep_ids = episodes if episodes else range(num_episodes)
for ep_idx, selected_ep_idx in tqdm.tqdm(enumerate(ep_ids)):
from_idx = from_ids[selected_ep_idx]
to_idx = to_ids[selected_ep_idx]
num_frames = to_idx - from_idx
# sanity check
assert (episode_ids[from_idx:to_idx] == ep_idx).all()
# get image
if not keypoints_instead_of_image:
image = imgs[from_idx:to_idx]
assert image.min() >= 0.0
assert image.max() <= 255.0
image = image.type(torch.uint8)
# get state
state = states[from_idx:to_idx]
agent_pos = state[:, :2]
block_pos = state[:, 2:4]
block_angle = state[:, 4]
# get reward, success, done, and (maybe) keypoints
reward = torch.zeros(num_frames)
success = torch.zeros(num_frames, dtype=torch.bool)
if keypoints_instead_of_image:
keypoints = torch.zeros(num_frames, 16) # 8 keypoints each with 2 coords
done = torch.zeros(num_frames, dtype=torch.bool)
for i in range(num_frames):
space = pymunk.Space()
space.gravity = 0, 0
space.damping = 0
# Add walls.
walls = [
PushTEnv.add_segment(space, (5, 506), (5, 5), 2),
PushTEnv.add_segment(space, (5, 5), (506, 5), 2),
PushTEnv.add_segment(space, (506, 5), (506, 506), 2),
PushTEnv.add_segment(space, (5, 506), (506, 506), 2),
]
space.add(*walls)
block_body, block_shapes = PushTEnv.add_tee(space, block_pos[i].tolist(), block_angle[i].item())
goal_geom = pymunk_to_shapely(goal_body, block_body.shapes)
block_geom = pymunk_to_shapely(block_body, block_body.shapes)
intersection_area = goal_geom.intersection(block_geom).area
goal_area = goal_geom.area
coverage = intersection_area / goal_area
reward[i] = np.clip(coverage / success_threshold, 0, 1)
success[i] = coverage > success_threshold
if keypoints_instead_of_image:
keypoints[i] = torch.from_numpy(PushTEnv.get_keypoints(block_shapes).flatten())
# last step of demonstration is considered done
done[-1] = True
ep_dict = {}
if not keypoints_instead_of_image:
imgs_array = [x.numpy() for x in image]
img_key = "observation.image"
if video:
# save png images in temporary directory
tmp_imgs_dir = videos_dir / "tmp_images"
save_images_concurrently(imgs_array, tmp_imgs_dir)
# encode images to a mp4 video
fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
video_path = videos_dir / fname
encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {}))
# clean temporary images directory
shutil.rmtree(tmp_imgs_dir)
# store the reference to the video frame
ep_dict[img_key] = [
{"path": f"videos/{fname}", "timestamp": i / fps} for i in range(num_frames)
]
else:
ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array]
ep_dict["observation.state"] = agent_pos
if keypoints_instead_of_image:
ep_dict["observation.environment_state"] = keypoints
ep_dict["action"] = actions[from_idx:to_idx]
ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames, dtype=torch.int64)
ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
# ep_dict["next.observation.image"] = image[1:],
# ep_dict["next.observation.state"] = agent_pos[1:],
# TODO(rcadene): verify that reward and done are aligned with image and agent_pos
ep_dict["next.reward"] = torch.cat([reward[1:], reward[[-1]]])
ep_dict["next.done"] = torch.cat([done[1:], done[[-1]]])
ep_dict["next.success"] = torch.cat([success[1:], success[[-1]]])
ep_dicts.append(ep_dict)
data_dict = concatenate_episodes(ep_dicts)
total_frames = data_dict["frame_index"].shape[0]
data_dict["index"] = torch.arange(0, total_frames, 1)
return data_dict
def to_hf_dataset(data_dict, video, keypoints_instead_of_image: bool = False):
features = {}
if not keypoints_instead_of_image:
if video:
features["observation.image"] = VideoFrame()
else:
features["observation.image"] = Image()
features["observation.state"] = Sequence(
length=data_dict["observation.state"].shape[1], feature=Value(dtype="float32", id=None)
)
if keypoints_instead_of_image:
features["observation.environment_state"] = Sequence(
length=data_dict["observation.environment_state"].shape[1],
feature=Value(dtype="float32", id=None),
)
features["action"] = Sequence(
length=data_dict["action"].shape[1], feature=Value(dtype="float32", id=None)
)
features["episode_index"] = Value(dtype="int64", id=None)
features["frame_index"] = Value(dtype="int64", id=None)
features["timestamp"] = Value(dtype="float32", id=None)
features["next.reward"] = Value(dtype="float32", id=None)
features["next.done"] = Value(dtype="bool", id=None)
features["next.success"] = Value(dtype="bool", id=None)
features["index"] = Value(dtype="int64", id=None)
hf_dataset = Dataset.from_dict(data_dict, features=Features(features))
hf_dataset.set_transform(hf_transform_to_torch)
return hf_dataset
def from_raw_to_lerobot_format(
raw_dir: Path,
videos_dir: Path,
fps: int | None = None,
video: bool = True,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
# Manually change this to True to use the keypoints of the T block instead of an image observation (but do not
# merge a change that leaves it set to True). Also make sure to use video = 0 in the `push_dataset_to_hub.py` script.
keypoints_instead_of_image = False
# sanity check
check_format(raw_dir)
if fps is None:
fps = 10
data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, keypoints_instead_of_image, encoding)
hf_dataset = to_hf_dataset(data_dict, video, keypoints_instead_of_image)
episode_data_index = calculate_episode_data_index(hf_dataset)
info = {
"codebase_version": CODEBASE_VERSION,
"fps": fps,
"video": video if not keypoints_instead_of_image else 0,
}
if video:
info["encoding"] = get_default_encoding()
return hf_dataset, episode_data_index, info
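# Illustrative usage (the paths are hypothetical): convert a raw PushT zarr dump into the LeRobot
# format, encoding the image observations as mp4 videos at 10 fps:
#
#   hf_dataset, episode_data_index, info = from_raw_to_lerobot_format(
#       raw_dir=Path("data/pusht_raw"),
#       videos_dir=Path("data/pusht_videos"),
#       fps=10,
#       video=True,
#   )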
| lerobot/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py/0 | {
"file_path": "lerobot/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py",
"repo_id": "lerobot",
"token_count": 4650
} |
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LRScheduler
from lerobot.common.logger import TRAINING_STATE
from lerobot.common.policies.pretrained import PreTrainedPolicy
from lerobot.common.utils.utils import get_global_random_state, set_global_random_state
from lerobot.configs.train import TrainPipelineConfig
def make_optimizer_and_scheduler(
cfg: TrainPipelineConfig, policy: PreTrainedPolicy
) -> tuple[Optimizer, LRScheduler | None]:
"""Generates the optimizer and scheduler based on configs.
Args:
cfg (TrainPipelineConfig): The training config that contains optimizer and scheduler configs
policy (PreTrainedPolicy): The policy from which parameters and training presets are taken.
Returns:
tuple[Optimizer, LRScheduler | None]: The couple (Optimizer, Scheduler). Scheduler can be `None`.
"""
params = policy.get_optim_params() if cfg.use_policy_training_preset else policy.parameters()
optimizer = cfg.optimizer.build(params)
lr_scheduler = cfg.scheduler.build(optimizer, cfg.offline.steps) if cfg.scheduler is not None else None
return optimizer, lr_scheduler
def load_training_state(checkpoint_dir: Path, optimizer: Optimizer, scheduler: LRScheduler | None) -> tuple[int, Optimizer, LRScheduler | None]:
"""
Given the checkpoint directory, load the optimizer state, scheduler state, and random state, and
return the global training step along with the updated optimizer and scheduler.
"""
# TODO(aliberts): use safetensors instead as weights_only=False is unsafe
training_state = torch.load(checkpoint_dir / TRAINING_STATE, weights_only=False)
optimizer.load_state_dict(training_state["optimizer"])
if scheduler is not None:
scheduler.load_state_dict(training_state["scheduler"])
elif "scheduler" in training_state:
raise ValueError("The checkpoint contains a scheduler state_dict, but no LRScheduler was provided.")
# Small HACK to get the expected keys: use `get_global_random_state`.
set_global_random_state({k: training_state[k] for k in get_global_random_state()})
return training_state["step"], optimizer, scheduler
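# Illustrative usage when resuming a run (the checkpoint path is hypothetical):
#
#   optimizer, lr_scheduler = make_optimizer_and_scheduler(cfg, policy)
#   step, optimizer, lr_scheduler = load_training_state(
#       Path("outputs/train/checkpoints/last"), optimizer, lr_scheduler
#   )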
| lerobot/lerobot/common/optim/factory.py/0 | {
"file_path": "lerobot/lerobot/common/optim/factory.py",
"repo_id": "lerobot",
"token_count": 883
} |
#!/usr/bin/env python
# Copyright 2025 Physical Intelligence and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
π0: A Vision-Language-Action Flow Model for General Robot Control
[Paper](https://www.physicalintelligence.company/download/pi0.pdf)
[Jax code](https://github.com/Physical-Intelligence/openpi)
Designed by Physical Intelligence. Ported from Jax by Hugging Face.
Install pi0 extra dependencies:
```bash
pip install -e ".[pi0]"
```
Example of finetuning the pi0 pretrained model (`pi0_base` in `openpi`):
```bash
python lerobot/scripts/train.py \
--policy.path=lerobot/pi0 \
--dataset.repo_id=danaaubakirova/koch_test
```
Example of finetuning the pi0 neural network with PaliGemma and the Gemma expert
initialized from their default VLM pretraining (i.e. before any pi0 finetuning):
```bash
python lerobot/scripts/train.py \
--policy.type=pi0 \
--dataset.repo_id=danaaubakirova/koch_test
```
Example of using the pi0 pretrained model outside LeRobot training framework:
```python
policy = Pi0Policy.from_pretrained("lerobot/pi0")
```
"""
import math
from collections import deque
import torch
import torch.nn.functional as F # noqa: N812
from torch import Tensor, nn
from transformers import AutoTokenizer
from lerobot.common.constants import ACTION, OBS_ROBOT
from lerobot.common.policies.normalize import Normalize, Unnormalize
from lerobot.common.policies.pi0.configuration_pi0 import PI0Config
from lerobot.common.policies.pi0.paligemma_with_expert import (
PaliGemmaWithExpertConfig,
PaliGemmaWithExpertModel,
)
from lerobot.common.policies.pretrained import PreTrainedPolicy
from lerobot.common.utils.utils import get_safe_dtype
def create_sinusoidal_pos_embedding(
time: torch.Tensor, dimension: int, min_period: float, max_period: float, device="cpu"
) -> Tensor:
"""Computes sine-cosine positional embedding vectors for scalar positions."""
if dimension % 2 != 0:
raise ValueError(f"dimension ({dimension}) must be divisible by 2")
if time.ndim != 1:
raise ValueError("The time tensor is expected to be of shape `(batch_size, )`.")
dtype = get_safe_dtype(torch.float64, device.type)
fraction = torch.linspace(0.0, 1.0, dimension // 2, dtype=dtype, device=device)
period = min_period * (max_period / min_period) ** fraction
# Compute the outer product
scaling_factor = 1.0 / period * 2 * math.pi
sin_input = scaling_factor[None, :] * time[:, None]
pos_emb = torch.cat([torch.sin(sin_input), torch.cos(sin_input)], dim=1)
return pos_emb
def sample_beta(alpha, beta, bsize, device):
gamma1 = torch.empty((bsize,), device=device).uniform_(0, 1).pow(1 / alpha)
gamma2 = torch.empty((bsize,), device=device).uniform_(0, 1).pow(1 / beta)
return gamma1 / (gamma1 + gamma2)
def make_att_2d_masks(pad_masks, att_masks):
"""Copied from big_vision.
Tokens can attend to valid inputs tokens which have a cumulative mask_ar
smaller or equal to theirs. This way `mask_ar` int[B, N] can be used to
setup several types of attention, for example:
[[1 1 1 1 1 1]]: pure causal attention.
[[0 0 0 1 1 1]]: prefix-lm attention. The first 3 tokens can attend between
themselves and the last 3 tokens have a causal attention. The first
entry could also be a 1 without changing behaviour.
[[1 0 1 0 1 0 0 1 0 0]]: causal attention between 4 blocks. Tokens of a
block can attend all previous blocks and all tokens on the same block.
Args:
pad_masks: bool[B, N], true if the token is part of the input, false if it is padding.
att_masks: int32[B, N], 1 where previous tokens cannot depend on the token, and 0 where it
shares the same attention mask as the previous token.
"""
if att_masks.ndim != 2:
raise ValueError(att_masks.ndim)
if pad_masks.ndim != 2:
raise ValueError(pad_masks.ndim)
cumsum = torch.cumsum(att_masks, dim=1)
att_2d_masks = cumsum[:, None, :] <= cumsum[:, :, None]
pad_2d_masks = pad_masks[:, None, :] * pad_masks[:, :, None]
att_2d_masks = att_2d_masks & pad_2d_masks
return att_2d_masks
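# Illustrative example: with pad_masks = [[True, True, True]] and att_masks = [[0, 0, 1]], the first
# two tokens form a bidirectional prefix block while the third token is causal (it attends to the
# prefix and to itself, but the prefix cannot attend to it):
#
#   >>> make_att_2d_masks(torch.ones(1, 3, dtype=torch.bool), torch.tensor([[0, 0, 1]]))
#   tensor([[[ True,  True, False],
#            [ True,  True, False],
#            [ True,  True,  True]]])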
def resize_with_pad(img, width, height, pad_value=-1):
# assumed to be a no-op when the image already has the requested width and height
if img.ndim != 4:
raise ValueError(f"(b,c,h,w) expected, but {img.shape}")
cur_height, cur_width = img.shape[2:]
ratio = max(cur_width / width, cur_height / height)
resized_height = int(cur_height / ratio)
resized_width = int(cur_width / ratio)
resized_img = F.interpolate(
img, size=(resized_height, resized_width), mode="bilinear", align_corners=False
)
pad_height = max(0, int(height - resized_height))
pad_width = max(0, int(width - resized_width))
# pad on left and top of image
padded_img = F.pad(resized_img, (pad_width, 0, pad_height, 0), value=pad_value)
return padded_img
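# Illustrative example: a (1, 3, 480, 640) batch resized with width=224, height=224 keeps the aspect
# ratio (the content becomes 168x224), then receives 56 rows of `pad_value` padding on top, yielding a
# (1, 3, 224, 224) tensor.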
def pad_vector(vector, new_dim):
"""Can be (batch_size x sequence_length x features_dimension)
or (batch_size x features_dimension)
"""
if vector.shape[-1] == new_dim:
return vector
shape = list(vector.shape)
current_dim = shape[-1]
shape[-1] = new_dim
new_vector = torch.zeros(*shape, dtype=vector.dtype, device=vector.device)
new_vector[..., :current_dim] = vector
return new_vector
def normalize(x, min_val, max_val):
return (x - min_val) / (max_val - min_val)
def unnormalize(x, min_val, max_val):
return x * (max_val - min_val) + min_val
def safe_arcsin(value):
# This ensures that the input stays within
# [−1,1] to avoid invalid values for arcsin
return torch.arcsin(torch.clamp(value, -1.0, 1.0))
def aloha_gripper_to_angular(value):
# Aloha transforms the gripper positions into a linear space. The following code
# reverses this transformation to be consistent with pi0 which is pretrained in
# angular space.
#
# These values are coming from the Aloha code:
# PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED
value = unnormalize(value, min_val=0.01844, max_val=0.05800)
# This is the inverse of the angular to linear transformation inside the Interbotix code.
def linear_to_radian(linear_position, arm_length, horn_radius):
value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position)
return safe_arcsin(value)
# The constants are taken from the Interbotix code.
value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022)
# Normalize to [0, 1].
# The values 0.4 and 1.5 were measured on an actual Trossen robot.
return normalize(value, min_val=0.4, max_val=1.5)
def aloha_gripper_from_angular(value):
# Convert from the gripper position used by pi0 to the gripper position that is used by Aloha.
# Note that the units are still angular but the range is different.
# The values 0.4 and 1.5 were measured on an actual Trossen robot.
value = unnormalize(value, min_val=0.4, max_val=1.5)
# These values are coming from the Aloha code:
# PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE
return normalize(value, min_val=-0.6213, max_val=1.4910)
def aloha_gripper_from_angular_inv(value):
# Directly inverts the gripper_from_angular function.
value = unnormalize(value, min_val=-0.6213, max_val=1.4910)
return normalize(value, min_val=0.4, max_val=1.5)
class PI0Policy(PreTrainedPolicy):
"""Wrapper class around PI0FlowMatching model to train and run inference within LeRobot."""
config_class = PI0Config
name = "pi0"
def __init__(
self,
config: PI0Config,
dataset_stats: dict[str, dict[str, Tensor]] | None = None,
):
"""
Args:
config: Policy configuration class instance or None, in which case the default instantiation of
the configuration class is used.
dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected
that they will be passed with a call to `load_state_dict` before the policy is used.
"""
super().__init__(config)
config.validate_features()
self.config = config
self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats)
self.normalize_targets = Normalize(
config.output_features, config.normalization_mapping, dataset_stats
)
self.unnormalize_outputs = Unnormalize(
config.output_features, config.normalization_mapping, dataset_stats
)
self.language_tokenizer = AutoTokenizer.from_pretrained("google/paligemma-3b-pt-224")
self.model = PI0FlowMatching(config)
self.reset()
def reset(self):
"""This should be called whenever the environment is reset."""
self._action_queue = deque([], maxlen=self.config.n_action_steps)
def get_optim_params(self) -> dict:
return self.parameters()
@torch.no_grad
def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor:
"""Select a single action given environment observations.
This method wraps `select_actions` in order to return one action at a time for execution in the
environment. It works by managing the actions in a queue and only calling `select_actions` when the
queue is empty.
"""
self.eval()
if self.config.adapt_to_pi_aloha:
batch[OBS_ROBOT] = self._pi_aloha_decode_state(batch[OBS_ROBOT])
batch = self.normalize_inputs(batch)
# Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by
# querying the policy.
if len(self._action_queue) == 0:
images, img_masks = self.prepare_images(batch)
state = self.prepare_state(batch)
lang_tokens, lang_masks = self.prepare_language(batch)
actions = self.model.sample_actions(
images, img_masks, lang_tokens, lang_masks, state, noise=noise
)
# Unpad actions
original_action_dim = self.config.action_feature.shape[0]
actions = actions[:, :, :original_action_dim]
actions = self.unnormalize_outputs({"action": actions})["action"]
if self.config.adapt_to_pi_aloha:
actions = self._pi_aloha_encode_actions(actions)
# `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue
# effectively has shape (n_action_steps, batch_size, *), hence the transpose.
self._action_queue.extend(actions.transpose(0, 1))
return self._action_queue.popleft()
def forward(self, batch: dict[str, Tensor], noise=None, time=None) -> dict[str, Tensor]:
"""Do a full training forward pass to compute the loss"""
if self.config.adapt_to_pi_aloha:
batch[OBS_ROBOT] = self._pi_aloha_decode_state(batch[OBS_ROBOT])
batch[ACTION] = self._pi_aloha_encode_actions_inv(batch[ACTION])
batch = self.normalize_inputs(batch)
batch = self.normalize_targets(batch)
images, img_masks = self.prepare_images(batch)
state = self.prepare_state(batch)
lang_tokens, lang_masks = self.prepare_language(batch)
actions = self.prepare_action(batch)
actions_is_pad = batch.get("actions_id_pad")
loss_dict = {}
losses = self.model.forward(images, img_masks, lang_tokens, lang_masks, state, actions, noise, time)
loss_dict["losses_after_forward"] = losses.clone()
if actions_is_pad is not None:
in_episode_bound = ~actions_is_pad
losses = losses * in_episode_bound.unsqueeze(-1)
loss_dict["losses_after_in_ep_bound"] = losses.clone()
# Remove padding
losses = losses[:, :, : self.config.max_action_dim]
loss_dict["losses_after_rm_padding"] = losses.clone()
loss = losses.mean()
# For backward pass
loss_dict["loss"] = loss
# For logging
loss_dict["l2_loss"] = loss.item()
return loss_dict
def prepare_images(self, batch):
"""Apply Pi0 preprocessing to the images: resize to 224x224 with padding to keep the aspect ratio, and
convert the pixel range from [0.0, 1.0] to [-1.0, 1.0] as expected by SigLIP.
"""
images = []
img_masks = []
present_img_keys = [key for key in self.config.image_features if key in batch]
missing_img_keys = [key for key in self.config.image_features if key not in batch]
if len(present_img_keys) == 0:
raise ValueError(
f"All image features are missing from the batch. At least one expected. (batch: {batch.keys()}) (image_features:{self.config.image_features})"
)
# Preprocess image features present in the batch
for key in present_img_keys:
img = batch[key]
if self.config.resize_imgs_with_padding is not None:
img = resize_with_pad(img, *self.config.resize_imgs_with_padding, pad_value=0)
# Normalize from range [0,1] to [-1,1] as expected by SigLIP
img = img * 2.0 - 1.0
bsize = img.shape[0]
device = img.device
mask = torch.ones(bsize, dtype=torch.bool, device=device)
images.append(img)
img_masks.append(mask)
# Create image features not present in the batch
# as fully 0 padded images.
for num_empty_cameras in range(len(missing_img_keys)):
if num_empty_cameras >= self.config.empty_cameras:
break
img = torch.ones_like(img) * -1
mask = torch.zeros_like(mask)
images.append(img)
img_masks.append(mask)
return images, img_masks
def prepare_language(self, batch) -> tuple[Tensor, Tensor]:
"""Tokenize the text input"""
device = batch[OBS_ROBOT].device
tasks = batch["task"]
# PaliGemma prompt has to end with a new line
tasks = [task if task.endswith("\n") else f"{task}\n" for task in tasks]
tokenized_prompt = self.language_tokenizer.__call__(
tasks,
padding="max_length",
padding_side="right",
max_length=self.config.tokenizer_max_length,
return_tensors="pt",
)
lang_tokens = tokenized_prompt["input_ids"].to(device=device)
lang_masks = tokenized_prompt["attention_mask"].to(device=device, dtype=torch.bool)
return lang_tokens, lang_masks
def _pi_aloha_decode_state(self, state):
# Flip the joints.
for motor_idx in [1, 2, 8, 9]:
state[:, motor_idx] *= -1
# Reverse the gripper transformation that is being applied by the Aloha runtime.
for motor_idx in [6, 13]:
state[:, motor_idx] = aloha_gripper_to_angular(state[:, motor_idx])
return state
def _pi_aloha_encode_actions(self, actions):
# Flip the joints.
for motor_idx in [1, 2, 8, 9]:
actions[:, :, motor_idx] *= -1
# Reverse the gripper transformation that is being applied by the Aloha runtime.
for motor_idx in [6, 13]:
actions[:, :, motor_idx] = aloha_gripper_from_angular(actions[:, :, motor_idx])
return actions
def _pi_aloha_encode_actions_inv(self, actions):
# Flip the joints again.
for motor_idx in [1, 2, 8, 9]:
actions[:, :, motor_idx] *= -1
# Reverse the gripper transformation that is being applied by the Aloha runtime.
for motor_idx in [6, 13]:
actions[:, :, motor_idx] = aloha_gripper_from_angular_inv(actions[:, :, motor_idx])
return actions
def prepare_state(self, batch):
"""Pad state"""
state = pad_vector(batch[OBS_ROBOT], self.config.max_state_dim)
return state
def prepare_action(self, batch):
"""Pad action"""
actions = pad_vector(batch[ACTION], self.config.max_action_dim)
return actions
class PI0FlowMatching(nn.Module):
"""
π0: A Vision-Language-Action Flow Model for General Robot Control
[Paper](https://www.physicalintelligence.company/download/pi0.pdf)
[Jax code](https://github.com/Physical-Intelligence/openpi)
Designed by Physical Intelligence. Ported from Jax by Hugging Face.
┌──────────────────────────────┐
│               actions        │
│               ▲              │
│              ┌┴─────┐        │
│  kv cache    │Gemma │        │
│  ┌──────────►│Expert│        │
│  │           │      │        │
│ ┌┴────────┐  │x 10  │        │
│ │         │  └▲──▲──┘        │
│ │PaliGemma│   │  │           │
│ │         │   │  robot state │
│ │         │   noise          │
│ └▲──▲─────┘                  │
│  │  │                        │
│  │  image(s)                 │
│  language tokens             │
└──────────────────────────────┘
"""
def __init__(self, config):
super().__init__()
self.config = config
paligemma_with_expert_config = PaliGemmaWithExpertConfig(
freeze_vision_encoder=self.config.freeze_vision_encoder,
train_expert_only=self.config.train_expert_only,
attention_implementation=self.config.attention_implementation,
)
self.paligemma_with_expert = PaliGemmaWithExpertModel(paligemma_with_expert_config)
# Projections are float32
self.state_proj = nn.Linear(self.config.max_state_dim, self.config.proj_width)
self.action_in_proj = nn.Linear(self.config.max_action_dim, self.config.proj_width)
self.action_out_proj = nn.Linear(self.config.proj_width, self.config.max_action_dim)
self.action_time_mlp_in = nn.Linear(self.config.proj_width * 2, self.config.proj_width)
self.action_time_mlp_out = nn.Linear(self.config.proj_width, self.config.proj_width)
self.set_requires_grad()
def set_requires_grad(self):
for params in self.state_proj.parameters():
params.requires_grad = self.config.train_state_proj
def sample_noise(self, shape, device):
noise = torch.normal(
mean=0.0,
std=1.0,
size=shape,
dtype=torch.float32,
device=device,
)
return noise
def sample_time(self, bsize, device):
time_beta = sample_beta(1.5, 1.0, bsize, device)
time = time_beta * 0.999 + 0.001
return time.to(dtype=torch.float32, device=device)
def embed_prefix(
self, images, img_masks, lang_tokens, lang_masks
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Embed images with SigLIP and language tokens with embedding layer to prepare
for PaliGemma transformer processing.
"""
# TODO: avoid list in python and torch.cat ; prefer pre-allocation with torch.empty
embs = []
pad_masks = []
att_masks = []
# TODO: remove for loop
for (
img,
img_mask,
) in zip(images, img_masks, strict=False):
img_emb = self.paligemma_with_expert.embed_image(img)
img_emb = img_emb.to(dtype=torch.bfloat16)
# Normalize image embeddings
img_emb_dim = img_emb.shape[-1]
img_emb = img_emb * torch.tensor(img_emb_dim**0.5, dtype=img_emb.dtype, device=img_emb.device)
bsize, num_img_embs = img_emb.shape[:2]
img_mask = img_mask[:, None].expand(bsize, num_img_embs)
embs.append(img_emb)
pad_masks.append(img_mask)
# Create attention masks so that image tokens attend to each other
att_masks += [0] * num_img_embs
lang_emb = self.paligemma_with_expert.embed_language_tokens(lang_tokens)
# Normalize language embeddings
lang_emb_dim = lang_emb.shape[-1]
lang_emb = lang_emb * math.sqrt(lang_emb_dim)
embs.append(lang_emb)
pad_masks.append(lang_masks)
# full attention between image and language inputs
num_lang_embs = lang_emb.shape[1]
att_masks += [0] * num_lang_embs
embs = torch.cat(embs, dim=1)
pad_masks = torch.cat(pad_masks, dim=1)
att_masks = torch.tensor(att_masks, dtype=torch.bool, device=pad_masks.device)
att_masks = att_masks[None, :].expand(bsize, len(att_masks))
return embs, pad_masks, att_masks
def embed_suffix(self, state, noisy_actions, timestep):
"""Embed state, noisy_actions, timestep to prepare for Expert Gemma processing."""
embs = []
pad_masks = []
att_masks = []
# Embed state
state_emb = self.state_proj(state)
state_emb = state_emb.to(dtype=torch.bfloat16)
embs.append(state_emb[:, None, :])
bsize = state_emb.shape[0]
dtype = state_emb.dtype
device = state_emb.device
state_mask = torch.ones(bsize, 1, dtype=torch.bool, device=device)
pad_masks.append(state_mask)
# Set attention masks so that image and language inputs do not attend to state or actions
att_masks += [1]
# Embed timestep using sine-cosine positional encoding with sensitivity in the range [0, 1]
time_emb = create_sinusoidal_pos_embedding(
timestep, self.config.proj_width, min_period=4e-3, max_period=4.0, device=device
)
time_emb = time_emb.type(dtype=dtype)
# Fuse timestep + action information using an MLP
action_emb = self.action_in_proj(noisy_actions)
time_emb = time_emb[:, None, :].expand_as(action_emb)
action_time_emb = torch.cat([action_emb, time_emb], dim=2)
action_time_emb = self.action_time_mlp_in(action_time_emb)
action_time_emb = F.silu(action_time_emb) # swish == silu
action_time_emb = self.action_time_mlp_out(action_time_emb)
# Add to input tokens
embs.append(action_time_emb)
bsize, action_time_dim = action_time_emb.shape[:2]
action_time_mask = torch.ones(bsize, action_time_dim, dtype=torch.bool, device=device)
pad_masks.append(action_time_mask)
# Set attention masks so that image, language and state inputs do not attend to action tokens
att_masks += [1] + ([0] * (self.config.n_action_steps - 1))
embs = torch.cat(embs, dim=1)
pad_masks = torch.cat(pad_masks, dim=1)
att_masks = torch.tensor(att_masks, dtype=embs.dtype, device=embs.device)
att_masks = att_masks[None, :].expand(bsize, len(att_masks))
return embs, pad_masks, att_masks
def forward(
self, images, img_masks, lang_tokens, lang_masks, state, actions, noise=None, time=None
) -> Tensor:
"""Do a full training forward pass and compute the loss (batch_size x num_steps x num_motors)"""
if noise is None:
noise = self.sample_noise(actions.shape, actions.device)
if time is None:
time = self.sample_time(actions.shape[0], actions.device)
time_expanded = time[:, None, None]
x_t = time_expanded * noise + (1 - time_expanded) * actions
u_t = noise - actions
prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(
images, img_masks, lang_tokens, lang_masks
)
suffix_embs, suffix_pad_masks, suffix_att_masks = self.embed_suffix(state, x_t, time)
pad_masks = torch.cat([prefix_pad_masks, suffix_pad_masks], dim=1)
att_masks = torch.cat([prefix_att_masks, suffix_att_masks], dim=1)
att_2d_masks = make_att_2d_masks(pad_masks, att_masks)
position_ids = torch.cumsum(pad_masks, dim=1) - 1
(_, suffix_out), _ = self.paligemma_with_expert.forward(
attention_mask=att_2d_masks,
position_ids=position_ids,
past_key_values=None,
inputs_embeds=[prefix_embs, suffix_embs],
use_cache=False,
fill_kv_cache=False,
)
suffix_out = suffix_out[:, -self.config.n_action_steps :]
# Original openpi code, upcast attention output
suffix_out = suffix_out.to(dtype=torch.float32)
v_t = self.action_out_proj(suffix_out)
losses = F.mse_loss(u_t, v_t, reduction="none")
return losses
def sample_actions(self, images, img_masks, lang_tokens, lang_masks, state, noise=None) -> Tensor:
"""Do a full inference forward and compute the action (batch_size x num_steps x num_motors)"""
bsize = state.shape[0]
device = state.device
if noise is None:
actions_shape = (bsize, self.config.n_action_steps, self.config.max_action_dim)
noise = self.sample_noise(actions_shape, device)
prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(
images, img_masks, lang_tokens, lang_masks
)
prefix_att_2d_masks = make_att_2d_masks(prefix_pad_masks, prefix_att_masks)
prefix_position_ids = torch.cumsum(prefix_pad_masks, dim=1) - 1
# Compute image and language key value cache
_, past_key_values = self.paligemma_with_expert.forward(
attention_mask=prefix_att_2d_masks,
position_ids=prefix_position_ids,
past_key_values=None,
inputs_embeds=[prefix_embs, None],
use_cache=self.config.use_cache,
fill_kv_cache=True,
)
dt = -1.0 / self.config.num_steps
dt = torch.tensor(dt, dtype=torch.float32, device=device)
x_t = noise
time = torch.tensor(1.0, dtype=torch.float32, device=device)
while time >= -dt / 2:
expanded_time = time.expand(bsize)
v_t = self.denoise_step(
state,
prefix_pad_masks,
past_key_values,
x_t,
expanded_time,
)
# Euler step
x_t += dt * v_t
time += dt
return x_t
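# Illustrative note on the loop above: with config.num_steps = 10, dt equals -0.1 and the Euler
# integration performs 10 steps, carrying x_t from pure noise at time 1.0 down to the final action
# chunk at time 0.0 (the `time >= -dt / 2` bound makes the stopping condition robust to
# floating-point rounding).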
def denoise_step(
self,
state,
prefix_pad_masks,
past_key_values,
x_t,
timestep,
):
"""Apply one denoising step of the noise `x_t` at a given timestep."""
suffix_embs, suffix_pad_masks, suffix_att_masks = self.embed_suffix(state, x_t, timestep)
suffix_len = suffix_pad_masks.shape[1]
batch_size = prefix_pad_masks.shape[0]
prefix_len = prefix_pad_masks.shape[1]
prefix_pad_2d_masks = prefix_pad_masks[:, None, :].expand(batch_size, suffix_len, prefix_len)
suffix_att_2d_masks = make_att_2d_masks(suffix_pad_masks, suffix_att_masks)
full_att_2d_masks = torch.cat([prefix_pad_2d_masks, suffix_att_2d_masks], dim=2)
prefix_offsets = torch.sum(prefix_pad_masks, dim=-1)[:, None]
position_ids = prefix_offsets + torch.cumsum(suffix_pad_masks, dim=1) - 1
outputs_embeds, _ = self.paligemma_with_expert.forward(
attention_mask=full_att_2d_masks,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=[None, suffix_embs],
use_cache=self.config.use_cache,
fill_kv_cache=False,
)
suffix_out = outputs_embeds[1]
suffix_out = suffix_out[:, -self.config.n_action_steps :]
suffix_out = suffix_out.to(dtype=torch.float32)
v_t = self.action_out_proj(suffix_out)
return v_t
| lerobot/lerobot/common/policies/pi0/modeling_pi0.py/0 | {
"file_path": "lerobot/lerobot/common/policies/pi0/modeling_pi0.py",
"repo_id": "lerobot",
"token_count": 12336
} |
import enum
import logging
import math
import time
import traceback
from copy import deepcopy
import numpy as np
import tqdm
from lerobot.common.robot_devices.motors.configs import DynamixelMotorsBusConfig
from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
from lerobot.common.utils.utils import capture_timestamp_utc
PROTOCOL_VERSION = 2.0
BAUDRATE = 1_000_000
TIMEOUT_MS = 1000
MAX_ID_RANGE = 252
# The following bounds define the lower and upper joints range (after calibration).
# For joints in degree (i.e. revolute joints), their nominal range is [-180, 180] degrees
# which corresponds to a half rotation on the left and half rotation on the right.
# Some joints might require higher range, so we allow up to [-270, 270] degrees until
# an error is raised.
LOWER_BOUND_DEGREE = -270
UPPER_BOUND_DEGREE = 270
# For joints in percentage (i.e. joints that move linearly like the prismatic joint of a gripper),
# their nominal range is [0, 100] %. For instance, for Aloha gripper, 0% is fully
# closed, and 100% is fully open. To account for slight calibration issue, we allow up to
# [-10, 110] until an error is raised.
LOWER_BOUND_LINEAR = -10
UPPER_BOUND_LINEAR = 110
HALF_TURN_DEGREE = 180
# https://emanual.robotis.com/docs/en/dxl/x/xl330-m077
# https://emanual.robotis.com/docs/en/dxl/x/xl330-m288
# https://emanual.robotis.com/docs/en/dxl/x/xl430-w250
# https://emanual.robotis.com/docs/en/dxl/x/xm430-w350
# https://emanual.robotis.com/docs/en/dxl/x/xm540-w270
# https://emanual.robotis.com/docs/en/dxl/x/xc430-w150
# data_name: (address, size_byte)
X_SERIES_CONTROL_TABLE = {
"Model_Number": (0, 2),
"Model_Information": (2, 4),
"Firmware_Version": (6, 1),
"ID": (7, 1),
"Baud_Rate": (8, 1),
"Return_Delay_Time": (9, 1),
"Drive_Mode": (10, 1),
"Operating_Mode": (11, 1),
"Secondary_ID": (12, 1),
"Protocol_Type": (13, 1),
"Homing_Offset": (20, 4),
"Moving_Threshold": (24, 4),
"Temperature_Limit": (31, 1),
"Max_Voltage_Limit": (32, 2),
"Min_Voltage_Limit": (34, 2),
"PWM_Limit": (36, 2),
"Current_Limit": (38, 2),
"Acceleration_Limit": (40, 4),
"Velocity_Limit": (44, 4),
"Max_Position_Limit": (48, 4),
"Min_Position_Limit": (52, 4),
"Shutdown": (63, 1),
"Torque_Enable": (64, 1),
"LED": (65, 1),
"Status_Return_Level": (68, 1),
"Registered_Instruction": (69, 1),
"Hardware_Error_Status": (70, 1),
"Velocity_I_Gain": (76, 2),
"Velocity_P_Gain": (78, 2),
"Position_D_Gain": (80, 2),
"Position_I_Gain": (82, 2),
"Position_P_Gain": (84, 2),
"Feedforward_2nd_Gain": (88, 2),
"Feedforward_1st_Gain": (90, 2),
"Bus_Watchdog": (98, 1),
"Goal_PWM": (100, 2),
"Goal_Current": (102, 2),
"Goal_Velocity": (104, 4),
"Profile_Acceleration": (108, 4),
"Profile_Velocity": (112, 4),
"Goal_Position": (116, 4),
"Realtime_Tick": (120, 2),
"Moving": (122, 1),
"Moving_Status": (123, 1),
"Present_PWM": (124, 2),
"Present_Current": (126, 2),
"Present_Velocity": (128, 4),
"Present_Position": (132, 4),
"Velocity_Trajectory": (136, 4),
"Position_Trajectory": (140, 4),
"Present_Input_Voltage": (144, 2),
"Present_Temperature": (146, 1),
}
X_SERIES_BAUDRATE_TABLE = {
0: 9_600,
1: 57_600,
2: 115_200,
3: 1_000_000,
4: 2_000_000,
5: 3_000_000,
6: 4_000_000,
}
CALIBRATION_REQUIRED = ["Goal_Position", "Present_Position"]
CONVERT_UINT32_TO_INT32_REQUIRED = ["Goal_Position", "Present_Position"]
MODEL_CONTROL_TABLE = {
"x_series": X_SERIES_CONTROL_TABLE,
"xl330-m077": X_SERIES_CONTROL_TABLE,
"xl330-m288": X_SERIES_CONTROL_TABLE,
"xl430-w250": X_SERIES_CONTROL_TABLE,
"xm430-w350": X_SERIES_CONTROL_TABLE,
"xm540-w270": X_SERIES_CONTROL_TABLE,
"xc430-w150": X_SERIES_CONTROL_TABLE,
}
MODEL_RESOLUTION = {
"x_series": 4096,
"xl330-m077": 4096,
"xl330-m288": 4096,
"xl430-w250": 4096,
"xm430-w350": 4096,
"xm540-w270": 4096,
"xc430-w150": 4096,
}
MODEL_BAUDRATE_TABLE = {
"x_series": X_SERIES_BAUDRATE_TABLE,
"xl330-m077": X_SERIES_BAUDRATE_TABLE,
"xl330-m288": X_SERIES_BAUDRATE_TABLE,
"xl430-w250": X_SERIES_BAUDRATE_TABLE,
"xm430-w350": X_SERIES_BAUDRATE_TABLE,
"xm540-w270": X_SERIES_BAUDRATE_TABLE,
"xc430-w150": X_SERIES_BAUDRATE_TABLE,
}
NUM_READ_RETRY = 10
NUM_WRITE_RETRY = 10
def convert_degrees_to_steps(degrees: float | np.ndarray, models: str | list[str]) -> np.ndarray:
"""This function converts a degree range to a step range for indicating motor rotation.
It assumes a motor achieves a full rotation by going from the -180 degree position to the +180 degree position.
The motor resolution (e.g. 4096) corresponds to the number of steps needed to achieve a full rotation.
"""
resolutions = [MODEL_RESOLUTION[model] for model in models]
steps = degrees / 180 * np.array(resolutions) / 2
steps = steps.astype(int)
return steps
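# Illustrative example: for an X-series motor with a 4096-step resolution,
# convert_degrees_to_steps(180, ["xl330-m288"]) returns array([2048]), i.e. half a rotation.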
def convert_to_bytes(value, bytes, mock=False):
if mock:
return value
import dynamixel_sdk as dxl
# Note: No need to convert back into unsigned int, since this byte preprocessing
# already handles it for us.
if bytes == 1:
data = [
dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)),
]
elif bytes == 2:
data = [
dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)),
dxl.DXL_HIBYTE(dxl.DXL_LOWORD(value)),
]
elif bytes == 4:
data = [
dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)),
dxl.DXL_HIBYTE(dxl.DXL_LOWORD(value)),
dxl.DXL_LOBYTE(dxl.DXL_HIWORD(value)),
dxl.DXL_HIBYTE(dxl.DXL_HIWORD(value)),
]
else:
raise NotImplementedError(
f"Value of the number of bytes to be sent is expected to be in [1, 2, 4], but "
f"{bytes} is provided instead."
)
return data
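# Illustrative example: convert_to_bytes(2048, bytes=4) returns [0, 8, 0, 0], the little-endian byte
# layout expected by the Dynamixel SDK (2048 = 0x0800 -> low byte 0x00, high byte 0x08, and two zero
# bytes for the high word).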
def get_group_sync_key(data_name, motor_names):
group_key = f"{data_name}_" + "_".join(motor_names)
return group_key
def get_result_name(fn_name, data_name, motor_names):
group_key = get_group_sync_key(data_name, motor_names)
rslt_name = f"{fn_name}_{group_key}"
return rslt_name
def get_queue_name(fn_name, data_name, motor_names):
group_key = get_group_sync_key(data_name, motor_names)
queue_name = f"{fn_name}_{group_key}"
return queue_name
def get_log_name(var_name, fn_name, data_name, motor_names):
group_key = get_group_sync_key(data_name, motor_names)
log_name = f"{var_name}_{fn_name}_{group_key}"
return log_name
def assert_same_address(model_ctrl_table, motor_models, data_name):
all_addr = []
all_bytes = []
for model in motor_models:
addr, bytes = model_ctrl_table[model][data_name]
all_addr.append(addr)
all_bytes.append(bytes)
if len(set(all_addr)) != 1:
raise NotImplementedError(
f"At least two motor models use a different address for `data_name`='{data_name}' ({list(zip(motor_models, all_addr, strict=False))}). Contact a LeRobot maintainer."
)
if len(set(all_bytes)) != 1:
raise NotImplementedError(
f"At least two motor models use a different bytes representation for `data_name`='{data_name}' ({list(zip(motor_models, all_bytes, strict=False))}). Contact a LeRobot maintainer."
)
class TorqueMode(enum.Enum):
ENABLED = 1
DISABLED = 0
class DriveMode(enum.Enum):
NON_INVERTED = 0
INVERTED = 1
class CalibrationMode(enum.Enum):
# Joints with rotational motions are expressed in degrees in a nominal range of [-180, 180]
DEGREE = 0
# Joints with linear motions (like the gripper of Aloha) are expressed in a nominal range of [0, 100]
LINEAR = 1
class JointOutOfRangeError(Exception):
def __init__(self, message="Joint is out of range"):
self.message = message
super().__init__(self.message)
class DynamixelMotorsBus:
"""
The DynamixelMotorsBus class allows efficiently reading from and writing to the attached motors. It relies on
the python dynamixel sdk to communicate with the motors. For more info, see the [Dynamixel SDK Documentation](https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_sdk/sample_code/python_read_write_protocol_2_0/#python-read-write-protocol-20).
A DynamixelMotorsBus instance requires a port (e.g. `DynamixelMotorsBus(port="/dev/tty.usbmodem575E0031751")`).
To find the port, you can run our utility script:
```bash
python lerobot/scripts/find_motors_bus_port.py
>>> Finding all available ports for the MotorBus.
>>> ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
>>> Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
>>> The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751.
>>> Reconnect the usb cable.
```
Example of usage for 1 motor connected to the bus:
```python
motor_name = "gripper"
motor_index = 6
motor_model = "xl330-m288"
config = DynamixelMotorsBusConfig(
port="/dev/tty.usbmodem575E0031751",
motors={motor_name: (motor_index, motor_model)},
)
motors_bus = DynamixelMotorsBus(config)
motors_bus.connect()
position = motors_bus.read("Present_Position")
# move by a few motor steps as an example
few_steps = 30
motors_bus.write("Goal_Position", position + few_steps)
# when done, consider disconnecting
motors_bus.disconnect()
```
"""
def __init__(
self,
config: DynamixelMotorsBusConfig,
):
self.port = config.port
self.motors = config.motors
self.mock = config.mock
self.model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE)
self.model_resolution = deepcopy(MODEL_RESOLUTION)
self.port_handler = None
self.packet_handler = None
self.calibration = None
self.is_connected = False
self.group_readers = {}
self.group_writers = {}
self.logs = {}
def connect(self):
if self.is_connected:
raise RobotDeviceAlreadyConnectedError(
f"DynamixelMotorsBus({self.port}) is already connected. Do not call `motors_bus.connect()` twice."
)
if self.mock:
import tests.mock_dynamixel_sdk as dxl
else:
import dynamixel_sdk as dxl
self.port_handler = dxl.PortHandler(self.port)
self.packet_handler = dxl.PacketHandler(PROTOCOL_VERSION)
try:
if not self.port_handler.openPort():
raise OSError(f"Failed to open port '{self.port}'.")
except Exception:
traceback.print_exc()
print(
"\nTry running `python lerobot/scripts/find_motors_bus_port.py` to make sure you are using the correct port.\n"
)
raise
# Allow reading from and writing to the motors
self.is_connected = True
self.port_handler.setPacketTimeoutMillis(TIMEOUT_MS)
def reconnect(self):
if self.mock:
import tests.mock_dynamixel_sdk as dxl
else:
import dynamixel_sdk as dxl
self.port_handler = dxl.PortHandler(self.port)
self.packet_handler = dxl.PacketHandler(PROTOCOL_VERSION)
if not self.port_handler.openPort():
raise OSError(f"Failed to open port '{self.port}'.")
self.is_connected = True
def are_motors_configured(self):
# Only check the motor indices and not baudrate, since if the motor baudrates are incorrect,
# a ConnectionError will be raised anyway.
try:
return (self.motor_indices == self.read("ID")).all()
except ConnectionError as e:
print(e)
return False
def find_motor_indices(self, possible_ids=None, num_retry=2):
if possible_ids is None:
possible_ids = range(MAX_ID_RANGE)
indices = []
for idx in tqdm.tqdm(possible_ids):
try:
present_idx = self.read_with_motor_ids(self.motor_models, [idx], "ID", num_retry=num_retry)[0]
except ConnectionError:
continue
if idx != present_idx:
# sanity check
raise OSError(
"Motor index used to communicate through the bus is not the same as the one present in the motor memory. The motor memory might be damaged."
)
indices.append(idx)
return indices
def set_bus_baudrate(self, baudrate):
present_bus_baudrate = self.port_handler.getBaudRate()
if present_bus_baudrate != baudrate:
print(f"Setting bus baud rate to {baudrate}. Previously {present_bus_baudrate}.")
self.port_handler.setBaudRate(baudrate)
if self.port_handler.getBaudRate() != baudrate:
raise OSError("Failed to write bus baud rate.")
@property
def motor_names(self) -> list[str]:
return list(self.motors.keys())
@property
def motor_models(self) -> list[str]:
return [model for _, model in self.motors.values()]
@property
def motor_indices(self) -> list[int]:
return [idx for idx, _ in self.motors.values()]
def set_calibration(self, calibration: dict[str, list]):
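# Expected structure (inferred from how the calibration is used in `apply_calibration` below); names are illustrative:
# {
#     "motor_names": ["shoulder_pan", "gripper"],
#     "calib_mode": ["DEGREE", "LINEAR"],
#     "drive_mode": [0, 1],
#     "homing_offset": [-2048, 0],
#     "start_pos": [0, 0],
#     "end_pos": [4096, 4096],
# }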
self.calibration = calibration
def apply_calibration_autocorrect(self, values: np.ndarray | list, motor_names: list[str] | None):
"""This function applies the calibration, automatically detects out of range errors for motors values and attempts to correct.
For more info, see docstring of `apply_calibration` and `autocorrect_calibration`.
"""
try:
values = self.apply_calibration(values, motor_names)
except JointOutOfRangeError as e:
print(e)
self.autocorrect_calibration(values, motor_names)
values = self.apply_calibration(values, motor_names)
return values
def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
"""Convert from unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with
a "zero position" at 0 degree.
Note: We say "nominal degree range" since the motors can take values outside this range. For instance, 190 degrees, if the motor
rotate more than a half a turn from the zero position. However, most motors can't rotate more than 180 degrees and will stay in this range.
Joints values are original in [0, 2**32[ (unsigned int32). Each motor are expected to complete a full rotation
when given a goal position that is + or - their resolution. For instance, dynamixel xl330-m077 have a resolution of 4096, and
at any position in their original range, let's say the position 56734, they complete a full rotation clockwise by moving to 60830,
or anticlockwise by moving to 52638. The position in the original range is arbitrary and might change a lot between each motor.
To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
in the centered nominal degree range ]-180, 180[.
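Illustrative example (assuming a resolution of 4096 and drive_mode 0): a raw position of 3072 with a homing offset of
-2048 becomes 3072 + (-2048) = 1024 steps, which maps to 1024 / (4096 // 2) * 180 = 90 degrees.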
"""
if motor_names is None:
motor_names = self.motor_names
# Convert from unsigned int32 original range [0, 2**32] to signed float32 range
values = values.astype(np.float32)
for i, name in enumerate(motor_names):
calib_idx = self.calibration["motor_names"].index(name)
calib_mode = self.calibration["calib_mode"][calib_idx]
if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
drive_mode = self.calibration["drive_mode"][calib_idx]
homing_offset = self.calibration["homing_offset"][calib_idx]
_, model = self.motors[name]
resolution = self.model_resolution[model]
# Update direction of rotation of the motor to match between leader and follower.
# In fact, the motor of the leader for a given joint can be assembled in an
# opposite rotational direction from the motor of the follower on the same joint.
if drive_mode:
values[i] *= -1
# Convert from range [-2**31, 2**31] to
# nominal range [-resolution//2, resolution//2] (e.g. [-2048, 2048])
values[i] += homing_offset
# Convert from range [-resolution//2, resolution//2] to
# universal float32 centered degree range [-180, 180]
# (e.g. 2048 / (4096 // 2) * 180 = 180)
values[i] = values[i] / (resolution // 2) * HALF_TURN_DEGREE
if (values[i] < LOWER_BOUND_DEGREE) or (values[i] > UPPER_BOUND_DEGREE):
raise JointOutOfRangeError(
f"Wrong motor position range detected for {name}. "
f"Expected to be in nominal range of [-{HALF_TURN_DEGREE}, {HALF_TURN_DEGREE}] degrees (a full rotation), "
f"with a maximum range of [{LOWER_BOUND_DEGREE}, {UPPER_BOUND_DEGREE}] degrees to account for joints that can rotate a bit more, "
f"but present value is {values[i]} degree. "
"This might be due to a cable connection issue creating an artificial 360 degrees jump in motor values. "
"You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
)
elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
start_pos = self.calibration["start_pos"][calib_idx]
end_pos = self.calibration["end_pos"][calib_idx]
# Rescale the present position to a nominal range [0, 100] %,
# useful for joints with linear motions like Aloha gripper
values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
raise JointOutOfRangeError(
f"Wrong motor position range detected for {name}. "
f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
f"but present value is {values[i]} %. "
"This might be due to a cable connection issue creating an artificial jump in motor values. "
"You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
)
return values
def autocorrect_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
"""This function automatically detects issues with values of motors after calibration, and correct for these issues.
Some motors might have values outside of expected maximum bounds after calibration.
For instance, for a joint in degree, its value can be outside [-270, 270] degrees, which is totally unexpected given
a nominal range of [-180, 180] degrees, which represents half a turn to the left or right starting from zero position.
Known issues:
#1: Motor value randomly shifts of a full turn, caused by hardware/connection errors.
#2: Motor internal homing offset is shifted by a full turn, caused by using default calibration (e.g Aloha).
#3: motor internal homing offset is shifted by less or more than a full turn, caused by using default calibration
or by human error during manual calibration.
Issues #1 and #2 can be solved by shifting the calibration homing offset by a full turn.
Issue #3 will be visually detected by user and potentially captured by the safety feature `max_relative_target`,
that will slow down the motor, raise an error asking to recalibrate. Manual recalibrating will solve the issue.
Note: A full turn corresponds to 360 degrees but also to 4096 steps for a motor resolution of 4096.
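Illustrative example (assuming a resolution of 4096): a calibrated value of 450 degrees gives factor = -1, so the
homing offset is shifted by -1 * 4096 steps (one full turn), bringing the value back to 90 degrees.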
"""
if motor_names is None:
motor_names = self.motor_names
# Convert from unsigned int32 original range [0, 2**32] to signed float32 range
values = values.astype(np.float32)
for i, name in enumerate(motor_names):
calib_idx = self.calibration["motor_names"].index(name)
calib_mode = self.calibration["calib_mode"][calib_idx]
if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
drive_mode = self.calibration["drive_mode"][calib_idx]
homing_offset = self.calibration["homing_offset"][calib_idx]
_, model = self.motors[name]
resolution = self.model_resolution[model]
# Update direction of rotation of the motor to match between leader and follower.
# In fact, the motor of the leader for a given joint can be assembled in an
# opposite rotational direction from the motor of the follower on the same joint.
if drive_mode:
values[i] *= -1
# Convert from initial range to range [-180, 180] degrees
calib_val = (values[i] + homing_offset) / (resolution // 2) * HALF_TURN_DEGREE
in_range = (calib_val > LOWER_BOUND_DEGREE) and (calib_val < UPPER_BOUND_DEGREE)
# Solve this inequality to find the factor to shift the range into [-180, 180] degrees
# values[i] = (values[i] + homing_offset + resolution * factor) / (resolution // 2) * HALF_TURN_DEGREE
# - HALF_TURN_DEGREE <= (values[i] + homing_offset + resolution * factor) / (resolution // 2) * HALF_TURN_DEGREE <= HALF_TURN_DEGREE
# (- (resolution // 2) - values[i] - homing_offset) / resolution <= factor <= ((resolution // 2) - values[i] - homing_offset) / resolution
low_factor = (-(resolution // 2) - values[i] - homing_offset) / resolution
upp_factor = ((resolution // 2) - values[i] - homing_offset) / resolution
elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
start_pos = self.calibration["start_pos"][calib_idx]
end_pos = self.calibration["end_pos"][calib_idx]
# Convert from initial range to range [0, 100] in %
calib_val = (values[i] - start_pos) / (end_pos - start_pos) * 100
in_range = (calib_val > LOWER_BOUND_LINEAR) and (calib_val < UPPER_BOUND_LINEAR)
# Solve this inequality to find the factor to shift the range into [0, 100] %
# values[i] = (values[i] - start_pos + resolution * factor) / (end_pos + resolution * factor - start_pos - resolution * factor) * 100
# values[i] = (values[i] - start_pos + resolution * factor) / (end_pos - start_pos) * 100
# 0 <= (values[i] - start_pos + resolution * factor) / (end_pos - start_pos) * 100 <= 100
# (start_pos - values[i]) / resolution <= factor <= (end_pos - values[i]) / resolution
low_factor = (start_pos - values[i]) / resolution
upp_factor = (end_pos - values[i]) / resolution
if not in_range:
# Get first integer between the two bounds
if low_factor < upp_factor:
factor = math.ceil(low_factor)
if factor > upp_factor:
raise ValueError(f"No integer found between bounds [{low_factor=}, {upp_factor=}]")
else:
factor = math.ceil(upp_factor)
if factor > low_factor:
raise ValueError(f"No integer found between bounds [{low_factor=}, {upp_factor=}]")
if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
out_of_range_str = f"{LOWER_BOUND_DEGREE} < {calib_val} < {UPPER_BOUND_DEGREE} degrees"
in_range_str = f"{LOWER_BOUND_DEGREE} < {calib_val} < {UPPER_BOUND_DEGREE} degrees"
elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
out_of_range_str = f"{LOWER_BOUND_LINEAR} < {calib_val} < {UPPER_BOUND_LINEAR} %"
in_range_str = f"{LOWER_BOUND_LINEAR} < {calib_val} < {UPPER_BOUND_LINEAR} %"
logging.warning(
f"Auto-correct calibration of motor '{name}' by shifting value by {abs(factor)} full turns, "
f"from '{out_of_range_str}' to '{in_range_str}'."
)
# A full turn corresponds to 360 degrees but also to 4096 steps for a motor resolution of 4096.
self.calibration["homing_offset"][calib_idx] += resolution * factor
def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
"""Inverse of `apply_calibration`."""
if motor_names is None:
motor_names = self.motor_names
for i, name in enumerate(motor_names):
calib_idx = self.calibration["motor_names"].index(name)
calib_mode = self.calibration["calib_mode"][calib_idx]
if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
drive_mode = self.calibration["drive_mode"][calib_idx]
homing_offset = self.calibration["homing_offset"][calib_idx]
_, model = self.motors[name]
resolution = self.model_resolution[model]
# Convert from nominal 0-centered degree range [-180, 180] to
# 0-centered resolution range (e.g. [-2048, 2048] for resolution=4096)
values[i] = values[i] / HALF_TURN_DEGREE * (resolution // 2)
# Subtract the homing offset to come back to the actual motor range of values
# which can be arbitrary.
values[i] -= homing_offset
# Remove drive mode, which is the rotation direction of the motor, to come back to
# actual motor rotation direction which can be arbitrary.
if drive_mode:
values[i] *= -1
elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
start_pos = self.calibration["start_pos"][calib_idx]
end_pos = self.calibration["end_pos"][calib_idx]
# Convert from the nominal linear range of [0, 100] % to
# actual motor range of values which can be arbitrary.
values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos
values = np.round(values).astype(np.int32)
return values
def read_with_motor_ids(self, motor_models, motor_ids, data_name, num_retry=NUM_READ_RETRY):
if self.mock:
import tests.mock_dynamixel_sdk as dxl
else:
import dynamixel_sdk as dxl
return_list = True
if not isinstance(motor_ids, list):
return_list = False
motor_ids = [motor_ids]
assert_same_address(self.model_ctrl_table, self.motor_models, data_name)
addr, bytes = self.model_ctrl_table[motor_models[0]][data_name]
group = dxl.GroupSyncRead(self.port_handler, self.packet_handler, addr, bytes)
for idx in motor_ids:
group.addParam(idx)
for _ in range(num_retry):
comm = group.txRxPacket()
if comm == dxl.COMM_SUCCESS:
break
if comm != dxl.COMM_SUCCESS:
raise ConnectionError(
f"Read failed due to communication error on port {self.port_handler.port_name} for indices {motor_ids}: "
f"{self.packet_handler.getTxRxResult(comm)}"
)
values = []
for idx in motor_ids:
value = group.getData(idx, addr, bytes)
values.append(value)
if return_list:
return values
else:
return values[0]
def read(self, data_name, motor_names: str | list[str] | None = None):
if not self.is_connected:
raise RobotDeviceNotConnectedError(
f"DynamixelMotorsBus({self.port}) is not connected. You need to run `motors_bus.connect()`."
)
start_time = time.perf_counter()
if self.mock:
import tests.mock_dynamixel_sdk as dxl
else:
import dynamixel_sdk as dxl
if motor_names is None:
motor_names = self.motor_names
if isinstance(motor_names, str):
motor_names = [motor_names]
motor_ids = []
models = []
for name in motor_names:
motor_idx, model = self.motors[name]
motor_ids.append(motor_idx)
models.append(model)
assert_same_address(self.model_ctrl_table, models, data_name)
addr, bytes = self.model_ctrl_table[model][data_name]
group_key = get_group_sync_key(data_name, motor_names)
if group_key not in self.group_readers:
# create new group reader
self.group_readers[group_key] = dxl.GroupSyncRead(
self.port_handler, self.packet_handler, addr, bytes
)
for idx in motor_ids:
self.group_readers[group_key].addParam(idx)
for _ in range(NUM_READ_RETRY):
comm = self.group_readers[group_key].txRxPacket()
if comm == dxl.COMM_SUCCESS:
break
if comm != dxl.COMM_SUCCESS:
raise ConnectionError(
f"Read failed due to communication error on port {self.port} for group_key {group_key}: "
f"{self.packet_handler.getTxRxResult(comm)}"
)
values = []
for idx in motor_ids:
value = self.group_readers[group_key].getData(idx, addr, bytes)
values.append(value)
values = np.array(values)
# Convert to signed int to use range [-2048, 2048] for our motor positions.
if data_name in CONVERT_UINT32_TO_INT32_REQUIRED:
values = values.astype(np.int32)
if data_name in CALIBRATION_REQUIRED and self.calibration is not None:
values = self.apply_calibration_autocorrect(values, motor_names)
# log the number of seconds it took to read the data from the motors
delta_ts_name = get_log_name("delta_timestamp_s", "read", data_name, motor_names)
self.logs[delta_ts_name] = time.perf_counter() - start_time
# log the utc time at which the data was received
ts_utc_name = get_log_name("timestamp_utc", "read", data_name, motor_names)
self.logs[ts_utc_name] = capture_timestamp_utc()
return values
def write_with_motor_ids(self, motor_models, motor_ids, data_name, values, num_retry=NUM_WRITE_RETRY):
if self.mock:
import tests.mock_dynamixel_sdk as dxl
else:
import dynamixel_sdk as dxl
if not isinstance(motor_ids, list):
motor_ids = [motor_ids]
if not isinstance(values, list):
values = [values]
assert_same_address(self.model_ctrl_table, motor_models, data_name)
addr, bytes = self.model_ctrl_table[motor_models[0]][data_name]
group = dxl.GroupSyncWrite(self.port_handler, self.packet_handler, addr, bytes)
for idx, value in zip(motor_ids, values, strict=True):
data = convert_to_bytes(value, bytes, self.mock)
group.addParam(idx, data)
for _ in range(num_retry):
comm = group.txPacket()
if comm == dxl.COMM_SUCCESS:
break
if comm != dxl.COMM_SUCCESS:
raise ConnectionError(
f"Write failed due to communication error on port {self.port_handler.port_name} for indices {motor_ids}: "
f"{self.packet_handler.getTxRxResult(comm)}"
)
def write(self, data_name, values: int | float | np.ndarray, motor_names: str | list[str] | None = None):
if not self.is_connected:
raise RobotDeviceNotConnectedError(
f"DynamixelMotorsBus({self.port}) is not connected. You need to run `motors_bus.connect()`."
)
start_time = time.perf_counter()
if self.mock:
import tests.mock_dynamixel_sdk as dxl
else:
import dynamixel_sdk as dxl
if motor_names is None:
motor_names = self.motor_names
if isinstance(motor_names, str):
motor_names = [motor_names]
if isinstance(values, (int, float, np.integer)):
values = [int(values)] * len(motor_names)
values = np.array(values)
motor_ids = []
models = []
for name in motor_names:
motor_idx, model = self.motors[name]
motor_ids.append(motor_idx)
models.append(model)
if data_name in CALIBRATION_REQUIRED and self.calibration is not None:
values = self.revert_calibration(values, motor_names)
values = values.tolist()
assert_same_address(self.model_ctrl_table, models, data_name)
addr, bytes = self.model_ctrl_table[model][data_name]
group_key = get_group_sync_key(data_name, motor_names)
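# Create the group writer on the first call for this (data_name, motor_names) combination;
# on subsequent calls only the payload of each motor is updated.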
init_group = group_key not in self.group_writers
if init_group:
self.group_writers[group_key] = dxl.GroupSyncWrite(
self.port_handler, self.packet_handler, addr, bytes
)
for idx, value in zip(motor_ids, values, strict=True):
data = convert_to_bytes(value, bytes, self.mock)
if init_group:
self.group_writers[group_key].addParam(idx, data)
else:
self.group_writers[group_key].changeParam(idx, data)
comm = self.group_writers[group_key].txPacket()
if comm != dxl.COMM_SUCCESS:
raise ConnectionError(
f"Write failed due to communication error on port {self.port} for group_key {group_key}: "
f"{self.packet_handler.getTxRxResult(comm)}"
)
# log the number of seconds it took to write the data to the motors
delta_ts_name = get_log_name("delta_timestamp_s", "write", data_name, motor_names)
self.logs[delta_ts_name] = time.perf_counter() - start_time
# TODO(rcadene): should we log the time before sending the write command?
# log the utc time when the write has been completed
ts_utc_name = get_log_name("timestamp_utc", "write", data_name, motor_names)
self.logs[ts_utc_name] = capture_timestamp_utc()
def disconnect(self):
if not self.is_connected:
raise RobotDeviceNotConnectedError(
f"DynamixelMotorsBus({self.port}) is not connected. Try running `motors_bus.connect()` first."
)
if self.port_handler is not None:
self.port_handler.closePort()
self.port_handler = None
self.packet_handler = None
self.group_readers = {}
self.group_writers = {}
self.is_connected = False
def __del__(self):
if getattr(self, "is_connected", False):
self.disconnect()
| lerobot/lerobot/common/robot_devices/motors/dynamixel.py/0 | {
"file_path": "lerobot/lerobot/common/robot_devices/motors/dynamixel.py",
"repo_id": "lerobot",
"token_count": 15815
} |
import datetime as dt
import logging
from dataclasses import dataclass, field
from pathlib import Path
from lerobot.common import envs, policies # noqa: F401
from lerobot.common.utils.utils import auto_select_torch_device, is_amp_available, is_torch_device_available
from lerobot.configs import parser
from lerobot.configs.default import EvalConfig
from lerobot.configs.policies import PreTrainedConfig
from lerobot.configs.train import TrainPipelineConfig
@dataclass
class EvalPipelineConfig:
# Either the repo ID of a model hosted on the Hub or a path to a directory containing weights
# saved using `Policy.save_pretrained`. If not provided, the policy is initialized from scratch
# (useful for debugging). This argument is mutually exclusive with `--config`.
env: envs.EnvConfig
eval: EvalConfig = field(default_factory=EvalConfig)
policy: PreTrainedConfig | None = None
output_dir: Path | None = None
job_name: str | None = None
# TODO(rcadene, aliberts): By default, use device and use_amp values from policy checkpoint.
device: str | None = None # cuda | cpu | mps
# `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP,
# automatic gradient scaling is used.
use_amp: bool = False
seed: int | None = 1000
def __post_init__(self):
# HACK: We parse again the cli args here to get the pretrained path if there was one.
policy_path = parser.get_path_arg("policy")
if policy_path:
cli_overrides = parser.get_cli_overrides("policy")
self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides)
self.policy.pretrained_path = policy_path
# When no device or use_amp is given, use the values from the training config.
if self.device is None or self.use_amp is None:
train_cfg = TrainPipelineConfig.from_pretrained(policy_path)
if self.device is None:
self.device = train_cfg.device
if self.use_amp is None:
self.use_amp = train_cfg.use_amp
# Automatically switch to available device if necessary
if not is_torch_device_available(self.device):
auto_device = auto_select_torch_device()
logging.warning(f"Device '{self.device}' is not available. Switching to '{auto_device}'.")
self.device = auto_device
# Automatically deactivate AMP if necessary
if self.use_amp and not is_amp_available(self.device):
logging.warning(
f"Automatic Mixed Precision (amp) is not available on device '{self.device}'. Deactivating AMP."
)
self.use_amp = False
else:
logging.warning(
"No pretrained path was provided, evaluated policy will be built from scratch (random weights)."
)
if not self.job_name:
if self.env is None:
self.job_name = f"{self.policy.type}"
else:
self.job_name = f"{self.env.type}_{self.policy.type}"
if not self.output_dir:
now = dt.datetime.now()
eval_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}"
self.output_dir = Path("outputs/eval") / eval_dir
if self.device is None:
raise ValueError("Set one of the following device: cuda, cpu or mps")
elif self.device == "cuda" and self.use_amp is None:
raise ValueError("Set 'use_amp' to True or False.")
@classmethod
def __get_path_fields__(cls) -> list[str]:
"""This enables the parser to load config from the policy using `--policy.path=local/dir`"""
return ["policy"]
| lerobot/lerobot/configs/eval.py/0 | {
"file_path": "lerobot/lerobot/configs/eval.py",
"repo_id": "lerobot",
"token_count": 1574
} |
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Visualize effects of image transforms for a given configuration.
This script will generate examples of transformed images as they are output by a LeRobot dataset.
Additionally, each individual transform can be visualized separately, as well as examples of combined transforms.
Example:
```bash
python lerobot/scripts/visualize_image_transforms.py \
--repo_id=lerobot/pusht \
--episodes='[0]' \
--image_transforms.enable=True
```
"""
import logging
from copy import deepcopy
from dataclasses import replace
from pathlib import Path
import draccus
from torchvision.transforms import ToPILImage
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.datasets.transforms import (
ImageTransforms,
ImageTransformsConfig,
make_transform_from_config,
)
from lerobot.configs.default import DatasetConfig
OUTPUT_DIR = Path("outputs/image_transforms")
to_pil = ToPILImage()
def save_all_transforms(cfg: ImageTransformsConfig, original_frame, output_dir, n_examples):
output_dir_all = output_dir / "all"
output_dir_all.mkdir(parents=True, exist_ok=True)
tfs = ImageTransforms(cfg)
for i in range(1, n_examples + 1):
transformed_frame = tfs(original_frame)
to_pil(transformed_frame).save(output_dir_all / f"{i}.png", quality=100)
print("Combined transforms examples saved to:")
print(f" {output_dir_all}")
def save_each_transform(cfg: ImageTransformsConfig, original_frame, output_dir, n_examples):
if not cfg.enable:
logging.warning(
"No single transforms will be saved, because `image_transforms.enable=False`. To enable, set `enable` to True in `ImageTransformsConfig` or in the command line with `--image_transforms.enable=True`."
)
return
print("Individual transforms examples saved to:")
for tf_name, tf_cfg in cfg.tfs.items():
# Apply a few transformations with random values in the min_max range
output_dir_single = output_dir / tf_name
output_dir_single.mkdir(parents=True, exist_ok=True)
tf = make_transform_from_config(tf_cfg)
for i in range(1, n_examples + 1):
transformed_frame = tf(original_frame)
to_pil(transformed_frame).save(output_dir_single / f"{i}.png", quality=100)
# Apply min, max, average transformations
tf_cfg_kwgs_min = deepcopy(tf_cfg.kwargs)
tf_cfg_kwgs_max = deepcopy(tf_cfg.kwargs)
tf_cfg_kwgs_avg = deepcopy(tf_cfg.kwargs)
for key, (min_, max_) in tf_cfg.kwargs.items():
avg = (min_ + max_) / 2
tf_cfg_kwgs_min[key] = [min_, min_]
tf_cfg_kwgs_max[key] = [max_, max_]
tf_cfg_kwgs_avg[key] = [avg, avg]
tf_min = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_min}))
tf_max = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_max}))
tf_avg = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_avg}))
tf_frame_min = tf_min(original_frame)
tf_frame_max = tf_max(original_frame)
tf_frame_avg = tf_avg(original_frame)
to_pil(tf_frame_min).save(output_dir_single / "min.png", quality=100)
to_pil(tf_frame_max).save(output_dir_single / "max.png", quality=100)
to_pil(tf_frame_avg).save(output_dir_single / "mean.png", quality=100)
print(f" {output_dir_single}")
@draccus.wrap()
def visualize_image_transforms(cfg: DatasetConfig, output_dir: Path = OUTPUT_DIR, n_examples: int = 5):
dataset = LeRobotDataset(
repo_id=cfg.repo_id,
episodes=cfg.episodes,
local_files_only=cfg.local_files_only,
video_backend=cfg.video_backend,
)
output_dir = output_dir / cfg.repo_id.split("/")[-1]
output_dir.mkdir(parents=True, exist_ok=True)
# Get 1st frame from 1st camera of 1st episode
original_frame = dataset[0][dataset.meta.camera_keys[0]]
to_pil(original_frame).save(output_dir / "original_frame.png", quality=100)
print("\nOriginal frame saved to:")
print(f" {output_dir / 'original_frame.png'}.")
save_all_transforms(cfg.image_transforms, original_frame, output_dir, n_examples)
save_each_transform(cfg.image_transforms, original_frame, output_dir, n_examples)
if __name__ == "__main__":
visualize_image_transforms()
| lerobot/lerobot/scripts/visualize_image_transforms.py/0 | {
"file_path": "lerobot/lerobot/scripts/visualize_image_transforms.py",
"repo_id": "lerobot",
"token_count": 1939
} |
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from safetensors.torch import save_file
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.datasets.transforms import (
ImageTransformConfig,
ImageTransforms,
ImageTransformsConfig,
make_transform_from_config,
)
from lerobot.common.utils.utils import seeded_context
ARTIFACT_DIR = Path("tests/data/save_image_transforms_to_safetensors")
DATASET_REPO_ID = "lerobot/aloha_mobile_shrimp"
def save_default_config_transform(original_frame: torch.Tensor, output_dir: Path):
cfg = ImageTransformsConfig(enable=True)
default_tf = ImageTransforms(cfg)
with seeded_context(1337):
img_tf = default_tf(original_frame)
save_file({"default": img_tf}, output_dir / "default_transforms.safetensors")
def save_single_transforms(original_frame: torch.Tensor, output_dir: Path):
transforms = {
("ColorJitter", "brightness", [(0.5, 0.5), (2.0, 2.0)]),
("ColorJitter", "contrast", [(0.5, 0.5), (2.0, 2.0)]),
("ColorJitter", "saturation", [(0.5, 0.5), (2.0, 2.0)]),
("ColorJitter", "hue", [(-0.25, -0.25), (0.25, 0.25)]),
("SharpnessJitter", "sharpness", [(0.5, 0.5), (2.0, 2.0)]),
}
frames = {"original_frame": original_frame}
for tf_type, tf_name, min_max_values in transforms:
for min_max in min_max_values:
tf_cfg = ImageTransformConfig(type=tf_type, kwargs={tf_name: min_max})
tf = make_transform_from_config(tf_cfg)
key = f"{tf_name}_{min_max[0]}_{min_max[1]}"
frames[key] = tf(original_frame)
save_file(frames, output_dir / "single_transforms.safetensors")
def main():
dataset = LeRobotDataset(DATASET_REPO_ID, episodes=[0], image_transforms=None)
output_dir = Path(ARTIFACT_DIR)
output_dir.mkdir(parents=True, exist_ok=True)
original_frame = dataset[0][dataset.meta.camera_keys[0]]
save_single_transforms(original_frame, output_dir)
save_default_config_transform(original_frame, output_dir)
if __name__ == "__main__":
main()
| lerobot/tests/scripts/save_image_transforms_to_safetensors.py/0 | {
"file_path": "lerobot/tests/scripts/save_image_transforms_to_safetensors.py",
"repo_id": "lerobot",
"token_count": 1043
} |
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datasets import Dataset
from lerobot.common.datasets.push_dataset_to_hub.utils import calculate_episode_data_index
from lerobot.common.datasets.sampler import EpisodeAwareSampler
from lerobot.common.datasets.utils import (
hf_transform_to_torch,
)
def test_drop_n_first_frames():
dataset = Dataset.from_dict(
{
"timestamp": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
"index": [0, 1, 2, 3, 4, 5],
"episode_index": [0, 0, 1, 2, 2, 2],
},
)
dataset.set_transform(hf_transform_to_torch)
episode_data_index = calculate_episode_data_index(dataset)
sampler = EpisodeAwareSampler(episode_data_index, drop_n_first_frames=1)
assert sampler.indices == [1, 4, 5]
assert len(sampler) == 3
assert list(sampler) == [1, 4, 5]
def test_drop_n_last_frames():
dataset = Dataset.from_dict(
{
"timestamp": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
"index": [0, 1, 2, 3, 4, 5],
"episode_index": [0, 0, 1, 2, 2, 2],
},
)
dataset.set_transform(hf_transform_to_torch)
episode_data_index = calculate_episode_data_index(dataset)
sampler = EpisodeAwareSampler(episode_data_index, drop_n_last_frames=1)
assert sampler.indices == [0, 3, 4]
assert len(sampler) == 3
assert list(sampler) == [0, 3, 4]
def test_episode_indices_to_use():
dataset = Dataset.from_dict(
{
"timestamp": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
"index": [0, 1, 2, 3, 4, 5],
"episode_index": [0, 0, 1, 2, 2, 2],
},
)
dataset.set_transform(hf_transform_to_torch)
episode_data_index = calculate_episode_data_index(dataset)
sampler = EpisodeAwareSampler(episode_data_index, episode_indices_to_use=[0, 2])
assert sampler.indices == [0, 1, 3, 4, 5]
assert len(sampler) == 5
assert list(sampler) == [0, 1, 3, 4, 5]
def test_shuffle():
dataset = Dataset.from_dict(
{
"timestamp": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
"index": [0, 1, 2, 3, 4, 5],
"episode_index": [0, 0, 1, 2, 2, 2],
},
)
dataset.set_transform(hf_transform_to_torch)
episode_data_index = calculate_episode_data_index(dataset)
sampler = EpisodeAwareSampler(episode_data_index, shuffle=False)
assert sampler.indices == [0, 1, 2, 3, 4, 5]
assert len(sampler) == 6
assert list(sampler) == [0, 1, 2, 3, 4, 5]
sampler = EpisodeAwareSampler(episode_data_index, shuffle=True)
assert sampler.indices == [0, 1, 2, 3, 4, 5]
assert len(sampler) == 6
assert set(sampler) == {0, 1, 2, 3, 4, 5}
| lerobot/tests/test_sampler.py/0 | {
"file_path": "lerobot/tests/test_sampler.py",
"repo_id": "lerobot",
"token_count": 1429
} |
[isort]
default_section = FIRSTPARTY
ensure_newline_before_comments = True
force_grid_wrap = 0
include_trailing_comma = True
known_first_party = open_r1
known_third_party =
transformers
datasets
fugashi
git
h5py
matplotlib
nltk
numpy
packaging
pandas
psutil
pytest
rouge_score
sacrebleu
seqeval
sklearn
streamlit
torch
tqdm
line_length = 119
lines_after_imports = 2
multi_line_output = 3
use_parentheses = True
[flake8]
ignore = E203, E501, E741, W503, W605
max-line-length = 119
per-file-ignores =
# imported but unused
__init__.py: F401
[tool:pytest]
doctest_optionflags=NUMBER NORMALIZE_WHITESPACE ELLIPSIS | open-r1/setup.cfg/0 | {
"file_path": "open-r1/setup.cfg",
"repo_id": "open-r1",
"token_count": 300
} |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Push the details from a LightEval run to the Hub.
Usage:
python src/open_r1/utils/upload_details.py \
--data_files {path_to_parquet_file} \
--hub_repo_id {hub_repo_id} \
--config_name {config_name}
"""
from dataclasses import dataclass, field
from typing import List
from datasets import load_dataset
from transformers import HfArgumentParser
@dataclass
class ScriptArguments:
data_files: List[str] = field(default_factory=list)
hub_repo_id: str = None
config_name: str = None
def main():
parser = HfArgumentParser(ScriptArguments)
args = parser.parse_args_into_dataclasses()[0]
if all(file.endswith(".json") for file in args.data_files):
ds = load_dataset("json", data_files=args.data_files)
elif all(file.endswith(".jsonl") for file in args.data_files):
ds = load_dataset("json", data_files=args.data_files)
else:
ds = load_dataset("parquet", data_files=args.data_files)
url = ds.push_to_hub(args.hub_repo_id, config_name=args.config_name, private=True)
print(f"Dataset available at: {url}")
if __name__ == "__main__":
main()
| open-r1/src/open_r1/utils/upload_details.py/0 | {
"file_path": "open-r1/src/open_r1/utils/upload_details.py",
"repo_id": "open-r1",
"token_count": 615
} |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# LoRA
Low-Rank Adaptation ([LoRA](https://huggingface.co/papers/2309.15223)) is a PEFT method that decomposes a large matrix into two smaller low-rank matrices in the attention layers. This drastically reduces the number of parameters that need to be fine-tuned.
The abstract from the paper is:
*We propose a neural language modeling system based on low-rank adaptation (LoRA) for speech recognition output rescoring. Although pretrained language models (LMs) like BERT have shown superior performance in second-pass rescoring, the high computational cost of scaling up the pretraining stage and adapting the pretrained models to specific domains limit their practical use in rescoring. Here we present a method based on low-rank decomposition to train a rescoring BERT model and adapt it to new domains using only a fraction (0.08%) of the pretrained parameters. These inserted matrices are optimized through a discriminative training objective along with a correlation-based regularization loss. The proposed low-rank adaptation Rescore-BERT (LoRB) architecture is evaluated on LibriSpeech and internal datasets with decreased training times by factors between 5.4 and 3.6.*.
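A minimal usage sketch (illustrative; the base checkpoint and the `target_modules` names below are assumptions and depend on the model you adapt):
```py
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model
# Load any base model; "facebook/opt-350m" is only an example checkpoint.
base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
# r is the rank of the low-rank update matrices and lora_alpha the scaling factor.
config = LoraConfig(r=16, lora_alpha=16, target_modules=["q_proj", "v_proj"], lora_dropout=0.1, bias="none")
model = get_peft_model(base_model, config)
model.print_trainable_parameters()
```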
## LoraConfig
[[autodoc]] tuners.lora.config.LoraConfig
## LoraModel
[[autodoc]] tuners.lora.model.LoraModel
## Utility
### LoftQ
[[autodoc]] utils.loftq_utils.replace_lora_weights_loftq
### Eva
#### EvaConfig
[[autodoc]] tuners.lora.config.EvaConfig
#### initialize_lora_eva_weights
[[autodoc]] tuners.lora.eva.initialize_lora_eva_weights
#### get_eva_state_dict
[[autodoc]] tuners.lora.eva.get_eva_state_dict
| peft/docs/source/package_reference/lora.md/0 | {
"file_path": "peft/docs/source/package_reference/lora.md",
"repo_id": "peft",
"token_count": 627
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# LoRA methods
A popular way to efficiently train large models is to insert (typically in the attention blocks) smaller trainable matrices that are a low-rank decomposition of the delta weight matrix to be learnt during finetuning. The pretrained model's original weight matrix is frozen and only the smaller matrices are updated during training. This reduces the number of trainable parameters, reducing memory usage and training time which can be very expensive for large models.
There are several different ways to express the weight matrix as a low-rank decomposition, but [Low-Rank Adaptation (LoRA)](../conceptual_guides/adapter#low-rank-adaptation-lora) is the most common method. The PEFT library supports several other LoRA variants, such as [Low-Rank Hadamard Product (LoHa)](../conceptual_guides/adapter#low-rank-hadamard-product-loha), [Low-Rank Kronecker Product (LoKr)](../conceptual_guides/adapter#low-rank-kronecker-product-lokr), and [Adaptive Low-Rank Adaptation (AdaLoRA)](../conceptual_guides/adapter#adaptive-low-rank-adaptation-adalora). You can learn more about how these methods work conceptually in the [Adapters](../conceptual_guides/adapter) guide. If you're interested in applying these methods to other tasks and use cases like semantic segmentation, token classification, take a look at our [notebook collection](https://huggingface.co/collections/PEFT/notebooks-6573b28b33e5a4bf5b157fc1)!
Additionally, PEFT supports the [X-LoRA](../conceptual_guides/adapter#mixture-of-lora-experts-x-lora) Mixture of LoRA Experts method.
This guide will show you how to quickly train an image classification model - with a low-rank decomposition method - to identify the class of food shown in an image.
<Tip>
Some familiarity with the general process of training an image classification model would be really helpful and allow you to focus on the low-rank decomposition methods. If you're new, we recommend taking a look at the [Image classification](https://huggingface.co/docs/transformers/tasks/image_classification) guide first from the Transformers documentation. When you're ready, come back and see how easy it is to drop PEFT in to your training!
</Tip>
Before you begin, make sure you have all the necessary libraries installed.
```bash
pip install -q peft transformers datasets
```
## Dataset
In this guide, you'll use the [Food-101](https://huggingface.co/datasets/food101) dataset which contains images of 101 food classes (take a look at the [dataset viewer](https://huggingface.co/datasets/food101/viewer/default/train) to get a better idea of what the dataset looks like).
Load the dataset with the [`~datasets.load_dataset`] function.
```py
from datasets import load_dataset
ds = load_dataset("food101")
```
Each food class is labeled with an integer, so to make it easier to understand what these integers represent, you'll create a `label2id` and `id2label` dictionary to map the integer to its class label.
```py
labels = ds["train"].features["label"].names
label2id, id2label = dict(), dict()
for i, label in enumerate(labels):
label2id[label] = i
id2label[i] = label
id2label[2]
"baklava"
```
Load an image processor to properly resize and normalize the pixel values of the training and evaluation images.
```py
from transformers import AutoImageProcessor
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
```
You can also use the image processor to prepare some transformation functions for data augmentation and pixel scaling.
```py
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
train_transforms = Compose(
[
RandomResizedCrop(image_processor.size["height"]),
RandomHorizontalFlip(),
ToTensor(),
normalize,
]
)
val_transforms = Compose(
[
Resize(image_processor.size["height"]),
CenterCrop(image_processor.size["height"]),
ToTensor(),
normalize,
]
)
def preprocess_train(example_batch):
example_batch["pixel_values"] = [train_transforms(image.convert("RGB")) for image in example_batch["image"]]
return example_batch
def preprocess_val(example_batch):
example_batch["pixel_values"] = [val_transforms(image.convert("RGB")) for image in example_batch["image"]]
return example_batch
```
Define the training and validation datasets, and use the [`~datasets.Dataset.set_transform`] function to apply the transformations on-the-fly.
```py
train_ds = ds["train"]
val_ds = ds["validation"]
train_ds.set_transform(preprocess_train)
val_ds.set_transform(preprocess_val)
```
Finally, you'll need a data collator to create a batch of training and evaluation data and convert the labels to `torch.tensor` objects.
```py
import torch
def collate_fn(examples):
pixel_values = torch.stack([example["pixel_values"] for example in examples])
labels = torch.tensor([example["label"] for example in examples])
return {"pixel_values": pixel_values, "labels": labels}
```
## Model
Now let's load a pretrained model to use as the base model. This guide uses the [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) model, but you can use any image classification model you want. Pass the `label2id` and `id2label` dictionaries to the model so it knows how to map the integer labels to their class labels, and you can optionally pass the `ignore_mismatched_sizes=True` parameter if you're finetuning a checkpoint that has already been finetuned.
```py
from transformers import AutoModelForImageClassification, TrainingArguments, Trainer
model = AutoModelForImageClassification.from_pretrained(
"google/vit-base-patch16-224-in21k",
label2id=label2id,
id2label=id2label,
ignore_mismatched_sizes=True,
)
```
### PEFT configuration and model
Every PEFT method requires a configuration that holds all the parameters specifying how the PEFT method should be applied. Once the configuration is setup, pass it to the [`~peft.get_peft_model`] function along with the base model to create a trainable [`PeftModel`].
<Tip>
Call the [`~PeftModel.print_trainable_parameters`] method to compare the number of parameters of [`PeftModel`] versus the number of parameters in the base model!
</Tip>
<hfoptions id="loras">
<hfoption id="LoRA">
[LoRA](../conceptual_guides/adapter#low-rank-adaptation-lora) decomposes the weight update matrix into *two* smaller matrices. The size of these low-rank matrices is determined by its *rank* or `r`. A higher rank means the model has more parameters to train, but it also means the model has more learning capacity. You'll also want to specify the `target_modules` which determine where the smaller matrices are inserted. For this guide, you'll target the *query* and *value* matrices of the attention blocks. Other important parameters to set are `lora_alpha` (scaling factor), `bias` (whether `none`, `all` or only the LoRA bias parameters should be trained), and `modules_to_save` (the modules apart from the LoRA layers to be trained and saved). All of these parameters - and more - are found in the [`LoraConfig`].
```py
from peft import LoraConfig, get_peft_model
config = LoraConfig(
r=16,
lora_alpha=16,
target_modules=["query", "value"],
lora_dropout=0.1,
bias="none",
modules_to_save=["classifier"],
)
model = get_peft_model(model, config)
model.print_trainable_parameters()
"trainable params: 667,493 || all params: 86,543,818 || trainable%: 0.7712775047664294"
```
</hfoption>
<hfoption id="LoHa">
[LoHa](../conceptual_guides/adapter#low-rank-hadamard-product-loha) decomposes the weight update matrix into *four* smaller matrices and each pair of smaller matrices is combined with the Hadamard product. This allows the weight update matrix to keep the same number of trainable parameters when compared to LoRA, but with a higher rank (`r^2` for LoHA when compared to `2*r` for LoRA). The size of the smaller matrices is determined by its *rank* or `r`. You'll also want to specify the `target_modules` which determines where the smaller matrices are inserted. For this guide, you'll target the *query* and *value* matrices of the attention blocks. Other important parameters to set are `alpha` (scaling factor), and `modules_to_save` (the modules apart from the LoHa layers to be trained and saved). All of these parameters - and more - are found in the [`LoHaConfig`].
```py
from peft import LoHaConfig, get_peft_model
config = LoHaConfig(
r=16,
alpha=16,
target_modules=["query", "value"],
module_dropout=0.1,
modules_to_save=["classifier"],
)
model = get_peft_model(model, config)
model.print_trainable_parameters()
"trainable params: 1,257,317 || all params: 87,133,642 || trainable%: 1.4429753779831676"
```
</hfoption>
<hfoption id="LoKr">
[LoKr](../conceptual_guides/adapter#low-rank-kronecker-product-lokr) expresses the weight update matrix as a decomposition of a Kronecker product, creating a block matrix that is able to preserve the rank of the original weight matrix. The size of the smaller matrices are determined by its *rank* or `r`. You'll also want to specify the `target_modules` which determines where the smaller matrices are inserted. For this guide, you'll target the *query* and *value* matrices of the attention blocks. Other important parameters to set are `alpha` (scaling factor), and `modules_to_save` (the modules apart from the LoKr layers to be trained and saved). All of these parameters - and more - are found in the [`LoKrConfig`].
```py
from peft import LoKrConfig, get_peft_model
config = LoKrConfig(
r=16,
alpha=16,
target_modules=["query", "value"],
module_dropout=0.1,
modules_to_save=["classifier"],
)
model = get_peft_model(model, config)
model.print_trainable_parameters()
"trainable params: 116,069 || all params: 87,172,042 || trainable%: 0.13314934162033282"
```
</hfoption>
<hfoption id="AdaLoRA">
[AdaLoRA](../conceptual_guides/adapter#adaptive-low-rank-adaptation-adalora) efficiently manages the LoRA parameter budget by assigning important weight matrices more parameters and pruning less important ones. In contrast, LoRA evenly distributes parameters across all modules. You can control the average desired *rank* or `r` of the matrices, and which modules to apply AdaLoRA to with `target_modules`. Other important parameters to set are `lora_alpha` (scaling factor), and `modules_to_save` (the modules apart from the AdaLoRA layers to be trained and saved). All of these parameters - and more - are found in the [`AdaLoraConfig`].
```py
from peft import AdaLoraConfig, get_peft_model
config = AdaLoraConfig(
r=8,
init_r=12,
tinit=200,
tfinal=1000,
deltaT=10,
target_modules=["query", "value"],
modules_to_save=["classifier"],
)
model = get_peft_model(model, config)
model.print_trainable_parameters()
"trainable params: 520,325 || all params: 87,614,722 || trainable%: 0.5938785036606062"
```
</hfoption>
</hfoptions>
### Training
For training, let's use the [`~transformers.Trainer`] class from Transformers. The [`Trainer`] contains a PyTorch training loop, and when you're ready, call [`~transformers.Trainer.train`] to start training. To customize the training run, configure the training hyperparameters in the [`~transformers.TrainingArguments`] class. With LoRA-like methods, you can afford to use a higher batch size and learning rate.
> [!WARNING]
> AdaLoRA has an [`~AdaLoraModel.update_and_allocate`] method that should be called at each training step to update the parameter budget and mask, otherwise the adaptation step is not performed. This requires writing a custom training loop or subclassing the [`~transformers.Trainer`] to incorporate this method. As an example, take a look at this [custom training loop](https://github.com/huggingface/peft/blob/912ad41e96e03652cabf47522cd876076f7a0c4f/examples/conditional_generation/peft_adalora_seq2seq.py#L120).
```py
from transformers import TrainingArguments, Trainer
account = "stevhliu"
peft_model_id = f"{account}/google/vit-base-patch16-224-in21k-lora"
batch_size = 128
args = TrainingArguments(
peft_model_id,
remove_unused_columns=False,
eval_strategy="epoch",
save_strategy="epoch",
learning_rate=5e-3,
per_device_train_batch_size=batch_size,
gradient_accumulation_steps=4,
per_device_eval_batch_size=batch_size,
fp16=True,
num_train_epochs=5,
logging_steps=10,
load_best_model_at_end=True,
label_names=["labels"],
)
```
Begin training with [`~transformers.Trainer.train`].
```py
trainer = Trainer(
model,
args,
train_dataset=train_ds,
eval_dataset=val_ds,
tokenizer=image_processor,
data_collator=collate_fn,
)
trainer.train()
```
## Share your model
Once training is complete, you can upload your model to the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] method. You’ll need to log in to your Hugging Face account first and enter your token when prompted.
```py
from huggingface_hub import notebook_login
notebook_login()
```
Call [`~transformers.PreTrainedModel.push_to_hub`] to save your model to your repository.
```py
model.push_to_hub(peft_model_id)
```
## Inference
Let's load the model from the Hub and test it out on a food image.
```py
from peft import PeftConfig, PeftModel
from transformers import AutoImageProcessor
from PIL import Image
import requests
config = PeftConfig.from_pretrained("stevhliu/vit-base-patch16-224-in21k-lora")
model = AutoModelForImageClassification.from_pretrained(
config.base_model_name_or_path,
label2id=label2id,
id2label=id2label,
ignore_mismatched_sizes=True,
)
model = PeftModel.from_pretrained(model, "stevhliu/vit-base-patch16-224-in21k-lora")
url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/beignets.jpeg"
image = Image.open(requests.get(url, stream=True).raw)
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/beignets.jpeg">
</div>
Convert the image to RGB and return the underlying PyTorch tensors.
```py
encoding = image_processor(image.convert("RGB"), return_tensors="pt")
```
Now run the model and return the predicted class!
```py
with torch.no_grad():
outputs = model(**encoding)
logits = outputs.logits
predicted_class_idx = logits.argmax(-1).item()
print("Predicted class:", model.config.id2label[predicted_class_idx])
"Predicted class: beignets"
```
| peft/docs/source/task_guides/lora_based_methods.md/0 | {
"file_path": "peft/docs/source/task_guides/lora_based_methods.md",
"repo_id": "peft",
"token_count": 4895
} |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Union
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.unet_2d_blocks import (
CrossAttnDownBlock2D,
DownBlock2D,
)
from diffusers.utils import BaseOutput, logging
from torch import nn
from torch.nn import functional as F
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class ControlNetOutput(BaseOutput):
down_block_res_samples: Tuple[torch.Tensor]
mid_block_res_sample: torch.Tensor
class ControlNetConditioningEmbedding(nn.Module):
"""
Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
[11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
(activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
model) to encode image-space conditions ... into feature maps ..."
"""
def __init__(
self,
conditioning_embedding_channels: int,
conditioning_channels: int = 3,
block_out_channels: Tuple[int] = (16, 32, 96, 256),
):
super().__init__()
self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
self.blocks = nn.ModuleList([])
for i in range(len(block_out_channels) - 1):
channel_in = block_out_channels[i]
channel_out = block_out_channels[i + 1]
self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
self.conv_out = zero_module(
nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
)
def forward(self, conditioning):
embedding = self.conv_in(conditioning)
embedding = F.silu(embedding)
for block in self.blocks:
embedding = block(embedding)
embedding = F.silu(embedding)
embedding = self.conv_out(embedding)
return embedding
class ControlNetModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
in_channels: int = 4,
out_channels: int = 320,
controlnet_conditioning_channel_order: str = "rgb",
conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
):
super().__init__()
# for control image
self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
conditioning_embedding_channels=out_channels,
block_out_channels=conditioning_embedding_out_channels,
)
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model with
indexed by its weight name.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "set_processor"):
processors[f"{name}.processor"] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Parameters:
`processor (`dict` of `AttentionProcessor` or `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
of **all** `Attention` layers.
In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors.:
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
def set_default_attn_processor(self):
"""
Disables custom attention processors and sets the default attention implementation.
"""
self.set_attn_processor(AttnProcessor())
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_sliceable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_sliceable_dims(module)
num_sliceable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_sliceable_layers * [1]
slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):
module.gradient_checkpointing = value
def forward(
self,
controlnet_cond: torch.FloatTensor,
) -> Union[ControlNetOutput, Tuple]:
# check channel order
channel_order = self.config.controlnet_conditioning_channel_order
if channel_order == "rgb":
# in rgb order by default
...
elif channel_order == "bgr":
controlnet_cond = torch.flip(controlnet_cond, dims=[1])
else:
raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
# 2. pre-process
controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
return controlnet_cond
def zero_module(module):
for p in module.parameters():
nn.init.zeros_(p)
return module
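# Example (a minimal sketch, not part of the original module): this trimmed-down ControlNet only
# embeds an image-space condition into the UNet's feature space via the conditioning embedding.
#
#     model = ControlNetModel(out_channels=320)
#     cond = torch.randn(1, 3, 512, 512)  # conditioning image, NCHW, RGB
#     features = model(cond)              # shape (1, 320, 64, 64) after three stride-2 convs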
| peft/examples/boft_controlnet/utils/light_controlnet.py/0 | {
"file_path": "peft/examples/boft_controlnet/utils/light_controlnet.py",
"repo_id": "peft",
"token_count": 4318
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass, field
from typing import Literal, Optional
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
from trl import SFTConfig, SFTTrainer
from peft import BoneConfig, get_peft_model
@dataclass
class ScriptArguments(SFTConfig):
# model configs
base_model_name_or_path: Optional[str] = field(
default=None, metadata={"help": "The name or path of the fp32/16 base model."}
)
bits: str = field(default="bf16", metadata={"help": "(`['bf16', 'fp16', fp32]`)"})
init_weights: Literal[True, "bat"] = field(
default=True,
metadata={
"help": ("True -> Bone; `bat` -> Bat"),
},
)
bone_r: int = field(default=16)
merge_and_save: bool = field(default=False)
# dataset configs
data_path: str = field(default="imdb", metadata={"help": "Path to the training data."})
dataset_split: str = field(default="train[:1%]", metadata={"help": "(`['train', 'test', 'eval']`):"})
dataset_field: list[str] = field(default=None, metadata={"help": "Fields of dataset input and output."})
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
print(script_args)
print(f"Load pre-processed residual model in {script_args.bits} bits.")
if script_args.bits in ["nf4", "fp4", "int8"]:
print("Bone currently does not support quantization.")
elif script_args.base_model_name_or_path is not None:
print(f"No available pre-processed model, manually initialize a Bone using {script_args.base_model_name_or_path}.")
model = AutoModelForCausalLM.from_pretrained(
script_args.base_model_name_or_path,
torch_dtype=(
torch.float16
if script_args.bits == "fp16"
else (torch.bfloat16 if script_args.bits == "bf16" else torch.float32)
),
device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name_or_path)
tokenizer.pad_token_id = tokenizer.eos_token_id
bone_config = BoneConfig(
r=script_args.bone_r,
target_modules=["q_proj", "o_proj", "k_proj", "v_proj", "gate_proj", "up_proj", "down_proj"],
bias="none",
task_type="CAUSAL_LM",
init_weights=script_args.init_weights,
)
peft_model = get_peft_model(model, bone_config)
print(peft_model)
peft_model.print_trainable_parameters()
print(f"Training Bone with trl on the {script_args.data_path}[{script_args.dataset_split}] dataset.")
dataset = load_dataset(script_args.data_path, split=script_args.dataset_split)
dataset = dataset.map(
lambda example: {
"text": f"### USER: {example[script_args.dataset_field[0]]}\n### ASSISTANT: {example[script_args.dataset_field[1]]}"
}
)
trainer = SFTTrainer(
model=peft_model,
args=script_args,
train_dataset=dataset,
tokenizer=tokenizer,
)
trainer.train()
trainer.save_state()
peft_model.save_pretrained(
os.path.join(script_args.output_dir, "bone_ft"),
)
if script_args.merge_and_save:
model = peft_model.merge_and_unload()
model.save_pretrained(os.path.join(script_args.output_dir, "bone_merged"))
tokenizer.save_pretrained(os.path.join(script_args.output_dir, "bone_merged"))
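# Example launch (a hypothetical invocation; the model name, dataset and its `query`/`response`
# columns are placeholders -- adjust them to your setup):
#
#     python bone_finetuning.py \
#         --base_model_name_or_path meta-llama/Llama-2-7b-hf \
#         --output_dir output/bone-llama-2-7b \
#         --bone_r 64 \
#         --bits bf16 \
#         --data_path your/instruction-dataset \
#         --dataset_split "train[:1%]" \
#         --dataset_field query response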
| peft/examples/bone_finetuning/bone_finetuning.py/0 | {
"file_path": "peft/examples/bone_finetuning/bone_finetuning.py",
"repo_id": "peft",
"token_count": 1490
} |
import os
import torch
from accelerate import Accelerator
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup
from peft import LoraConfig, TaskType, get_peft_model
from peft.utils.other import fsdp_auto_wrap_policy
def main():
accelerator = Accelerator()
model_name_or_path = "t5-base"
batch_size = 8
text_column = "sentence"
label_column = "label"
max_length = 64
lr = 1e-3
num_epochs = 1
base_path = "temp/data/FinancialPhraseBank-v1.0"
peft_config = LoraConfig(
task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
model = get_peft_model(model, peft_config)
accelerator.print(model.print_trainable_parameters())
dataset = load_dataset(
"json",
data_files={
"train": os.path.join(base_path, "financial_phrase_bank_train.jsonl"),
"validation": os.path.join(base_path, "financial_phrase_bank_val.jsonl"),
},
)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
def preprocess_function(examples):
inputs = examples[text_column]
targets = examples[label_column]
model_inputs = tokenizer(
inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt"
)
labels = tokenizer(targets, max_length=2, padding="max_length", truncation=True, return_tensors="pt")
labels = labels["input_ids"]
labels[labels == tokenizer.pad_token_id] = -100
model_inputs["labels"] = labels
return model_inputs
with accelerator.main_process_first():
processed_datasets = dataset.map(
preprocess_function,
batched=True,
num_proc=1,
remove_columns=dataset["train"].column_names,
load_from_cache_file=False,
desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation"]
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
)
eval_dataloader = DataLoader(
eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
)
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=(len(train_dataloader) * num_epochs),
)
if getattr(accelerator.state, "fsdp_plugin", None) is not None:
accelerator.state.fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(model)
model, train_dataloader, eval_dataloader, optimizer, lr_scheduler = accelerator.prepare(
model, train_dataloader, eval_dataloader, optimizer, lr_scheduler
)
accelerator.print(model)
for epoch in range(num_epochs):
model.train()
total_loss = 0
for step, batch in enumerate(tqdm(train_dataloader)):
outputs = model(**batch)
loss = outputs.loss
total_loss += loss.detach().float()
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
eval_loss = 0
eval_preds = []
for step, batch in enumerate(tqdm(eval_dataloader)):
with torch.no_grad():
outputs = model(**batch)
loss = outputs.loss
eval_loss += loss.detach().float()
preds = accelerator.gather_for_metrics(torch.argmax(outputs.logits, -1)).detach().cpu().numpy()
eval_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True))
eval_epoch_loss = eval_loss / len(eval_dataloader)
eval_ppl = torch.exp(eval_epoch_loss)
train_epoch_loss = total_loss / len(train_dataloader)
train_ppl = torch.exp(train_epoch_loss)
accelerator.print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")
correct = 0
total = 0
for pred, true in zip(eval_preds, dataset["validation"][label_column]):
if pred.strip() == true.strip():
correct += 1
total += 1
accuracy = correct / total * 100
accelerator.print(f"{accuracy=}")
accelerator.print(f"{eval_preds[:10]=}")
accelerator.print(f"{dataset['validation'][label_column][:10]=}")
accelerator.wait_for_everyone()
# Option1: Pushing the model to Hugging Face Hub
# model.push_to_hub(
# f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"),
# token = "hf_..."
# )
# token (`bool` or `str`, *optional*):
# `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated
# when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url`
# is not specified.
# Or you can get your token from https://huggingface.co/settings/token
# Option2: Saving the model locally
peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_")
model.save_pretrained(peft_model_id)
accelerator.wait_for_everyone()
if __name__ == "__main__":
main()
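# Example launch (a hypothetical sketch; assumes an `accelerate` config with FSDP enabled and the
# FinancialPhraseBank JSONL files prepared under temp/data/FinancialPhraseBank-v1.0):
#
#     accelerate launch --config_file fsdp_config.yaml peft_lora_seq2seq_accelerate_fsdp.py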
| peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_fsdp.py/0 | {
"file_path": "peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_fsdp.py",
"repo_id": "peft",
"token_count": 2543
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser
from trl import SFTConfig, SFTTrainer
from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training
@dataclass
class ScriptArguments(SFTConfig):
# model configs
base_model_name_or_path: Optional[str] = field(
default=None, metadata={"help": "The name or path of the fp32/16 base model."}
)
residual_model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The name or path of the fp32/16 residual model. (`['fxmeng/pissa-llama-2-7b-r16-alpha-16']`)"
},
)
bits: str = field(default="fp32", metadata={"help": "(`['fp4', 'nf4', 'int8', 'bf16', 'fp16', fp32]`)"})
init_lora_weights: str = field(default="pissa", metadata={"help": "(`['gaussian', 'pissa', 'pissa_niter_4']`)"})
lora_r: int = field(default=16)
lora_alpha: int = field(default=16)
lora_dropout: float = field(default=0)
convert_pissa_to_lora: bool = field(default=False)
merge_and_save: bool = field(default=False)
# dataset configs
data_path: str = field(default="imdb", metadata={"help": "Path to the training data."})
dataset_split: str = field(default="train[:1%]", metadata={"help": "(`['train', 'test', 'eval']`):"})
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
print(script_args)
print(f"Load pre-processed residual model in {script_args.bits} bits.")
if script_args.bits in ["nf4", "fp4", "int8"]:
quantization_config = BitsAndBytesConfig(
load_in_4bit=(script_args.bits == "nf4" or script_args.bits == "fp4"),
load_in_8bit=script_args.bits == "int8",
bnb_4bit_quant_type=script_args.bits,
bnb_4bit_use_double_quant=True,
bnb_4bit_compute_dtype=torch.bfloat16,
)
res_model = AutoModelForCausalLM.from_pretrained(
script_args.residual_model_name_or_path, quantization_config=quantization_config, low_cpu_mem_usage=True
)
res_model = prepare_model_for_kbit_training(res_model)
print("Wrapping the residual model with PiSSA.")
peft_model = PeftModel.from_pretrained(
res_model, script_args.residual_model_name_or_path, subfolder="pissa_init", is_trainable=True
)
tokenizer = AutoTokenizer.from_pretrained(script_args.residual_model_name_or_path)
elif script_args.residual_model_name_or_path is not None:
res_model = AutoModelForCausalLM.from_pretrained(
script_args.residual_model_name_or_path,
torch_dtype=(
torch.float16
if script_args.bits == "fp16"
else (torch.bfloat16 if script_args.bits == "bf16" else torch.float32)
),
device_map="auto",
)
print("Wrapping the residual model with PiSSA.")
peft_model = PeftModel.from_pretrained(
res_model, script_args.residual_model_name_or_path, subfolder="pissa_init", is_trainable=True
)
tokenizer = AutoTokenizer.from_pretrained(script_args.residual_model_name_or_path)
elif script_args.base_model_name_or_path is not None:
print(
f"No available pre-processed model, manually initialize a PiSSA using {script_args.base_model_name_or_path}."
)
model = AutoModelForCausalLM.from_pretrained(
script_args.base_model_name_or_path,
torch_dtype=(
torch.float16
if script_args.bits == "fp16"
else (torch.bfloat16 if script_args.bits == "bf16" else torch.float32)
),
device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name_or_path)
tokenizer.pad_token_id = tokenizer.eos_token_id
lora_config = LoraConfig(
r=script_args.lora_r,
lora_alpha=script_args.lora_alpha,
init_lora_weights=script_args.init_lora_weights,
lora_dropout=script_args.lora_dropout,
target_modules=["q_proj", "o_proj", "k_proj", "v_proj", "gate_proj", "up_proj", "down_proj"],
bias="none",
task_type="CAUSAL_LM",
)
peft_model = get_peft_model(model, lora_config)
print(peft_model)
peft_model.print_trainable_parameters()
print(f"Training PiSSA with trl on the {script_args.data_path}[{script_args.dataset_split}] dataset.")
dataset = load_dataset(script_args.data_path, split=script_args.dataset_split)
dataset = dataset.map(
lambda example: {
"text": f"### USER: {example[script_args.dataset_field[0]]}\n### ASSISTANT: {example[script_args.dataset_field[1]]}"
}
)
trainer = SFTTrainer(
model=peft_model,
args=script_args,
train_dataset=dataset,
tokenizer=tokenizer,
)
trainer.train()
trainer.save_state()
############################## Upon training completion, convert and save PiSSA in LoRA format ##############################
if script_args.convert_pissa_to_lora:
peft_model.save_pretrained(
os.path.join(script_args.output_dir, "pissa_lora"),
path_initial_model_for_weight_conversion=os.path.join(script_args.residual_model_name_or_path, "pissa_init"),
)
else:
peft_model.save_pretrained(
os.path.join(script_args.output_dir, "pissa_ft"),
)
if script_args.merge_and_save:
model = peft_model.merge_and_unload()
model.save_pretrained(os.path.join(script_args.output_dir, "pissa_merged"))
tokenizer.save_pretrained(os.path.join(script_args.output_dir, "pissa_merged"))
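# Example launch (a hypothetical invocation; the model name, dataset and its `query`/`response`
# columns are placeholders -- adjust them to your setup):
#
#     python pissa_finetuning.py \
#         --base_model_name_or_path meta-llama/Llama-2-7b-hf \
#         --output_dir output/pissa-llama-2-7b \
#         --init_lora_weights pissa_niter_4 \
#         --lora_r 16 \
#         --lora_alpha 16 \
#         --data_path your/instruction-dataset \
#         --dataset_field query response \
#         --merge_and_save True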
| peft/examples/pissa_finetuning/pissa_finetuning.py/0 | {
"file_path": "peft/examples/pissa_finetuning/pissa_finetuning.py",
"repo_id": "peft",
"token_count": 2492
} |
import os
from enum import Enum
import packaging.version
import torch
import transformers
from datasets import DatasetDict, load_dataset, load_from_disk
from datasets.builder import DatasetGenerationError
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
)
from peft import LoraConfig
DEFAULT_CHATML_CHAT_TEMPLATE = "{% for message in messages %}\n{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% if loop.last and add_generation_prompt %}{{'<|im_start|>assistant\n' }}{% endif %}{% endfor %}"
DEFAULT_ZEPHYR_CHAT_TEMPLATE = "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"
class ZephyrSpecialTokens(str, Enum):
user = "<|user|>"
assistant = "<|assistant|>"
system = "<|system|>"
eos_token = "</s>"
bos_token = "<s>"
pad_token = "<pad>"
@classmethod
def list(cls):
return [c.value for c in cls]
class ChatmlSpecialTokens(str, Enum):
user = "<|im_start|>user"
assistant = "<|im_start|>assistant"
system = "<|im_start|>system"
eos_token = "<|im_end|>"
bos_token = "<s>"
pad_token = "<pad>"
@classmethod
def list(cls):
return [c.value for c in cls]
def create_datasets(tokenizer, data_args, training_args, apply_chat_template=False):
def preprocess(samples):
batch = []
for conversation in samples["messages"]:
batch.append(tokenizer.apply_chat_template(conversation, tokenize=False))
return {"content": batch}
raw_datasets = DatasetDict()
for split in data_args.splits.split(","):
try:
# Try first if dataset on a Hub repo
dataset = load_dataset(data_args.dataset_name, split=split)
except DatasetGenerationError:
# If not, check local dataset
dataset = load_from_disk(os.path.join(data_args.dataset_name, split))
if "train" in split:
raw_datasets["train"] = dataset
elif "test" in split:
raw_datasets["test"] = dataset
else:
raise ValueError(f"Split type {split} not recognized as one of test or train.")
if apply_chat_template:
raw_datasets = raw_datasets.map(
preprocess,
batched=True,
remove_columns=raw_datasets["train"].column_names,
)
train_data = raw_datasets["train"]
valid_data = raw_datasets["test"]
print(f"Size of the train set: {len(train_data)}. Size of the validation set: {len(valid_data)}")
print(f"A sample of train dataset: {train_data[0]}")
return train_data, valid_data
def create_and_prepare_model(args, data_args, training_args):
if args.use_unsloth:
from unsloth import FastLanguageModel
bnb_config = None
quant_storage_dtype = None
if (
torch.distributed.is_available()
and torch.distributed.is_initialized()
and torch.distributed.get_world_size() > 1
and args.use_unsloth
):
raise NotImplementedError("Unsloth is not supported in distributed training")
if args.use_4bit_quantization:
compute_dtype = getattr(torch, args.bnb_4bit_compute_dtype)
quant_storage_dtype = getattr(torch, args.bnb_4bit_quant_storage_dtype)
bnb_config = BitsAndBytesConfig(
load_in_4bit=args.use_4bit_quantization,
bnb_4bit_quant_type=args.bnb_4bit_quant_type,
bnb_4bit_compute_dtype=compute_dtype,
bnb_4bit_use_double_quant=args.use_nested_quant,
bnb_4bit_quant_storage=quant_storage_dtype,
)
if compute_dtype == torch.float16 and args.use_4bit_quantization:
major, _ = torch.cuda.get_device_capability()
if major >= 8:
print("=" * 80)
print("Your GPU supports bfloat16, you can accelerate training with the argument --bf16")
print("=" * 80)
elif args.use_8bit_quantization:
bnb_config = BitsAndBytesConfig(load_in_8bit=args.use_8bit_quantization)
if args.use_unsloth:
# Load model
model, _ = FastLanguageModel.from_pretrained(
model_name=args.model_name_or_path,
max_seq_length=data_args.max_seq_length,
dtype=None,
load_in_4bit=args.use_4bit_quantization,
)
else:
torch_dtype = (
quant_storage_dtype if quant_storage_dtype and quant_storage_dtype.is_floating_point else torch.float32
)
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
quantization_config=bnb_config,
trust_remote_code=True,
attn_implementation="flash_attention_2" if args.use_flash_attn else "eager",
torch_dtype=torch_dtype,
)
peft_config = None
chat_template = None
if args.use_peft_lora and not args.use_unsloth:
peft_config = LoraConfig(
lora_alpha=args.lora_alpha,
lora_dropout=args.lora_dropout,
r=args.lora_r,
bias="none",
task_type="CAUSAL_LM",
target_modules=args.lora_target_modules.split(",")
if args.lora_target_modules != "all-linear"
else args.lora_target_modules,
)
special_tokens = None
chat_template = None
if args.chat_template_format == "chatml":
special_tokens = ChatmlSpecialTokens
chat_template = DEFAULT_CHATML_CHAT_TEMPLATE
elif args.chat_template_format == "zephyr":
special_tokens = ZephyrSpecialTokens
chat_template = DEFAULT_ZEPHYR_CHAT_TEMPLATE
if special_tokens is not None:
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path,
pad_token=special_tokens.pad_token.value,
bos_token=special_tokens.bos_token.value,
eos_token=special_tokens.eos_token.value,
additional_special_tokens=special_tokens.list(),
trust_remote_code=True,
)
tokenizer.chat_template = chat_template
# make embedding resizing configurable?
# Transformers 4.46.0+ defaults uses mean_resizing by default, which fails with QLoRA + FSDP because the
# embedding could be on meta device, therefore, we set mean_resizing=False in that case (i.e. the status quo
# ante). See https://github.com/huggingface/accelerate/issues/1620.
uses_transformers_4_46 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.46.0")
        uses_fsdp = os.environ.get("ACCELERATE_USE_FSDP", "false").lower() == "true"
if (bnb_config is not None) and uses_fsdp and uses_transformers_4_46:
model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=8, mean_resizing=False)
else:
model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=8)
else:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
if args.use_unsloth:
# Do model patching and add fast LoRA weights
model = FastLanguageModel.get_peft_model(
model,
lora_alpha=args.lora_alpha,
lora_dropout=args.lora_dropout,
r=args.lora_r,
target_modules=args.lora_target_modules.split(",")
if args.lora_target_modules != "all-linear"
else args.lora_target_modules,
use_gradient_checkpointing=training_args.gradient_checkpointing,
random_state=training_args.seed,
max_seq_length=data_args.max_seq_length,
)
return model, peft_config, tokenizer
| peft/examples/sft/utils.py/0 | {
"file_path": "peft/examples/sft/utils.py",
"repo_id": "peft",
"token_count": 3623
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import importlib
import os
from typing import Optional
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoTokenizer,
)
from .config import PeftConfig
from .peft_model import (
PeftModel,
PeftModelForCausalLM,
PeftModelForFeatureExtraction,
PeftModelForQuestionAnswering,
PeftModelForSeq2SeqLM,
PeftModelForSequenceClassification,
PeftModelForTokenClassification,
)
from .utils.constants import TOKENIZER_CONFIG_NAME
from .utils.other import check_file_exists_on_hf_hub
MODEL_TYPE_TO_PEFT_MODEL_MAPPING: dict[str, type[PeftModel]] = {
"SEQ_CLS": PeftModelForSequenceClassification,
"SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM,
"CAUSAL_LM": PeftModelForCausalLM,
"TOKEN_CLS": PeftModelForTokenClassification,
"QUESTION_ANS": PeftModelForQuestionAnswering,
"FEATURE_EXTRACTION": PeftModelForFeatureExtraction,
}
class _BaseAutoPeftModel:
_target_class = None
_target_peft_class = None
def __init__(self, *args, **kwargs):
# For consistency with transformers: https://github.com/huggingface/transformers/blob/91d7df58b6537d385e90578dac40204cb550f706/src/transformers/models/auto/auto_factory.py#L400
raise EnvironmentError( # noqa: UP024
f"{self.__class__.__name__} is designed to be instantiated "
f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
f"`{self.__class__.__name__}.from_config(config)` methods."
)
@classmethod
def from_pretrained(
cls,
pretrained_model_name_or_path,
adapter_name: str = "default",
is_trainable: bool = False,
config: Optional[PeftConfig] = None,
revision: Optional[str] = None,
**kwargs,
):
r"""
A wrapper around all the preprocessing steps a user needs to perform in order to load a PEFT model. The kwargs
are passed along to `PeftConfig` that automatically takes care of filtering the kwargs of the Hub methods and
the config object init.
"""
peft_config = PeftConfig.from_pretrained(pretrained_model_name_or_path, revision=revision, **kwargs)
base_model_path = peft_config.base_model_name_or_path
base_model_revision = peft_config.revision
task_type = getattr(peft_config, "task_type", None)
if cls._target_class is not None:
target_class = cls._target_class
elif cls._target_class is None and task_type is not None:
# this is only in the case where we use `AutoPeftModel`
raise ValueError(
"Cannot use `AutoPeftModel` with a task type, please use a specific class for your task type. (e.g. `AutoPeftModelForCausalLM` for `task_type='CAUSAL_LM'`)"
)
if task_type is not None:
expected_target_class = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[task_type]
if cls._target_peft_class.__name__ != expected_target_class.__name__:
raise ValueError(
f"Expected target PEFT class: {expected_target_class.__name__}, but you have asked for: {cls._target_peft_class.__name__}"
" make sure that you are loading the correct model for your task type."
)
elif task_type is None and getattr(peft_config, "auto_mapping", None) is not None:
auto_mapping = getattr(peft_config, "auto_mapping", None)
base_model_class = auto_mapping["base_model_class"]
parent_library_name = auto_mapping["parent_library"]
parent_library = importlib.import_module(parent_library_name)
target_class = getattr(parent_library, base_model_class)
else:
raise ValueError(
"Cannot infer the auto class from the config, please make sure that you are loading the correct model for your task type."
)
base_model = target_class.from_pretrained(base_model_path, revision=base_model_revision, **kwargs)
tokenizer_exists = False
if os.path.exists(os.path.join(pretrained_model_name_or_path, TOKENIZER_CONFIG_NAME)):
tokenizer_exists = True
else:
token = kwargs.get("token", None)
if token is None:
token = kwargs.get("use_auth_token", None)
tokenizer_exists = check_file_exists_on_hf_hub(
repo_id=pretrained_model_name_or_path,
filename=TOKENIZER_CONFIG_NAME,
revision=revision,
repo_type=kwargs.get("repo_type", None),
token=token,
)
if tokenizer_exists:
tokenizer = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path, trust_remote_code=kwargs.get("trust_remote_code", False)
)
base_model.resize_token_embeddings(len(tokenizer))
return cls._target_peft_class.from_pretrained(
base_model,
pretrained_model_name_or_path,
adapter_name=adapter_name,
is_trainable=is_trainable,
config=config,
**kwargs,
)
class AutoPeftModel(_BaseAutoPeftModel):
_target_class = None
_target_peft_class = PeftModel
class AutoPeftModelForCausalLM(_BaseAutoPeftModel):
_target_class = AutoModelForCausalLM
_target_peft_class = PeftModelForCausalLM
class AutoPeftModelForSeq2SeqLM(_BaseAutoPeftModel):
_target_class = AutoModelForSeq2SeqLM
_target_peft_class = PeftModelForSeq2SeqLM
class AutoPeftModelForSequenceClassification(_BaseAutoPeftModel):
_target_class = AutoModelForSequenceClassification
_target_peft_class = PeftModelForSequenceClassification
class AutoPeftModelForTokenClassification(_BaseAutoPeftModel):
_target_class = AutoModelForTokenClassification
_target_peft_class = PeftModelForTokenClassification
class AutoPeftModelForQuestionAnswering(_BaseAutoPeftModel):
_target_class = AutoModelForQuestionAnswering
_target_peft_class = PeftModelForQuestionAnswering
class AutoPeftModelForFeatureExtraction(_BaseAutoPeftModel):
_target_class = AutoModel
_target_peft_class = PeftModelForFeatureExtraction
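# Example (a minimal sketch, not part of the original module; the repository name is a placeholder
# for any adapter repo or local directory containing an adapter_config.json):
#
#     from peft import AutoPeftModelForCausalLM
#
#     model = AutoPeftModelForCausalLM.from_pretrained("your-org/your-peft-adapter")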
| peft/src/peft/auto.py/0 | {
"file_path": "peft/src/peft/auto.py",
"repo_id": "peft",
"token_count": 2874
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from .layer import AdaLoraLayer
class SVDQuantLinear(torch.nn.Module, AdaLoraLayer):
def __init__(
self,
base_layer,
adapter_name,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
init_lora_weights: bool = True,
**kwargs,
) -> None:
super().__init__()
AdaLoraLayer.__init__(self, base_layer)
# self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
# for backwards compatibility
self.quant_linear_module = base_layer
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
def forward(self, x: torch.Tensor) -> torch.Tensor:
result = self.quant_linear_module(x)
if self.disable_adapters:
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
lora_E = self.lora_E[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
ranknum = self.ranknum[active_adapter] + 1e-5
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = self._cast_input_dtype(x, torch.float32)
output = (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum
# TODO: here, the dtype conversion is applied on the *whole expression*,
# not the intermediate result, unlike for SVDLinear8bitLT and
# SVDLinear4bit, is that correct?
if requires_conversion:
output = output.to(expected_dtype)
result += output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "adalora." + rep
| peft/src/peft/tuners/adalora/gptq.py/0 | {
"file_path": "peft/src/peft/tuners/adalora/gptq.py",
"repo_id": "peft",
"token_count": 1154
} |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Literal, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class BoneConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a [`BoneModel`].
Args:
r (`int`):
The rank of Bone across different layers. It is best to set 'r' to an even number; otherwise, the default
initialization method will not work.
target_modules (`Optional[Union[List[str], str]]`):
The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
names will be replaced. When passing a string, a regex match will be performed. When passing a list of
strings, either an exact match will be performed or it is checked if the name of the module ends with any
of the passed strings. If this is specified as 'all-linear', then all linear modules are chosen, excluding
the output layer. If this is not specified, modules will be chosen according to the model architecture. If
the architecture is not known, an error will be raised -- in this case, you should specify the target
modules manually.
exclude_modules (`Optional[Union[List[str], str]]`):
The names of the modules to not apply the adapter. When passing a string, a regex match will be performed.
When passing a list of strings, either an exact match will be performed or it is checked if the name of the
module ends with any of the passed strings.
init_weights (bool | Literal["bat"]):
Different initializations correspond to different Bone variants. By default, setting True uses the Bone
structure, while "bat" selects the Bat structure.
layers_to_transform (`Union[List[int], int]`):
The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
that are specified in this list. If a single integer is passed, it will apply the transformations on the
layer at this index.
layers_pattern (`str`):
The layer pattern name, used only if `layers_to_transform` is different from `None`.
rank_pattern (`dict`):
The mapping from layer names or regexp expression to ranks which are different from the default rank
specified by `r`.
modules_to_save (`List[str]`):
List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
"""
r: int = field(
default=64,
metadata={
"help": "The rank of Bone across different layers.",
"note": "It is best to set 'r' to an even number; otherwise, the default initialization method will not work.",
},
)
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": "List of module names or regex expression of the module names to replace with Bone.",
"example": "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' ",
},
)
exclude_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={"help": "List of module names or regex expression of the module names to exclude from Bone."},
)
init_weights: bool | Literal["bat"] = field(
default=True,
metadata={
"help": (
"Whether to initialize the weights of the Bone layers with their default initialization. Don't change "
"this setting, except if you know exactly what you're doing."
),
},
)
layers_to_transform: Optional[Union[list[int], int]] = field(
default=None,
metadata={
"help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
},
)
layers_pattern: Optional[str] = field(
default=None,
metadata={
"help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern."
},
)
bias: str = field(default="none", metadata={"help": "Bias type for Bone. Can be 'none', 'all' or 'Bone_only'"})
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": "List of modules apart from Bone layers to be set as trainable and saved in the final checkpoint. "
"For example, in Sequence Classification or Token Classification tasks, "
"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.BONE
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
self.exclude_modules = (
set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules
)
# if target_modules is a regex expression, then layers_to_transform should be None
if isinstance(self.target_modules, str) and self.layers_to_transform is not None:
raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.")
# if target_modules is a regex expression, then layers_pattern should be None
if isinstance(self.target_modules, str) and self.layers_pattern is not None:
raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.")
| peft/src/peft/tuners/bone/config.py/0 | {
"file_path": "peft/src/peft/tuners/bone/config.py",
"repo_id": "peft",
"token_count": 2283
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class IA3Config(PeftConfig):
"""
This is the configuration class to store the configuration of a [`IA3Model`].
Args:
target_modules (`Optional[Union[List[str], str]]`):
The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
names will be replaced. When passing a string, a regex match will be performed. When passing a list of
strings, either an exact match will be performed or it is checked if the name of the module ends with any
of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen,
excluding the output layer. If this is not specified, modules will be chosen according to the model
architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
the target modules manually.
exclude_modules (`Optional[Union[List[str], str]]`):
The names of the modules to not apply the adapter. When passing a string, a regex match will be performed.
When passing a list of strings, either an exact match will be performed or it is checked if the name of the
module ends with any of the passed strings.
feedforward_modules (`Optional[Union[List[str], str]]`):
The names of the modules to be treated as feedforward modules, as in the original paper. These modules will
have (IA)³ vectors multiplied to the input, instead of the output. `feedforward_modules` must be a name or
a subset of names present in `target_modules`.
fan_in_fan_out (`bool`):
Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses
`Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.
modules_to_save (`Optional[List[str]]`):
List of modules apart from (IA)³ layers to be set as trainable and saved in the final checkpoint.
init_ia3_weights (`bool`):
Whether to initialize the vectors in the (IA)³ layers, defaults to `True`. Setting this to `False` is
discouraged.
"""
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"List of module names or regex expression of the module names to replace with (IA)³."
"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'."
"This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
"If not specified, modules will be chosen according to the model architecture, If the architecture is "
"not known, an error will be raised -- in this case, you should specify the target modules manually."
),
},
)
exclude_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={"help": "List of module names or regex expression of the module names to exclude from (IA)³."},
)
feedforward_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": "List of module names or a regex expression of module names which are feedforward"
"For example, ['output.dense']"
},
)
fan_in_fan_out: bool = field(
default=False,
metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
)
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": "List of modules apart from (IA)^3 layers to be set as trainable and saved in the final checkpoint. "
"For example, in Sequence Classification or Token Classification tasks, "
"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
},
)
init_ia3_weights: bool = field(
default=True,
metadata={"help": "Whether to initialize the vectors in the (IA)^3 layers."},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.IA3
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
self.exclude_modules = (
set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules
)
self.feedforward_modules = (
set(self.feedforward_modules) if isinstance(self.feedforward_modules, list) else self.feedforward_modules
)
# check if feedforward_modules is a subset of target_modules. run the check only if both are sets
if isinstance(self.feedforward_modules, set) and isinstance(self.target_modules, set):
if not self.feedforward_modules.issubset(self.target_modules):
raise ValueError("`feedforward_modules` should be a subset of `target_modules`")
| peft/src/peft/tuners/ia3/config.py/0 | {
"file_path": "peft/src/peft/tuners/ia3/config.py",
"repo_id": "peft",
"token_count": 2119
} |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
import torch
from peft.import_utils import is_aqlm_available
from peft.tuners.lora.layer import LoraLayer
from peft.tuners.tuners_utils import BaseTunerLayer
if is_aqlm_available():
from aqlm import QuantizedLinear
class AqlmLoraLinear(torch.nn.Module, LoraLayer):
def __init__(
self,
base_layer,
adapter_name: str,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
init_lora_weights: bool = True,
use_rslora: bool = False,
use_dora: bool = False,
lora_bias: bool = False,
**kwargs,
):
if use_dora:
raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False")
super().__init__()
LoraLayer.__init__(self, base_layer)
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
init_lora_weights=init_lora_weights,
use_rslora=use_rslora,
use_dora=use_dora,
lora_bias=lora_bias,
)
def forward(self, x: torch.Tensor):
# note: logic differs from default Linear because merging is not supported
result = self.base_layer(x)
if self.disable_adapters:
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = self._cast_input_dtype(x, lora_A.weight.dtype)
output = lora_B(lora_A(dropout(x)))
if requires_conversion:
output = output.to(expected_dtype)
output = output * scaling
result += output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "lora." + rep
# TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102
# def reset_lora_parameters(self, adapter_name):
# if adapter_name in self.lora_A.keys():
# torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight)
# torch.nn.init.zeros_(self.lora_B[adapter_name].weight)
def dispatch_aqlm(
target: torch.nn.Module,
adapter_name: str,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_aqlm_available() and isinstance(target_base_layer, QuantizedLinear):
new_module = AqlmLoraLinear(target, adapter_name, **kwargs)
target.qweight = target_base_layer.codes
return new_module
| peft/src/peft/tuners/lora/aqlm.py/0 | {
"file_path": "peft/src/peft/tuners/lora/aqlm.py",
"repo_id": "peft",
"token_count": 1647
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from typing import Any, Optional, Union
from torch import nn
from tqdm import tqdm
from peft.tuners import adalora, loha, lokr, lora, oft
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import (
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
ModulesToSaveWrapper,
PeftType,
_get_submodules,
get_auto_gptq_quant_linear,
)
# Collection of constants used for all tuners
COMPATIBLE_TUNER_TYPES = (PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.ADALORA, PeftType.OFT)
PREFIXES = [lora.LoraModel.prefix, lokr.LoKrModel.prefix, loha.LoHaModel.prefix, oft.OFTModel.prefix]
Configs = Union[lora.LoraConfig, loha.LoHaConfig, lokr.LoKrConfig, adalora.AdaLoraConfig, oft.OFTConfig]
Layers = (lora.layer.LoraLayer, loha.layer.LoHaLayer, lokr.layer.LoKrLayer, adalora.layer.AdaLoraLayer, oft.OFTLayer)
class MixedModel(BaseTuner):
"""
A class that allows to mix different types of adapters in a single model.
Note: This class should usually not be initialized directly. Instead, use `get_peft_model` with the argument
`mixed=True`.
Args:
model (:obj:`nn.Module`):
The model to be tuned.
config (:obj:`PeftConfig`):
The config of the model to be tuned. The adapter type must be compatible.
adapter_name (:obj:`str`):
The name of the first adapter.
"""
def __init__(self, model: nn.Module, config: Configs, adapter_name: str) -> None:
super().__init__(model, config, adapter_name)
def _check_new_adapter_config(self, config: Configs) -> None:
"""
A helper method to check the config when a new adapter is being added.
Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
"""
if not isinstance(config, Configs.__args__):
raise ValueError(
f"{self.__class__.__name__} only supports {COMPATIBLE_TUNER_TYPES} configs, but got {type(config)}."
)
biases = (getattr(config, "bias", None) for config in self.peft_config)
biases = [bias for bias in biases if bias not in (None, "none")]
if len(biases) > 1:
raise ValueError(
f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, "
"set bias to 'none' for all adapters."
)
@staticmethod
def _check_target_module_exists(config: Configs, key: str):
return check_target_module_exists(config, key)
def _create_and_replace(
self,
config: Configs,
*args: Any,
**kwargs: Any,
) -> None:
if isinstance(config, adalora.AdaLoraConfig):
adalora.AdaLoraModel._create_and_replace(self, config, *args, **kwargs)
elif isinstance(config, lora.LoraConfig):
lora.LoraModel._create_and_replace(self, config, *args, **kwargs)
elif isinstance(config, loha.LoHaConfig):
loha.LoHaModel._create_and_replace(self, config, *args, **kwargs)
elif isinstance(config, lokr.LoKrConfig):
lokr.LoKrModel._create_and_replace(self, config, *args, **kwargs)
elif isinstance(config, oft.OFTConfig):
oft.OFTModel._create_and_replace(self, config, *args, **kwargs)
else:
raise ValueError(f"Unsupported config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.")
def _replace_module(self, parent, child_name, new_module, child) -> None:
setattr(parent, child_name, new_module)
# It's not necessary to set requires_grad here, as that is handled by
# _mark_only_adapters_as_trainable
# child layer wraps the original module, unpack it
if hasattr(child, "base_layer"):
child = child.get_base_layer()
elif hasattr(child, "quant_linear_module"):
# TODO maybe not necessary to have special treatment?
child = child.quant_linear_module
if not hasattr(new_module, "base_layer"):
new_module.weight = child.weight
if hasattr(child, "bias"):
new_module.bias = child.bias
if getattr(child, "state", None) is not None:
if hasattr(new_module, "base_layer"):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
# dispatch to correct device
for name, module in new_module.named_modules():
if any(prefix in name for prefix in PREFIXES):
module.to(child.weight.device)
if "ranknum" in name:
module.to(child.weight.device)
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for n, p in model.named_parameters():
if not any(prefix in n for prefix in PREFIXES):
p.requires_grad = False
for active_adapter in self.active_adapters:
bias = getattr(self.peft_config[active_adapter], "bias", "none")
if bias == "none":
continue
if bias == "all":
for n, p in model.named_parameters():
if "bias" in n:
p.requires_grad = True
elif bias == "lora_only":
# TODO: check if this is needed for other supported types
for m in model.modules():
if isinstance(m, Layers) and hasattr(m, "bias") and m.bias is not None:
m.bias.requires_grad = True
else:
raise ValueError(f"Requested bias: {bias}, is not implemented.")
@staticmethod
def _create_new_module(config, adapter_name, target, **kwargs):
gptq_quantization_config = kwargs.get("gptq_quantization_config", None)
AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
if (gptq_quantization_config is not None) or (AutoGPTQQuantLinear is not None):
raise ValueError(f"GPTQ quantization not supported for {config.peft_type.value} (yet).")
loaded_in_8bit = kwargs.pop("loaded_in_8bit", False)
loaded_in_4bit = kwargs.pop("loaded_in_4bit", False)
if loaded_in_8bit or loaded_in_4bit:
raise ValueError(f"8bit and 4bit quantization not supported for {config.peft_type.value} (yet).")
if isinstance(config, adalora.AdaLoraConfig):
new_module = adalora.AdaLoraModel._create_new_module(config, adapter_name, target, **kwargs)
elif isinstance(config, lora.LoraConfig):
new_module = lora.LoraModel._create_new_module(config, adapter_name, target, **kwargs)
elif isinstance(config, loha.LoHaConfig):
new_module = loha.LoHaModel._create_new_module(config, adapter_name, target, **kwargs)
elif isinstance(config, lokr.LoKrConfig):
new_module = lokr.LoKrModel._create_new_module(config, adapter_name, target, **kwargs)
elif isinstance(config, oft.OFTConfig):
new_module = oft.OFTModel._create_new_module(config, adapter_name, target, **kwargs)
else:
raise ValueError(f"Unknown config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.")
return new_module
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
if name == "model": # see #1892: prevent infinite recursion if class is not initialized
raise
return getattr(self.model, name)
def _set_adapter_layers(self, enabled=True):
for module in self.model.modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self):
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self):
for active_adapter in self.active_adapters:
val = getattr(self.peft_config[active_adapter], "bias", "none")
if val != "none":
msg = (
f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same "
"output as the the base model would without adaption."
)
warnings.warn(msg)
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name: Union[str, list[str]]) -> None:
for module in self.model.modules():
if isinstance(module, Layers):
if module.merged:
warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
raise ValueError("Please specify `target_modules` in `peft_config`")
peft_config.target_modules = set(
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]]
)
return peft_config
def _unload_and_optionally_merge(
self,
merge=True,
progressbar: bool = False,
safe_merge: bool = False,
adapter_names: Optional[list[str]] = None,
):
if merge:
if getattr(self.model, "quantization_method", None) == "gptq":
raise ValueError("Cannot merge layers when the model is gptq quantized")
def merge_recursively(module):
# helper function to recursively merge the base_layer of the target
path = []
layer = module
while hasattr(layer, "base_layer"):
path.append(layer)
layer = layer.base_layer
for layer_before, layer_after in zip(path[:-1], path[1:]):
layer_after.merge(safe_merge=safe_merge, adapter_names=adapter_names)
layer_before.base_layer = layer_after.base_layer
module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
key_list = [key for key, _ in self.model.named_modules() if not any(prefix in key for prefix in PREFIXES)]
desc = "Unloading " + ("and merging " if merge else "") + "model"
for key in tqdm(key_list, disable=not progressbar, desc=desc):
try:
parent, target, target_name = _get_submodules(self.model, key)
except AttributeError:
continue
if hasattr(target, "base_layer"):
if merge:
merge_recursively(target)
self._replace_module(parent, target_name, target.get_base_layer(), target)
elif isinstance(target, ModulesToSaveWrapper):
# save any additional trainable modules part of `modules_to_save`
new_module = target.modules_to_save[target.active_adapter]
if hasattr(new_module, "base_layer"):
# check if the module is itself a tuner layer
if merge:
new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
new_module = new_module.get_base_layer()
setattr(parent, target_name, new_module)
return self.model
def add_weighted_adapter(self, *args: Any, **kwargs: Any) -> None:
raise NotImplementedError(f"Weighted adapters are not supported for {self.__class__.__name__} (yet).")
def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None:
"""
Deletes an existing adapter.
Args:
adapter_name (Union[str, list[str]]): Name of the adapter(s) to delete.
"""
if isinstance(adapter_name, str):
adapter_names = [adapter_name]
else:
adapter_names = adapter_name
mismatched = set(adapter_names) - set(self.peft_config.keys())
if mismatched:
raise ValueError(
f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}"
)
for adapter_name in adapter_names:
del self.peft_config[adapter_name]
key_list = [key for key, _ in self.model.named_modules() if not any(prefix in key for prefix in PREFIXES)]
new_adapter = None
for key in key_list:
_, target, _ = _get_submodules(self.model, key)
if isinstance(target, BaseTunerLayer):
target.delete_adapter(adapter_name)
if new_adapter is None:
new_adapter = target.active_adapters[:]
self.active_adapter = new_adapter or []
def merge_and_unload(
self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
) -> nn.Module:
r"""
This method merges the layers into the base model. This is needed if someone wants to use the base model as a
standalone model.
Args:
progressbar (`bool`):
whether to show a progressbar indicating the unload and merge process
safe_merge (`bool`):
whether to activate the safe merging check to check if there is any potential Nan in the adapter
weights
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
return self._unload_and_optionally_merge(
progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names
)
def unload(self) -> nn.Module:
"""
Gets back the base model by removing all the adapter modules without merging. This gives back the original base
model.
"""
return self._unload_and_optionally_merge(merge=False)
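# Illustrative call pattern for the two helpers above (a sketch; `mixed_model` stands
# for an already-created instance of this class):
#
#     base_model = mixed_model.merge_and_unload(progressbar=True)  # merge adapters into the base weights
#     base_model = mixed_model.unload()                            # drop adapters without merging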
def generate(self, *args: Any, **kwargs: Any):
return self.model.generate(*args, **kwargs)
| peft/src/peft/tuners/mixed/model.py/0 | {
"file_path": "peft/src/peft/tuners/mixed/model.py",
"repo_id": "peft",
"token_count": 6670
} |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
import torch
from transformers import AutoModelForCausalLM
from peft import AutoPeftModelForCausalLM, LoraConfig, PeftConfig, PeftModel, get_peft_model
PEFT_MODELS_TO_TEST = [("peft-internal-testing/test-lora-subfolder", "test")]
class PeftHubFeaturesTester(unittest.TestCase):
def test_subfolder(self):
r"""
Test if subfolder argument works as expected
"""
for model_id, subfolder in PEFT_MODELS_TO_TEST:
config = PeftConfig.from_pretrained(model_id, subfolder=subfolder)
model = AutoModelForCausalLM.from_pretrained(
config.base_model_name_or_path,
)
model = PeftModel.from_pretrained(model, model_id, subfolder=subfolder)
assert isinstance(model, PeftModel)
class TestLocalModel:
def test_local_model_saving_no_warning(self, recwarn, tmp_path):
# When the model is saved, the library checks for vocab changes by
# examining `config.json` in the model path.
# However, previously, those checks only covered huggingface hub models.
# This test makes sure that the local `config.json` is checked as well.
# If `save_pretrained` could not find the file, it will issue a warning.
model_id = "facebook/opt-125m"
model = AutoModelForCausalLM.from_pretrained(model_id)
local_dir = tmp_path / model_id
model.save_pretrained(local_dir)
del model
base_model = AutoModelForCausalLM.from_pretrained(local_dir)
peft_config = LoraConfig()
peft_model = get_peft_model(base_model, peft_config)
peft_model.save_pretrained(local_dir)
for warning in recwarn.list:
assert "Could not find a config file" not in warning.message.args[0]
class TestBaseModelRevision:
def test_save_and_load_base_model_revision(self, tmp_path):
r"""
Test saving a PeftModel with a base model revision and loading with AutoPeftModel to recover the same base
model
"""
lora_config = LoraConfig(r=8, lora_alpha=16, lora_dropout=0.0)
test_inputs = torch.arange(10).reshape(-1, 1)
base_model_id = "peft-internal-testing/tiny-random-BertModel"
revision = "v2.0.0"
base_model_revision = AutoModelForCausalLM.from_pretrained(base_model_id, revision=revision).eval()
peft_model_revision = get_peft_model(base_model_revision, lora_config, revision=revision)
output_revision = peft_model_revision(test_inputs).logits
# sanity check: the model without revision should be different
base_model_no_revision = AutoModelForCausalLM.from_pretrained(base_model_id, revision="main").eval()
# we need a copy of the config because otherwise, we are changing in-place the `revision` of the previous config and model
lora_config_no_revision = copy.deepcopy(lora_config)
lora_config_no_revision.revision = "main"
peft_model_no_revision = get_peft_model(base_model_no_revision, lora_config_no_revision, revision="main")
output_no_revision = peft_model_no_revision(test_inputs).logits
assert not torch.allclose(output_no_revision, output_revision)
# check that if we save and load the model, the output corresponds to the one with revision
peft_model_revision.save_pretrained(tmp_path / "peft_model_revision")
peft_model_revision_loaded = AutoPeftModelForCausalLM.from_pretrained(tmp_path / "peft_model_revision").eval()
assert peft_model_revision_loaded.peft_config["default"].revision == revision
output_revision_loaded = peft_model_revision_loaded(test_inputs).logits
assert torch.allclose(output_revision, output_revision_loaded)
def test_load_different_peft_and_base_model_revision(self, tmp_path):
r"""
Test loading an AutoPeftModel from the hub where the base model revision and peft revision differ
"""
base_model_id = "hf-internal-testing/tiny-random-BertModel"
base_model_revision = None
peft_model_id = "peft-internal-testing/tiny-random-BertModel-lora"
peft_model_revision = "v1.2.3"
peft_model = AutoPeftModelForCausalLM.from_pretrained(peft_model_id, revision=peft_model_revision).eval()
assert peft_model.peft_config["default"].base_model_name_or_path == base_model_id
assert peft_model.peft_config["default"].revision == base_model_revision
| peft/tests/test_hub_features.py/0 | {
"file_path": "peft/tests/test_hub_features.py",
"repo_id": "peft",
"token_count": 1945
} |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This test file is for tests specific to VeRA, since VeRA has some specific challenges due to the shared weights.
import os
import pytest
import torch
from safetensors import safe_open
from torch import nn
from peft import PeftModel, VeraConfig, get_peft_model
from peft.utils import infer_device
class MLP(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.relu = nn.ReLU()
self.lin0 = nn.Linear(10, 20, bias=bias)
self.lin1 = nn.Linear(20, 20, bias=bias) # lin1 and lin2 have same shape
self.lin2 = nn.Linear(20, 20, bias=bias)
self.lin3 = nn.Linear(20, 2, bias=bias)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = self.lin0(X)
X = self.relu(X)
X = self.lin1(X)
X = self.relu(X)
X = self.lin2(X)
X = self.relu(X)
X = self.lin3(X)
X = self.sm(X)
return X
class TestVera:
@pytest.fixture
def mlp(self):
torch.manual_seed(0)
model = MLP()
return model
@pytest.fixture
def mlp_same_prng(self, mlp):
torch.manual_seed(0)
config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False)
# creates a default VeRA adapter
peft_model = get_peft_model(mlp, config)
config2 = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False)
peft_model.add_adapter("other", config2)
return peft_model
def test_multiple_adapters_same_prng_weights(self, mlp_same_prng):
# we can have multiple adapters with the same prng key, in which case the weights should be shared
assert (
mlp_same_prng.base_model.model.lin1.vera_A["default"]
is mlp_same_prng.base_model.model.lin1.vera_A["other"]
)
assert (
mlp_same_prng.base_model.model.lin1.vera_B["default"]
is mlp_same_prng.base_model.model.lin1.vera_B["other"]
)
assert (
mlp_same_prng.base_model.model.lin2.vera_A["default"]
is mlp_same_prng.base_model.model.lin2.vera_A["other"]
)
assert (
mlp_same_prng.base_model.model.lin2.vera_B["default"]
is mlp_same_prng.base_model.model.lin2.vera_B["other"]
)
input = torch.randn(5, 10)
mlp_same_prng.set_adapter("default")
output_default = mlp_same_prng(input)
mlp_same_prng.set_adapter("other")
output_other = mlp_same_prng(input)
assert not torch.allclose(output_default, output_other, atol=1e-3, rtol=1e-3)
def test_multiple_adapters_different_prng_raises(self):
# we cannot have multiple adapters with different prng keys
model = MLP()
config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False)
# creates a default VeRA adapter
peft_model = get_peft_model(model, config)
config2 = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, projection_prng_key=123)
msg = (
r"Vera PRNG initialisation key must be the same for all adapters. Got config.projection_prng_key=123 but "
r"previous config had 0"
)
with pytest.raises(ValueError, match=msg):
peft_model.add_adapter("other", config2)
def test_multiple_adapters_save_load_save_projection_true(self, mlp_same_prng, tmp_path):
# check saving and loading works with multiple adapters and saved projection weights
torch.manual_seed(0)
input = torch.randn(5, 10)
mlp_same_prng.set_adapter("default")
output_default = mlp_same_prng(input)
mlp_same_prng.set_adapter("other")
output_other = mlp_same_prng(input)
# sanity check
assert not torch.allclose(output_default, output_other, atol=1e-3, rtol=1e-3)
save_path = tmp_path / "vera"
mlp_same_prng.save_pretrained(save_path)
assert os.path.exists(save_path / "adapter_config.json")
assert os.path.exists(save_path / "other" / "adapter_config.json")
torch.manual_seed(0)
mlp = MLP()
peft_model = PeftModel.from_pretrained(mlp, save_path)
peft_model.load_adapter(save_path / "other", "other")
peft_model.set_adapter("default")
output_default_loaded = peft_model(input)
peft_model.set_adapter("other")
output_other_loaded = peft_model(input)
assert torch.allclose(output_default, output_default_loaded, atol=1e-3, rtol=1e-3)
assert torch.allclose(output_other, output_other_loaded, atol=1e-3, rtol=1e-3)
def test_multiple_adapters_save_load_save_projection_false(self, mlp, tmp_path):
# check saving and loading works with multiple adapters without saved projection weights
torch.manual_seed(1)
config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False)
# creates a default VeRA adapter
peft_model = get_peft_model(mlp, config, adapter_name="first")
config2 = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False)
peft_model.add_adapter("second", config2)
input = torch.randn(5, 10)
peft_model.set_adapter("first")
output_first = peft_model(input)
peft_model.set_adapter("second")
output_second = peft_model(input)
# sanity check
assert not torch.allclose(output_first, output_second, atol=1e-3, rtol=1e-3)
save_path = tmp_path / "vera"
peft_model.save_pretrained(save_path)
assert os.path.exists(save_path / "first" / "adapter_config.json")
assert os.path.exists(save_path / "second" / "adapter_config.json")
torch.manual_seed(0)
mlp = MLP()
peft_model = PeftModel.from_pretrained(mlp, save_path / "first", adapter_name="first")
peft_model.load_adapter(save_path / "second", "second")
peft_model.set_adapter("first")
output_first_loaded = peft_model(input)
peft_model.set_adapter("second")
output_second_loaded = peft_model(input)
assert torch.allclose(output_first, output_first_loaded, atol=1e-3, rtol=1e-3)
assert torch.allclose(output_second, output_second_loaded, atol=1e-3, rtol=1e-3)
def test_multiple_adapters_save_projection_true_contains_vera_A_vera_B(self, mlp_same_prng, tmp_path):
# check that the state_dicts do contain the shared projection weights (vera_A / vera_B)
save_path = tmp_path / "vera"
mlp_same_prng.save_pretrained(save_path)
sd_default = {}
with safe_open(save_path / "adapter_model.safetensors", framework="pt", device="cpu") as f:
for key in f.keys():
sd_default[key] = f.get_tensor(key)
assert any("vera_A" in key for key in sd_default)
assert any("vera_B" in key for key in sd_default)
# default rank for VeRA is 256
assert sd_default["base_model.vera_A"].shape == (256, 20)
assert sd_default["base_model.vera_B"].shape == (20, 256)
sd_other = {}
with safe_open(save_path / "other" / "adapter_model.safetensors", framework="pt", device="cpu") as f:
for key in f.keys():
sd_other[key] = f.get_tensor(key)
assert any("vera_A" in key for key in sd_other)
assert any("vera_B" in key for key in sd_other)
assert sd_other["base_model.vera_A"].shape == (256, 20)
assert sd_other["base_model.vera_B"].shape == (20, 256)
def test_multiple_adapters_save_projection_false_contains_no_vera_A_vera_B(self, mlp, tmp_path):
torch.manual_seed(1)
config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False)
# creates a default VeRA adapter
peft_model = get_peft_model(mlp, config, adapter_name="first")
config2 = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False)
peft_model.add_adapter("second", config2)
save_path = tmp_path / "vera"
peft_model.save_pretrained(save_path)
sd_default = {}
with safe_open(save_path / "first" / "adapter_model.safetensors", framework="pt", device="cpu") as f:
for key in f.keys():
sd_default[key] = f.get_tensor(key)
assert not any("vera_A" in key for key in sd_default)
assert not any("vera_B" in key for key in sd_default)
sd_other = {}
with safe_open(save_path / "second" / "adapter_model.safetensors", framework="pt", device="cpu") as f:
for key in f.keys():
sd_other[key] = f.get_tensor(key)
assert not any("vera_A" in key for key in sd_other)
assert not any("vera_B" in key for key in sd_other)
def test_vera_A_vera_B_share_memory(self, mlp_same_prng):
vera_A = mlp_same_prng.vera_A["default"]
vera_B = mlp_same_prng.vera_B["default"]
# these tensors should share the same data
assert vera_A.data_ptr() == mlp_same_prng.base_model.model.lin1.vera_A["default"].data_ptr()
assert vera_B.data_ptr() == mlp_same_prng.base_model.model.lin1.vera_B["default"].data_ptr()
assert vera_A.data_ptr() == mlp_same_prng.base_model.model.lin2.vera_A["default"].data_ptr()
assert vera_B.data_ptr() == mlp_same_prng.base_model.model.lin2.vera_B["default"].data_ptr()
# sanity check: these tensors shouldn't share the same data
assert vera_A.data_ptr() != vera_B.data_ptr()
def test_vera_lambda_dont_share_memory(self, mlp_same_prng):
# sanity check: these tensors shouldn't share the same data
assert (
mlp_same_prng.base_model.model.lin1.vera_lambda_b["default"].data_ptr()
!= mlp_same_prng.base_model.model.lin1.vera_lambda_b["other"].data_ptr()
)
assert (
mlp_same_prng.base_model.model.lin1.vera_lambda_b["default"].data_ptr()
!= mlp_same_prng.base_model.model.lin2.vera_lambda_b["default"].data_ptr()
)
assert (
mlp_same_prng.base_model.model.lin1.vera_lambda_b["other"].data_ptr()
!= mlp_same_prng.base_model.model.lin2.vera_lambda_b["other"].data_ptr()
)
assert (
mlp_same_prng.base_model.model.lin1.vera_lambda_d["default"].data_ptr()
!= mlp_same_prng.base_model.model.lin1.vera_lambda_d["other"].data_ptr()
)
assert (
mlp_same_prng.base_model.model.lin1.vera_lambda_d["default"].data_ptr()
!= mlp_same_prng.base_model.model.lin2.vera_lambda_d["default"].data_ptr()
)
assert (
mlp_same_prng.base_model.model.lin1.vera_lambda_d["other"].data_ptr()
!= mlp_same_prng.base_model.model.lin2.vera_lambda_d["other"].data_ptr()
)
def test_vera_different_shapes(self, mlp):
config = VeraConfig(target_modules=["lin0", "lin3"], init_weights=False)
mlp_different_shapes = get_peft_model(mlp, config)
vera_A = mlp_different_shapes.vera_A["default"]
vera_B = mlp_different_shapes.vera_B["default"]
# sanity check
assert mlp.lin0.base_layer.weight.shape != mlp.lin3.base_layer.weight.shape
# lin0 has the largest output dimension, lin3 has the largest input dimension
# vera_A should have the shape of (rank, largest_in), vera_B should have the shape of (largest_out, rank)
assert vera_A.shape == (config.r, mlp.lin3.in_features)
assert vera_B.shape == (mlp.lin0.out_features, config.r)
# should not raise
input = torch.randn(5, 10)
mlp_different_shapes(input)
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16])
def test_vera_dtypes(self, dtype):
if dtype == torch.bfloat16:
# skip if bf16 is not supported on hardware, see #1872
is_xpu = infer_device() == "xpu"
is_cuda_bf16 = torch.cuda.is_available() and torch.cuda.is_bf16_supported()
if not (is_xpu or is_cuda_bf16):
pytest.skip("bfloat16 not supported on this system, skipping the test")
model = MLP().to(dtype)
config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False)
peft_model = get_peft_model(model, config)
inputs = torch.randn(5, 10).to(dtype)
output = peft_model(inputs) # should not raise
assert output.dtype == dtype
| peft/tests/test_vera.py/0 | {
"file_path": "peft/tests/test_vera.py",
"repo_id": "peft",
"token_count": 5925
} |
# PyTorch Image Models
- [What's New](#whats-new)
- [Introduction](#introduction)
- [Models](#models)
- [Features](#features)
- [Results](#results)
- [Getting Started (Documentation)](#getting-started-documentation)
- [Train, Validation, Inference Scripts](#train-validation-inference-scripts)
- [Awesome PyTorch Resources](#awesome-pytorch-resources)
- [Licenses](#licenses)
- [Citing](#citing)
## What's New
### Feb 1, 2025
* FYI PyTorch 2.6 & Python 3.13 are tested and working w/ current main and released version of `timm`
### Jan 27, 2025
* Add Kron Optimizer (PSGD w/ Kronecker-factored preconditioner)
* Code from https://github.com/evanatyourservice/kron_torch
* See also https://sites.google.com/site/lixilinx/home/psgd
### Jan 19, 2025
* Fix loading of LeViT safetensor weights, remove conversion code which should have been deactivated
* Add 'SO150M' ViT weights trained with SBB recipes, decent results, but not optimal shape for ImageNet-12k/1k pretrain/ft
* `vit_so150m_patch16_reg4_gap_256.sbb_e250_in12k_ft_in1k` - 86.7% top-1
* `vit_so150m_patch16_reg4_gap_384.sbb_e250_in12k_ft_in1k` - 87.4% top-1
* `vit_so150m_patch16_reg4_gap_256.sbb_e250_in12k`
* Misc typing, typo, etc. cleanup
* 1.0.14 release to get above LeViT fix out
### Jan 9, 2025
* Add support to train and validate in pure `bfloat16` or `float16`
* `wandb` project name arg added by https://github.com/caojiaolong, use arg.experiment for name
* Fix old issue w/ checkpoint saving not working on filesystem w/o hard-link support (e.g. FUSE fs mounts)
* 1.0.13 release
### Jan 6, 2025
* Add `torch.utils.checkpoint.checkpoint()` wrapper in `timm.models` that defaults `use_reentrant=False`, unless `TIMM_REENTRANT_CKPT=1` is set in env.
### Dec 31, 2024
* `convnext_nano` 384x384 ImageNet-12k pretrain & fine-tune. https://huggingface.co/models?search=convnext_nano%20r384
* Add AIM-v2 encoders from https://github.com/apple/ml-aim, see on Hub: https://huggingface.co/models?search=timm%20aimv2
* Add PaliGemma2 encoders from https://github.com/google-research/big_vision to existing PaliGemma, see on Hub: https://huggingface.co/models?search=timm%20pali2
* Add missing L/14 DFN2B 39B CLIP ViT, `vit_large_patch14_clip_224.dfn2b_s39b`
* Fix existing `RmsNorm` layer & fn to match standard formulation, use PT 2.5 impl when possible. Move old impl to `SimpleNorm` layer, it's LN w/o centering or bias. There were only two `timm` models using it, and they have been updated.
* Allow override of `cache_dir` arg for model creation
* Pass through `trust_remote_code` for HF datasets wrapper
* `inception_next_atto` model added by creator
* Adan optimizer caution, and Lamb decoupled weight decay options
* Some feature_info metadata fixed by https://github.com/brianhou0208
* All OpenCLIP and JAX (CLIP, SigLIP, Pali, etc) model weights that used load time remapping were given their own HF Hub instances so that they work with `hf-hub:` based loading, and thus will work with new Transformers `TimmWrapperModel`
### Nov 28, 2024
* More optimizers
* Add MARS optimizer (https://arxiv.org/abs/2411.10438, https://github.com/AGI-Arena/MARS)
* Add LaProp optimizer (https://arxiv.org/abs/2002.04839, https://github.com/Z-T-WANG/LaProp-Optimizer)
* Add masking from 'Cautious Optimizers' (https://arxiv.org/abs/2411.16085, https://github.com/kyleliang919/C-Optim) to Adafactor, Adafactor Big Vision, AdamW (legacy), Adopt, Lamb, LaProp, Lion, NadamW, RMSPropTF, SGDW
* Cleanup some docstrings and type annotations re optimizers and factory
* Add MobileNet-V4 Conv Medium models pretrained on in12k and fine-tuned in1k @ 384x384
* https://huggingface.co/timm/mobilenetv4_conv_medium.e250_r384_in12k_ft_in1k
* https://huggingface.co/timm/mobilenetv4_conv_medium.e250_r384_in12k
* https://huggingface.co/timm/mobilenetv4_conv_medium.e180_ad_r384_in12k
* https://huggingface.co/timm/mobilenetv4_conv_medium.e180_r384_in12k
* Add small cs3darknet, quite good for the speed
* https://huggingface.co/timm/cs3darknet_focus_s.ra4_e3600_r256_in1k
### Nov 12, 2024
* Optimizer factory refactor
* New factory works by registering optimizers using an OptimInfo dataclass w/ some key traits
* Add `list_optimizers`, `get_optimizer_class`, `get_optimizer_info` to reworked `create_optimizer_v2` fn to explore optimizers, get info or class
* deprecate `optim.optim_factory`, move fns to `optim/_optim_factory.py` and `optim/_param_groups.py` and encourage import via `timm.optim`
* Add Adopt (https://github.com/iShohei220/adopt) optimizer
* Add 'Big Vision' variant of Adafactor (https://github.com/google-research/big_vision/blob/main/big_vision/optax.py) optimizer
* Fix original Adafactor to pick better factorization dims for convolutions
* Tweak LAMB optimizer with some improvements in torch.where functionality since original, refactor clipping a bit
* dynamic img size support in vit, deit, eva improved to support resize from non-square patch grids, thanks https://github.com/wojtke
### Oct 31, 2024
Add a set of new very well trained ResNet & ResNet-V2 18/34 (basic block) weights. See https://huggingface.co/blog/rwightman/resnet-trick-or-treat
### Oct 19, 2024
* Cleanup torch amp usage to avoid cuda specific calls, merge support for Ascend (NPU) devices from [MengqingCao](https://github.com/MengqingCao) that should work now in PyTorch 2.5 w/ new device extension autoloading feature. Tested Intel Arc (XPU) in Pytorch 2.5 too and it (mostly) worked.
### Oct 16, 2024
* Fix error on importing from deprecated path `timm.models.registry`, increased priority of existing deprecation warnings to be visible
* Port weights of InternViT-300M (https://huggingface.co/OpenGVLab/InternViT-300M-448px) to `timm` as `vit_intern300m_patch14_448`
### Oct 14, 2024
* Pre-activation (ResNetV2) version of 18/18d/34/34d ResNet model defs added by request (weights pending)
* Release 1.0.10
### Oct 11, 2024
* MambaOut (https://github.com/yuweihao/MambaOut) model & weights added. A cheeky take on SSM vision models w/o the SSM (essentially ConvNeXt w/ gating). A mix of original weights + custom variations & weights.
|model |img_size|top1 |top5 |param_count|
|---------------------------------------------------------------------------------------------------------------------|--------|------|------|-----------|
|[mambaout_base_plus_rw.sw_e150_r384_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_r384_in12k_ft_in1k)|384 |87.506|98.428|101.66 |
|[mambaout_base_plus_rw.sw_e150_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_in12k_ft_in1k)|288 |86.912|98.236|101.66 |
|[mambaout_base_plus_rw.sw_e150_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_in12k_ft_in1k)|224 |86.632|98.156|101.66 |
|[mambaout_base_tall_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_tall_rw.sw_e500_in1k) |288 |84.974|97.332|86.48 |
|[mambaout_base_wide_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_wide_rw.sw_e500_in1k) |288 |84.962|97.208|94.45 |
|[mambaout_base_short_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_short_rw.sw_e500_in1k) |288 |84.832|97.27 |88.83 |
|[mambaout_base.in1k](http://huggingface.co/timm/mambaout_base.in1k) |288 |84.72 |96.93 |84.81 |
|[mambaout_small_rw.sw_e450_in1k](http://huggingface.co/timm/mambaout_small_rw.sw_e450_in1k) |288 |84.598|97.098|48.5 |
|[mambaout_small.in1k](http://huggingface.co/timm/mambaout_small.in1k) |288 |84.5 |96.974|48.49 |
|[mambaout_base_wide_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_wide_rw.sw_e500_in1k) |224 |84.454|96.864|94.45 |
|[mambaout_base_tall_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_tall_rw.sw_e500_in1k) |224 |84.434|96.958|86.48 |
|[mambaout_base_short_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_short_rw.sw_e500_in1k) |224 |84.362|96.952|88.83 |
|[mambaout_base.in1k](http://huggingface.co/timm/mambaout_base.in1k) |224 |84.168|96.68 |84.81 |
|[mambaout_small.in1k](http://huggingface.co/timm/mambaout_small.in1k) |224 |84.086|96.63 |48.49 |
|[mambaout_small_rw.sw_e450_in1k](http://huggingface.co/timm/mambaout_small_rw.sw_e450_in1k) |224 |84.024|96.752|48.5 |
|[mambaout_tiny.in1k](http://huggingface.co/timm/mambaout_tiny.in1k) |288 |83.448|96.538|26.55 |
|[mambaout_tiny.in1k](http://huggingface.co/timm/mambaout_tiny.in1k) |224 |82.736|96.1 |26.55 |
|[mambaout_kobe.in1k](http://huggingface.co/timm/mambaout_kobe.in1k) |288 |81.054|95.718|9.14 |
|[mambaout_kobe.in1k](http://huggingface.co/timm/mambaout_kobe.in1k) |224 |79.986|94.986|9.14 |
|[mambaout_femto.in1k](http://huggingface.co/timm/mambaout_femto.in1k) |288 |79.848|95.14 |7.3 |
|[mambaout_femto.in1k](http://huggingface.co/timm/mambaout_femto.in1k) |224 |78.87 |94.408|7.3 |
* SigLIP SO400M ViT fine-tunes on ImageNet-1k @ 378x378, added 378x378 option for existing SigLIP 384x384 models
* [vit_so400m_patch14_siglip_378.webli_ft_in1k](https://huggingface.co/timm/vit_so400m_patch14_siglip_378.webli_ft_in1k) - 89.42 top-1
* [vit_so400m_patch14_siglip_gap_378.webli_ft_in1k](https://huggingface.co/timm/vit_so400m_patch14_siglip_gap_378.webli_ft_in1k) - 89.03
* SigLIP SO400M ViT encoder from recent multi-lingual (i18n) variant, patch16 @ 256x256 (https://huggingface.co/timm/ViT-SO400M-16-SigLIP-i18n-256). OpenCLIP update pending.
* Add two ConvNeXt 'Zepto' models & weights (one w/ overlapped stem and one w/ patch stem). Uses RMSNorm, smaller than previous 'Atto', 2.2M params.
* [convnext_zepto_rms_ols.ra4_e3600_r224_in1k](https://huggingface.co/timm/convnext_zepto_rms_ols.ra4_e3600_r224_in1k) - 73.20 top-1 @ 224
* [convnext_zepto_rms.ra4_e3600_r224_in1k](https://huggingface.co/timm/convnext_zepto_rms.ra4_e3600_r224_in1k) - 72.81 @ 224
### Sept 2024
* Add a suite of tiny test models for improved unit tests and niche low-resource applications (https://huggingface.co/blog/rwightman/timm-tiny-test)
* Add MobileNetV4-Conv-Small (0.5x) model (https://huggingface.co/posts/rwightman/793053396198664)
* [mobilenetv4_conv_small_050.e3000_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small_050.e3000_r224_in1k) - 65.81 top-1 @ 256, 64.76 @ 224
* Add MobileNetV3-Large variants trained with MNV4 Small recipe
* [mobilenetv3_large_150d.ra4_e3600_r256_in1k](http://hf.co/timm/mobilenetv3_large_150d.ra4_e3600_r256_in1k) - 81.81 @ 320, 80.94 @ 256
* [mobilenetv3_large_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv3_large_100.ra4_e3600_r224_in1k) - 77.16 @ 256, 76.31 @ 224
### Aug 21, 2024
* Updated SBB ViT models trained on ImageNet-12k and fine-tuned on ImageNet-1k, challenging quite a number of much larger, slower models
| model | top1 | top5 | param_count | img_size |
| -------------------------------------------------- | ------ | ------ | ----------- | -------- |
| [vit_mediumd_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k) | 87.438 | 98.256 | 64.11 | 384 |
| [vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k) | 86.608 | 97.934 | 64.11 | 256 |
| [vit_betwixt_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k) | 86.594 | 98.02 | 60.4 | 384 |
| [vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k) | 85.734 | 97.61 | 60.4 | 256 |
* MobileNet-V1 1.25, EfficientNet-B1, & ResNet50-D weights w/ MNV4 baseline challenge recipe
| model | top1 | top5 | param_count | img_size |
|--------------------------------------------------------------------------------------------------------------------------|--------|--------|-------------|----------|
| [resnet50d.ra4_e3600_r224_in1k](http://hf.co/timm/resnet50d.ra4_e3600_r224_in1k) | 81.838 | 95.922 | 25.58 | 288 |
| [efficientnet_b1.ra4_e3600_r240_in1k](http://hf.co/timm/efficientnet_b1.ra4_e3600_r240_in1k) | 81.440 | 95.700 | 7.79 | 288 |
| [resnet50d.ra4_e3600_r224_in1k](http://hf.co/timm/resnet50d.ra4_e3600_r224_in1k) | 80.952 | 95.384 | 25.58 | 224 |
| [efficientnet_b1.ra4_e3600_r240_in1k](http://hf.co/timm/efficientnet_b1.ra4_e3600_r240_in1k) | 80.406 | 95.152 | 7.79 | 240 |
| [mobilenetv1_125.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_125.ra4_e3600_r224_in1k) | 77.600 | 93.804 | 6.27 | 256 |
| [mobilenetv1_125.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_125.ra4_e3600_r224_in1k) | 76.924 | 93.234 | 6.27 | 224 |
* Add SAM2 (HieraDet) backbone arch & weight loading support
* Add Hiera Small weights trained w/ abswin pos embed on in12k & fine-tuned on 1k
|model |top1 |top5 |param_count|
|---------------------------------|------|------|-----------|
|hiera_small_abswin_256.sbb2_e200_in12k_ft_in1k |84.912|97.260|35.01 |
|hiera_small_abswin_256.sbb2_pd_e200_in12k_ft_in1k |84.560|97.106|35.01 |
### Aug 8, 2024
* Add RDNet ('DenseNets Reloaded', https://arxiv.org/abs/2403.19588), thanks [Donghyun Kim](https://github.com/dhkim0225)
### July 28, 2024
* Add `mobilenet_edgetpu_v2_m` weights w/ `ra4` mnv4-small based recipe. 80.1% top-1 @ 224 and 80.7 @ 256.
* Release 1.0.8
### July 26, 2024
* More MobileNet-v4 weights, ImageNet-12k pretrain w/ fine-tunes, and anti-aliased ConvLarge models
| model |top1 |top1_err|top5 |top5_err|param_count|img_size|
|--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------|
| [mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k)|84.99 |15.01 |97.294|2.706 |32.59 |544 |
| [mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k)|84.772|15.228 |97.344|2.656 |32.59 |480 |
| [mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k)|84.64 |15.36 |97.114|2.886 |32.59 |448 |
| [mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k)|84.314|15.686 |97.102|2.898 |32.59 |384 |
| [mobilenetv4_conv_aa_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e600_r384_in1k) |83.824|16.176 |96.734|3.266 |32.59 |480 |
| [mobilenetv4_conv_aa_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e600_r384_in1k) |83.244|16.756 |96.392|3.608 |32.59 |384 |
| [mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k)|82.99 |17.01 |96.67 |3.33 |11.07 |320 |
| [mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k)|82.364|17.636 |96.256|3.744 |11.07 |256 |
* Impressive MobileNet-V1 and EfficientNet-B0 baseline challenges (https://huggingface.co/blog/rwightman/mobilenet-baselines)
| model |top1 |top1_err|top5 |top5_err|param_count|img_size|
|--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------|
| [efficientnet_b0.ra4_e3600_r224_in1k](http://hf.co/timm/efficientnet_b0.ra4_e3600_r224_in1k) |79.364|20.636 |94.754|5.246 |5.29 |256 |
| [efficientnet_b0.ra4_e3600_r224_in1k](http://hf.co/timm/efficientnet_b0.ra4_e3600_r224_in1k) |78.584|21.416 |94.338|5.662 |5.29 |224 |
| [mobilenetv1_100h.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100h.ra4_e3600_r224_in1k) |76.596|23.404 |93.272|6.728 |5.28 |256 |
| [mobilenetv1_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100.ra4_e3600_r224_in1k) |76.094|23.906 |93.004|6.996 |4.23 |256 |
| [mobilenetv1_100h.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100h.ra4_e3600_r224_in1k) |75.662|24.338 |92.504|7.496 |5.28 |224 |
| [mobilenetv1_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100.ra4_e3600_r224_in1k) |75.382|24.618 |92.312|7.688 |4.23 |224 |
* Prototype of `set_input_size()` added to vit and swin v1/v2 models to allow changing image size, patch size, window size after model creation (see the sketch after this list).
* Improved support in swin for different size handling; in addition to `set_input_size`, `always_partition` and `strict_img_size` args have been added to `__init__` to allow more flexible input size constraints
* Fix out of order indices info for intermediate 'Getter' feature wrapper, check out of range indices for same.
* Add several `tiny` < .5M param models for testing that are actually trained on ImageNet-1k
|model |top1 |top1_err|top5 |top5_err|param_count|img_size|crop_pct|
|----------------------------|------|--------|------|--------|-----------|--------|--------|
|test_efficientnet.r160_in1k |47.156|52.844 |71.726|28.274 |0.36 |192 |1.0 |
|test_byobnet.r160_in1k |46.698|53.302 |71.674|28.326 |0.46 |192 |1.0 |
|test_efficientnet.r160_in1k |46.426|53.574 |70.928|29.072 |0.36 |160 |0.875 |
|test_byobnet.r160_in1k |45.378|54.622 |70.572|29.428 |0.46 |160 |0.875 |
|test_vit.r160_in1k|42.0 |58.0 |68.664|31.336 |0.37 |192 |1.0 |
|test_vit.r160_in1k|40.822|59.178 |67.212|32.788 |0.37 |160 |0.875 |
* Fix vit reg token init, thanks [Promisery](https://github.com/Promisery)
* Other misc fixes
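A minimal sketch of the `set_input_size()` prototype mentioned in the list above (the `img_size` keyword and the chosen model/resolution are illustrative; the prototype API may still change):

```python
import torch
import timm

model = timm.create_model('vit_base_patch16_224', pretrained=False)
# adapt the patch grid / position embedding for a new input resolution
model.set_input_size(img_size=(384, 384))
out = model(torch.randn(1, 3, 384, 384))
print(out.shape)  # torch.Size([1, 1000])
```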
### June 24, 2024
* 3 more MobileNetV4 hybrid weights with different MQA weight init scheme
| model |top1 |top1_err|top5 |top5_err|param_count|img_size|
|--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------|
| [mobilenetv4_hybrid_large.ix_e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.ix_e600_r384_in1k) |84.356|15.644 |96.892 |3.108 |37.76 |448 |
| [mobilenetv4_hybrid_large.ix_e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.ix_e600_r384_in1k) |83.990|16.010 |96.702 |3.298 |37.76 |384 |
| [mobilenetv4_hybrid_medium.ix_e550_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r384_in1k) |83.394|16.606 |96.760|3.240 |11.07 |448 |
| [mobilenetv4_hybrid_medium.ix_e550_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r384_in1k) |82.968|17.032 |96.474|3.526 |11.07 |384 |
| [mobilenetv4_hybrid_medium.ix_e550_r256_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r256_in1k) |82.492|17.508 |96.278|3.722 |11.07 |320 |
| [mobilenetv4_hybrid_medium.ix_e550_r256_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r256_in1k) |81.446|18.554 |95.704|4.296 |11.07 |256 |
* florence2 weight loading in DaViT model
### June 12, 2024
* MobileNetV4 models and initial set of `timm` trained weights added:
| model |top1 |top1_err|top5 |top5_err|param_count|img_size|
|--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------|
| [mobilenetv4_hybrid_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.e600_r384_in1k) |84.266|15.734 |96.936 |3.064 |37.76 |448 |
| [mobilenetv4_hybrid_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.e600_r384_in1k) |83.800|16.200 |96.770 |3.230 |37.76 |384 |
| [mobilenetv4_conv_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_large.e600_r384_in1k) |83.392|16.608 |96.622 |3.378 |32.59 |448 |
| [mobilenetv4_conv_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_large.e600_r384_in1k) |82.952|17.048 |96.266 |3.734 |32.59 |384 |
| [mobilenetv4_conv_large.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_large.e500_r256_in1k) |82.674|17.326 |96.31 |3.69 |32.59 |320 |
| [mobilenetv4_conv_large.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_large.e500_r256_in1k) |81.862|18.138 |95.69 |4.31 |32.59 |256 |
| [mobilenetv4_hybrid_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e500_r224_in1k) |81.276|18.724 |95.742|4.258 |11.07 |256 |
| [mobilenetv4_conv_medium.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r256_in1k) |80.858|19.142 |95.768|4.232 |9.72 |320 |
| [mobilenetv4_hybrid_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e500_r224_in1k) |80.442|19.558 |95.38 |4.62 |11.07 |224 |
| [mobilenetv4_conv_blur_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_blur_medium.e500_r224_in1k) |80.142|19.858 |95.298|4.702 |9.72 |256 |
| [mobilenetv4_conv_medium.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r256_in1k) |79.928|20.072 |95.184|4.816 |9.72 |256 |
| [mobilenetv4_conv_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r224_in1k) |79.808|20.192 |95.186|4.814 |9.72 |256 |
| [mobilenetv4_conv_blur_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_blur_medium.e500_r224_in1k) |79.438|20.562 |94.932|5.068 |9.72 |224 |
| [mobilenetv4_conv_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r224_in1k) |79.094|20.906 |94.77 |5.23 |9.72 |224 |
| [mobilenetv4_conv_small.e2400_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e2400_r224_in1k) |74.616|25.384 |92.072|7.928 |3.77 |256 |
| [mobilenetv4_conv_small.e1200_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e1200_r224_in1k) |74.292|25.708 |92.116|7.884 |3.77 |256 |
| [mobilenetv4_conv_small.e2400_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e2400_r224_in1k) |73.756|26.244 |91.422|8.578 |3.77 |224 |
| [mobilenetv4_conv_small.e1200_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e1200_r224_in1k) |73.454|26.546 |91.34 |8.66 |3.77 |224 |
* Apple MobileCLIP (https://arxiv.org/pdf/2311.17049, FastViT and ViT-B) image tower model support & weights added (part of OpenCLIP support).
* ViTamin (https://arxiv.org/abs/2404.02132) CLIP image tower model & weights added (part of OpenCLIP support).
* OpenAI CLIP Modified ResNet image tower modelling & weight support (via ByobNet). Refactor AttentionPool2d.
### May 14, 2024
* Support loading PaliGemma jax weights into SigLIP ViT models with average pooling.
* Add Hiera models from Meta (https://github.com/facebookresearch/hiera).
* Add `normalize=` flag for transforms, return non-normalized torch.Tensor with original dtype (for `chug`)
* Version 1.0.3 release
### May 11, 2024
* `Searching for Better ViT Baselines (For the GPU Poor)` weights and vit variants released. Exploring model shapes between Tiny and Base.
| model | top1 | top5 | param_count | img_size |
| -------------------------------------------------- | ------ | ------ | ----------- | -------- |
| [vit_mediumd_patch16_reg4_gap_256.sbb_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb_in12k_ft_in1k) | 86.202 | 97.874 | 64.11 | 256 |
| [vit_betwixt_patch16_reg4_gap_256.sbb_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in12k_ft_in1k) | 85.418 | 97.48 | 60.4 | 256 |
| [vit_mediumd_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_mediumd_patch16_rope_reg1_gap_256.sbb_in1k) | 84.322 | 96.812 | 63.95 | 256 |
| [vit_betwixt_patch16_rope_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_rope_reg4_gap_256.sbb_in1k) | 83.906 | 96.684 | 60.23 | 256 |
| [vit_base_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_base_patch16_rope_reg1_gap_256.sbb_in1k) | 83.866 | 96.67 | 86.43 | 256 |
| [vit_medium_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_rope_reg1_gap_256.sbb_in1k) | 83.81 | 96.824 | 38.74 | 256 |
| [vit_betwixt_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in1k) | 83.706 | 96.616 | 60.4 | 256 |
| [vit_betwixt_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg1_gap_256.sbb_in1k) | 83.628 | 96.544 | 60.4 | 256 |
| [vit_medium_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_reg4_gap_256.sbb_in1k) | 83.47 | 96.622 | 38.88 | 256 |
| [vit_medium_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_reg1_gap_256.sbb_in1k) | 83.462 | 96.548 | 38.88 | 256 |
| [vit_little_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_little_patch16_reg4_gap_256.sbb_in1k) | 82.514 | 96.262 | 22.52 | 256 |
| [vit_wee_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_wee_patch16_reg1_gap_256.sbb_in1k) | 80.256 | 95.360 | 13.42 | 256 |
| [vit_pwee_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_pwee_patch16_reg1_gap_256.sbb_in1k) | 80.072 | 95.136 | 15.25 | 256 |
| [vit_mediumd_patch16_reg4_gap_256.sbb_in12k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb_in12k) | N/A | N/A | 64.11 | 256 |
| [vit_betwixt_patch16_reg4_gap_256.sbb_in12k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in12k) | N/A | N/A | 60.4 | 256 |
* AttentionExtract helper added to extract attention maps from `timm` models. See example in https://github.com/huggingface/pytorch-image-models/discussions/1232#discussioncomment-9320949
* `forward_intermediates()` API refined and added to more models including some ConvNets that have other extraction methods.
* 1017 of 1047 model architectures support `features_only=True` feature extraction. Remaining 34 architectures can be supported but based on priority requests.
* Remove torch.jit.script annotated functions including old JIT activations. They conflict with dynamo, and dynamo does a much better job when used.
### April 11, 2024
* Prepping for a long overdue 1.0 release, things have been stable for a while now.
* Significant feature that's been missing for a while: `features_only=True` support for ViT models with flat hidden states or non-std module layouts (so far covering `'vit_*', 'twins_*', 'deit*', 'beit*', 'mvitv2*', 'eva*', 'samvit_*', 'flexivit*'`)
* Above feature support achieved through a new `forward_intermediates()` API that can be used with a feature wrapping module or directly.
```python
import torch
import timm

model = timm.create_model('vit_base_patch16_224')
input = torch.randn(2, 3, 224, 224)
final_feat, intermediates = model.forward_intermediates(input)
output = model.forward_head(final_feat)  # pooling + classifier head

print(final_feat.shape)
# torch.Size([2, 197, 768])

for f in intermediates:
    print(f.shape)
# torch.Size([2, 768, 14, 14])
# torch.Size([2, 768, 14, 14])
# torch.Size([2, 768, 14, 14])
# torch.Size([2, 768, 14, 14])
# torch.Size([2, 768, 14, 14])
# torch.Size([2, 768, 14, 14])
# torch.Size([2, 768, 14, 14])
# torch.Size([2, 768, 14, 14])
# torch.Size([2, 768, 14, 14])
# torch.Size([2, 768, 14, 14])
# torch.Size([2, 768, 14, 14])
# torch.Size([2, 768, 14, 14])

print(output.shape)
# torch.Size([2, 1000])
```
```python
model = timm.create_model('eva02_base_patch16_clip_224', pretrained=True, img_size=512, features_only=True, out_indices=(-3, -2,))
output = model(torch.randn(2, 3, 512, 512))

for o in output:
    print(o.shape)
# torch.Size([2, 768, 32, 32])
# torch.Size([2, 768, 32, 32])
```
* TinyCLIP vision tower weights added, thx [Thien Tran](https://github.com/gau-nernst)
### Feb 19, 2024
* Next-ViT models added. Adapted from https://github.com/bytedance/Next-ViT
* HGNet and PP-HGNetV2 models added. Adapted from https://github.com/PaddlePaddle/PaddleClas by [SeeFun](https://github.com/seefun)
* Removed setup.py, moved to pyproject.toml based build supported by PDM
* Add updated model EMA impl using _for_each for less overhead
* Support device args in train script for non GPU devices
* Other misc fixes and small additions
* Min supported Python version increased to 3.8
* Release 0.9.16
## Introduction
Py**T**orch **Im**age **M**odels (`timm`) is a collection of image models, layers, utilities, optimizers, schedulers, data-loaders / augmentations, and reference training / validation scripts that aim to pull together a wide variety of SOTA models with the ability to reproduce ImageNet training results.
The work of many others is present here. I've tried to make sure all source material is acknowledged via links to github, arxiv papers, etc in the README, documentation, and code docstrings. Please let me know if I missed anything.
## Features
### Models
All model architecture families include variants with pretrained weights. There are specific model variants without any weights; it is NOT a bug. Help training new or better weights is always appreciated.
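For example, a minimal sketch of browsing and instantiating these architectures (the wildcard and model name below are illustrative):

```python
import timm

# list architectures matching a wildcard, restricted to those with pretrained weights
print(timm.list_models('convnext*', pretrained=True)[:5])

# create a model with pretrained weights and a custom classifier head
model = timm.create_model('convnext_tiny', pretrained=True, num_classes=10)
```

The architecture families currently included are listed below: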
* Aggregating Nested Transformers - https://arxiv.org/abs/2105.12723
* BEiT - https://arxiv.org/abs/2106.08254
* Big Transfer ResNetV2 (BiT) - https://arxiv.org/abs/1912.11370
* Bottleneck Transformers - https://arxiv.org/abs/2101.11605
* CaiT (Class-Attention in Image Transformers) - https://arxiv.org/abs/2103.17239
* CoaT (Co-Scale Conv-Attentional Image Transformers) - https://arxiv.org/abs/2104.06399
* CoAtNet (Convolution and Attention) - https://arxiv.org/abs/2106.04803
* ConvNeXt - https://arxiv.org/abs/2201.03545
* ConvNeXt-V2 - http://arxiv.org/abs/2301.00808
* ConViT (Soft Convolutional Inductive Biases Vision Transformers) - https://arxiv.org/abs/2103.10697
* CspNet (Cross-Stage Partial Networks) - https://arxiv.org/abs/1911.11929
* DeiT - https://arxiv.org/abs/2012.12877
* DeiT-III - https://arxiv.org/pdf/2204.07118.pdf
* DenseNet - https://arxiv.org/abs/1608.06993
* DLA - https://arxiv.org/abs/1707.06484
* DPN (Dual-Path Network) - https://arxiv.org/abs/1707.01629
* EdgeNeXt - https://arxiv.org/abs/2206.10589
* EfficientFormer - https://arxiv.org/abs/2206.01191
* EfficientNet (MBConvNet Family)
* EfficientNet NoisyStudent (B0-B7, L2) - https://arxiv.org/abs/1911.04252
* EfficientNet AdvProp (B0-B8) - https://arxiv.org/abs/1911.09665
* EfficientNet (B0-B7) - https://arxiv.org/abs/1905.11946
* EfficientNet-EdgeTPU (S, M, L) - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html
* EfficientNet V2 - https://arxiv.org/abs/2104.00298
* FBNet-C - https://arxiv.org/abs/1812.03443
* MixNet - https://arxiv.org/abs/1907.09595
* MNASNet B1, A1 (Squeeze-Excite), and Small - https://arxiv.org/abs/1807.11626
* MobileNet-V2 - https://arxiv.org/abs/1801.04381
* Single-Path NAS - https://arxiv.org/abs/1904.02877
* TinyNet - https://arxiv.org/abs/2010.14819
* EfficientViT (MIT) - https://arxiv.org/abs/2205.14756
* EfficientViT (MSRA) - https://arxiv.org/abs/2305.07027
* EVA - https://arxiv.org/abs/2211.07636
* EVA-02 - https://arxiv.org/abs/2303.11331
* FastViT - https://arxiv.org/abs/2303.14189
* FlexiViT - https://arxiv.org/abs/2212.08013
* FocalNet (Focal Modulation Networks) - https://arxiv.org/abs/2203.11926
* GCViT (Global Context Vision Transformer) - https://arxiv.org/abs/2206.09959
* GhostNet - https://arxiv.org/abs/1911.11907
* GhostNet-V2 - https://arxiv.org/abs/2211.12905
* gMLP - https://arxiv.org/abs/2105.08050
* GPU-Efficient Networks - https://arxiv.org/abs/2006.14090
* Halo Nets - https://arxiv.org/abs/2103.12731
* HGNet / HGNet-V2 - TBD
* HRNet - https://arxiv.org/abs/1908.07919
* InceptionNeXt - https://arxiv.org/abs/2303.16900
* Inception-V3 - https://arxiv.org/abs/1512.00567
* Inception-ResNet-V2 and Inception-V4 - https://arxiv.org/abs/1602.07261
* Lambda Networks - https://arxiv.org/abs/2102.08602
* LeViT (Vision Transformer in ConvNet's Clothing) - https://arxiv.org/abs/2104.01136
* MambaOut - https://arxiv.org/abs/2405.07992
* MaxViT (Multi-Axis Vision Transformer) - https://arxiv.org/abs/2204.01697
* MetaFormer (PoolFormer-v2, ConvFormer, CAFormer) - https://arxiv.org/abs/2210.13452
* MLP-Mixer - https://arxiv.org/abs/2105.01601
* MobileCLIP - https://arxiv.org/abs/2311.17049
* MobileNet-V3 (MBConvNet w/ Efficient Head) - https://arxiv.org/abs/1905.02244
* FBNet-V3 - https://arxiv.org/abs/2006.02049
* HardCoRe-NAS - https://arxiv.org/abs/2102.11646
* LCNet - https://arxiv.org/abs/2109.15099
* MobileNetV4 - https://arxiv.org/abs/2404.10518
* MobileOne - https://arxiv.org/abs/2206.04040
* MobileViT - https://arxiv.org/abs/2110.02178
* MobileViT-V2 - https://arxiv.org/abs/2206.02680
* MViT-V2 (Improved Multiscale Vision Transformer) - https://arxiv.org/abs/2112.01526
* NASNet-A - https://arxiv.org/abs/1707.07012
* NesT - https://arxiv.org/abs/2105.12723
* Next-ViT - https://arxiv.org/abs/2207.05501
* NFNet-F - https://arxiv.org/abs/2102.06171
* NF-RegNet / NF-ResNet - https://arxiv.org/abs/2101.08692
* PNasNet - https://arxiv.org/abs/1712.00559
* PoolFormer (MetaFormer) - https://arxiv.org/abs/2111.11418
* Pooling-based Vision Transformer (PiT) - https://arxiv.org/abs/2103.16302
* PVT-V2 (Improved Pyramid Vision Transformer) - https://arxiv.org/abs/2106.13797
* RDNet (DenseNets Reloaded) - https://arxiv.org/abs/2403.19588
* RegNet - https://arxiv.org/abs/2003.13678
* RegNetZ - https://arxiv.org/abs/2103.06877
* RepVGG - https://arxiv.org/abs/2101.03697
* RepGhostNet - https://arxiv.org/abs/2211.06088
* RepViT - https://arxiv.org/abs/2307.09283
* ResMLP - https://arxiv.org/abs/2105.03404
* ResNet/ResNeXt
* ResNet (v1b/v1.5) - https://arxiv.org/abs/1512.03385
* ResNeXt - https://arxiv.org/abs/1611.05431
* 'Bag of Tricks' / Gluon C, D, E, S variations - https://arxiv.org/abs/1812.01187
* Weakly-supervised (WSL) Instagram pretrained / ImageNet tuned ResNeXt101 - https://arxiv.org/abs/1805.00932
* Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet/ResNeXts - https://arxiv.org/abs/1905.00546
* ECA-Net (ECAResNet) - https://arxiv.org/abs/1910.03151v4
* Squeeze-and-Excitation Networks (SEResNet) - https://arxiv.org/abs/1709.01507
* ResNet-RS - https://arxiv.org/abs/2103.07579
* Res2Net - https://arxiv.org/abs/1904.01169
* ResNeSt - https://arxiv.org/abs/2004.08955
* ReXNet - https://arxiv.org/abs/2007.00992
* SelecSLS - https://arxiv.org/abs/1907.00837
* Selective Kernel Networks - https://arxiv.org/abs/1903.06586
* Sequencer2D - https://arxiv.org/abs/2205.01972
* Swin S3 (AutoFormerV2) - https://arxiv.org/abs/2111.14725
* Swin Transformer - https://arxiv.org/abs/2103.14030
* Swin Transformer V2 - https://arxiv.org/abs/2111.09883
* Transformer-iN-Transformer (TNT) - https://arxiv.org/abs/2103.00112
* TResNet - https://arxiv.org/abs/2003.13630
* Twins (Spatial Attention in Vision Transformers) - https://arxiv.org/pdf/2104.13840.pdf
* Visformer - https://arxiv.org/abs/2104.12533
* Vision Transformer - https://arxiv.org/abs/2010.11929
* ViTamin - https://arxiv.org/abs/2404.02132
* VOLO (Vision Outlooker) - https://arxiv.org/abs/2106.13112
* VovNet V2 and V1 - https://arxiv.org/abs/1911.06667
* Xception - https://arxiv.org/abs/1610.02357
* Xception (Modified Aligned, Gluon) - https://arxiv.org/abs/1802.02611
* Xception (Modified Aligned, TF) - https://arxiv.org/abs/1802.02611
* XCiT (Cross-Covariance Image Transformers) - https://arxiv.org/abs/2106.09681
### Optimizers
To see the full list of optimizers w/ descriptions: `timm.optim.list_optimizers(with_description=True)`
Included optimizers available via the `timm.optim.create_optimizer_v2` factory method (see the usage sketch after this list):
* `adabelief` an implementation of AdaBelief adapted from https://github.com/juntang-zhuang/Adabelief-Optimizer - https://arxiv.org/abs/2010.07468
* `adafactor` adapted from [FAIRSeq impl](https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py) - https://arxiv.org/abs/1804.04235
* `adafactorbv` adapted from [Big Vision](https://github.com/google-research/big_vision/blob/main/big_vision/optax.py) - https://arxiv.org/abs/2106.04560
* `adahessian` by [David Samuel](https://github.com/davda54/ada-hessian) - https://arxiv.org/abs/2006.00719
* `adamp` and `sgdp` by [Naver ClovAI](https://github.com/clovaai) - https://arxiv.org/abs/2006.08217
* `adan` an implementation of Adan adapted from https://github.com/sail-sg/Adan - https://arxiv.org/abs/2208.06677
* `adopt` ADOPT adapted from https://github.com/iShohei220/adopt - https://arxiv.org/abs/2411.02853
* `kron` PSGD w/ Kronecker-factored preconditioner from https://github.com/evanatyourservice/kron_torch - https://sites.google.com/site/lixilinx/home/psgd
* `lamb` an implementation of Lamb and LambC (w/ trust-clipping) cleaned up and modified to support use with XLA - https://arxiv.org/abs/1904.00962
* `laprop` optimizer from https://github.com/Z-T-WANG/LaProp-Optimizer - https://arxiv.org/abs/2002.04839
* `lars` an implementation of LARS and LARC (w/ trust-clipping) - https://arxiv.org/abs/1708.03888
* `lion` an implementation of Lion adapted from https://github.com/google/automl/tree/master/lion - https://arxiv.org/abs/2302.06675
* `lookahead` adapted from impl by [Liam](https://github.com/alphadl/lookahead.pytorch) - https://arxiv.org/abs/1907.08610
* `madgrad` an implementation of MADGRAD adapted from https://github.com/facebookresearch/madgrad - https://arxiv.org/abs/2101.11075
* `mars` MARS optimizer from https://github.com/AGI-Arena/MARS - https://arxiv.org/abs/2411.10438
* `nadam` an implementation of Adam w/ Nesterov momentum
* `nadamw` an implementation of AdamW (Adam w/ decoupled weight-decay) w/ Nesterov momentum. A simplified impl based on https://github.com/mlcommons/algorithmic-efficiency
* `novograd` by [Masashi Kimura](https://github.com/convergence-lab/novograd) - https://arxiv.org/abs/1905.11286
* `radam` by [Liyuan Liu](https://github.com/LiyuanLucasLiu/RAdam) - https://arxiv.org/abs/1908.03265
* `rmsprop_tf` adapted from PyTorch RMSProp by myself. Reproduces much improved Tensorflow RMSProp behaviour
* `sgdw` an implementation of SGD w/ decoupled weight-decay
* `fused<name>` optimizers by name with [NVIDIA Apex](https://github.com/NVIDIA/apex/tree/master/apex/optimizers) installed
* `bnb<name>` optimizers by name with [BitsAndBytes](https://github.com/TimDettmers/bitsandbytes) installed
* `cadamw`, `clion`, and more 'Cautious' optimizers from https://github.com/kyleliang919/C-Optim - https://arxiv.org/abs/2411.16085
* `adam`, `adamw`, `rmsprop`, `adadelta`, `adagrad`, and `sgd` pass through to `torch.optim` implementations
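A minimal sketch of the optimizer factory paired with one of the included LR schedulers (the model name, hyperparameter values, and epoch count below are illustrative only):
```py
import timm
from timm.optim import create_optimizer_v2
from timm.scheduler import CosineLRScheduler
model = timm.create_model('resnet50')
# Any optimizer name accepted by the factory can be passed via `opt`
optimizer = create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)
# Cosine decay with a short warmup, stepped once per epoch
scheduler = CosineLRScheduler(optimizer, t_initial=100, warmup_t=5, warmup_lr_init=1e-5)
for epoch in range(100):
    # ... run one training epoch here ...
    scheduler.step(epoch + 1)
```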
### Augmentations
* Random Erasing from [Zhun Zhong](https://github.com/zhunzhong07/Random-Erasing/blob/master/transforms.py) - https://arxiv.org/abs/1708.04896
* Mixup - https://arxiv.org/abs/1710.09412
* CutMix - https://arxiv.org/abs/1905.04899
* AutoAugment (https://arxiv.org/abs/1805.09501) and RandAugment (https://arxiv.org/abs/1909.13719) ImageNet configurations modeled after impl for EfficientNet training (https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py)
* AugMix w/ JSD loss, JSD w/ clean + augmented mixing support works with AutoAugment and RandAugment as well - https://arxiv.org/abs/1912.02781
* SplitBatchNorm - allows splitting batch norm layers between clean and augmented (auxiliary batch norm) data
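A minimal sketch of the Mixup/CutMix support in a training step (the hyperparameter values are illustrative, and the random tensors stand in for a real data loader batch):
```py
import torch
import timm
from timm.data import Mixup
from timm.loss import SoftTargetCrossEntropy
mixup_fn = Mixup(
    mixup_alpha=0.8, cutmix_alpha=1.0, prob=1.0, switch_prob=0.5,
    label_smoothing=0.1, num_classes=1000)
criterion = SoftTargetCrossEntropy()  # expects the soft targets Mixup produces
model = timm.create_model('resnet50', num_classes=1000)
images = torch.randn(8, 3, 224, 224)         # batch size must be even for Mixup
targets = torch.randint(0, 1000, (8,))
images, targets = mixup_fn(images, targets)  # targets become (8, 1000) soft labels
loss = criterion(model(images), targets)
```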
### Regularization
* DropPath aka "Stochastic Depth" - https://arxiv.org/abs/1603.09382
* DropBlock - https://arxiv.org/abs/1810.12890
* Blur Pooling - https://arxiv.org/abs/1904.11486
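Most of these can be enabled at model creation time; a minimal sketch (the rates are illustrative, and the exact kwargs supported vary by model family — `drop_block_rate` below assumes a ResNet):
```py
import timm
model = timm.create_model(
    'resnet50',
    drop_rate=0.1,        # classifier dropout
    drop_path_rate=0.05,  # stochastic depth (DropPath)
    drop_block_rate=0.1,  # DropBlock, supported by the ResNet family
)
# Blur Pooling is baked into specific model variants, e.g.:
blur_model = timm.create_model('resnetblur50')
```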
### Other
Several (less common) features that I often utilize in my projects are included. Many of their additions are the reason why I maintain my own set of models, instead of using others' via PIP:
* All models have a common default configuration interface and API for
* accessing/changing the classifier - `get_classifier` and `reset_classifier`
* doing a forward pass on just the features - `forward_features` (see [documentation](https://huggingface.co/docs/timm/feature_extraction))
* these make it easy to write consistent network wrappers that work with any of the models
* All models support multi-scale feature map extraction (feature pyramids) via create_model (see [documentation](https://huggingface.co/docs/timm/feature_extraction) and the usage sketch after this list)
* `create_model(name, features_only=True, out_indices=..., output_stride=...)`
* `out_indices` creation arg specifies which feature maps to return; these indices are 0-based and generally correspond to the `C(i + 1)` feature level.
* `output_stride` creation arg controls output stride of the network by using dilated convolutions. Most networks are stride 32 by default. Not all networks support this.
* feature map channel counts, reduction level (stride) can be queried AFTER model creation via the `.feature_info` member
* All models have a consistent pretrained weight loader that adapts the last linear layer if necessary, and converts from 3 to 1 channel input if desired
* High performance [reference training, validation, and inference scripts](https://huggingface.co/docs/timm/training_script) that work in several process/GPU modes:
* NVIDIA DDP w/ a single GPU per process, multiple processes with APEX present (AMP mixed-precision optional)
* PyTorch DistributedDataParallel w/ multi-gpu, single process (AMP disabled as it crashes when enabled)
* PyTorch w/ single GPU single process (AMP optional)
* A dynamic global pool implementation that allows selecting from average pooling, max pooling, average + max, or concat([average, max]) at model creation. All global pooling is adaptive average by default and compatible with pretrained weights.
* A 'Test Time Pool' wrapper that can wrap any of the included models and usually provides improved performance when doing inference with input images larger than the training size. Idea adapted from the original DPN implementation when I ported it (https://github.com/cypw/DPNs)
* Learning rate schedulers
* Ideas adopted from
* [AllenNLP schedulers](https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers)
* [FAIRseq lr_scheduler](https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler)
* SGDR: Stochastic Gradient Descent with Warm Restarts (https://arxiv.org/abs/1608.03983)
* Schedulers include `step`, `cosine` w/ restarts, `tanh` w/ restarts, `plateau`
* Space-to-Depth by [mrT23](https://github.com/mrT23/TResNet/blob/master/src/models/tresnet/layers/space_to_depth.py) (https://arxiv.org/abs/1801.04590) -- original paper?
* Adaptive Gradient Clipping (https://arxiv.org/abs/2102.06171, https://github.com/deepmind/deepmind-research/tree/master/nfnets)
* An extensive selection of channel and/or spatial attention modules:
* Bottleneck Transformer - https://arxiv.org/abs/2101.11605
* CBAM - https://arxiv.org/abs/1807.06521
* Effective Squeeze-Excitation (ESE) - https://arxiv.org/abs/1911.06667
* Efficient Channel Attention (ECA) - https://arxiv.org/abs/1910.03151
* Gather-Excite (GE) - https://arxiv.org/abs/1810.12348
* Global Context (GC) - https://arxiv.org/abs/1904.11492
* Halo - https://arxiv.org/abs/2103.12731
* Involution - https://arxiv.org/abs/2103.06255
* Lambda Layer - https://arxiv.org/abs/2102.08602
* Non-Local (NL) - https://arxiv.org/abs/1711.07971
* Squeeze-and-Excitation (SE) - https://arxiv.org/abs/1709.01507
* Selective Kernel (SK) - https://arxiv.org/abs/1903.06586
* Split (SPLAT) - https://arxiv.org/abs/2004.08955
* Shifted Window (SWIN) - https://arxiv.org/abs/2103.14030
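A minimal sketch tying together the feature extraction and classifier interfaces described in this list (the model names, indices, and pooling choices are illustrative):
```py
import timm
import torch
x = torch.randn(1, 3, 224, 224)
# Multi-scale feature map (feature pyramid) extraction from any model
feature_model = timm.create_model(
    'resnet50', features_only=True, out_indices=(1, 2, 3, 4), output_stride=16)
print(feature_model.feature_info.channels())   # channels of each returned map
print(feature_model.feature_info.reduction())  # reduction (stride) of each map
feature_maps = feature_model(x)                # list of 4 feature maps
# Consistent classifier / pooling / input adaptation interface
model = timm.create_model('resnet50', num_classes=10, global_pool='catavgmax')
head = model.get_classifier()  # access the classification head
model.reset_classifier(0)      # remove the head; model(x) now returns pooled features
gray_model = timm.create_model('resnet50', pretrained=True, in_chans=1)  # weights adapted to 1-channel input
```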
## Results
Model validation results can be found in the [results tables](results/README.md)
## Getting Started (Documentation)
The official documentation can be found at https://huggingface.co/docs/hub/timm. Documentation contributions are welcome.
[Getting Started with PyTorch Image Models (timm): A Practitioner’s Guide](https://towardsdatascience.com/getting-started-with-pytorch-image-models-timm-a-practitioners-guide-4e77b4bf9055) by [Chris Hughes](https://github.com/Chris-hughes10) is an extensive blog post covering many aspects of `timm` in detail.
[timmdocs](http://timm.fast.ai/) is an alternate set of documentation for `timm`. A big thanks to [Aman Arora](https://github.com/amaarora) for his efforts creating timmdocs.
[paperswithcode](https://paperswithcode.com/lib/timm) is a good resource for browsing the models within `timm`.
## Train, Validation, Inference Scripts
The root folder of the repository contains reference train, validation, and inference scripts that work with the included models and other features of this repository. They are adaptable for other datasets and use cases with a little hacking. See [documentation](https://huggingface.co/docs/timm/training_script).
## Awesome PyTorch Resources
One of the greatest assets of PyTorch is the community and their contributions. A few of my favourite resources that pair well with the models and components here are listed below.
### Object Detection, Instance and Semantic Segmentation
* Detectron2 - https://github.com/facebookresearch/detectron2
* Segmentation Models (Semantic) - https://github.com/qubvel/segmentation_models.pytorch
* EfficientDet (Obj Det, Semantic soon) - https://github.com/rwightman/efficientdet-pytorch
### Computer Vision / Image Augmentation
* Albumentations - https://github.com/albumentations-team/albumentations
* Kornia - https://github.com/kornia/kornia
### Knowledge Distillation
* RepDistiller - https://github.com/HobbitLong/RepDistiller
* torchdistill - https://github.com/yoshitomo-matsubara/torchdistill
### Metric Learning
* PyTorch Metric Learning - https://github.com/KevinMusgrave/pytorch-metric-learning
### Training / Frameworks
* fastai - https://github.com/fastai/fastai
## Licenses
### Code
The code here is licensed Apache 2.0. I've taken care to make sure any third party code included or adapted has compatible (permissive) licenses such as MIT, BSD, etc. I've made an effort to avoid any GPL / LGPL conflicts. That said, it is your responsibility to ensure you comply with licenses here and conditions of any dependent licenses. Where applicable, I've linked the sources/references for various components in docstrings. If you think I've missed anything please create an issue.
### Pretrained Weights
So far all of the pretrained weights available here are pretrained on ImageNet with a select few that have some additional pretraining (see extra note below). ImageNet was released for non-commercial research purposes only (https://image-net.org/download). It's not clear what the implications of that are for the use of pretrained weights from that dataset. Any models I have trained with ImageNet are done for research purposes and one should assume that the original dataset license applies to the weights. It's best to seek legal advice if you intend to use the pretrained weights in a commercial product.
#### Pretrained on more than ImageNet
Several weights included or references here were pretrained with proprietary datasets that I do not have access to. These include the Facebook WSL, SSL, SWSL ResNe(Xt) and the Google Noisy Student EfficientNet models. The Facebook models have an explicit non-commercial license (CC-BY-NC 4.0, https://github.com/facebookresearch/semi-supervised-ImageNet1K-models, https://github.com/facebookresearch/WSL-Images). The Google models do not appear to have any restriction beyond the Apache 2.0 license (and ImageNet concerns). In either case, you should contact Facebook or Google with any questions.
## Citing
### BibTeX
```bibtex
@misc{rw2019timm,
author = {Ross Wightman},
title = {PyTorch Image Models},
year = {2019},
publisher = {GitHub},
journal = {GitHub repository},
doi = {10.5281/zenodo.4414861},
howpublished = {\url{https://github.com/rwightman/pytorch-image-models}}
}
```
### Latest DOI
[](https://zenodo.org/badge/latestdoi/168799526)
| pytorch-image-models/README.md/0 | {
"file_path": "pytorch-image-models/README.md",
"repo_id": "pytorch-image-models",
"token_count": 21449
} |
# Model Summaries
The model architectures included come from a wide variety of sources. Sources, including papers, original impl ("reference code") that I rewrote / adapted, and PyTorch impl that I leveraged directly ("code") are listed below.
Most included models have pretrained weights. The weights are either:
1. from their original sources
2. ported by myself from their original impl in a different framework (e.g. Tensorflow models)
3. trained from scratch using the included training script
The validation results for the pretrained weights are [here](results)
A more exciting view (with pretty pictures) of the models within `timm` can be found at [paperswithcode](https://paperswithcode.com/lib/timm).
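Every architecture summarized below is created through the same `timm.create_model` factory; a minimal sketch using one of the listed models:
```py
import timm
# Any model name from the summaries below works here; `pretrained=True` downloads
# the matching weights when they are available.
model = timm.create_model('densenet121', pretrained=True)
model.eval()
```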
## Big Transfer ResNetV2 (BiT)
* Implementation: [resnetv2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnetv2.py)
* Paper: `Big Transfer (BiT): General Visual Representation Learning` - https://arxiv.org/abs/1912.11370
* Reference code: https://github.com/google-research/big_transfer
## Cross-Stage Partial Networks
* Implementation: [cspnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/cspnet.py)
* Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929
* Reference impl: https://github.com/WongKinYiu/CrossStagePartialNetworks
## DenseNet
* Implementation: [densenet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/densenet.py)
* Paper: `Densely Connected Convolutional Networks` - https://arxiv.org/abs/1608.06993
* Code: https://github.com/pytorch/vision/tree/master/torchvision/models
## DLA
* Implementation: [dla.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dla.py)
* Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484
* Code: https://github.com/ucbdrive/dla
## Dual-Path Networks
* Implementation: [dpn.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dpn.py)
* Paper: `Dual Path Networks` - https://arxiv.org/abs/1707.01629
* My PyTorch code: https://github.com/rwightman/pytorch-dpn-pretrained
* Reference code: https://github.com/cypw/DPNs
## GPU-Efficient Networks
* Implementation: [byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py)
* Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
* Reference code: https://github.com/idstcv/GPU-Efficient-Networks
## HRNet
* Implementation: [hrnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/hrnet.py)
* Paper: `Deep High-Resolution Representation Learning for Visual Recognition` - https://arxiv.org/abs/1908.07919
* Code: https://github.com/HRNet/HRNet-Image-Classification
## Inception-V3
* Implementation: [inception_v3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v3.py)
* Paper: `Rethinking the Inception Architecture for Computer Vision` - https://arxiv.org/abs/1512.00567
* Code: https://github.com/pytorch/vision/tree/master/torchvision/models
## Inception-V4
* Implementation: [inception_v4.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v4.py)
* Paper: `Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261
* Code: https://github.com/Cadene/pretrained-models.pytorch
* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets
## Inception-ResNet-V2
* Implementation: [inception_resnet_v2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_resnet_v2.py)
* Paper: `Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261
* Code: https://github.com/Cadene/pretrained-models.pytorch
* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets
## NASNet-A
* Implementation: [nasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/nasnet.py)
* Paper: `Learning Transferable Architectures for Scalable Image Recognition` - https://arxiv.org/abs/1707.07012
* Code: https://github.com/Cadene/pretrained-models.pytorch
* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet
## PNasNet-5
* Implementation: [pnasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/pnasnet.py)
* Paper: `Progressive Neural Architecture Search` - https://arxiv.org/abs/1712.00559
* Code: https://github.com/Cadene/pretrained-models.pytorch
* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet
## EfficientNet
* Implementation: [efficientnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py)
* Papers:
* EfficientNet NoisyStudent (B0-B7, L2) - https://arxiv.org/abs/1911.04252
* EfficientNet AdvProp (B0-B8) - https://arxiv.org/abs/1911.09665
* EfficientNet (B0-B7) - https://arxiv.org/abs/1905.11946
* EfficientNet-EdgeTPU (S, M, L) - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html
* MixNet - https://arxiv.org/abs/1907.09595
* MNASNet B1, A1 (Squeeze-Excite), and Small - https://arxiv.org/abs/1807.11626
* MobileNet-V2 - https://arxiv.org/abs/1801.04381
* FBNet-C - https://arxiv.org/abs/1812.03443
* Single-Path NAS - https://arxiv.org/abs/1904.02877
* My PyTorch code: https://github.com/rwightman/gen-efficientnet-pytorch
* Reference code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
## MobileNet-V3
* Implementation: [mobilenetv3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py)
* Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244
* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet
## RegNet
* Implementation: [regnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/regnet.py)
* Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678
* Reference code: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py
## RepVGG
* Implementation: [byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py)
* Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
* Reference code: https://github.com/DingXiaoH/RepVGG
## ResNet, ResNeXt
* Implementation: [resnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py)
* ResNet (V1B)
* Paper: `Deep Residual Learning for Image Recognition` - https://arxiv.org/abs/1512.03385
* Code: https://github.com/pytorch/vision/tree/master/torchvision/models
* ResNeXt
* Paper: `Aggregated Residual Transformations for Deep Neural Networks` - https://arxiv.org/abs/1611.05431
* Code: https://github.com/pytorch/vision/tree/master/torchvision/models
* 'Bag of Tricks' / Gluon C, D, E, S ResNet variants
* Paper: `Bag of Tricks for Image Classification with CNNs` - https://arxiv.org/abs/1812.01187
* Code: https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnetv1b.py
* Instagram pretrained / ImageNet tuned ResNeXt101
* Paper: `Exploring the Limits of Weakly Supervised Pretraining` - https://arxiv.org/abs/1805.00932
* Weights: https://pytorch.org/hub/facebookresearch_WSL-Images_resnext (NOTE: CC BY-NC 4.0 License, NOT commercial friendly)
* Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet and ResNeXts
* Paper: `Billion-scale semi-supervised learning for image classification` - https://arxiv.org/abs/1905.00546
* Weights: https://github.com/facebookresearch/semi-supervised-ImageNet1K-models (NOTE: CC BY-NC 4.0 License, NOT commercial friendly)
* Squeeze-and-Excitation Networks
* Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507
* Code: Added to ResNet base, this is current version going forward, old `senet.py` is being deprecated
* ECAResNet (ECA-Net)
* Paper: `ECA-Net: Efficient Channel Attention for Deep CNN` - https://arxiv.org/abs/1910.03151v4
* Code: Added to ResNet base, ECA module contributed by @VRandme, reference https://github.com/BangguWu/ECANet
## Res2Net
* Implementation: [res2net.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/res2net.py)
* Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169
* Code: https://github.com/gasvn/Res2Net
## ResNeSt
* Implementation: [resnest.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnest.py)
* Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955
* Code: https://github.com/zhanghang1989/ResNeSt
## ReXNet
* Implementation: [rexnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/rexnet.py)
* Paper: `ReXNet: Diminishing Representational Bottleneck on CNN` - https://arxiv.org/abs/2007.00992
* Code: https://github.com/clovaai/rexnet
## Selective-Kernel Networks
* Implementation: [sknet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/sknet.py)
* Paper: `Selective-Kernel Networks` - https://arxiv.org/abs/1903.06586
* Code: https://github.com/implus/SKNet, https://github.com/clovaai/assembled-cnn
## SelecSLS
* Implementation: [selecsls.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/selecsls.py)
* Paper: `XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera` - https://arxiv.org/abs/1907.00837
* Code: https://github.com/mehtadushy/SelecSLS-Pytorch
## Squeeze-and-Excitation Networks
* Implementation: [senet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/senet.py)
NOTE: I am deprecating this version of the networks; the new ones are part of `resnet.py`
* Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507
* Code: https://github.com/Cadene/pretrained-models.pytorch
## TResNet
* Implementation: [tresnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/tresnet.py)
* Paper: `TResNet: High Performance GPU-Dedicated Architecture` - https://arxiv.org/abs/2003.13630
* Code: https://github.com/mrT23/TResNet
## VGG
* Implementation: [vgg.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vgg.py)
* Paper: `Very Deep Convolutional Networks For Large-Scale Image Recognition` - https://arxiv.org/pdf/1409.1556.pdf
* Reference code: https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
## Vision Transformer
* Implementation: [vision_transformer.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py)
* Paper: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929
* Reference code and pretrained weights: https://github.com/google-research/vision_transformer
## VovNet V2 and V1
* Implementation: [vovnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vovnet.py)
* Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667
* Reference code: https://github.com/youngwanLEE/vovnet-detectron2
## Xception
* Implementation: [xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/xception.py)
* Paper: `Xception: Deep Learning with Depthwise Separable Convolutions` - https://arxiv.org/abs/1610.02357
* Code: https://github.com/Cadene/pretrained-models.pytorch
## Xception (Modified Aligned, Gluon)
* Implementation: [gluon_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/gluon_xception.py)
* Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611
* Reference code: https://github.com/dmlc/gluon-cv/tree/master/gluoncv/model_zoo, https://github.com/jfzhang95/pytorch-deeplab-xception/
## Xception (Modified Aligned, TF)
* Implementation: [aligned_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/aligned_xception.py)
* Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611
* Reference code: https://github.com/tensorflow/models/tree/master/research/deeplab
| pytorch-image-models/hfdocs/source/models.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models.mdx",
"repo_id": "pytorch-image-models",
"token_count": 4501
} |
# MobileNet v2
**MobileNetV2** is a convolutional neural network architecture that seeks to perform well on mobile devices. It is based on an [inverted residual structure](https://paperswithcode.com/method/inverted-residual-block) where the residual connections are between the bottleneck layers. The intermediate expansion layer uses lightweight depthwise convolutions to filter features as a source of non-linearity. As a whole, the architecture of MobileNetV2 contains an initial fully convolutional layer with 32 filters, followed by 19 residual bottleneck layers.
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('mobilenetv2_100', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `mobilenetv2_100`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction); just change the name of the model you want to use.
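For example, a minimal sketch that reuses the `tensor` prepared above to pull multi-scale feature maps from this model:
```py
>>> feature_model = timm.create_model('mobilenetv2_100', pretrained=True, features_only=True)
>>> feature_maps = feature_model(tensor) # one feature map per network stage
>>> print([f.shape for f in feature_maps])
```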
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('mobilenetv2_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/abs-1801-04381,
author = {Mark Sandler and
Andrew G. Howard and
Menglong Zhu and
Andrey Zhmoginov and
Liang{-}Chieh Chen},
title = {Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification,
Detection and Segmentation},
journal = {CoRR},
volume = {abs/1801.04381},
year = {2018},
url = {http://arxiv.org/abs/1801.04381},
archivePrefix = {arXiv},
eprint = {1801.04381},
timestamp = {Tue, 12 Jan 2021 15:30:06 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1801-04381.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: MobileNet V2
Paper:
Title: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks'
URL: https://paperswithcode.com/paper/mobilenetv2-inverted-residuals-and-linear
Models:
- Name: mobilenetv2_100
In Collection: MobileNet V2
Metadata:
FLOPs: 401920448
Parameters: 3500000
File Size: 14202571
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Depthwise Separable Convolution
- Dropout
- Inverted Residual Block
- Max Pooling
- ReLU6
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: mobilenetv2_100
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1536
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L955
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 72.95%
Top 5 Accuracy: 91.0%
- Name: mobilenetv2_110d
In Collection: MobileNet V2
Metadata:
FLOPs: 573958832
Parameters: 4520000
File Size: 18316431
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Depthwise Separable Convolution
- Dropout
- Inverted Residual Block
- Max Pooling
- ReLU6
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: mobilenetv2_110d
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1536
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L969
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.05%
Top 5 Accuracy: 92.19%
- Name: mobilenetv2_120d
In Collection: MobileNet V2
Metadata:
FLOPs: 888510048
Parameters: 5830000
File Size: 23651121
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Depthwise Separable Convolution
- Dropout
- Inverted Residual Block
- Max Pooling
- ReLU6
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: mobilenetv2_120d
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1536
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L977
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.28%
Top 5 Accuracy: 93.51%
- Name: mobilenetv2_140
In Collection: MobileNet V2
Metadata:
FLOPs: 770196784
Parameters: 6110000
File Size: 24673555
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Depthwise Separable Convolution
- Dropout
- Inverted Residual Block
- Max Pooling
- ReLU6
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: mobilenetv2_140
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1536
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L962
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 76.51%
Top 5 Accuracy: 93.0%
--> | pytorch-image-models/hfdocs/source/models/mobilenet-v2.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/mobilenet-v2.mdx",
"repo_id": "pytorch-image-models",
"token_count": 3403
} |
# Quickstart
This quickstart is intended for developers who are ready to dive into the code and see an example of how to integrate `timm` into their model training workflow.
First, you'll need to install `timm`. For more information on installation, see [Installation](installation).
```bash
pip install timm
```
## Load a Pretrained Model
Pretrained models can be loaded using [`create_model`].
Here, we load the pretrained `mobilenetv3_large_100` model.
```py
>>> import timm
>>> m = timm.create_model('mobilenetv3_large_100', pretrained=True)
>>> m.eval()
```
<Tip>
Note: The returned PyTorch model is set to train mode by default, so you must call .eval() on it if you plan to use it for inference.
</Tip>
## List Models with Pretrained Weights
To list models packaged with `timm`, you can use [`list_models`]. If you specify `pretrained=True`, this function will only return model names that have associated pretrained weights available.
```py
>>> import timm
>>> from pprint import pprint
>>> model_names = timm.list_models(pretrained=True)
>>> pprint(model_names)
[
'adv_inception_v3',
'cspdarknet53',
'cspresnext50',
'densenet121',
'densenet161',
'densenet169',
'densenet201',
'densenetblur121d',
'dla34',
'dla46_c',
]
```
You can also list models with a specific pattern in their name.
```py
>>> import timm
>>> from pprint import pprint
>>> model_names = timm.list_models('*resne*t*')
>>> pprint(model_names)
[
'cspresnet50',
'cspresnet50d',
'cspresnet50w',
'cspresnext50',
...
]
```
## Fine-Tune a Pretrained Model
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('mobilenetv3_large_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To fine-tune on your own dataset, you have to write a PyTorch training loop or adapt `timm`'s [training script](training_script) to use your dataset.
## Use a Pretrained Model for Feature Extraction
Without modifying the network, one can call model.forward_features(input) on any model instead of the usual model(input). This bypasses the network's head classifier and global pooling.
For a more in depth guide to using `timm` for feature extraction, see [Feature Extraction](feature_extraction).
```py
>>> import timm
>>> import torch
>>> x = torch.randn(1, 3, 224, 224)
>>> model = timm.create_model('mobilenetv3_large_100', pretrained=True)
>>> features = model.forward_features(x)
>>> print(features.shape)
torch.Size([1, 960, 7, 7])
```
## Image Augmentation
To transform images into valid inputs for a model, you can use [`timm.data.create_transform`], providing the desired `input_size` that the model expects.
This will return a generic transform that uses reasonable defaults.
```py
>>> timm.data.create_transform((3, 224, 224))
Compose(
Resize(size=256, interpolation=bilinear, max_size=None, antialias=None)
CenterCrop(size=(224, 224))
ToTensor()
Normalize(mean=tensor([0.4850, 0.4560, 0.4060]), std=tensor([0.2290, 0.2240, 0.2250]))
)
```
Pretrained models have specific transforms that were applied to images fed into them while training. If you use the wrong transform on your image, the model won't understand what it's seeing!
To figure out which transformations were used for a given pretrained model, we can start by taking a look at its `pretrained_cfg`
```py
>>> model.pretrained_cfg
{'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth',
'num_classes': 1000,
'input_size': (3, 224, 224),
'pool_size': (7, 7),
'crop_pct': 0.875,
'interpolation': 'bicubic',
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'first_conv': 'conv_stem',
'classifier': 'classifier',
'architecture': 'mobilenetv3_large_100'}
```
We can then resolve only the data related configuration by using [`timm.data.resolve_data_config`].
```py
>>> timm.data.resolve_data_config(model.pretrained_cfg)
{'input_size': (3, 224, 224),
'interpolation': 'bicubic',
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'crop_pct': 0.875}
```
We can pass this data config to [`timm.data.create_transform`] to initialize the model's associated transform.
```py
>>> data_cfg = timm.data.resolve_data_config(model.pretrained_cfg)
>>> transform = timm.data.create_transform(**data_cfg)
>>> transform
Compose(
Resize(size=256, interpolation=bicubic, max_size=None, antialias=None)
CenterCrop(size=(224, 224))
ToTensor()
Normalize(mean=tensor([0.4850, 0.4560, 0.4060]), std=tensor([0.2290, 0.2240, 0.2250]))
)
```
<Tip>
Note: Here, the pretrained model's config happens to be the same as the generic config we made earlier. This is not always the case. So, it's safer to use the data config to create the transform as we did here instead of using the generic transform.
</Tip>
## Using Pretrained Models for Inference
Here, we will put together the above sections and use a pretrained model for inference.
First we'll need an image to do inference on. Here we load a picture of a cat from the web:
```py
>>> import requests
>>> from PIL import Image
>>> from io import BytesIO
>>> url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/timm/cat.jpg'
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image
```
Here's the image we loaded:
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/timm/cat.jpg" alt="An Image from a link" width="300"/>
Now, we'll create our model and transforms again. This time, we make sure to set our model in evaluation mode.
```py
>>> model = timm.create_model('mobilenetv3_large_100', pretrained=True).eval()
>>> transform = timm.data.create_transform(
**timm.data.resolve_data_config(model.pretrained_cfg)
)
```
We can prepare this image for the model by passing it to the transform.
```py
>>> image_tensor = transform(image)
>>> image_tensor.shape
torch.Size([3, 224, 224])
```
Now we can pass that image to the model to get the predictions. We use `unsqueeze(0)` in this case, as the model is expecting a batch dimension.
```py
>>> output = model(image_tensor.unsqueeze(0))
>>> output.shape
torch.Size([1, 1000])
```
To get the predicted probabilities, we apply softmax to the output. This leaves us with a tensor of shape `(num_classes,)`.
```py
>>> probabilities = torch.nn.functional.softmax(output[0], dim=0)
>>> probabilities.shape
torch.Size([1000])
```
Now we'll find the top 5 predicted class indexes and values using `torch.topk`.
```py
>>> values, indices = torch.topk(probabilities, 5)
>>> indices
tensor([281, 282, 285, 673, 670])
```
If we check the imagenet labels for the top index, we can see what the model predicted...
```py
>>> IMAGENET_1k_URL = 'https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt'
>>> IMAGENET_1k_LABELS = requests.get(IMAGENET_1k_URL).text.strip().split('\n')
>>> [{'label': IMAGENET_1k_LABELS[idx], 'value': val.item()} for val, idx in zip(values, indices)]
[{'label': 'tabby, tabby_cat', 'value': 0.5101025700569153},
{'label': 'tiger_cat', 'value': 0.22490699589252472},
{'label': 'Egyptian_cat', 'value': 0.1835290789604187},
{'label': 'mouse, computer_mouse', 'value': 0.006752475164830685},
{'label': 'motor_scooter, scooter', 'value': 0.004942195490002632}]
``` | pytorch-image-models/hfdocs/source/quickstart.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/quickstart.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2581
} |
import os
import pickle
def load_class_map(map_or_filename, root=''):
if isinstance(map_or_filename, dict):
assert map_or_filename, 'class_map dict must be non-empty'
return map_or_filename
class_map_path = map_or_filename
if not os.path.exists(class_map_path):
class_map_path = os.path.join(root, class_map_path)
assert os.path.exists(class_map_path), 'Cannot locate specified class map file (%s)' % map_or_filename
class_map_ext = os.path.splitext(map_or_filename)[-1].lower()
if class_map_ext == '.txt':
with open(class_map_path) as f:
class_to_idx = {v.strip(): k for k, v in enumerate(f)}
elif class_map_ext == '.pkl':
with open(class_map_path, 'rb') as f:
class_to_idx = pickle.load(f)
else:
assert False, f'Unsupported class map file extension ({class_map_ext}).'
return class_to_idx
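# Example usage (illustrative, not part of the original module): given a text file with one
# class name per line, load_class_map returns a {class_name: index} dict, e.g.
#   class_to_idx = load_class_map('class_map.txt', root='/data/my_dataset')
# A .pkl file containing a pickled dict is also accepted.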
| pytorch-image-models/timm/data/readers/class_map.py/0 | {
"file_path": "pytorch-image-models/timm/data/readers/class_map.py",
"repo_id": "pytorch-image-models",
"token_count": 387
} |
from .activations import *
from .adaptive_avgmax_pool import \
adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d
from .attention2d import MultiQueryAttention2d, Attention2d, MultiQueryAttentionV2
from .attention_pool import AttentionPoolLatent
from .attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding
from .blur_pool import BlurPool2d, create_aa
from .classifier import create_classifier, ClassifierHead, NormMlpClassifierHead, ClNormMlpClassifierHead
from .cond_conv2d import CondConv2d, get_condconv_initializer
from .config import is_exportable, is_scriptable, is_no_jit, use_fused_attn, \
set_exportable, set_scriptable, set_no_jit, set_layer_config, set_fused_attn, \
set_reentrant_ckpt, use_reentrant_ckpt
from .conv2d_same import Conv2dSame, conv2d_same
from .conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct
from .create_act import create_act_layer, get_act_layer, get_act_fn
from .create_attn import get_attn, create_attn
from .create_conv2d import create_conv2d
from .create_norm import get_norm_layer, create_norm_layer
from .create_norm_act import get_norm_act_layer, create_norm_act_layer, get_norm_act_layer
from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path
from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn
from .evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\
EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a
from .fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm
from .filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d
from .format import Format, get_channel_dim, get_spatial_dim, nchw_to, nhwc_to
from .gather_excite import GatherExcite
from .global_context import GlobalContext
from .grid import ndgrid, meshgrid
from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple
from .hybrid_embed import HybridEmbed, HybridEmbedWithSize
from .inplace_abn import InplaceAbn
from .layer_scale import LayerScale, LayerScale2d
from .linear import Linear
from .mixed_conv2d import MixedConv2d
from .mlp import Mlp, GluMlp, GatedMlp, SwiGLU, SwiGLUPacked, ConvMlp, GlobalResponseNormMlp
from .non_local_attn import NonLocalAttn, BatNonLocalAttn
from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm, RmsNorm2d, SimpleNorm, SimpleNorm2d
from .norm_act import BatchNormAct2d, GroupNormAct, GroupNorm1Act, LayerNormAct, LayerNormAct2d,\
SyncBatchNormAct, convert_sync_batchnorm, FrozenBatchNormAct2d, freeze_batch_norm_2d, unfreeze_batch_norm_2d
from .padding import get_padding, get_same_padding, pad_same
from .patch_dropout import PatchDropout
from .patch_embed import PatchEmbed, PatchEmbedWithSize, resample_patch_embed
from .pool2d_same import AvgPool2dSame, create_pool2d
from .pos_embed import resample_abs_pos_embed, resample_abs_pos_embed_nhwc
from .pos_embed_rel import RelPosMlp, RelPosBias, RelPosBiasTf, gen_relative_position_index, gen_relative_log_coords, \
resize_rel_pos_bias_table, resize_rel_pos_bias_table_simple, resize_rel_pos_bias_table_levit
from .pos_embed_sincos import pixel_freq_bands, freq_bands, build_sincos2d_pos_embed, build_fourier_pos_embed, \
build_rotary_pos_embed, apply_rot_embed, apply_rot_embed_cat, apply_rot_embed_list, apply_keep_indices_nlc, \
FourierEmbed, RotaryEmbedding, RotaryEmbeddingCat
from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite
from .selective_kernel import SelectiveKernel
from .separable_conv import SeparableConv2d, SeparableConvNormAct
from .space_to_depth import SpaceToDepth, DepthToSpace
from .split_attn import SplitAttn
from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model
from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame
from .test_time_pool import TestTimePoolHead, apply_test_time_pool
from .trace_utils import _assert, _float_to_int
from .typing import LayerType, PadType
from .weight_init import trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_, \
init_weight_jax, init_weight_vit
| pytorch-image-models/timm/layers/__init__.py/0 | {
"file_path": "pytorch-image-models/timm/layers/__init__.py",
"repo_id": "pytorch-image-models",
"token_count": 1500
} |
""" Attention Factory
Hacked together by / Copyright 2021 Ross Wightman
"""
import torch
from functools import partial
from .bottleneck_attn import BottleneckAttn
from .cbam import CbamModule, LightCbamModule
from .eca import EcaModule, CecaModule
from .gather_excite import GatherExcite
from .global_context import GlobalContext
from .halo_attn import HaloAttn
from .lambda_layer import LambdaLayer
from .non_local_attn import NonLocalAttn, BatNonLocalAttn
from .selective_kernel import SelectiveKernel
from .split_attn import SplitAttn
from .squeeze_excite import SEModule, EffectiveSEModule
def get_attn(attn_type):
if isinstance(attn_type, torch.nn.Module):
return attn_type
module_cls = None
if attn_type:
if isinstance(attn_type, str):
attn_type = attn_type.lower()
# Lightweight attention modules (channel and/or coarse spatial).
# Typically added to existing network architecture blocks in addition to existing convolutions.
if attn_type == 'se':
module_cls = SEModule
elif attn_type == 'ese':
module_cls = EffectiveSEModule
elif attn_type == 'eca':
module_cls = EcaModule
elif attn_type == 'ecam':
module_cls = partial(EcaModule, use_mlp=True)
elif attn_type == 'ceca':
module_cls = CecaModule
elif attn_type == 'ge':
module_cls = GatherExcite
elif attn_type == 'gc':
module_cls = GlobalContext
elif attn_type == 'gca':
module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False)
elif attn_type == 'cbam':
module_cls = CbamModule
elif attn_type == 'lcbam':
module_cls = LightCbamModule
# Attention / attention-like modules w/ significant params
# Typically replace some of the existing workhorse convs in a network architecture.
# All of these accept a stride argument and can spatially downsample the input.
elif attn_type == 'sk':
module_cls = SelectiveKernel
elif attn_type == 'splat':
module_cls = SplitAttn
# Self-attention / attention-like modules w/ significant compute and/or params
# Typically replace some of the existing workhorse convs in a network architecture.
# All of these accept a stride argument and can spatially downsample the input.
elif attn_type == 'lambda':
return LambdaLayer
elif attn_type == 'bottleneck':
return BottleneckAttn
elif attn_type == 'halo':
return HaloAttn
elif attn_type == 'nl':
module_cls = NonLocalAttn
elif attn_type == 'bat':
module_cls = BatNonLocalAttn
# Woops!
else:
assert False, "Invalid attn module (%s)" % attn_type
elif isinstance(attn_type, bool):
if attn_type:
module_cls = SEModule
else:
module_cls = attn_type
return module_cls
def create_attn(attn_type, channels, **kwargs):
module_cls = get_attn(attn_type)
if module_cls is not None:
# NOTE: it's expected the first (positional) argument of all attention layers is the # input channels
return module_cls(channels, **kwargs)
return None
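# Example usage (illustrative, not part of the original module):
#   create_attn('se', 64)   # -> SEModule acting on 64 channels
#   create_attn('eca', 64)  # -> EcaModule
#   create_attn(None, 64)   # -> None (no attention layer added)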
| pytorch-image-models/timm/layers/create_attn.py/0 | {
"file_path": "pytorch-image-models/timm/layers/create_attn.py",
"repo_id": "pytorch-image-models",
"token_count": 1588
} |
""" Image to Patch Hybird Embedding Layer
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
import math
from typing import List, Optional, Tuple, Union
import torch
from torch import nn as nn
import torch.nn.functional as F
from .format import Format, nchw_to
from .helpers import to_2tuple
from .patch_embed import resample_patch_embed
_logger = logging.getLogger(__name__)
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
output_fmt: Format
dynamic_img_pad: torch.jit.Final[bool]
def __init__(
self,
backbone: nn.Module,
img_size: Union[int, Tuple[int, int]] = 224,
patch_size: Union[int, Tuple[int, int]] = 1,
feature_size: Optional[Union[int, Tuple[int, int]]] = None,
feature_ratio: Optional[Union[int, Tuple[int, int]]] = None,
in_chans: int = 3,
embed_dim: int = 768,
bias: bool = True,
proj: bool = True,
flatten: bool = True,
output_fmt: Optional[str] = None,
strict_img_size: bool = True,
dynamic_img_pad: bool = False,
):
super().__init__()
assert isinstance(backbone, nn.Module)
self.backbone = backbone
self.in_chans = in_chans
(
self.img_size,
self.patch_size,
self.feature_size,
self.feature_ratio,
self.feature_dim,
self.grid_size,
self.num_patches,
) = self._init_backbone(
img_size=img_size,
patch_size=patch_size,
feature_size=feature_size,
feature_ratio=feature_ratio,
)
if output_fmt is not None:
self.flatten = False
self.output_fmt = Format(output_fmt)
else:
# flatten spatial dim and transpose to channels last, kept for bwd compat
self.flatten = flatten
self.output_fmt = Format.NCHW
self.strict_img_size = strict_img_size
self.dynamic_img_pad = dynamic_img_pad
if not dynamic_img_pad:
assert self.feature_size[0] % self.patch_size[0] == 0 and self.feature_size[1] % self.patch_size[1] == 0
if proj:
self.proj = nn.Conv2d(
self.feature_dim,
embed_dim,
kernel_size=patch_size,
stride=patch_size,
bias=bias,
)
else:
assert self.feature_dim == embed_dim, \
f'The feature dim ({self.feature_dim}) must match embed dim ({embed_dim}) when projection disabled.'
self.proj = nn.Identity()
def _init_backbone(
self,
img_size: Union[int, Tuple[int, int]] = 224,
patch_size: Union[int, Tuple[int, int]] = 1,
feature_size: Optional[Union[int, Tuple[int, int]]] = None,
feature_ratio: Optional[Union[int, Tuple[int, int]]] = None,
feature_dim: Optional[int] = None,
):
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
if feature_size is None:
with torch.no_grad():
# NOTE Most reliable way of determining output dims is to run forward pass
training = self.backbone.training
if training:
self.backbone.eval()
o = self.backbone(torch.zeros(1, self.in_chans, img_size[0], img_size[1]))
if isinstance(o, (list, tuple)):
o = o[-1] # last feature if backbone outputs list/tuple of features
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
self.backbone.train(training)
feature_ratio = tuple([s // f for s, f in zip(img_size, feature_size)])
else:
feature_size = to_2tuple(feature_size)
feature_ratio = to_2tuple(feature_ratio or 16)
if feature_dim is None:
if hasattr(self.backbone, 'feature_info'):
feature_dim = self.backbone.feature_info.channels()[-1]
else:
feature_dim = self.backbone.num_features
grid_size = tuple([f // p for f, p in zip(feature_size, patch_size)])
num_patches = grid_size[0] * grid_size[1]
return img_size, patch_size, feature_size, feature_ratio, feature_dim, grid_size, num_patches
def set_input_size(
self,
img_size: Optional[Union[int, Tuple[int, int]]] = None,
patch_size: Optional[Union[int, Tuple[int, int]]] = None,
feature_size: Optional[Union[int, Tuple[int, int]]] = None,
feature_ratio: Optional[Union[int, Tuple[int, int]]] = None,
feature_dim: Optional[int] = None,
):
assert img_size is not None or patch_size is not None
img_size = img_size or self.img_size
new_patch_size = None
if patch_size is not None:
new_patch_size = to_2tuple(patch_size)
if new_patch_size is not None and new_patch_size != self.patch_size:
assert isinstance(self.proj, nn.Conv2d), 'HybridEmbed must have a projection layer to change patch size.'
with torch.no_grad():
new_proj = nn.Conv2d(
self.proj.in_channels,
self.proj.out_channels,
kernel_size=new_patch_size,
stride=new_patch_size,
bias=self.proj.bias is not None,
)
new_proj.weight.copy_(resample_patch_embed(self.proj.weight, new_patch_size, verbose=True))
if self.proj.bias is not None:
new_proj.bias.copy_(self.proj.bias)
self.proj = new_proj
patch_size = new_patch_size
patch_size = patch_size or self.patch_size
if img_size != self.img_size or patch_size != self.patch_size:
(
self.img_size,
self.patch_size,
self.feature_size,
self.feature_ratio,
self.feature_dim,
self.grid_size,
self.num_patches,
) = self._init_backbone(
img_size=img_size,
patch_size=patch_size,
feature_size=feature_size,
feature_ratio=feature_ratio,
feature_dim=feature_dim,
)
def feat_ratio(self, as_scalar=True) -> Union[Tuple[int, int], int]:
total_reduction = (
self.feature_ratio[0] * self.patch_size[0],
self.feature_ratio[1] * self.patch_size[1]
)
if as_scalar:
return max(total_reduction)
else:
return total_reduction
def dynamic_feat_size(self, img_size: Tuple[int, int]) -> Tuple[int, int]:
""" Get feature grid size taking account dynamic padding and backbone network feat reduction
"""
feat_size = (img_size[0] // self.feature_ratio[0], img_size[1] // self.feature_ratio[1])
if self.dynamic_img_pad:
return math.ceil(feat_size[0] / self.patch_size[0]), math.ceil(feat_size[1] / self.patch_size[1])
else:
return feat_size[0] // self.patch_size[0], feat_size[1] // self.patch_size[1]
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True):
if hasattr(self.backbone, 'set_grad_checkpointing'):
self.backbone.set_grad_checkpointing(enable=enable)
elif hasattr(self.backbone, 'grad_checkpointing'):
self.backbone.grad_checkpointing = enable
def forward(self, x):
x = self.backbone(x)
if isinstance(x, (list, tuple)):
x = x[-1] # last feature if backbone outputs list/tuple of features
_, _, H, W = x.shape
if self.dynamic_img_pad:
pad_h = (self.patch_size[0] - H % self.patch_size[0]) % self.patch_size[0]
pad_w = (self.patch_size[1] - W % self.patch_size[1]) % self.patch_size[1]
x = F.pad(x, (0, pad_w, 0, pad_h))
x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2) # NCHW -> NLC
elif self.output_fmt != Format.NCHW:
x = nchw_to(x, self.output_fmt)
return x
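def _demo_hybrid_embed():
    # Illustrative usage sketch, not part of the upstream file. Any module that maps an image to
    # a 2D feature map can act as the backbone; a single strided conv stands in for a real CNN
    # here, and all sizes below are arbitrary assumptions for illustration.
    backbone = nn.Conv2d(3, 64, kernel_size=16, stride=16)
    embed = HybridEmbed(backbone, img_size=224, patch_size=1, embed_dim=768)
    tokens = embed(torch.randn(2, 3, 224, 224))
    assert tokens.shape == (2, 14 * 14, 768)  # NLC tokens, one per 16x16 input region
    return tokens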
class HybridEmbedWithSize(HybridEmbed):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
def __init__(
self,
backbone: nn.Module,
img_size: Union[int, Tuple[int, int]] = 224,
patch_size: Union[int, Tuple[int, int]] = 1,
feature_size: Optional[Union[int, Tuple[int, int]]] = None,
feature_ratio: Optional[Union[int, Tuple[int, int]]] = None,
in_chans: int = 3,
embed_dim: int = 768,
bias=True,
proj=True,
):
super().__init__(
backbone=backbone,
img_size=img_size,
patch_size=patch_size,
feature_size=feature_size,
feature_ratio=feature_ratio,
in_chans=in_chans,
embed_dim=embed_dim,
bias=bias,
proj=proj,
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True):
if hasattr(self.backbone, 'set_grad_checkpointing'):
self.backbone.set_grad_checkpointing(enable=enable)
elif hasattr(self.backbone, 'grad_checkpointing'):
self.backbone.grad_checkpointing = enable
def forward(self, x) -> Tuple[torch.Tensor, List[int]]:
x = self.backbone(x)
if isinstance(x, (list, tuple)):
x = x[-1] # last feature if backbone outputs list/tuple of features
x = self.proj(x)
return x.flatten(2).transpose(1, 2), x.shape[-2:] | pytorch-image-models/timm/layers/hybrid_embed.py/0 | {
"file_path": "pytorch-image-models/timm/layers/hybrid_embed.py",
"repo_id": "pytorch-image-models",
"token_count": 5059
} |
""" AvgPool2d w/ Same Padding
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Tuple, Optional
from .helpers import to_2tuple
from .padding import pad_same, get_padding_value
def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0),
ceil_mode: bool = False, count_include_pad: bool = True):
# FIXME how to deal with count_include_pad vs not for external padding?
x = pad_same(x, kernel_size, stride)
return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad)
class AvgPool2dSame(nn.AvgPool2d):
""" Tensorflow like 'SAME' wrapper for 2D average pooling
"""
def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad)
def forward(self, x):
x = pad_same(x, self.kernel_size, self.stride)
return F.avg_pool2d(
x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad)
def max_pool2d_same(
x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0),
dilation: List[int] = (1, 1), ceil_mode: bool = False):
x = pad_same(x, kernel_size, stride, value=-float('inf'))
return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode)
class MaxPool2dSame(nn.MaxPool2d):
""" Tensorflow like 'SAME' wrapper for 2D max pooling
"""
def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False):
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode)
def forward(self, x):
x = pad_same(x, self.kernel_size, self.stride, value=-float('inf'))
return F.max_pool2d(x, self.kernel_size, self.stride, (0, 0), self.dilation, self.ceil_mode)
def create_pool2d(pool_type, kernel_size, stride=None, **kwargs):
stride = stride or kernel_size
padding = kwargs.pop('padding', '')
padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs)
if is_dynamic:
if pool_type == 'avg':
return AvgPool2dSame(kernel_size, stride=stride, **kwargs)
elif pool_type == 'max':
return MaxPool2dSame(kernel_size, stride=stride, **kwargs)
else:
assert False, f'Unsupported pool type {pool_type}'
else:
if pool_type == 'avg':
return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs)
elif pool_type == 'max':
return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs)
else:
assert False, f'Unsupported pool type {pool_type}'
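def _demo_create_pool2d():
    # Illustrative usage sketch, not part of the upstream file. With padding='same' and stride > 1
    # the dynamic path is taken and the TF-like SAME-padded wrapper is returned; the tensor shapes
    # below are arbitrary assumptions.
    pool = create_pool2d('avg', kernel_size=3, stride=2, padding='same')
    assert isinstance(pool, AvgPool2dSame)
    y = pool(torch.randn(1, 8, 15, 15))
    assert y.shape[-2:] == (8, 8)  # SAME padding keeps ceil(15 / 2) == 8
    return y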
| pytorch-image-models/timm/layers/pool2d_same.py/0 | {
"file_path": "pytorch-image-models/timm/layers/pool2d_same.py",
"repo_id": "pytorch-image-models",
"token_count": 1294
} |
import torch
import torch.nn as nn
class AsymmetricLossMultiLabel(nn.Module):
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False):
super(AsymmetricLossMultiLabel, self).__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
def forward(self, x, y):
""""
Parameters
----------
x: input logits
y: targets (multi-label binarized vector)
"""
# Calculating Probabilities
x_sigmoid = torch.sigmoid(x)
xs_pos = x_sigmoid
xs_neg = 1 - x_sigmoid
# Asymmetric Clipping
if self.clip is not None and self.clip > 0:
xs_neg = (xs_neg + self.clip).clamp(max=1)
# Basic CE calculation
los_pos = y * torch.log(xs_pos.clamp(min=self.eps))
los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps))
loss = los_pos + los_neg
# Asymmetric Focusing
if self.gamma_neg > 0 or self.gamma_pos > 0:
if self.disable_torch_grad_focal_loss:
torch.set_grad_enabled(False)
pt0 = xs_pos * y
pt1 = xs_neg * (1 - y) # pt = p if t > 0 else 1-p
pt = pt0 + pt1
one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y)
one_sided_w = torch.pow(1 - pt, one_sided_gamma)
if self.disable_torch_grad_focal_loss:
torch.set_grad_enabled(True)
loss *= one_sided_w
return -loss.sum()
class AsymmetricLossSingleLabel(nn.Module):
def __init__(self, gamma_pos=1, gamma_neg=4, eps: float = 0.1, reduction='mean'):
super(AsymmetricLossSingleLabel, self).__init__()
self.eps = eps
self.logsoftmax = nn.LogSoftmax(dim=-1)
self.targets_classes = [] # prevent gpu repeated memory allocation
self.gamma_pos = gamma_pos
self.gamma_neg = gamma_neg
self.reduction = reduction
def forward(self, inputs, target, reduction=None):
""""
Parameters
----------
x: input logits
y: targets (1-hot vector)
"""
num_classes = inputs.size()[-1]
log_preds = self.logsoftmax(inputs)
self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1)
# ASL weights
targets = self.targets_classes
anti_targets = 1 - targets
xs_pos = torch.exp(log_preds)
xs_neg = 1 - xs_pos
xs_pos = xs_pos * targets
xs_neg = xs_neg * anti_targets
asymmetric_w = torch.pow(1 - xs_pos - xs_neg,
self.gamma_pos * targets + self.gamma_neg * anti_targets)
log_preds = log_preds * asymmetric_w
if self.eps > 0: # label smoothing
self.targets_classes = self.targets_classes.mul(1 - self.eps).add(self.eps / num_classes)
# loss calculation
loss = - self.targets_classes.mul(log_preds)
loss = loss.sum(dim=-1)
if self.reduction == 'mean':
loss = loss.mean()
return loss
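def _demo_asymmetric_losses():
    # Illustrative usage sketch, not part of the upstream file. The multi-label loss expects a
    # binarized target matrix, the single-label loss expects integer class indices; the batch and
    # class sizes below are arbitrary assumptions.
    logits = torch.randn(4, 10)
    multi_label_targets = torch.randint(0, 2, (4, 10)).float()
    loss_ml = AsymmetricLossMultiLabel()(logits, multi_label_targets)
    class_indices = torch.randint(0, 10, (4,))
    loss_sl = AsymmetricLossSingleLabel()(logits, class_indices)
    return loss_ml, loss_sl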
| pytorch-image-models/timm/loss/asymmetric_loss.py/0 | {
"file_path": "pytorch-image-models/timm/loss/asymmetric_loss.py",
"repo_id": "pytorch-image-models",
"token_count": 1616
} |
""" DaViT: Dual Attention Vision Transformers
As described in https://arxiv.org/abs/2204.03645
Input size invariant transformer architecture that combines channel and spatial
attention in each block. The attention mechanisms used are linear in complexity.
DaViT model defs and weights adapted from https://github.com/dingmyu/davit, original copyright below
"""
# Copyright (c) 2022 Mingyu Ding
# All rights reserved.
# This source code is licensed under the MIT license
from functools import partial
from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, to_2tuple, trunc_normal_, Mlp, LayerNorm2d, get_norm_layer, use_fused_attn
from timm.layers import NormMlpClassifierHead, ClassifierHead
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['DaVit']
class ConvPosEnc(nn.Module):
def __init__(self, dim: int, k: int = 3, act: bool = False):
super(ConvPosEnc, self).__init__()
self.proj = nn.Conv2d(
dim,
dim,
kernel_size=k,
stride=1,
padding=k // 2,
groups=dim,
)
self.act = nn.GELU() if act else nn.Identity()
def forward(self, x: Tensor):
feat = self.proj(x)
x = x + self.act(feat)
return x
class Stem(nn.Module):
""" Size-agnostic implementation of 2D image to patch embedding,
allowing input size to be adjusted during model forward operation
"""
def __init__(
self,
in_chs=3,
out_chs=96,
stride=4,
norm_layer=LayerNorm2d,
):
super().__init__()
stride = to_2tuple(stride)
self.stride = stride
self.in_chs = in_chs
self.out_chs = out_chs
assert stride[0] == 4 # only setup for stride==4
self.conv = nn.Conv2d(
in_chs,
out_chs,
kernel_size=7,
stride=stride,
padding=3,
)
self.norm = norm_layer(out_chs)
def forward(self, x: Tensor):
B, C, H, W = x.shape
pad_r = (self.stride[1] - W % self.stride[1]) % self.stride[1]
pad_b = (self.stride[0] - H % self.stride[0]) % self.stride[0]
x = F.pad(x, (0, pad_r, 0, pad_b))
x = self.conv(x)
x = self.norm(x)
return x
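def _demo_stem_padding():
    # Illustrative sketch, not part of the upstream file. The stem pads the input so sizes that
    # are not multiples of the stride still map to ceil(H / 4) x ceil(W / 4) feature maps; the
    # input resolution below is an arbitrary assumption.
    stem = Stem(in_chs=3, out_chs=96)
    y = stem(torch.randn(1, 3, 230, 225))
    assert y.shape[-2:] == (58, 57)  # ceil(230 / 4), ceil(225 / 4)
    return y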
class Downsample(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size=3,
norm_layer=LayerNorm2d,
):
super().__init__()
self.in_chs = in_chs
self.out_chs = out_chs
self.norm = norm_layer(in_chs)
self.even_k = kernel_size % 2 == 0
self.conv = nn.Conv2d(
in_chs,
out_chs,
kernel_size=kernel_size,
stride=2,
padding=0 if self.even_k else kernel_size // 2,
)
def forward(self, x: Tensor):
B, C, H, W = x.shape
x = self.norm(x)
if self.even_k:
k_h, k_w = self.conv.kernel_size
pad_r = (k_w - W % k_w) % k_w
pad_b = (k_h - H % k_h) % k_h
            x = F.pad(x, (0, pad_r, 0, pad_b))
x = self.conv(x)
return x
class ChannelAttentionV2(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=True, dynamic_scale=True):
super().__init__()
self.groups = num_heads
self.head_dim = dim // num_heads
self.dynamic_scale = dynamic_scale
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.groups, C // self.groups).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
if self.dynamic_scale:
q = q * N ** -0.5
else:
q = q * self.head_dim ** -0.5
attn = q.transpose(-1, -2) @ k
attn = attn.softmax(dim=-1)
x = (attn @ v.transpose(-1, -2)).transpose(-1, -2)
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
return x
class ChannelAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
def forward(self, x: Tensor):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
k = k * self.scale
attn = k.transpose(-1, -2) @ v
attn = attn.softmax(dim=-1)
x = (attn @ q.transpose(-1, -2)).transpose(-1, -2)
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
return x
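def _demo_channel_attention():
    # Illustrative sketch, not part of the upstream file. Channel attention builds a
    # (C / heads) x (C / heads) attention map per head, so its cost grows linearly with the token
    # count N; the token and channel sizes below are arbitrary assumptions.
    attn = ChannelAttention(dim=96, num_heads=3)
    x = torch.randn(2, 49, 96)  # (B, N, C)
    y = attn(x)
    assert y.shape == x.shape
    return y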
class ChannelBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
ffn=True,
cpe_act=False,
v2=False,
):
super().__init__()
self.cpe1 = ConvPosEnc(dim=dim, k=3, act=cpe_act)
self.ffn = ffn
self.norm1 = norm_layer(dim)
attn_layer = ChannelAttentionV2 if v2 else ChannelAttention
self.attn = attn_layer(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.cpe2 = ConvPosEnc(dim=dim, k=3, act=cpe_act)
if self.ffn:
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
else:
self.norm2 = None
self.mlp = None
self.drop_path2 = None
def forward(self, x: Tensor):
B, C, H, W = x.shape
x = self.cpe1(x).flatten(2).transpose(1, 2)
cur = self.norm1(x)
cur = self.attn(cur)
x = x + self.drop_path1(cur)
x = self.cpe2(x.transpose(1, 2).view(B, C, H, W))
if self.mlp is not None:
x = x.flatten(2).transpose(1, 2)
x = x + self.drop_path2(self.mlp(self.norm2(x)))
x = x.transpose(1, 2).view(B, C, H, W)
return x
def window_partition(x: Tensor, window_size: Tuple[int, int]):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows: Tensor, window_size: Tuple[int, int], H: int, W: int):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
C = windows.shape[-1]
x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
return x
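def _demo_window_roundtrip():
    # Illustrative sketch, not part of the upstream file. When H and W are divisible by the window
    # size, window_partition followed by window_reverse is an exact inverse; the shapes below are
    # arbitrary assumptions.
    x = torch.randn(2, 14, 14, 32)
    windows = window_partition(x, (7, 7))        # (2 * 2 * 2, 7, 7, 32)
    y = window_reverse(windows, (7, 7), 14, 14)  # (2, 14, 14, 32)
    assert torch.equal(x, y)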
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
"""
fused_attn: torch.jit.Final[bool]
def __init__(self, dim, window_size, num_heads, qkv_bias=True):
super().__init__()
self.dim = dim
self.window_size = window_size
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x: Tensor):
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
if self.fused_attn:
x = F.scaled_dot_product_attention(q, k, v)
else:
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
attn = self.softmax(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
return x
class SpatialBlock(nn.Module):
r""" Windows Block.
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (int): Window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(
self,
dim,
num_heads,
window_size=7,
mlp_ratio=4.,
qkv_bias=True,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
ffn=True,
cpe_act=False,
):
super().__init__()
self.dim = dim
self.ffn = ffn
self.num_heads = num_heads
self.window_size = to_2tuple(window_size)
self.mlp_ratio = mlp_ratio
self.cpe1 = ConvPosEnc(dim=dim, k=3, act=cpe_act)
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim,
self.window_size,
num_heads=num_heads,
qkv_bias=qkv_bias,
)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.cpe2 = ConvPosEnc(dim=dim, k=3, act=cpe_act)
if self.ffn:
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
else:
self.norm2 = None
self.mlp = None
            self.drop_path2 = None
def forward(self, x: Tensor):
B, C, H, W = x.shape
shortcut = self.cpe1(x).flatten(2).transpose(1, 2)
x = self.norm1(shortcut)
x = x.view(B, H, W, C)
pad_l = pad_t = 0
pad_r = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1]
pad_b = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0]
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, Hp, Wp, _ = x.shape
x_windows = window_partition(x, self.window_size)
x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C)
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows)
# merge windows
attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C)
x = window_reverse(attn_windows, self.window_size, Hp, Wp)
# if pad_r > 0 or pad_b > 0:
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
x = shortcut + self.drop_path1(x)
x = self.cpe2(x.transpose(1, 2).view(B, C, H, W))
if self.mlp is not None:
x = x.flatten(2).transpose(1, 2)
x = x + self.drop_path2(self.mlp(self.norm2(x)))
x = x.transpose(1, 2).view(B, C, H, W)
return x
class DaVitStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
depth=1,
downsample=True,
attn_types=('spatial', 'channel'),
num_heads=3,
window_size=7,
mlp_ratio=4.,
qkv_bias=True,
drop_path_rates=(0, 0),
norm_layer=LayerNorm2d,
norm_layer_cl=nn.LayerNorm,
ffn=True,
cpe_act=False,
down_kernel_size=2,
named_blocks=False,
channel_attn_v2=False,
):
super().__init__()
self.grad_checkpointing = False
# downsample embedding layer at the beginning of each stage
if downsample:
self.downsample = Downsample(in_chs, out_chs, kernel_size=down_kernel_size, norm_layer=norm_layer)
else:
self.downsample = nn.Identity()
'''
repeating alternating attention blocks in each stage
default: (spatial -> channel) x depth
potential opportunity to integrate with a more general version of ByobNet/ByoaNet
since the logic is similar
'''
stage_blocks = []
for block_idx in range(depth):
from collections import OrderedDict
dual_attention_block = []
for attn_idx, attn_type in enumerate(attn_types):
if attn_type == 'spatial':
dual_attention_block.append(('spatial_block', SpatialBlock(
dim=out_chs,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop_path=drop_path_rates[block_idx],
norm_layer=norm_layer_cl,
ffn=ffn,
cpe_act=cpe_act,
window_size=window_size,
)))
elif attn_type == 'channel':
dual_attention_block.append(('channel_block', ChannelBlock(
dim=out_chs,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop_path=drop_path_rates[block_idx],
norm_layer=norm_layer_cl,
ffn=ffn,
cpe_act=cpe_act,
v2=channel_attn_v2,
)))
if named_blocks:
stage_blocks.append(nn.Sequential(OrderedDict(dual_attention_block)))
else:
stage_blocks.append(nn.Sequential(*[b[1] for b in dual_attention_block]))
self.blocks = nn.Sequential(*stage_blocks)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
def forward(self, x: Tensor):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class DaVit(nn.Module):
r""" DaViT
A PyTorch implementation of `DaViT: Dual Attention Vision Transformers` - https://arxiv.org/abs/2204.03645
Supports arbitrary input sizes and pyramid feature extraction
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks in each stage. Default: (1, 1, 3, 1)
embed_dims (tuple(int)): Patch embedding dimension. Default: (96, 192, 384, 768)
num_heads (tuple(int)): Number of attention heads in different layers. Default: (3, 6, 12, 24)
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
"""
def __init__(
self,
in_chans=3,
depths=(1, 1, 3, 1),
embed_dims=(96, 192, 384, 768),
num_heads=(3, 6, 12, 24),
window_size=7,
mlp_ratio=4,
qkv_bias=True,
norm_layer='layernorm2d',
norm_layer_cl='layernorm',
norm_eps=1e-5,
attn_types=('spatial', 'channel'),
ffn=True,
cpe_act=False,
down_kernel_size=2,
channel_attn_v2=False,
named_blocks=False,
drop_rate=0.,
drop_path_rate=0.,
num_classes=1000,
global_pool='avg',
head_norm_first=False,
):
super().__init__()
num_stages = len(embed_dims)
assert num_stages == len(num_heads) == len(depths)
norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps)
norm_layer_cl = partial(get_norm_layer(norm_layer_cl), eps=norm_eps)
self.num_classes = num_classes
self.num_features = self.head_hidden_size = embed_dims[-1]
self.drop_rate = drop_rate
self.grad_checkpointing = False
self.feature_info = []
self.stem = Stem(in_chans, embed_dims[0], norm_layer=norm_layer)
in_chs = embed_dims[0]
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
stages = []
for i in range(num_stages):
out_chs = embed_dims[i]
stage = DaVitStage(
in_chs,
out_chs,
depth=depths[i],
downsample=i > 0,
attn_types=attn_types,
num_heads=num_heads[i],
window_size=window_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop_path_rates=dpr[i],
norm_layer=norm_layer,
norm_layer_cl=norm_layer_cl,
ffn=ffn,
cpe_act=cpe_act,
down_kernel_size=down_kernel_size,
channel_attn_v2=channel_attn_v2,
named_blocks=named_blocks,
)
in_chs = out_chs
stages.append(stage)
self.feature_info += [dict(num_chs=out_chs, reduction=2**(i+2), module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
# if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets
# otherwise pool -> norm -> fc, the default DaViT order, similar to ConvNeXt
# FIXME generalize this structure to ClassifierHead
if head_norm_first:
self.norm_pre = norm_layer(self.num_features)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
)
else:
self.norm_pre = nn.Identity()
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
norm_layer=norm_layer,
)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem', # stem and embed
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^norm_pre', (99999,)),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
for stage in self.stages:
stage.set_grad_checkpointing(enable=enable)
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
x = self.norm_pre(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _convert_florence2(state_dict, model, prefix='vision_tower.'):
import re
out_dict = {}
for k, v in state_dict.items():
if k.startswith(prefix):
k = k.replace(prefix, '')
else:
continue
k = re.sub(r'convs.([0-9]+)', r'stages.\1.downsample', k)
k = re.sub(r'blocks.([0-9]+)', r'stages.\1.blocks', k)
k = k.replace('downsample.proj', 'downsample.conv')
k = k.replace('stages.0.downsample', 'stem')
#k = k.replace('head.', 'head.fc.')
#k = k.replace('norms.', 'head.norm.')
k = k.replace('window_attn.norm.', 'norm1.')
k = k.replace('window_attn.fn.', 'attn.')
k = k.replace('channel_attn.norm.', 'norm1.')
k = k.replace('channel_attn.fn.', 'attn.')
k = k.replace('ffn.norm.', 'norm2.')
k = k.replace('ffn.fn.net.', 'mlp.')
k = k.replace('conv1.fn.dw', 'cpe1.proj')
k = k.replace('conv2.fn.dw', 'cpe2.proj')
out_dict[k] = v
return out_dict
def checkpoint_filter_fn(state_dict, model):
""" Remap MSFT checkpoints -> timm """
if 'head.fc.weight' in state_dict:
return state_dict # non-MSFT checkpoint
if 'state_dict' in state_dict:
state_dict = state_dict['state_dict']
if 'vision_tower.convs.0.proj.weight' in state_dict:
return _convert_florence2(state_dict, model)
import re
out_dict = {}
for k, v in state_dict.items():
k = re.sub(r'patch_embeds.([0-9]+)', r'stages.\1.downsample', k)
k = re.sub(r'main_blocks.([0-9]+)', r'stages.\1.blocks', k)
k = k.replace('downsample.proj', 'downsample.conv')
k = k.replace('stages.0.downsample', 'stem')
k = k.replace('head.', 'head.fc.')
k = k.replace('norms.', 'head.norm.')
k = k.replace('cpe.0', 'cpe1')
k = k.replace('cpe.1', 'cpe2')
out_dict[k] = v
return out_dict
def _create_davit(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1))))
out_indices = kwargs.pop('out_indices', default_out_indices)
strict = kwargs.pop('pretrained_strict', True)
if variant.endswith('_fl'):
# FIXME cleaner approach to missing head norm?
strict = False
model = build_model_with_cfg(
DaVit,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
pretrained_strict=strict,
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.95, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
**kwargs
}
# TODO contact authors to get larger pretrained models
default_cfgs = generate_default_cfgs({
# official microsoft weights from https://github.com/dingmyu/davit
'davit_tiny.msft_in1k': _cfg(
hf_hub_id='timm/'),
'davit_small.msft_in1k': _cfg(
hf_hub_id='timm/'),
'davit_base.msft_in1k': _cfg(
hf_hub_id='timm/'),
'davit_large': _cfg(),
'davit_huge': _cfg(),
'davit_giant': _cfg(),
'davit_base_fl.msft_florence2': _cfg(
hf_hub_id='microsoft/Florence-2-base',
num_classes=0, input_size=(3, 768, 768)),
'davit_huge_fl.msft_florence2': _cfg(
hf_hub_id='microsoft/Florence-2-large',
num_classes=0, input_size=(3, 768, 768)),
})
@register_model
def davit_tiny(pretrained=False, **kwargs) -> DaVit:
model_args = dict(depths=(1, 1, 3, 1), embed_dims=(96, 192, 384, 768), num_heads=(3, 6, 12, 24))
return _create_davit('davit_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def davit_small(pretrained=False, **kwargs) -> DaVit:
model_args = dict(depths=(1, 1, 9, 1), embed_dims=(96, 192, 384, 768), num_heads=(3, 6, 12, 24))
return _create_davit('davit_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def davit_base(pretrained=False, **kwargs) -> DaVit:
model_args = dict(depths=(1, 1, 9, 1), embed_dims=(128, 256, 512, 1024), num_heads=(4, 8, 16, 32))
return _create_davit('davit_base', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def davit_large(pretrained=False, **kwargs) -> DaVit:
model_args = dict(depths=(1, 1, 9, 1), embed_dims=(192, 384, 768, 1536), num_heads=(6, 12, 24, 48))
return _create_davit('davit_large', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def davit_huge(pretrained=False, **kwargs) -> DaVit:
model_args = dict(depths=(1, 1, 9, 1), embed_dims=(256, 512, 1024, 2048), num_heads=(8, 16, 32, 64))
return _create_davit('davit_huge', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def davit_giant(pretrained=False, **kwargs) -> DaVit:
model_args = dict(depths=(1, 1, 12, 3), embed_dims=(384, 768, 1536, 3072), num_heads=(12, 24, 48, 96))
return _create_davit('davit_giant', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def davit_base_fl(pretrained=False, **kwargs) -> DaVit:
model_args = dict(
depths=(1, 1, 9, 1), embed_dims=(128, 256, 512, 1024), num_heads=(4, 8, 16, 32),
window_size=12, down_kernel_size=3, channel_attn_v2=True, named_blocks=True,
)
return _create_davit('davit_base_fl', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def davit_huge_fl(pretrained=False, **kwargs) -> DaVit:
# NOTE: huge image tower used in 'large' Florence2 model
model_args = dict(
depths=(1, 1, 9, 1), embed_dims=(256, 512, 1024, 2048), num_heads=(8, 16, 32, 64),
window_size=12, down_kernel_size=3, channel_attn_v2=True, named_blocks=True,
)
return _create_davit('davit_huge_fl', pretrained=pretrained, **dict(model_args, **kwargs))
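def _demo_davit_tiny():
    # Illustrative sketch, not part of the upstream file: build the smallest registered variant
    # through its entrypoint and run a dummy forward pass (no pretrained weights downloaded).
    model = davit_tiny(pretrained=False)
    out = model(torch.randn(1, 3, 224, 224))
    assert out.shape == (1, 1000)
    return out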
| pytorch-image-models/timm/models/davit.py/0 | {
"file_path": "pytorch-image-models/timm/models/davit.py",
"repo_id": "pytorch-image-models",
"token_count": 14224
} |
"""
MambaOut models for image classification.
Some implementations are modified from:
timm (https://github.com/rwightman/pytorch-image-models),
MetaFormer (https://github.com/sail-sg/metaformer),
InceptionNeXt (https://github.com/sail-sg/inceptionnext)
"""
from collections import OrderedDict
from typing import Optional
import torch
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_, DropPath, LayerNorm, LayerScale, ClNormMlpClassifierHead, get_act_layer
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
class Stem(nn.Module):
r""" Code modified from InternImage:
https://github.com/OpenGVLab/InternImage
"""
def __init__(
self,
in_chs=3,
out_chs=96,
mid_norm: bool = True,
act_layer=nn.GELU,
norm_layer=LayerNorm,
):
super().__init__()
self.conv1 = nn.Conv2d(
in_chs,
out_chs // 2,
kernel_size=3,
stride=2,
padding=1
)
self.norm1 = norm_layer(out_chs // 2) if mid_norm else None
self.act = act_layer()
self.conv2 = nn.Conv2d(
out_chs // 2,
out_chs,
kernel_size=3,
stride=2,
padding=1
)
self.norm2 = norm_layer(out_chs)
def forward(self, x):
x = self.conv1(x)
if self.norm1 is not None:
x = x.permute(0, 2, 3, 1)
x = self.norm1(x)
x = x.permute(0, 3, 1, 2)
x = self.act(x)
x = self.conv2(x)
x = x.permute(0, 2, 3, 1)
x = self.norm2(x)
return x
class DownsampleNormFirst(nn.Module):
def __init__(
self,
in_chs=96,
out_chs=198,
norm_layer=LayerNorm,
):
super().__init__()
self.norm = norm_layer(in_chs)
self.conv = nn.Conv2d(
in_chs,
out_chs,
kernel_size=3,
stride=2,
padding=1
)
def forward(self, x):
x = self.norm(x)
x = x.permute(0, 3, 1, 2)
x = self.conv(x)
x = x.permute(0, 2, 3, 1)
return x
class Downsample(nn.Module):
def __init__(
self,
in_chs=96,
out_chs=198,
norm_layer=LayerNorm,
):
super().__init__()
self.conv = nn.Conv2d(
in_chs,
out_chs,
kernel_size=3,
stride=2,
padding=1
)
self.norm = norm_layer(out_chs)
def forward(self, x):
x = x.permute(0, 3, 1, 2)
x = self.conv(x)
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
return x
class MlpHead(nn.Module):
""" MLP classification head
"""
def __init__(
self,
in_features,
num_classes=1000,
pool_type='avg',
act_layer=nn.GELU,
mlp_ratio=4,
norm_layer=LayerNorm,
drop_rate=0.,
bias=True,
):
super().__init__()
if mlp_ratio is not None:
hidden_size = int(mlp_ratio * in_features)
else:
hidden_size = None
self.pool_type = pool_type
self.in_features = in_features
self.hidden_size = hidden_size or in_features
self.norm = norm_layer(in_features)
if hidden_size:
self.pre_logits = nn.Sequential(OrderedDict([
('fc', nn.Linear(in_features, hidden_size)),
('act', act_layer()),
('norm', norm_layer(hidden_size))
]))
self.num_features = hidden_size
else:
self.num_features = in_features
self.pre_logits = nn.Identity()
self.fc = nn.Linear(self.num_features, num_classes, bias=bias) if num_classes > 0 else nn.Identity()
self.head_dropout = nn.Dropout(drop_rate)
def reset(self, num_classes: int, pool_type: Optional[str] = None, reset_other: bool = False):
if pool_type is not None:
self.pool_type = pool_type
if reset_other:
self.norm = nn.Identity()
self.pre_logits = nn.Identity()
self.num_features = self.in_features
self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward(self, x, pre_logits: bool = False):
if self.pool_type == 'avg':
x = x.mean((1, 2))
x = self.norm(x)
x = self.pre_logits(x)
x = self.head_dropout(x)
if pre_logits:
return x
x = self.fc(x)
return x
class GatedConvBlock(nn.Module):
r""" Our implementation of Gated CNN Block: https://arxiv.org/pdf/1612.08083
Args:
        conv_ratio: controls the fraction of channels on which the depthwise convolution is applied.
            Convolving only part of the channels can improve practical efficiency.
The idea of partial channels is from ShuffleNet V2 (https://arxiv.org/abs/1807.11164) and
also used by InceptionNeXt (https://arxiv.org/abs/2303.16900) and FasterNet (https://arxiv.org/abs/2303.03667)
"""
def __init__(
self,
dim,
expansion_ratio=8 / 3,
kernel_size=7,
conv_ratio=1.0,
ls_init_value=None,
norm_layer=LayerNorm,
act_layer=nn.GELU,
drop_path=0.,
**kwargs
):
super().__init__()
self.norm = norm_layer(dim)
hidden = int(expansion_ratio * dim)
self.fc1 = nn.Linear(dim, hidden * 2)
self.act = act_layer()
conv_channels = int(conv_ratio * dim)
self.split_indices = (hidden, hidden - conv_channels, conv_channels)
self.conv = nn.Conv2d(
conv_channels,
conv_channels,
kernel_size=kernel_size,
padding=kernel_size // 2,
groups=conv_channels
)
self.fc2 = nn.Linear(hidden, dim)
        self.ls = LayerScale(dim, ls_init_value) if ls_init_value is not None else nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x # [B, H, W, C]
x = self.norm(x)
x = self.fc1(x)
g, i, c = torch.split(x, self.split_indices, dim=-1)
c = c.permute(0, 3, 1, 2) # [B, H, W, C] -> [B, C, H, W]
c = self.conv(c)
c = c.permute(0, 2, 3, 1) # [B, C, H, W] -> [B, H, W, C]
x = self.fc2(self.act(g) * torch.cat((i, c), dim=-1))
x = self.ls(x)
x = self.drop_path(x)
return x + shortcut
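def _demo_gated_conv_block():
    # Illustrative sketch, not part of the upstream file. GatedConvBlock operates on NHWC tensors
    # and preserves the input shape; the sizes below are arbitrary assumptions.
    block = GatedConvBlock(dim=64)
    x = torch.randn(2, 14, 14, 64)  # (B, H, W, C)
    y = block(x)
    assert y.shape == x.shape
    return y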
class MambaOutStage(nn.Module):
def __init__(
self,
dim,
dim_out: Optional[int] = None,
depth: int = 4,
expansion_ratio=8 / 3,
kernel_size=7,
conv_ratio=1.0,
downsample: str = '',
ls_init_value: Optional[float] = None,
norm_layer=LayerNorm,
act_layer=nn.GELU,
drop_path=0.,
):
super().__init__()
dim_out = dim_out or dim
self.grad_checkpointing = False
if downsample == 'conv':
self.downsample = Downsample(dim, dim_out, norm_layer=norm_layer)
elif downsample == 'conv_nf':
self.downsample = DownsampleNormFirst(dim, dim_out, norm_layer=norm_layer)
else:
assert dim == dim_out
self.downsample = nn.Identity()
self.blocks = nn.Sequential(*[
GatedConvBlock(
dim=dim_out,
expansion_ratio=expansion_ratio,
kernel_size=kernel_size,
conv_ratio=conv_ratio,
ls_init_value=ls_init_value,
norm_layer=norm_layer,
act_layer=act_layer,
drop_path=drop_path[j] if isinstance(drop_path, (list, tuple)) else drop_path,
)
for j in range(depth)
])
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class MambaOut(nn.Module):
r""" MetaFormer
A PyTorch impl of : `MetaFormer Baselines for Vision` -
https://arxiv.org/abs/2210.13452
Args:
in_chans (int): Number of input image channels. Default: 3.
num_classes (int): Number of classes for classification head. Default: 1000.
depths (list or tuple): Number of blocks at each stage. Default: [3, 3, 9, 3].
dims (int): Feature dimension at each stage. Default: [96, 192, 384, 576].
downsample_layers: (list or tuple): Downsampling layers before each stage.
drop_path_rate (float): Stochastic depth rate. Default: 0.
output_norm: norm before classifier head. Default: partial(nn.LayerNorm, eps=1e-6).
head_fn: classification head. Default: nn.Linear.
head_dropout (float): dropout for MLP classifier. Default: 0.
"""
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
depths=(3, 3, 9, 3),
dims=(96, 192, 384, 576),
norm_layer=LayerNorm,
act_layer=nn.GELU,
conv_ratio=1.0,
expansion_ratio=8/3,
kernel_size=7,
stem_mid_norm=True,
ls_init_value=None,
downsample='conv',
drop_path_rate=0.,
drop_rate=0.,
head_fn='default',
):
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
self.output_fmt = 'NHWC'
if not isinstance(depths, (list, tuple)):
depths = [depths] # it means the model has only one stage
if not isinstance(dims, (list, tuple)):
dims = [dims]
act_layer = get_act_layer(act_layer)
num_stage = len(depths)
self.num_stage = num_stage
self.feature_info = []
self.stem = Stem(
in_chans,
dims[0],
mid_norm=stem_mid_norm,
act_layer=act_layer,
norm_layer=norm_layer,
)
prev_dim = dims[0]
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
cur = 0
curr_stride = 4
self.stages = nn.Sequential()
for i in range(num_stage):
dim = dims[i]
stride = 2 if curr_stride == 2 or i > 0 else 1
curr_stride *= stride
stage = MambaOutStage(
dim=prev_dim,
dim_out=dim,
depth=depths[i],
kernel_size=kernel_size,
conv_ratio=conv_ratio,
expansion_ratio=expansion_ratio,
downsample=downsample if i > 0 else '',
ls_init_value=ls_init_value,
norm_layer=norm_layer,
act_layer=act_layer,
drop_path=dp_rates[i],
)
self.stages.append(stage)
prev_dim = dim
# NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2
self.feature_info += [dict(num_chs=prev_dim, reduction=curr_stride, module=f'stages.{i}')]
cur += depths[i]
if head_fn == 'default':
# specific to this model, unusual norm -> pool -> fc -> act -> norm -> fc combo
self.head = MlpHead(
prev_dim,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
norm_layer=norm_layer,
)
else:
# more typical norm -> pool -> fc -> act -> fc
self.head = ClNormMlpClassifierHead(
prev_dim,
num_classes,
hidden_size=int(prev_dim * 4),
pool_type=global_pool,
norm_layer=norm_layer,
drop_rate=drop_rate,
)
self.num_features = prev_dim
self.head_hidden_size = self.head.num_features
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.downsample', (0,)), # blocks
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
if 'model' in state_dict:
state_dict = state_dict['model']
if 'stem.conv1.weight' in state_dict:
return state_dict
import re
out_dict = {}
for k, v in state_dict.items():
k = k.replace('downsample_layers.0.', 'stem.')
k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
k = re.sub(r'downsample_layers.([0-9]+)', r'stages.\1.downsample', k)
# remap head names
if k.startswith('norm.'):
# this is moving to head since it's after the pooling
k = k.replace('norm.', 'head.norm.')
elif k.startswith('head.'):
k = k.replace('head.fc1.', 'head.pre_logits.fc.')
k = k.replace('head.norm.', 'head.pre_logits.norm.')
k = k.replace('head.fc2.', 'head.fc.')
out_dict[k] = v
return out_dict
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'test_input_size': (3, 288, 288),
'pool_size': (7, 7), 'crop_pct': 1.0, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
# original weights
'mambaout_femto.in1k': _cfg(
hf_hub_id='timm/'),
'mambaout_kobe.in1k': _cfg(
hf_hub_id='timm/'),
'mambaout_tiny.in1k': _cfg(
hf_hub_id='timm/'),
'mambaout_small.in1k': _cfg(
hf_hub_id='timm/'),
'mambaout_base.in1k': _cfg(
hf_hub_id='timm/'),
# timm experiments below
'mambaout_small_rw.sw_e450_in1k': _cfg(
hf_hub_id='timm/',
),
'mambaout_base_short_rw.sw_e500_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0,
),
'mambaout_base_tall_rw.sw_e500_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0,
),
'mambaout_base_wide_rw.sw_e500_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0,
),
'mambaout_base_plus_rw.sw_e150_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
),
'mambaout_base_plus_rw.sw_e150_r384_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), test_input_size=(3, 384, 384), crop_mode='squash', pool_size=(12, 12),
),
'mambaout_base_plus_rw.sw_e150_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
),
'test_mambaout': _cfg(input_size=(3, 160, 160), test_input_size=(3, 192, 192), pool_size=(5, 5)),
})
def _create_mambaout(variant, pretrained=False, **kwargs):
model = build_model_with_cfg(
MambaOut, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs,
)
return model
# a series of MambaOut models
@register_model
def mambaout_femto(pretrained=False, **kwargs):
model_args = dict(depths=(3, 3, 9, 3), dims=(48, 96, 192, 288))
return _create_mambaout('mambaout_femto', pretrained=pretrained, **dict(model_args, **kwargs))
# Kobe Memorial Version with 24 Gated CNN blocks
@register_model
def mambaout_kobe(pretrained=False, **kwargs):
model_args = dict(depths=[3, 3, 15, 3], dims=[48, 96, 192, 288])
return _create_mambaout('mambaout_kobe', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def mambaout_tiny(pretrained=False, **kwargs):
model_args = dict(depths=[3, 3, 9, 3], dims=[96, 192, 384, 576])
return _create_mambaout('mambaout_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def mambaout_small(pretrained=False, **kwargs):
model_args = dict(depths=[3, 4, 27, 3], dims=[96, 192, 384, 576])
return _create_mambaout('mambaout_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def mambaout_base(pretrained=False, **kwargs):
model_args = dict(depths=[3, 4, 27, 3], dims=[128, 256, 512, 768])
return _create_mambaout('mambaout_base', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def mambaout_small_rw(pretrained=False, **kwargs):
model_args = dict(
depths=[3, 4, 27, 3],
dims=[96, 192, 384, 576],
stem_mid_norm=False,
downsample='conv_nf',
ls_init_value=1e-6,
head_fn='norm_mlp',
)
return _create_mambaout('mambaout_small_rw', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def mambaout_base_short_rw(pretrained=False, **kwargs):
model_args = dict(
depths=(3, 3, 25, 3),
dims=(128, 256, 512, 768),
expansion_ratio=3.0,
conv_ratio=1.25,
stem_mid_norm=False,
downsample='conv_nf',
ls_init_value=1e-6,
head_fn='norm_mlp',
)
return _create_mambaout('mambaout_base_short_rw', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def mambaout_base_tall_rw(pretrained=False, **kwargs):
model_args = dict(
depths=(3, 4, 30, 3),
dims=(128, 256, 512, 768),
expansion_ratio=2.5,
conv_ratio=1.25,
stem_mid_norm=False,
downsample='conv_nf',
ls_init_value=1e-6,
head_fn='norm_mlp',
)
return _create_mambaout('mambaout_base_tall_rw', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def mambaout_base_wide_rw(pretrained=False, **kwargs):
model_args = dict(
depths=(3, 4, 27, 3),
dims=(128, 256, 512, 768),
expansion_ratio=3.0,
conv_ratio=1.5,
stem_mid_norm=False,
downsample='conv_nf',
ls_init_value=1e-6,
act_layer='silu',
head_fn='norm_mlp',
)
return _create_mambaout('mambaout_base_wide_rw', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def mambaout_base_plus_rw(pretrained=False, **kwargs):
model_args = dict(
depths=(3, 4, 30, 3),
dims=(128, 256, 512, 768),
expansion_ratio=3.0,
conv_ratio=1.5,
stem_mid_norm=False,
downsample='conv_nf',
ls_init_value=1e-6,
act_layer='silu',
head_fn='norm_mlp',
)
return _create_mambaout('mambaout_base_plus_rw', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def test_mambaout(pretrained=False, **kwargs):
model_args = dict(
depths=(1, 1, 3, 1),
dims=(16, 32, 48, 64),
expansion_ratio=3,
stem_mid_norm=False,
downsample='conv_nf',
ls_init_value=1e-4,
act_layer='silu',
head_fn='norm_mlp',
)
return _create_mambaout('test_mambaout', pretrained=pretrained, **dict(model_args, **kwargs))
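def _demo_test_mambaout():
    # Illustrative sketch, not part of the upstream file: the tiny test variant is built through
    # its entrypoint and run at the 160x160 resolution listed in its default config above.
    model = test_mambaout(pretrained=False)
    out = model(torch.randn(1, 3, 160, 160))
    assert out.shape == (1, 1000)
    return out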
| pytorch-image-models/timm/models/mambaout.py/0 | {
"file_path": "pytorch-image-models/timm/models/mambaout.py",
"repo_id": "pytorch-image-models",
"token_count": 10626
} |
"""RegNet X, Y, Z, and more
Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678
Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py
Paper: `Fast and Accurate Model Scaling` - https://arxiv.org/abs/2103.06877
Original Impl: None
Based on original PyTorch impl linked above, but re-wrote to use my own blocks (adapted from ResNet here)
and cleaned up with more descriptive variable names.
Weights from original pycls impl have been modified:
* first layer from BGR -> RGB as most PyTorch models are
* removed training specific dict entries from checkpoints and keep model state_dict only
* remap names to match the ones here
Supports weight loading from torchvision and classy-vision (incl VISSL SEER)
A number of custom timm model definitions additions including:
* stochastic depth, gradient checkpointing, layer-decay, configurable dilation
* a pre-activation 'V' variant
* only known RegNet-Z model definitions with pretrained weights
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
from dataclasses import dataclass, replace
from functools import partial
from typing import Callable, List, Optional, Union, Tuple
import numpy as np
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ClassifierHead, AvgPool2dSame, ConvNormAct, SEModule, DropPath, GroupNormAct
from timm.layers import get_act_layer, get_norm_act_layer, create_conv2d, make_divisible
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import checkpoint_seq, named_apply
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['RegNet', 'RegNetCfg'] # model_registry will add each entrypoint fn to this
@dataclass
class RegNetCfg:
depth: int = 21
w0: int = 80
wa: float = 42.63
wm: float = 2.66
group_size: int = 24
bottle_ratio: float = 1.
se_ratio: float = 0.
group_min_ratio: float = 0.
stem_width: int = 32
downsample: Optional[str] = 'conv1x1'
linear_out: bool = False
preact: bool = False
num_features: int = 0
act_layer: Union[str, Callable] = 'relu'
norm_layer: Union[str, Callable] = 'batchnorm'
def quantize_float(f, q):
"""Converts a float to the closest non-zero int divisible by q."""
return int(round(f / q) * q)
def adjust_widths_groups_comp(widths, bottle_ratios, groups, min_ratio=0.):
"""Adjusts the compatibility of widths and groups."""
bottleneck_widths = [int(w * b) for w, b in zip(widths, bottle_ratios)]
groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_widths)]
if min_ratio:
# torchvision uses a different rounding scheme for ensuring bottleneck widths divisible by group widths
bottleneck_widths = [make_divisible(w_bot, g, min_ratio) for w_bot, g in zip(bottleneck_widths, groups)]
else:
bottleneck_widths = [quantize_float(w_bot, g) for w_bot, g in zip(bottleneck_widths, groups)]
widths = [int(w_bot / b) for w_bot, b in zip(bottleneck_widths, bottle_ratios)]
return widths, groups
def generate_regnet(width_slope, width_initial, width_mult, depth, group_size, quant=8):
"""Generates per block widths from RegNet parameters."""
assert width_slope >= 0 and width_initial > 0 and width_mult > 1 and width_initial % quant == 0
# TODO dWr scaling?
# depth = int(depth * (scale ** 0.1))
# width_scale = scale ** 0.4 # dWr scale, exp 0.8 / 2, applied to both group and layer widths
widths_cont = np.arange(depth) * width_slope + width_initial
width_exps = np.round(np.log(widths_cont / width_initial) / np.log(width_mult))
widths = np.round(np.divide(width_initial * np.power(width_mult, width_exps), quant)) * quant
num_stages, max_stage = len(np.unique(widths)), width_exps.max() + 1
groups = np.array([group_size for _ in range(num_stages)])
return widths.astype(int).tolist(), num_stages, groups.astype(int).tolist()
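def _demo_generate_regnet():
    # Illustrative sketch, not part of the upstream file, using the RegNetCfg defaults above
    # (w0=80, wa=42.63, wm=2.66, depth=21, group_size=24) to generate per-block widths.
    widths, num_stages, groups = generate_regnet(42.63, 80, 2.66, 21, 24)
    assert len(widths) == 21               # one width per block
    assert num_stages == len(set(widths))  # blocks with equal width form a stage
    assert len(groups) == num_stages       # one group size per stage
    return widths, num_stages, groups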
def downsample_conv(
in_chs,
out_chs,
kernel_size=1,
stride=1,
dilation=1,
norm_layer=None,
preact=False,
):
norm_layer = norm_layer or nn.BatchNorm2d
kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size
dilation = dilation if kernel_size > 1 else 1
if preact:
return create_conv2d(
in_chs,
out_chs,
kernel_size,
stride=stride,
dilation=dilation,
)
else:
return ConvNormAct(
in_chs,
out_chs,
kernel_size,
stride=stride,
dilation=dilation,
norm_layer=norm_layer,
apply_act=False,
)
def downsample_avg(
in_chs,
out_chs,
kernel_size=1,
stride=1,
dilation=1,
norm_layer=None,
preact=False,
):
""" AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment."""
norm_layer = norm_layer or nn.BatchNorm2d
avg_stride = stride if dilation == 1 else 1
pool = nn.Identity()
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
if preact:
conv = create_conv2d(in_chs, out_chs, 1, stride=1)
else:
conv = ConvNormAct(in_chs, out_chs, 1, stride=1, norm_layer=norm_layer, apply_act=False)
return nn.Sequential(*[pool, conv])
def create_shortcut(
downsample_type,
in_chs,
out_chs,
kernel_size,
stride,
dilation=(1, 1),
norm_layer=None,
preact=False,
):
assert downsample_type in ('avg', 'conv1x1', '', None)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
dargs = dict(stride=stride, dilation=dilation[0], norm_layer=norm_layer, preact=preact)
if not downsample_type:
return None # no shortcut, no downsample
elif downsample_type == 'avg':
return downsample_avg(in_chs, out_chs, **dargs)
else:
return downsample_conv(in_chs, out_chs, kernel_size=kernel_size, **dargs)
else:
return nn.Identity() # identity shortcut (no downsample)
class Bottleneck(nn.Module):
""" RegNet Bottleneck
This is almost exactly the same as a ResNet Bottleneck. The main difference is the SE block is moved from
after conv3 to after conv2. Otherwise, it's just redefining the arguments for groups/bottleneck channels.
"""
def __init__(
self,
in_chs,
out_chs,
stride=1,
dilation=(1, 1),
bottle_ratio=1,
group_size=1,
se_ratio=0.25,
downsample='conv1x1',
linear_out=False,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
drop_block=None,
drop_path_rate=0.,
):
super(Bottleneck, self).__init__()
act_layer = get_act_layer(act_layer)
bottleneck_chs = int(round(out_chs * bottle_ratio))
groups = bottleneck_chs // group_size
cargs = dict(act_layer=act_layer, norm_layer=norm_layer)
self.conv1 = ConvNormAct(in_chs, bottleneck_chs, kernel_size=1, **cargs)
self.conv2 = ConvNormAct(
bottleneck_chs,
bottleneck_chs,
kernel_size=3,
stride=stride,
dilation=dilation[0],
groups=groups,
drop_layer=drop_block,
**cargs,
)
if se_ratio:
se_channels = int(round(in_chs * se_ratio))
self.se = SEModule(bottleneck_chs, rd_channels=se_channels, act_layer=act_layer)
else:
self.se = nn.Identity()
self.conv3 = ConvNormAct(bottleneck_chs, out_chs, kernel_size=1, apply_act=False, **cargs)
self.act3 = nn.Identity() if linear_out else act_layer()
self.downsample = create_shortcut(
downsample,
in_chs,
out_chs,
kernel_size=1,
stride=stride,
dilation=dilation,
norm_layer=norm_layer,
)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
def zero_init_last(self):
nn.init.zeros_(self.conv3.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.conv2(x)
x = self.se(x)
x = self.conv3(x)
if self.downsample is not None:
# NOTE stuck with downsample as the attr name due to weight compatibility
# now represents the shortcut, no shortcut if None, and non-downsample shortcut == nn.Identity()
x = self.drop_path(x) + self.downsample(shortcut)
x = self.act3(x)
return x
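def _demo_regnet_bottleneck():
    # Illustrative sketch, not part of the upstream file. A stride-2 bottleneck with an SE block
    # and the default conv1x1 shortcut halves the spatial resolution; the channel sizes below are
    # arbitrary assumptions.
    block = Bottleneck(64, 128, stride=2, group_size=16, se_ratio=0.25)
    y = block(torch.randn(1, 64, 56, 56))
    assert y.shape == (1, 128, 28, 28)
    return y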
class PreBottleneck(nn.Module):
""" RegNet Bottleneck
This is almost exactly the same as a ResNet Bottleneck. The main difference is the SE block is moved from
after conv3 to after conv2. Otherwise, it's just redefining the arguments for groups/bottleneck channels.
"""
def __init__(
self,
in_chs,
out_chs,
stride=1,
dilation=(1, 1),
bottle_ratio=1,
group_size=1,
se_ratio=0.25,
downsample='conv1x1',
linear_out=False,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
drop_block=None,
drop_path_rate=0.,
):
super(PreBottleneck, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
bottleneck_chs = int(round(out_chs * bottle_ratio))
groups = bottleneck_chs // group_size
self.norm1 = norm_act_layer(in_chs)
self.conv1 = create_conv2d(in_chs, bottleneck_chs, kernel_size=1)
self.norm2 = norm_act_layer(bottleneck_chs)
self.conv2 = create_conv2d(
bottleneck_chs,
bottleneck_chs,
kernel_size=3,
stride=stride,
dilation=dilation[0],
groups=groups,
)
if se_ratio:
se_channels = int(round(in_chs * se_ratio))
self.se = SEModule(bottleneck_chs, rd_channels=se_channels, act_layer=act_layer)
else:
self.se = nn.Identity()
self.norm3 = norm_act_layer(bottleneck_chs)
self.conv3 = create_conv2d(bottleneck_chs, out_chs, kernel_size=1)
self.downsample = create_shortcut(
downsample,
in_chs,
out_chs,
kernel_size=1,
stride=stride,
dilation=dilation,
preact=True,
)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
def zero_init_last(self):
pass
def forward(self, x):
x = self.norm1(x)
shortcut = x
x = self.conv1(x)
x = self.norm2(x)
x = self.conv2(x)
x = self.se(x)
x = self.norm3(x)
x = self.conv3(x)
if self.downsample is not None:
# NOTE stuck with downsample as the attr name due to weight compatibility
# now represents the shortcut, no shortcut if None, and non-downsample shortcut == nn.Identity()
x = self.drop_path(x) + self.downsample(shortcut)
return x
class RegStage(nn.Module):
"""Stage (sequence of blocks w/ the same output shape)."""
def __init__(
self,
depth,
in_chs,
out_chs,
stride,
dilation,
drop_path_rates=None,
block_fn=Bottleneck,
**block_kwargs,
):
super(RegStage, self).__init__()
self.grad_checkpointing = False
first_dilation = 1 if dilation in (1, 2) else 2
for i in range(depth):
block_stride = stride if i == 0 else 1
block_in_chs = in_chs if i == 0 else out_chs
block_dilation = (first_dilation, dilation)
dpr = drop_path_rates[i] if drop_path_rates is not None else 0.
name = "b{}".format(i + 1)
self.add_module(
name,
block_fn(
block_in_chs,
out_chs,
stride=block_stride,
dilation=block_dilation,
drop_path_rate=dpr,
**block_kwargs,
)
)
first_dilation = dilation
def forward(self, x):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.children(), x)
else:
for block in self.children():
x = block(x)
return x
class RegNet(nn.Module):
"""RegNet-X, Y, and Z Models
Paper: https://arxiv.org/abs/2003.13678
Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py
"""
def __init__(
self,
cfg: RegNetCfg,
in_chans=3,
num_classes=1000,
output_stride=32,
global_pool='avg',
drop_rate=0.,
drop_path_rate=0.,
zero_init_last=True,
**kwargs,
):
"""
Args:
cfg (RegNetCfg): Model architecture configuration
in_chans (int): Number of input channels (default: 3)
num_classes (int): Number of classifier classes (default: 1000)
output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32)
global_pool (str): Global pooling type (default: 'avg')
drop_rate (float): Dropout rate (default: 0.)
drop_path_rate (float): Stochastic depth drop-path rate (default: 0.)
zero_init_last (bool): Zero-init last weight of residual path
kwargs (dict): Extra kwargs overlayed onto cfg
"""
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
assert output_stride in (8, 16, 32)
cfg = replace(cfg, **kwargs) # update cfg with extra passed kwargs
# Construct the stem
stem_width = cfg.stem_width
na_args = dict(act_layer=cfg.act_layer, norm_layer=cfg.norm_layer)
if cfg.preact:
self.stem = create_conv2d(in_chans, stem_width, 3, stride=2)
else:
self.stem = ConvNormAct(in_chans, stem_width, 3, stride=2, **na_args)
self.feature_info = [dict(num_chs=stem_width, reduction=2, module='stem')]
# Construct the stages
prev_width = stem_width
curr_stride = 2
per_stage_args, common_args = self._get_stage_args(
cfg,
output_stride=output_stride,
drop_path_rate=drop_path_rate,
)
assert len(per_stage_args) == 4
block_fn = PreBottleneck if cfg.preact else Bottleneck
for i, stage_args in enumerate(per_stage_args):
stage_name = "s{}".format(i + 1)
self.add_module(
stage_name,
RegStage(
in_chs=prev_width,
block_fn=block_fn,
**stage_args,
**common_args,
)
)
prev_width = stage_args['out_chs']
curr_stride *= stage_args['stride']
self.feature_info += [dict(num_chs=prev_width, reduction=curr_stride, module=stage_name)]
# Construct the head
if cfg.num_features:
self.final_conv = ConvNormAct(prev_width, cfg.num_features, kernel_size=1, **na_args)
self.num_features = cfg.num_features
else:
final_act = cfg.linear_out or cfg.preact
self.final_conv = get_act_layer(cfg.act_layer)() if final_act else nn.Identity()
self.num_features = prev_width
self.head_hidden_size = self.num_features
self.head = ClassifierHead(
in_features=self.num_features,
num_classes=num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
def _get_stage_args(self, cfg: RegNetCfg, default_stride=2, output_stride=32, drop_path_rate=0.):
# Generate RegNet ws per block
widths, num_stages, stage_gs = generate_regnet(cfg.wa, cfg.w0, cfg.wm, cfg.depth, cfg.group_size)
# Convert to per stage format
stage_widths, stage_depths = np.unique(widths, return_counts=True)
stage_br = [cfg.bottle_ratio for _ in range(num_stages)]
stage_strides = []
stage_dilations = []
net_stride = 2
dilation = 1
for _ in range(num_stages):
if net_stride >= output_stride:
dilation *= default_stride
stride = 1
else:
stride = default_stride
net_stride *= stride
stage_strides.append(stride)
stage_dilations.append(dilation)
stage_dpr = np.split(np.linspace(0, drop_path_rate, sum(stage_depths)), np.cumsum(stage_depths[:-1]))
# Adjust the compatibility of ws and gws
stage_widths, stage_gs = adjust_widths_groups_comp(
stage_widths, stage_br, stage_gs, min_ratio=cfg.group_min_ratio)
arg_names = ['out_chs', 'stride', 'dilation', 'depth', 'bottle_ratio', 'group_size', 'drop_path_rates']
per_stage_args = [
dict(zip(arg_names, params)) for params in
zip(stage_widths, stage_strides, stage_dilations, stage_depths, stage_br, stage_gs, stage_dpr)
]
common_args = dict(
downsample=cfg.downsample,
se_ratio=cfg.se_ratio,
linear_out=cfg.linear_out,
act_layer=cfg.act_layer,
norm_layer=cfg.norm_layer,
)
return per_stage_args, common_args
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^s(\d+)' if coarse else r'^s(\d+)\.b(\d+)',
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in list(self.children())[1:-1]:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, pool_type=global_pool)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
Returns:
List of intermediate features if `intermediates_only`, else tuple of (final features, list of intermediates)
"""
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(5, indices)
# forward pass
feat_idx = 0
x = self.stem(x)
if feat_idx in take_indices:
intermediates.append(x)
layer_names = ('s1', 's2', 's3', 's4')
if stop_early:
layer_names = layer_names[:max_index]
for n in layer_names:
feat_idx += 1
x = getattr(self, n)(x)  # getattr lookup won't work with torchscript, but keeps the code reasonable
if feat_idx in take_indices:
intermediates.append(x)
if intermediates_only:
return intermediates
if feat_idx == 4:
x = self.final_conv(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(5, indices)
layer_names = ('s1', 's2', 's3', 's4')
layer_names = layer_names[max_index:]
for n in layer_names:
setattr(self, n, nn.Identity())
if max_index < 4:
self.final_conv = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.stem(x)
x = self.s1(x)
x = self.s2(x)
x = self.s3(x)
x = self.s4(x)
x = self.final_conv(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
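# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of RegNet.forward_intermediates(); it calls the 'regnety_032'
# entrypoint registered further below (resolved at call time) and only inspects output shapes.
def _regnet_intermediates_demo():
    model = regnety_032(pretrained=False)  # entrypoint defined later in this module
    x = torch.randn(1, 3, 224, 224)
    final, feats = model.forward_intermediates(x, indices=[2, 4])  # s2 and s4 outputs
    # feats holds the selected NCHW stage outputs; final is the feature map after final_conv
    return final.shape, [f.shape for f in feats]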
def _init_weights(module, name='', zero_init_last=False):
if isinstance(module, nn.Conv2d):
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, mean=0.0, std=0.01)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif zero_init_last and hasattr(module, 'zero_init_last'):
module.zero_init_last()
def _filter_fn(state_dict):
state_dict = state_dict.get('model', state_dict)
replaces = [
('f.a.0', 'conv1.conv'),
('f.a.1', 'conv1.bn'),
('f.b.0', 'conv2.conv'),
('f.b.1', 'conv2.bn'),
('f.final_bn', 'conv3.bn'),
('f.se.excitation.0', 'se.fc1'),
('f.se.excitation.2', 'se.fc2'),
('f.se', 'se'),
('f.c.0', 'conv3.conv'),
('f.c.1', 'conv3.bn'),
('f.c', 'conv3.conv'),
('proj.0', 'downsample.conv'),
('proj.1', 'downsample.bn'),
('proj', 'downsample.conv'),
]
if 'classy_state_dict' in state_dict:
# classy-vision & vissl (SEER) weights
import re
state_dict = state_dict['classy_state_dict']['base_model']['model']
out = {}
for k, v in state_dict['trunk'].items():
k = k.replace('_feature_blocks.conv1.stem.0', 'stem.conv')
k = k.replace('_feature_blocks.conv1.stem.1', 'stem.bn')
k = re.sub(
r'^_feature_blocks.res\d.block(\d)-(\d+)',
lambda x: f's{int(x.group(1))}.b{int(x.group(2)) + 1}', k)
k = re.sub(r's(\d)\.b(\d+)\.bn', r's\1.b\2.downsample.bn', k)
for s, r in replaces:
k = k.replace(s, r)
out[k] = v
for k, v in state_dict['heads'].items():
if 'projection_head' in k or 'prototypes' in k:
continue
k = k.replace('0.clf.0', 'head.fc')
out[k] = v
return out
if 'stem.0.weight' in state_dict:
# torchvision weights
import re
out = {}
for k, v in state_dict.items():
k = k.replace('stem.0', 'stem.conv')
k = k.replace('stem.1', 'stem.bn')
k = re.sub(
r'trunk_output.block(\d)\.block(\d+)\-(\d+)',
lambda x: f's{int(x.group(1))}.b{int(x.group(3)) + 1}', k)
for s, r in replaces:
k = k.replace(s, r)
k = k.replace('fc.', 'head.fc.')
out[k] = v
return out
return state_dict
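# --- Illustrative remap check (not part of the original file) ---
# A small sketch of the torchvision-style key translation performed by _filter_fn above;
# the tensors are dummies, only the key renaming is being demonstrated.
def _filter_fn_demo():
    sd = {
        'stem.0.weight': torch.zeros(1),
        'trunk_output.block1.block1-0.f.a.0.weight': torch.zeros(1),
        'fc.weight': torch.zeros(1),
    }
    out = _filter_fn(sd)
    # 'stem.0' -> 'stem.conv', 'trunk_output.block1.block1-0.f.a.0' -> 's1.b1.conv1.conv', 'fc.' -> 'head.fc.'
    assert {'stem.conv.weight', 's1.b1.conv1.conv.weight', 'head.fc.weight'} <= set(out)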
# Model FLOPS = three trailing digits * 10^8
model_cfgs = dict(
# RegNet-X
regnetx_002=RegNetCfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13),
regnetx_004=RegNetCfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22),
regnetx_004_tv=RegNetCfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22, group_min_ratio=0.9),
regnetx_006=RegNetCfg(w0=48, wa=36.97, wm=2.24, group_size=24, depth=16),
regnetx_008=RegNetCfg(w0=56, wa=35.73, wm=2.28, group_size=16, depth=16),
regnetx_016=RegNetCfg(w0=80, wa=34.01, wm=2.25, group_size=24, depth=18),
regnetx_032=RegNetCfg(w0=88, wa=26.31, wm=2.25, group_size=48, depth=25),
regnetx_040=RegNetCfg(w0=96, wa=38.65, wm=2.43, group_size=40, depth=23),
regnetx_064=RegNetCfg(w0=184, wa=60.83, wm=2.07, group_size=56, depth=17),
regnetx_080=RegNetCfg(w0=80, wa=49.56, wm=2.88, group_size=120, depth=23),
regnetx_120=RegNetCfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19),
regnetx_160=RegNetCfg(w0=216, wa=55.59, wm=2.1, group_size=128, depth=22),
regnetx_320=RegNetCfg(w0=320, wa=69.86, wm=2.0, group_size=168, depth=23),
# RegNet-Y
regnety_002=RegNetCfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13, se_ratio=0.25),
regnety_004=RegNetCfg(w0=48, wa=27.89, wm=2.09, group_size=8, depth=16, se_ratio=0.25),
regnety_006=RegNetCfg(w0=48, wa=32.54, wm=2.32, group_size=16, depth=15, se_ratio=0.25),
regnety_008=RegNetCfg(w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25),
regnety_008_tv=RegNetCfg(w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25, group_min_ratio=0.9),
regnety_016=RegNetCfg(w0=48, wa=20.71, wm=2.65, group_size=24, depth=27, se_ratio=0.25),
regnety_032=RegNetCfg(w0=80, wa=42.63, wm=2.66, group_size=24, depth=21, se_ratio=0.25),
regnety_040=RegNetCfg(w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25),
regnety_064=RegNetCfg(w0=112, wa=33.22, wm=2.27, group_size=72, depth=25, se_ratio=0.25),
regnety_080=RegNetCfg(w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25),
regnety_080_tv=RegNetCfg(w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25, group_min_ratio=0.9),
regnety_120=RegNetCfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19, se_ratio=0.25),
regnety_160=RegNetCfg(w0=200, wa=106.23, wm=2.48, group_size=112, depth=18, se_ratio=0.25),
regnety_320=RegNetCfg(w0=232, wa=115.89, wm=2.53, group_size=232, depth=20, se_ratio=0.25),
regnety_640=RegNetCfg(w0=352, wa=147.48, wm=2.4, group_size=328, depth=20, se_ratio=0.25),
regnety_1280=RegNetCfg(w0=456, wa=160.83, wm=2.52, group_size=264, depth=27, se_ratio=0.25),
regnety_2560=RegNetCfg(w0=640, wa=230.83, wm=2.53, group_size=373, depth=27, se_ratio=0.25),
#regnety_2560=RegNetCfg(w0=640, wa=124.47, wm=2.04, group_size=848, depth=27, se_ratio=0.25),
# Experimental
regnety_040_sgn=RegNetCfg(
w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25,
act_layer='silu', norm_layer=partial(GroupNormAct, group_size=16)),
# regnetv = 'preact regnet y'
regnetv_040=RegNetCfg(
depth=22, w0=96, wa=31.41, wm=2.24, group_size=64, se_ratio=0.25, preact=True, act_layer='silu'),
regnetv_064=RegNetCfg(
depth=25, w0=112, wa=33.22, wm=2.27, group_size=72, se_ratio=0.25, preact=True, act_layer='silu',
downsample='avg'),
# RegNet-Z (unverified)
regnetz_005=RegNetCfg(
depth=21, w0=16, wa=10.7, wm=2.51, group_size=4, bottle_ratio=4.0, se_ratio=0.25,
downsample=None, linear_out=True, num_features=1024, act_layer='silu',
),
regnetz_040=RegNetCfg(
depth=28, w0=48, wa=14.5, wm=2.226, group_size=8, bottle_ratio=4.0, se_ratio=0.25,
downsample=None, linear_out=True, num_features=0, act_layer='silu',
),
regnetz_040_h=RegNetCfg(
depth=28, w0=48, wa=14.5, wm=2.226, group_size=8, bottle_ratio=4.0, se_ratio=0.25,
downsample=None, linear_out=True, num_features=1536, act_layer='silu',
),
)
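# --- Illustrative config sketch (not part of the original file) ---
# Per the FLOPs note above, the trailing digits encode ~FLOPs * 10^8, e.g. regnety_002 ~ 0.2 GF
# and regnety_320 ~ 32 GF. A cfg can also be instantiated directly, bypassing the registry helpers below:
def _direct_cfg_demo():
    model = RegNet(model_cfgs['regnety_002'], num_classes=10)
    return model(torch.randn(1, 3, 224, 224)).shape  # expected torch.Size([1, 10])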
def _create_regnet(variant, pretrained, **kwargs):
return build_model_with_cfg(
RegNet, variant, pretrained,
model_cfg=model_cfgs[variant],
pretrained_filter_fn=_filter_fn,
**kwargs)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'test_input_size': (3, 288, 288), 'crop_pct': 0.95, 'test_crop_pct': 1.0,
'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
**kwargs
}
def _cfgpyc(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
'license': 'mit', 'origin_url': 'https://github.com/facebookresearch/pycls', **kwargs
}
def _cfgtv2(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.965, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
'license': 'bsd-3-clause', 'origin_url': 'https://github.com/pytorch/vision', **kwargs
}
default_cfgs = generate_default_cfgs({
# timm trained models
'regnety_032.ra_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth'),
'regnety_040.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_040_ra3-670e1166.pth'),
'regnety_064.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_064_ra3-aa26dc7d.pth'),
'regnety_080.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_080_ra3-1fdc4344.pth'),
'regnety_120.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'),
'regnety_160.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'),
'regnety_160.lion_in12k_ft_in1k': _cfg(hf_hub_id='timm/'),
# timm in12k pretrain
'regnety_120.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821),
'regnety_160.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821),
# timm custom arch (v and z guess) + trained models
'regnety_040_sgn.untrained': _cfg(url=''),
'regnetv_040.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetv_040_ra3-c248f51f.pth',
first_conv='stem'),
'regnetv_064.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetv_064_ra3-530616c2.pth',
first_conv='stem'),
'regnetz_005.untrained': _cfg(url=''),
'regnetz_040.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_040_ra3-9007edf5.pth',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320)),
'regnetz_040_h.ra3_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_040h_ra3-f594343b.pth',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320)),
# used in DeiT for distillation (from Facebook DeiT GitHub repository)
'regnety_160.deit_in1k': _cfg(
hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/regnety_160-a5fe301d.pth'),
'regnetx_004_tv.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_x_400mf-62229a5f.pth'),
'regnetx_008.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_x_800mf-94a99ebd.pth'),
'regnetx_016.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_x_1_6gf-a12f2b72.pth'),
'regnetx_032.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_x_3_2gf-7071aa85.pth'),
'regnetx_080.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_x_8gf-2b70d774.pth'),
'regnetx_160.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_x_16gf-ba3796d7.pth'),
'regnetx_320.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_x_32gf-6eb8fdc6.pth'),
'regnety_004.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_400mf-e6988f5f.pth'),
'regnety_008_tv.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_800mf-58fc7688.pth'),
'regnety_016.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_1_6gf-0d7bc02a.pth'),
'regnety_032.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_3_2gf-9180c971.pth'),
'regnety_080_tv.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_8gf-dc2b1b54.pth'),
'regnety_160.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_16gf-3e4a00f9.pth'),
'regnety_320.tv2_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_32gf-8db6d4b5.pth'),
'regnety_160.swag_ft_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_16gf_swag-43afe44d.pth', license='cc-by-nc-4.0',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'regnety_320.swag_ft_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_32gf_swag-04fdfa75.pth', license='cc-by-nc-4.0',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'regnety_1280.swag_ft_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_128gf_swag-c8ce3e52.pth', license='cc-by-nc-4.0',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'regnety_160.swag_lc_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_16gf_lc_swag-f3ec0043.pth', license='cc-by-nc-4.0'),
'regnety_320.swag_lc_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_32gf_lc_swag-e1583746.pth', license='cc-by-nc-4.0'),
'regnety_1280.swag_lc_in1k': _cfgtv2(
hf_hub_id='timm/',
url='https://download.pytorch.org/models/regnet_y_128gf_lc_swag-cbe8ce12.pth', license='cc-by-nc-4.0'),
'regnety_320.seer_ft_in1k': _cfgtv2(
hf_hub_id='timm/',
license='other', origin_url='https://github.com/facebookresearch/vissl',
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'regnety_640.seer_ft_in1k': _cfgtv2(
hf_hub_id='timm/',
license='other', origin_url='https://github.com/facebookresearch/vissl',
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'regnety_1280.seer_ft_in1k': _cfgtv2(
hf_hub_id='timm/',
license='other', origin_url='https://github.com/facebookresearch/vissl',
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'regnety_2560.seer_ft_in1k': _cfgtv2(
hf_hub_id='timm/',
license='other', origin_url='https://github.com/facebookresearch/vissl',
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet256_finetuned_in1k_model_final_checkpoint_phase38.torch',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'regnety_320.seer': _cfgtv2(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch',
num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'),
'regnety_640.seer': _cfgtv2(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch',
num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'),
'regnety_1280.seer': _cfgtv2(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch',
num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'),
# FIXME invalid weight <-> model match, mistake on their end
#'regnety_2560.seer': _cfgtv2(
# url='https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_cosine_rg256gf_noBNhead_wd1e5_fairstore_bs16_node64_sinkhorn10_proto16k_apex_syncBN64_warmup8k/model_final_checkpoint_phase0.torch',
# num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'),
'regnetx_002.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_004.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_006.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_008.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_016.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_032.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_040.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_064.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_080.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_120.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_160.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnetx_320.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_002.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_004.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_006.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_008.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_016.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_032.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_040.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_064.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_080.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_120.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_160.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
'regnety_320.pycls_in1k': _cfgpyc(hf_hub_id='timm/'),
})
@register_model
def regnetx_002(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-200MF"""
return _create_regnet('regnetx_002', pretrained, **kwargs)
@register_model
def regnetx_004(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-400MF"""
return _create_regnet('regnetx_004', pretrained, **kwargs)
@register_model
def regnetx_004_tv(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-400MF w/ torchvision group rounding"""
return _create_regnet('regnetx_004_tv', pretrained, **kwargs)
@register_model
def regnetx_006(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-600MF"""
return _create_regnet('regnetx_006', pretrained, **kwargs)
@register_model
def regnetx_008(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-800MF"""
return _create_regnet('regnetx_008', pretrained, **kwargs)
@register_model
def regnetx_016(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-1.6GF"""
return _create_regnet('regnetx_016', pretrained, **kwargs)
@register_model
def regnetx_032(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-3.2GF"""
return _create_regnet('regnetx_032', pretrained, **kwargs)
@register_model
def regnetx_040(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-4.0GF"""
return _create_regnet('regnetx_040', pretrained, **kwargs)
@register_model
def regnetx_064(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-6.4GF"""
return _create_regnet('regnetx_064', pretrained, **kwargs)
@register_model
def regnetx_080(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-8.0GF"""
return _create_regnet('regnetx_080', pretrained, **kwargs)
@register_model
def regnetx_120(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-12GF"""
return _create_regnet('regnetx_120', pretrained, **kwargs)
@register_model
def regnetx_160(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-16GF"""
return _create_regnet('regnetx_160', pretrained, **kwargs)
@register_model
def regnetx_320(pretrained=False, **kwargs) -> RegNet:
"""RegNetX-32GF"""
return _create_regnet('regnetx_320', pretrained, **kwargs)
@register_model
def regnety_002(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-200MF"""
return _create_regnet('regnety_002', pretrained, **kwargs)
@register_model
def regnety_004(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-400MF"""
return _create_regnet('regnety_004', pretrained, **kwargs)
@register_model
def regnety_006(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-600MF"""
return _create_regnet('regnety_006', pretrained, **kwargs)
@register_model
def regnety_008(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-800MF"""
return _create_regnet('regnety_008', pretrained, **kwargs)
@register_model
def regnety_008_tv(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-800MF w/ torchvision group rounding"""
return _create_regnet('regnety_008_tv', pretrained, **kwargs)
@register_model
def regnety_016(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-1.6GF"""
return _create_regnet('regnety_016', pretrained, **kwargs)
@register_model
def regnety_032(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-3.2GF"""
return _create_regnet('regnety_032', pretrained, **kwargs)
@register_model
def regnety_040(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-4.0GF"""
return _create_regnet('regnety_040', pretrained, **kwargs)
@register_model
def regnety_064(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-6.4GF"""
return _create_regnet('regnety_064', pretrained, **kwargs)
@register_model
def regnety_080(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-8.0GF"""
return _create_regnet('regnety_080', pretrained, **kwargs)
@register_model
def regnety_080_tv(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-8.0GF w/ torchvision group rounding"""
return _create_regnet('regnety_080_tv', pretrained, **kwargs)
@register_model
def regnety_120(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-12GF"""
return _create_regnet('regnety_120', pretrained, **kwargs)
@register_model
def regnety_160(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-16GF"""
return _create_regnet('regnety_160', pretrained, **kwargs)
@register_model
def regnety_320(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-32GF"""
return _create_regnet('regnety_320', pretrained, **kwargs)
@register_model
def regnety_640(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-64GF"""
return _create_regnet('regnety_640', pretrained, **kwargs)
@register_model
def regnety_1280(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-128GF"""
return _create_regnet('regnety_1280', pretrained, **kwargs)
@register_model
def regnety_2560(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-256GF"""
return _create_regnet('regnety_2560', pretrained, **kwargs)
@register_model
def regnety_040_sgn(pretrained=False, **kwargs) -> RegNet:
"""RegNetY-4.0GF w/ GroupNorm """
return _create_regnet('regnety_040_sgn', pretrained, **kwargs)
@register_model
def regnetv_040(pretrained=False, **kwargs) -> RegNet:
"""RegNetV-4.0GF (pre-activation)"""
return _create_regnet('regnetv_040', pretrained, **kwargs)
@register_model
def regnetv_064(pretrained=False, **kwargs) -> RegNet:
"""RegNetV-6.4GF (pre-activation)"""
return _create_regnet('regnetv_064', pretrained, **kwargs)
@register_model
def regnetz_005(pretrained=False, **kwargs) -> RegNet:
"""RegNetZ-500MF
NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py,
but it's unclear whether it matches the paper's model since the config is not detailed in the paper.
"""
return _create_regnet('regnetz_005', pretrained, zero_init_last=False, **kwargs)
@register_model
def regnetz_040(pretrained=False, **kwargs) -> RegNet:
"""RegNetZ-4.0GF
NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py,
but it's unclear whether it matches the paper's model since the config is not detailed in the paper.
"""
return _create_regnet('regnetz_040', pretrained, zero_init_last=False, **kwargs)
@register_model
def regnetz_040_h(pretrained=False, **kwargs) -> RegNet:
"""RegNetZ-4.0GF
NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py
but it's not clear it is equivalent to paper model as not detailed in the paper.
"""
return _create_regnet('regnetz_040_h', pretrained, zero_init_last=False, **kwargs)
register_model_deprecations(__name__, {
'regnetz_040h': 'regnetz_040_h',
}) | pytorch-image-models/timm/models/regnet.py/0 | {
"file_path": "pytorch-image-models/timm/models/regnet.py",
"repo_id": "pytorch-image-models",
"token_count": 22592
} |
""" Transformer in Transformer (TNT) in PyTorch
A PyTorch implementation of TNT as described in
'Transformer in Transformer' - https://arxiv.org/abs/2103.00112
The official MindSpore code is available at
https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT
"""
import math
from typing import Optional
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import Mlp, DropPath, trunc_normal_, _assert, to_2tuple, resample_abs_pos_embed
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint
from ._registry import register_model
__all__ = ['TNT'] # model_registry will add each entrypoint fn to this
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'pixel_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
'tnt_s_patch16_224': _cfg(
url='https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
),
'tnt_b_patch16_224': _cfg(
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
),
}
class Attention(nn.Module):
""" Multi-Head Attention
"""
def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
self.hidden_dim = hidden_dim
self.num_heads = num_heads
head_dim = hidden_dim // num_heads
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.qk = nn.Linear(dim, hidden_dim * 2, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop, inplace=True)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop, inplace=True)
def forward(self, x):
B, N, C = x.shape
qk = self.qk(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k = qk.unbind(0) # make torchscript happy (cannot use tensor as tuple)
v = self.v(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
""" TNT Block
"""
def __init__(
self,
dim,
dim_out,
num_pixel,
num_heads_in=4,
num_heads_out=12,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
# Inner transformer
self.norm_in = norm_layer(dim)
self.attn_in = Attention(
dim,
dim,
num_heads=num_heads_in,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.norm_mlp_in = norm_layer(dim)
self.mlp_in = Mlp(
in_features=dim,
hidden_features=int(dim * 4),
out_features=dim,
act_layer=act_layer,
drop=proj_drop,
)
self.norm1_proj = norm_layer(dim)
self.proj = nn.Linear(dim * num_pixel, dim_out, bias=True)
# Outer transformer
self.norm_out = norm_layer(dim_out)
self.attn_out = Attention(
dim_out,
dim_out,
num_heads=num_heads_out,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm_mlp = norm_layer(dim_out)
self.mlp = Mlp(
in_features=dim_out,
hidden_features=int(dim_out * mlp_ratio),
out_features=dim_out,
act_layer=act_layer,
drop=proj_drop,
)
def forward(self, pixel_embed, patch_embed):
# inner
pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed)))
pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed)))
# outer
B, N, C = patch_embed.size()
patch_embed = torch.cat(
[patch_embed[:, 0:1], patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape(B, N - 1, -1))],
dim=1)
patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed)))
patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed)))
return pixel_embed, patch_embed
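# --- Illustrative shape sketch (not part of the original file) ---
# Dummy sizes roughly matching tnt_s_patch16_224: 14x14 = 196 patches, 4x4 = 16 pixel tokens
# per patch, inner_dim=24, embed_dim=384. Only the expected tensor shapes are demonstrated.
def _tnt_block_shape_demo():
    blk = Block(dim=24, dim_out=384, num_pixel=16, num_heads_in=4, num_heads_out=6)
    pixel = torch.randn(2 * 196, 16, 24)   # (B * num_patches, num_pixel, inner_dim)
    patch = torch.randn(2, 196 + 1, 384)   # (B, num_patches + 1, embed_dim), +1 for the cls token
    pixel, patch = blk(pixel, patch)
    assert pixel.shape == (2 * 196, 16, 24) and patch.shape == (2, 197, 384)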
class PixelEmbed(nn.Module):
""" Image to Pixel Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
# grid_size property necessary for resizing positional embedding
self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
num_patches = (self.grid_size[0]) * (self.grid_size[1])
self.img_size = img_size
self.num_patches = num_patches
self.in_dim = in_dim
new_patch_size = [math.ceil(ps / stride) for ps in patch_size]
self.new_patch_size = new_patch_size
self.proj = nn.Conv2d(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride)
self.unfold = nn.Unfold(kernel_size=new_patch_size, stride=new_patch_size)
def forward(self, x, pixel_pos):
B, C, H, W = x.shape
_assert(H == self.img_size[0],
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
_assert(W == self.img_size[1],
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
x = self.proj(x)
x = self.unfold(x)
x = x.transpose(1, 2).reshape(B * self.num_patches, self.in_dim, self.new_patch_size[0], self.new_patch_size[1])
x = x + pixel_pos
x = x.reshape(B * self.num_patches, self.in_dim, -1).transpose(1, 2)
return x
class TNT(nn.Module):
""" Transformer in Transformer - https://arxiv.org/abs/2103.00112
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
global_pool='token',
embed_dim=768,
inner_dim=48,
depth=12,
num_heads_inner=4,
num_heads_outer=12,
mlp_ratio=4.,
qkv_bias=False,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=nn.LayerNorm,
first_stride=4,
):
super().__init__()
assert global_pool in ('', 'token', 'avg')
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models
self.grad_checkpointing = False
self.pixel_embed = PixelEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
in_dim=inner_dim,
stride=first_stride,
)
num_patches = self.pixel_embed.num_patches
self.num_patches = num_patches
new_patch_size = self.pixel_embed.new_patch_size
num_pixel = new_patch_size[0] * new_patch_size[1]
self.norm1_proj = norm_layer(num_pixel * inner_dim)
self.proj = nn.Linear(num_pixel * inner_dim, embed_dim)
self.norm2_proj = norm_layer(embed_dim)
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.patch_pos = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pixel_pos = nn.Parameter(torch.zeros(1, inner_dim, new_patch_size[0], new_patch_size[1]))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
blocks = []
for i in range(depth):
blocks.append(Block(
dim=inner_dim,
dim_out=embed_dim,
num_pixel=num_pixel,
num_heads_in=num_heads_inner,
num_heads_out=num_heads_outer,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
))
self.blocks = nn.ModuleList(blocks)
self.norm = norm_layer(embed_dim)
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.cls_token, std=.02)
trunc_normal_(self.patch_pos, std=.02)
trunc_normal_(self.pixel_pos, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'patch_pos', 'pixel_pos', 'cls_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^cls_token|patch_pos|pixel_pos|pixel_embed|norm[12]_proj|proj', # stem and embed / pos
blocks=[
(r'^blocks\.(\d+)', None),
(r'^norm', (99999,)),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'token', 'avg')
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
pixel_embed = self.pixel_embed(x, self.pixel_pos)
patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1))))
patch_embed = torch.cat((self.cls_token.expand(B, -1, -1), patch_embed), dim=1)
patch_embed = patch_embed + self.patch_pos
patch_embed = self.pos_drop(patch_embed)
if self.grad_checkpointing and not torch.jit.is_scripting():
for blk in self.blocks:
pixel_embed, patch_embed = checkpoint(blk, pixel_embed, patch_embed)
else:
for blk in self.blocks:
pixel_embed, patch_embed = blk(pixel_embed, patch_embed)
patch_embed = self.norm(patch_embed)
return patch_embed
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
if state_dict['patch_pos'].shape != model.patch_pos.shape:
state_dict['patch_pos'] = resample_abs_pos_embed(
state_dict['patch_pos'],
new_size=model.pixel_embed.grid_size,
num_prefix_tokens=1,
)
return state_dict
def _create_tnt(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(
TNT, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs)
return model
@register_model
def tnt_s_patch16_224(pretrained=False, **kwargs) -> TNT:
model_cfg = dict(
patch_size=16, embed_dim=384, inner_dim=24, depth=12, num_heads_outer=6,
qkv_bias=False)
model = _create_tnt('tnt_s_patch16_224', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def tnt_b_patch16_224(pretrained=False, **kwargs) -> TNT:
model_cfg = dict(
patch_size=16, embed_dim=640, inner_dim=40, depth=12, num_heads_outer=10,
qkv_bias=False)
model = _create_tnt('tnt_b_patch16_224', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
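# --- Illustrative usage sketch (not part of the original file) ---
# Builds the small variant directly through its entrypoint; pretrained weights are skipped and
# only the output shape is checked.
def _tnt_usage_demo():
    model = tnt_s_patch16_224(pretrained=False, num_classes=10)
    logits = model(torch.randn(1, 3, 224, 224))
    return logits.shape  # expected torch.Size([1, 10])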
| pytorch-image-models/timm/models/tnt.py/0 | {
"file_path": "pytorch-image-models/timm/models/tnt.py",
"repo_id": "pytorch-image-models",
"token_count": 6783
} |
""" Optimizer Factory w/ custom Weight Decay & Layer Decay support
Hacked together by / Copyright 2021 Ross Wightman
"""
import logging
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union
from fnmatch import fnmatch
import importlib
import torch
import torch.nn as nn
import torch.optim
from ._param_groups import param_groups_layer_decay, param_groups_weight_decay
from ._types import ParamsT, OptimType, OptimizerCallable
from .adabelief import AdaBelief
from .adafactor import Adafactor
from .adafactor_bv import AdafactorBigVision
from .adahessian import Adahessian
from .adamp import AdamP
from .adamw import AdamWLegacy
from .adan import Adan
from .adopt import Adopt
from .kron import Kron
from .lamb import Lamb
from .laprop import LaProp
from .lars import Lars
from .lion import Lion
from .lookahead import Lookahead
from .madgrad import MADGRAD
from .mars import Mars
from .nadam import NAdamLegacy
from .nadamw import NAdamW
from .nvnovograd import NvNovoGrad
from .radam import RAdamLegacy
from .rmsprop_tf import RMSpropTF
from .sgdp import SGDP
from .sgdw import SGDW
_logger = logging.getLogger(__name__)
def _import_class(class_string: str) -> Type:
"""Dynamically import a class from a string."""
try:
module_name, class_name = class_string.rsplit(".", 1)
module = importlib.import_module(module_name)
return getattr(module, class_name)
except (ImportError, AttributeError) as e:
raise ImportError(f"Could not import {class_string}: {e}")
@dataclass(frozen=True)
class OptimInfo:
"""Immutable configuration for an optimizer.
Attributes:
name: Unique identifier for the optimizer
opt_class: The optimizer class
description: Brief description of the optimizer's characteristics and behavior
has_eps: Whether the optimizer accepts epsilon parameter
has_momentum: Whether the optimizer accepts momentum parameter
has_betas: Whether the optimizer accepts a tuple of beta parameters
num_betas: number of betas in tuple (valid IFF has_betas = True)
defaults: Optional default parameters for the optimizer
"""
name: str
opt_class: Union[str, OptimType]
description: str = ''
has_eps: bool = True
has_momentum: bool = False
has_betas: bool = False
num_betas: int = 2
second_order: bool = False
defaults: Optional[Dict[str, Any]] = None
class OptimizerRegistry:
"""Registry managing optimizer configurations and instantiation.
This class provides a central registry for optimizer configurations and handles
their instantiation with appropriate parameter groups and settings.
"""
def __init__(self) -> None:
self._optimizers: Dict[str, OptimInfo] = {}
self._foreach_defaults: Set[str] = {'lion'}
def register(self, info: OptimInfo) -> None:
"""Register an optimizer configuration.
Args:
info: The OptimInfo configuration containing name, type and description
"""
name = info.name.lower()
if name in self._optimizers:
_logger.warning(f'Optimizer {name} already registered, overwriting')
self._optimizers[name] = info
def register_alias(self, alias: str, target: str) -> None:
"""Register an alias for an existing optimizer.
Args:
alias: The alias name
target: The target optimizer name
Raises:
KeyError: If target optimizer doesn't exist
"""
target = target.lower()
if target not in self._optimizers:
raise KeyError(f'Cannot create alias for non-existent optimizer {target}')
self._optimizers[alias.lower()] = self._optimizers[target]
def register_foreach_default(self, name: str) -> None:
"""Register an optimizer as defaulting to foreach=True."""
self._foreach_defaults.add(name.lower())
def list_optimizers(
self,
filter: Union[str, List[str]] = '',
exclude_filters: Optional[List[str]] = None,
with_description: bool = False
) -> List[Union[str, Tuple[str, str]]]:
"""List available optimizer names, optionally filtered.
Args:
filter: Wildcard style filter string (e.g., 'adam*')
exclude_filters: Optional list of wildcard patterns to exclude
with_description: If True, return tuples of (name, description)
Returns:
List of either optimizer names or (name, description) tuples
"""
names = sorted(self._optimizers.keys())
if filter:
if isinstance(filter, str):
filters = [filter]
else:
filters = filter
filtered_names = set()
for f in filters:
filtered_names.update(n for n in names if fnmatch(n, f))
names = sorted(filtered_names)
if exclude_filters:
for exclude_filter in exclude_filters:
names = [n for n in names if not fnmatch(n, exclude_filter)]
if with_description:
return [(name, self._optimizers[name].description) for name in names]
return names
def get_optimizer_info(self, name: str) -> OptimInfo:
"""Get the OptimInfo for an optimizer.
Args:
name: Name of the optimizer
Returns:
OptimInfo configuration
Raises:
ValueError: If optimizer is not found
"""
name = name.lower()
if name not in self._optimizers:
raise ValueError(f'Optimizer {name} not found in registry')
return self._optimizers[name]
def get_optimizer_class(
self,
name_or_info: Union[str, OptimInfo],
bind_defaults: bool = True,
) -> Union[OptimType, OptimizerCallable]:
"""Get the optimizer class with any default arguments applied.
This allows direct instantiation of optimizers with their default configs
without going through the full factory.
Args:
name_or_info: Name of the optimizer
bind_defaults: Bind default arguments to optimizer class via `partial` before returning
Returns:
Optimizer class or partial with defaults applied
Raises:
ValueError: If optimizer not found
"""
if isinstance(name_or_info, str):
opt_info = self.get_optimizer_info(name_or_info)
else:
assert isinstance(name_or_info, OptimInfo)
opt_info = name_or_info
if isinstance(opt_info.opt_class, str):
# Special handling for APEX and BNB optimizers
if opt_info.opt_class.startswith('apex.'):
assert torch.cuda.is_available(), 'CUDA required for APEX optimizers'
try:
opt_class = _import_class(opt_info.opt_class)
except ImportError as e:
raise ImportError('APEX optimizers require apex to be installed') from e
elif opt_info.opt_class.startswith('bitsandbytes.'):
assert torch.cuda.is_available(), 'CUDA required for bitsandbytes optimizers'
try:
opt_class = _import_class(opt_info.opt_class)
except ImportError as e:
raise ImportError('bitsandbytes optimizers require bitsandbytes to be installed') from e
else:
opt_class = _import_class(opt_info.opt_class)
else:
opt_class = opt_info.opt_class
# Return class or partial with defaults
if bind_defaults and opt_info.defaults:
opt_class = partial(opt_class, **opt_info.defaults)
return opt_class
def create_optimizer(
self,
model_or_params: Union[nn.Module, ParamsT],
opt: str,
lr: Optional[float] = None,
weight_decay: float = 0.,
momentum: float = 0.9,
foreach: Optional[bool] = None,
weight_decay_exclude_1d: bool = True,
layer_decay: Optional[float] = None,
param_group_fn: Optional[Callable[[nn.Module], ParamsT]] = None,
**kwargs: Any,
) -> torch.optim.Optimizer:
"""Create an optimizer instance.
Args:
model_or_params: Model or parameters to optimize
opt: Name of optimizer to create
lr: Learning rate
weight_decay: Weight decay factor
momentum: Momentum factor for applicable optimizers
foreach: Enable/disable foreach operation
weight_decay_exclude_1d: Whether to skip weight decay for 1d params (biases and norm affine)
layer_decay: Layer-wise learning rate decay
param_group_fn: Optional custom parameter grouping function
**kwargs: Additional optimizer-specific arguments
Returns:
Configured optimizer instance
Raises:
ValueError: If optimizer not found or configuration invalid
"""
# Get parameters to optimize
if isinstance(model_or_params, nn.Module):
# Extract parameters from a nn.Module, build param groups w/ weight-decay and/or layer-decay applied
no_weight_decay = getattr(model_or_params, 'no_weight_decay', lambda: set())()
if param_group_fn:
# run custom fn to generate param groups from nn.Module
params = param_group_fn(model_or_params)
elif layer_decay is not None:
params = param_groups_layer_decay(
model_or_params,
weight_decay=weight_decay,
layer_decay=layer_decay,
no_weight_decay_list=no_weight_decay,
weight_decay_exclude_1d=weight_decay_exclude_1d,
)
weight_decay = 0.
elif weight_decay and weight_decay_exclude_1d:
params = param_groups_weight_decay(
model_or_params,
weight_decay=weight_decay,
no_weight_decay_list=no_weight_decay,
)
weight_decay = 0.
else:
params = model_or_params.parameters()
else:
# pass parameters / parameter groups through to optimizer
params = model_or_params
# Parse optimizer name
opt_split = opt.lower().split('_')
opt_name = opt_split[-1]
use_lookahead = opt_split[0] == 'lookahead' if len(opt_split) > 1 else False
opt_info = self.get_optimizer_info(opt_name)
# Build optimizer arguments
opt_args: Dict[str, Any] = {'weight_decay': weight_decay, **kwargs}
# Add LR to args, if None optimizer default is used, some optimizers manage LR internally if None.
if lr is not None:
opt_args['lr'] = lr
# Apply optimizer-specific settings
if opt_info.defaults:
for k, v in opt_info.defaults.items():
opt_args.setdefault(k, v)
# timm has always defaulted momentum to 0.9 when an optimizer supports it; keep for backward compat.
if opt_info.has_momentum:
opt_args.setdefault('momentum', momentum)
# Remove commonly used kwargs that aren't always supported
if not opt_info.has_eps:
opt_args.pop('eps', None)
if not opt_info.has_betas:
opt_args.pop('betas', None)
if foreach is not None:
# Explicitly activate or deactivate multi-tensor foreach impl.
# Not all optimizers support this, and those that do usually default to using
# multi-tensor impl if foreach is left as default 'None' and can be enabled.
opt_args.setdefault('foreach', foreach)
# Create optimizer
opt_class = self.get_optimizer_class(opt_info, bind_defaults=False)
optimizer = opt_class(params, **opt_args)
# Apply Lookahead if requested
if use_lookahead:
optimizer = Lookahead(optimizer)
return optimizer
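# --- Illustrative usage sketch (not part of the original file) ---
# Populates a fresh registry with the SGD variants registered below (resolved at call time),
# lists them with a wildcard, and builds a Lookahead-wrapped optimizer for a toy model.
def _registry_usage_demo():
    registry = OptimizerRegistry()
    _register_sgd_variants(registry)          # defined just below this sketch
    names = registry.list_optimizers('sgd*')  # e.g. ['sgd', 'sgdp', 'sgdw']
    model = nn.Linear(16, 4)
    opt = registry.create_optimizer(model, opt='lookahead_sgd', lr=0.1, weight_decay=1e-4)
    return names, type(opt).__name__          # 'Lookahead' wrapping torch.optim.SGD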
def _register_sgd_variants(registry: OptimizerRegistry) -> None:
"""Register SGD-based optimizers"""
sgd_optimizers = [
OptimInfo(
name='sgd',
opt_class=torch.optim.SGD,
description='torch.Optim Stochastic Gradient Descent (SGD) with Nesterov momentum',
has_eps=False,
has_momentum=True,
defaults={'nesterov': True}
),
OptimInfo(
name='momentum',
opt_class=torch.optim.SGD,
description='torch.Optim Stochastic Gradient Descent (SGD) with classical momentum',
has_eps=False,
has_momentum=True,
defaults={'nesterov': False}
),
OptimInfo(
name='sgdp',
opt_class=SGDP,
description='SGD with built-in projection to unit norm sphere',
has_momentum=True,
defaults={'nesterov': True}
),
OptimInfo(
name='sgdw',
opt_class=SGDW,
description='SGD with decoupled weight decay and Nesterov momentum',
has_eps=False,
has_momentum=True,
defaults={'nesterov': True}
),
]
for opt in sgd_optimizers:
registry.register(opt)
def _register_adam_variants(registry: OptimizerRegistry) -> None:
"""Register Adam-based optimizers"""
adam_optimizers = [
OptimInfo(
name='adam',
opt_class=torch.optim.Adam,
description='torch.optim.Adam, Adaptive Moment Estimation',
has_betas=True
),
OptimInfo(
name='adamw',
opt_class=torch.optim.AdamW,
description='torch.optim.AdamW, Adam with decoupled weight decay',
has_betas=True
),
OptimInfo(
name='adamwlegacy',
opt_class=AdamWLegacy,
description='legacy impl of AdamW that pre-dates inclusion to torch.optim',
has_betas=True
),
OptimInfo(
name='adamp',
opt_class=AdamP,
description='Adam with built-in projection to unit norm sphere',
has_betas=True,
defaults={'wd_ratio': 0.01, 'nesterov': True}
),
OptimInfo(
name='nadam',
opt_class=torch.optim.NAdam,
description='torch.optim.NAdam, Adam with Nesterov momentum',
has_betas=True
),
OptimInfo(
name='nadamlegacy',
opt_class=NAdamLegacy,
description='legacy impl of NAdam that pre-dates inclusion in torch.optim',
has_betas=True
),
OptimInfo(
name='nadamw',
opt_class=NAdamW,
description='Adam with Nesterov momentum and decoupled weight decay, mlcommons/algorithmic-efficiency impl',
has_betas=True
),
OptimInfo(
name='radam',
opt_class=torch.optim.RAdam,
description='torch.optim.RAdam, Rectified Adam with variance adaptation',
has_betas=True
),
OptimInfo(
name='radamlegacy',
opt_class=RAdamLegacy,
description='legacy impl of RAdam that predates inclusion in torch.optim',
has_betas=True
),
OptimInfo(
name='radamw',
opt_class=torch.optim.RAdam,
description='torch.optim.RAdamW, Rectified Adam with variance adaptation and decoupled weight decay',
has_betas=True,
defaults={'decoupled_weight_decay': True}
),
OptimInfo(
name='adamax',
opt_class=torch.optim.Adamax,
description='torch.optim.Adamax, Adam with infinity norm for more stable updates',
has_betas=True
),
OptimInfo(
name='adafactor',
opt_class=Adafactor,
description='Memory-efficient implementation of Adam with factored gradients',
),
OptimInfo(
name='adafactorbv',
opt_class=AdafactorBigVision,
description='Big Vision variant of Adafactor with factored gradients, half precision momentum',
),
OptimInfo(
name='adopt',
opt_class=Adopt,
description='Modified Adam that can converge with any β2 with the optimal rate',
),
OptimInfo(
name='adoptw',
opt_class=Adopt,
description='Modified AdamW (decoupled decay) that can converge with any β2 with the optimal rate',
defaults={'decoupled': True}
),
]
for opt in adam_optimizers:
registry.register(opt)
def _register_lamb_lars(registry: OptimizerRegistry) -> None:
"""Register LAMB and LARS variants"""
lamb_lars_optimizers = [
OptimInfo(
name='lamb',
opt_class=Lamb,
description='Layer-wise Adaptive Moments for batch optimization',
has_betas=True
),
OptimInfo(
name='lambc',
opt_class=Lamb,
description='LAMB with trust ratio clipping for stability',
has_betas=True,
defaults={'trust_clip': True}
),
OptimInfo(
name='lambw',
opt_class=Lamb,
description='LAMB with decoupled weight decay',
has_betas=True,
defaults={'decoupled_decay': True}
),
OptimInfo(
name='lambcw',
opt_class=Lamb,
description='LAMB with trust ratio clipping for stability and decoupled decay',
has_betas=True,
defaults={'trust_clip': True, 'decoupled_decay': True}
),
OptimInfo(
name='lars',
opt_class=Lars,
description='Layer-wise Adaptive Rate Scaling',
has_momentum=True
),
OptimInfo(
name='larc',
opt_class=Lars,
description='LARS with trust ratio clipping for stability',
has_momentum=True,
defaults={'trust_clip': True}
),
OptimInfo(
name='nlars',
opt_class=Lars,
description='LARS with Nesterov momentum',
has_momentum=True,
defaults={'nesterov': True}
),
OptimInfo(
name='nlarc',
opt_class=Lars,
description='LARS with Nesterov momentum & trust ratio clipping',
has_momentum=True,
defaults={'nesterov': True, 'trust_clip': True}
),
]
for opt in lamb_lars_optimizers:
registry.register(opt)
def _register_cautious_optimizers(registry: OptimizerRegistry) -> None:
cautious_optimizers = [
OptimInfo(
name='cadafactor',
opt_class=Adafactor,
description='Cautious Adafactor',
defaults={'caution': True}
),
OptimInfo(
name='cadafactorbv',
opt_class=AdafactorBigVision,
description='Cautious Big Vision Adafactor',
defaults={'caution': True}
),
OptimInfo(
name='cadamw',
opt_class=AdamWLegacy,
description='Cautious AdamW',
has_betas=True,
defaults={'caution': True}
),
OptimInfo(
name='cadopt',
opt_class=Adopt,
description='Cautious Adopt',
defaults={'caution': True}
),
OptimInfo(
name='cadan',
opt_class=Adan,
description='Cautious Adaptive Nesterov Momentum Algorithm',
defaults={'caution': True, 'no_prox': False},
has_betas=True,
num_betas=3
),
OptimInfo(
name='cadanw',
opt_class=Adan,
description='Cautious Adaptive Nesterov Momentum with decoupled weight decay',
defaults={'caution': True, 'no_prox': True},
has_betas=True,
num_betas=3
),
OptimInfo(
name='cadoptw',
opt_class=Adopt,
description='Cautious AdoptW (decoupled decay)',
defaults={'decoupled': True, 'caution': True}
),
OptimInfo(
name='clamb',
opt_class=Lamb,
description='Cautious LAMB',
has_betas=True,
defaults={'caution': True}
),
OptimInfo(
name='clambw',
opt_class=Lamb,
description='Cautious LAMB with decoupled weight decay',
has_betas=True,
defaults={'caution': True, 'decoupled_decay': True}
),
OptimInfo(
name='claprop',
opt_class=LaProp,
description='Cautious LaProp',
has_betas=True,
defaults={'caution': True}
),
OptimInfo(
name='clion',
opt_class=Lion,
description='Cautious Lion',
has_eps=False,
has_betas=True,
defaults = {'caution': True}
),
OptimInfo(
name='cmars',
opt_class=Mars,
description='Cautious MARS',
has_betas=True,
defaults={'caution': True}
),
OptimInfo(
name='cnadamw',
opt_class=NAdamW,
description='Cautious NAdamW',
has_betas=True,
defaults={'caution': True}
),
OptimInfo(
name='crmsproptf',
opt_class=RMSpropTF,
description='Cautious TensorFlow-style RMSprop',
has_momentum=True,
defaults={'alpha': 0.9, 'caution': True}
),
OptimInfo(
name='csgdw',
opt_class=SGDW,
description='Cautious SGD with decoupled weight decay and Nesterov momentum',
has_eps=False,
has_momentum=True,
defaults={'nesterov': True, 'caution': True}
),
]
for opt in cautious_optimizers:
registry.register(opt)
def _register_other_optimizers(registry: OptimizerRegistry) -> None:
"""Register miscellaneous optimizers"""
other_optimizers = [
OptimInfo(
name='adabelief',
opt_class=AdaBelief,
description='Adapts learning rate based on gradient prediction error',
has_betas=True,
defaults={'rectify': False}
),
OptimInfo(
name='radabelief',
opt_class=AdaBelief,
description='Rectified AdaBelief with variance adaptation',
has_betas=True,
defaults={'rectify': True}
),
OptimInfo(
name='adadelta',
opt_class=torch.optim.Adadelta,
description='torch.optim.Adadelta, Adapts learning rates based on running windows of gradients'
),
OptimInfo(
name='adagrad',
opt_class=torch.optim.Adagrad,
description='torch.optim.Adagrad, Adapts learning rates using cumulative squared gradients',
defaults={'eps': 1e-8}
),
OptimInfo(
name='adan',
opt_class=Adan,
description='Adaptive Nesterov Momentum Algorithm',
defaults={'no_prox': False},
has_betas=True,
num_betas=3
),
OptimInfo(
name='adanw',
opt_class=Adan,
description='Adaptive Nesterov Momentum with decoupled weight decay',
defaults={'no_prox': True},
has_betas=True,
num_betas=3
),
OptimInfo(
name='adahessian',
opt_class=Adahessian,
description='An Adaptive Second Order Optimizer',
has_betas=True,
second_order=True,
),
OptimInfo(
name='kron',
opt_class=Kron,
description='PSGD optimizer with Kronecker-factored preconditioner',
has_momentum=True,
),
OptimInfo(
name='kronw',
opt_class=Kron,
description='PSGD optimizer with Kronecker-factored preconditioner and decoupled weight decay',
has_momentum=True,
defaults={'decoupled_decay': True}
),
OptimInfo(
name='laprop',
opt_class=LaProp,
description='Separating Momentum and Adaptivity in Adam',
has_betas=True,
),
OptimInfo(
name='lion',
opt_class=Lion,
description='Evolved Sign Momentum optimizer for improved convergence',
has_eps=False,
has_betas=True
),
OptimInfo(
name='madgrad',
opt_class=MADGRAD,
description='Momentum-based Adaptive gradient method',
has_momentum=True
),
OptimInfo(
name='madgradw',
opt_class=MADGRAD,
description='MADGRAD with decoupled weight decay',
has_momentum=True,
defaults={'decoupled_decay': True}
),
OptimInfo(
name='mars',
opt_class=Mars,
description='Unleashing the Power of Variance Reduction for Training Large Models',
has_betas=True,
),
OptimInfo(
name='novograd',
opt_class=NvNovoGrad,
description='Normalized Adam with L2 norm gradient normalization',
has_betas=True
),
OptimInfo(
name='rmsprop',
opt_class=torch.optim.RMSprop,
description='torch.optim.RMSprop, Root Mean Square Propagation',
has_momentum=True,
defaults={'alpha': 0.9}
),
OptimInfo(
name='rmsproptf',
opt_class=RMSpropTF,
description='TensorFlow-style RMSprop implementation, Root Mean Square Propagation',
has_momentum=True,
defaults={'alpha': 0.9}
),
]
for opt in other_optimizers:
registry.register(opt)
registry.register_foreach_default('lion')
def _register_apex_optimizers(registry: OptimizerRegistry) -> None:
"""Register APEX optimizers (lazy import)"""
apex_optimizers = [
OptimInfo(
name='fusedsgd',
opt_class='apex.optimizers.FusedSGD',
description='NVIDIA APEX fused SGD implementation for faster training',
has_eps=False,
has_momentum=True,
defaults={'nesterov': True}
),
OptimInfo(
name='fusedadam',
opt_class='apex.optimizers.FusedAdam',
description='NVIDIA APEX fused Adam implementation',
has_betas=True,
defaults={'adam_w_mode': False}
),
OptimInfo(
name='fusedadamw',
opt_class='apex.optimizers.FusedAdam',
description='NVIDIA APEX fused AdamW implementation',
has_betas=True,
defaults={'adam_w_mode': True}
),
OptimInfo(
name='fusedlamb',
opt_class='apex.optimizers.FusedLAMB',
description='NVIDIA APEX fused LAMB implementation',
has_betas=True
),
OptimInfo(
name='fusednovograd',
opt_class='apex.optimizers.FusedNovoGrad',
description='NVIDIA APEX fused NovoGrad implementation',
has_betas=True,
defaults={'betas': (0.95, 0.98)}
),
]
for opt in apex_optimizers:
registry.register(opt)
def _register_bnb_optimizers(registry: OptimizerRegistry) -> None:
"""Register bitsandbytes optimizers (lazy import)"""
bnb_optimizers = [
OptimInfo(
name='bnbsgd',
opt_class='bitsandbytes.optim.SGD',
description='bitsandbytes SGD',
has_eps=False,
has_momentum=True,
defaults={'nesterov': True}
),
OptimInfo(
name='bnbsgd8bit',
opt_class='bitsandbytes.optim.SGD8bit',
description='bitsandbytes 8-bit SGD with dynamic quantization',
has_eps=False,
has_momentum=True,
defaults={'nesterov': True}
),
OptimInfo(
name='bnbadam',
opt_class='bitsandbytes.optim.Adam',
description='bitsandbytes Adam',
has_betas=True
),
OptimInfo(
name='bnbadam8bit',
            opt_class='bitsandbytes.optim.Adam8bit',
description='bitsandbytes 8-bit Adam with dynamic quantization',
has_betas=True
),
OptimInfo(
name='bnbadamw',
opt_class='bitsandbytes.optim.AdamW',
description='bitsandbytes AdamW',
has_betas=True
),
OptimInfo(
name='bnbadamw8bit',
            opt_class='bitsandbytes.optim.AdamW8bit',
description='bitsandbytes 8-bit AdamW with dynamic quantization',
has_betas=True
),
OptimInfo(
'bnblion',
'bitsandbytes.optim.Lion',
description='bitsandbytes Lion',
has_eps=False,
has_betas=True
),
OptimInfo(
'bnblion8bit',
'bitsandbytes.optim.Lion8bit',
description='bitsandbytes 8-bit Lion with dynamic quantization',
has_eps=False,
has_betas=True
),
OptimInfo(
'bnbademamix',
'bitsandbytes.optim.AdEMAMix',
description='bitsandbytes AdEMAMix',
has_betas=True,
num_betas=3,
),
OptimInfo(
'bnbademamix8bit',
'bitsandbytes.optim.AdEMAMix8bit',
description='bitsandbytes 8-bit AdEMAMix with dynamic quantization',
has_betas=True,
num_betas=3,
),
]
for opt in bnb_optimizers:
registry.register(opt)
default_registry = OptimizerRegistry()
def _register_default_optimizers() -> None:
"""Register all default optimizers to the global registry."""
# Register all optimizer groups
_register_sgd_variants(default_registry)
_register_adam_variants(default_registry)
_register_lamb_lars(default_registry)
_register_other_optimizers(default_registry)
_register_apex_optimizers(default_registry)
_register_bnb_optimizers(default_registry)
_register_cautious_optimizers(default_registry)
# Register aliases
default_registry.register_alias('nesterov', 'sgd')
default_registry.register_alias('nesterovw', 'sgdw')
# Initialize default registry
_register_default_optimizers()
# Public API
def list_optimizers(
filter: Union[str, List[str]] = '',
exclude_filters: Optional[List[str]] = None,
with_description: bool = False,
) -> List[Union[str, Tuple[str, str]]]:
"""List available optimizer names, optionally filtered.
List all registered optimizers, with optional filtering using wildcard patterns.
Optimizers can be filtered using include and exclude patterns, and can optionally
return descriptions with each optimizer name.
Args:
filter: Wildcard style filter string or list of filter strings
(e.g., 'adam*' for all Adam variants, or ['adam*', '*8bit'] for
Adam variants and 8-bit optimizers). Empty string means no filtering.
exclude_filters: Optional list of wildcard patterns to exclude. For example,
['*8bit', 'fused*'] would exclude 8-bit and fused implementations.
with_description: If True, returns tuples of (name, description) instead of
just names. Descriptions provide brief explanations of optimizer characteristics.
Returns:
If with_description is False:
List of optimizer names as strings (e.g., ['adam', 'adamw', ...])
If with_description is True:
List of tuples of (name, description) (e.g., [('adam', 'Adaptive Moment...'), ...])
Examples:
>>> list_optimizers()
['adam', 'adamw', 'sgd', ...]
>>> list_optimizers(['la*', 'nla*']) # List lamb & lars
['lamb', 'lambc', 'larc', 'lars', 'nlarc', 'nlars']
>>> list_optimizers('*adam*', exclude_filters=['bnb*', 'fused*']) # Exclude bnb & apex adam optimizers
['adam', 'adamax', 'adamp', 'adamw', 'nadam', 'nadamw', 'radam']
>>> list_optimizers(with_description=True) # Get descriptions
[('adabelief', 'Adapts learning rate based on gradient prediction error'),
('adadelta', 'torch.optim Adadelta, Adapts learning rates based on running windows of gradients'),
('adafactor', 'Memory-efficient implementation of Adam with factored gradients'),
...]
"""
return default_registry.list_optimizers(filter, exclude_filters, with_description)
def get_optimizer_info(name: str) -> OptimInfo:
"""Get the OptimInfo for an optimizer.
Args:
name: Name of the optimizer
Returns:
OptimInfo configuration
Raises:
ValueError: If optimizer is not found
"""
return default_registry.get_optimizer_info(name)
def get_optimizer_class(
name: str,
bind_defaults: bool = True,
) -> Union[OptimType, OptimizerCallable]:
"""Get optimizer class by name with option to bind default arguments.
Retrieves the optimizer class or a partial function with default arguments bound.
This allows direct instantiation of optimizers with their default configurations
without going through the full factory.
Args:
name: Name of the optimizer to retrieve (e.g., 'adam', 'sgd')
bind_defaults: If True, returns a partial function with default arguments from OptimInfo bound.
If False, returns the raw optimizer class.
Returns:
If bind_defaults is False:
The optimizer class (e.g., torch.optim.Adam)
If bind_defaults is True:
A partial function with default arguments bound
Raises:
ValueError: If optimizer name is not found in registry
Examples:
>>> # Get SGD with nesterov momentum default
>>> SGD = get_optimizer_class('sgd') # nesterov=True bound
>>> opt = SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> # Get raw optimizer class
        >>> SGD = get_optimizer_class('sgd', bind_defaults=False)
>>> opt = SGD(model.parameters(), lr=1e-3, momentum=0.9)
"""
return default_registry.get_optimizer_class(name, bind_defaults=bind_defaults)
def create_optimizer_v2(
model_or_params: Union[nn.Module, ParamsT],
opt: str = 'sgd',
lr: Optional[float] = None,
weight_decay: float = 0.,
momentum: float = 0.9,
foreach: Optional[bool] = None,
filter_bias_and_bn: bool = True,
layer_decay: Optional[float] = None,
param_group_fn: Optional[Callable[[nn.Module], ParamsT]] = None,
**kwargs: Any,
) -> torch.optim.Optimizer:
"""Create an optimizer instance via timm registry.
Creates and configures an optimizer with appropriate parameter groups and settings.
Supports automatic parameter group creation for weight decay and layer-wise learning
rates, as well as custom parameter grouping.
Args:
model_or_params: A PyTorch model or an iterable of parameters/parameter groups.
If a model is provided, parameters will be automatically extracted and grouped
based on the other arguments.
opt: Name of the optimizer to create (e.g., 'adam', 'adamw', 'sgd').
Use list_optimizers() to see available options.
lr: Learning rate. If None, will use the optimizer's default.
weight_decay: Weight decay factor. Will be used to create param groups if model_or_params is a model.
momentum: Momentum factor for optimizers that support it. Only used if the
chosen optimizer accepts a momentum parameter.
foreach: Enable/disable foreach (multi-tensor) implementation if available.
If None, will use optimizer-specific defaults.
filter_bias_and_bn: If True, bias, norm layer parameters (all 1d params) will not have
weight decay applied. Only used when model_or_params is a model and
weight_decay > 0.
layer_decay: Optional layer-wise learning rate decay factor. If provided,
learning rates will be scaled by layer_decay^(max_depth - layer_depth).
Only used when model_or_params is a model.
param_group_fn: Optional function to create custom parameter groups.
If provided, other parameter grouping options will be ignored.
**kwargs: Additional optimizer-specific arguments (e.g., betas for Adam).
Returns:
Configured optimizer instance.
Examples:
>>> # Basic usage with a model
>>> optimizer = create_optimizer_v2(model, 'adamw', lr=1e-3)
>>> # SGD with momentum and weight decay
>>> optimizer = create_optimizer_v2(
... model, 'sgd', lr=0.1, momentum=0.9, weight_decay=1e-4
... )
>>> # Adam with layer-wise learning rate decay
>>> optimizer = create_optimizer_v2(
... model, 'adam', lr=1e-3, layer_decay=0.7
... )
>>> # Custom parameter groups
>>> def group_fn(model):
... return [
... {'params': model.backbone.parameters(), 'lr': 1e-4},
... {'params': model.head.parameters(), 'lr': 1e-3}
... ]
>>> optimizer = create_optimizer_v2(
... model, 'sgd', param_group_fn=group_fn
... )
Note:
Parameter group handling precedence:
1. If param_group_fn is provided, it will be used exclusively
2. If layer_decay is provided, layer-wise groups will be created
3. If weight_decay > 0 and filter_bias_and_bn is True, weight decay groups will be created
4. Otherwise, all parameters will be in a single group
"""
return default_registry.create_optimizer(
model_or_params,
opt=opt,
lr=lr,
weight_decay=weight_decay,
momentum=momentum,
foreach=foreach,
weight_decay_exclude_1d=filter_bias_and_bn,
layer_decay=layer_decay,
param_group_fn=param_group_fn,
**kwargs
)
def optimizer_kwargs(cfg):
""" cfg/argparse to kwargs helper
Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn.
"""
kwargs = dict(
opt=cfg.opt,
lr=cfg.lr,
weight_decay=cfg.weight_decay,
momentum=cfg.momentum,
)
if getattr(cfg, 'opt_eps', None) is not None:
kwargs['eps'] = cfg.opt_eps
if getattr(cfg, 'opt_betas', None) is not None:
kwargs['betas'] = cfg.opt_betas
if getattr(cfg, 'layer_decay', None) is not None:
kwargs['layer_decay'] = cfg.layer_decay
if getattr(cfg, 'opt_args', None) is not None:
kwargs.update(cfg.opt_args)
if getattr(cfg, 'opt_foreach', None) is not None:
kwargs['foreach'] = cfg.opt_foreach
return kwargs
def create_optimizer(
args,
model: Union[nn.Module, ParamsT],
filter_bias_and_bn: bool = True,
) -> torch.optim.Optimizer:
""" Legacy optimizer factory for backwards compatibility.
NOTE: Use create_optimizer_v2 for new code.
"""
return create_optimizer_v2(
model,
**optimizer_kwargs(cfg=args),
filter_bias_and_bn=filter_bias_and_bn,
)
| pytorch-image-models/timm/optim/_optim_factory.py/0 | {
"file_path": "pytorch-image-models/timm/optim/_optim_factory.py",
"repo_id": "pytorch-image-models",
"token_count": 18438
} |
""" Lookahead Optimizer Wrapper.
Implementation modified from: https://github.com/alphadl/lookahead.pytorch
Paper: `Lookahead Optimizer: k steps forward, 1 step back` - https://arxiv.org/abs/1907.08610
Hacked together by / Copyright 2020 Ross Wightman
"""
from collections import OrderedDict
from typing import Callable, Dict
import torch
from torch.optim.optimizer import Optimizer
from collections import defaultdict
class Lookahead(Optimizer):
def __init__(self, base_optimizer, alpha=0.5, k=6):
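        # alpha: interpolation factor applied to (fast_weights - slow_weights) at each slow update.
        # k: number of base ('fast') optimizer steps between successive slow weight updates.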
# NOTE super().__init__() not called on purpose
self._optimizer_step_pre_hooks: Dict[int, Callable] = OrderedDict()
self._optimizer_step_post_hooks: Dict[int, Callable] = OrderedDict()
if not 0.0 <= alpha <= 1.0:
raise ValueError(f'Invalid slow update rate: {alpha}')
if not 1 <= k:
raise ValueError(f'Invalid lookahead steps: {k}')
defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0)
self._base_optimizer = base_optimizer
self.param_groups = base_optimizer.param_groups
self.defaults = base_optimizer.defaults
self.defaults.update(defaults)
self.state = defaultdict(dict)
# manually add our defaults to the param groups
for name, default in defaults.items():
for group in self._base_optimizer.param_groups:
group.setdefault(name, default)
@torch.no_grad()
def update_slow(self, group):
for fast_p in group["params"]:
if fast_p.grad is None:
continue
param_state = self._base_optimizer.state[fast_p]
if 'lookahead_slow_buff' not in param_state:
param_state['lookahead_slow_buff'] = torch.empty_like(fast_p)
param_state['lookahead_slow_buff'].copy_(fast_p)
slow = param_state['lookahead_slow_buff']
slow.add_(fast_p - slow, alpha=group['lookahead_alpha'])
fast_p.copy_(slow)
def sync_lookahead(self):
for group in self._base_optimizer.param_groups:
self.update_slow(group)
@torch.no_grad()
def step(self, closure=None):
loss = self._base_optimizer.step(closure)
for group in self._base_optimizer.param_groups:
group['lookahead_step'] += 1
if group['lookahead_step'] % group['lookahead_k'] == 0:
self.update_slow(group)
return loss
def state_dict(self):
return self._base_optimizer.state_dict()
def load_state_dict(self, state_dict):
self._base_optimizer.load_state_dict(state_dict)
self.param_groups = self._base_optimizer.param_groups
| pytorch-image-models/timm/optim/lookahead.py/0 | {
"file_path": "pytorch-image-models/timm/optim/lookahead.py",
"repo_id": "pytorch-image-models",
"token_count": 1134
} |
""" Misc utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import argparse
import ast
import re
def natural_key(string_):
"""See http://www.codinghorror.com/blog/archives/001018.html"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def add_bool_arg(parser, name, default=False, help=''):
dest_name = name.replace('-', '_')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=dest_name, action='store_true', help=help)
group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help)
parser.set_defaults(**{dest_name: default})
class ParseKwargs(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
kw = {}
for value in values:
key, value = value.split('=')
try:
kw[key] = ast.literal_eval(value)
except ValueError:
kw[key] = str(value) # fallback to string (avoid need to escape on command line)
setattr(namespace, self.dest, kw)
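# Illustrative usage of ParseKwargs (a sketch; the '--model-kwargs' argument name is hypothetical):
#   parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)
#   '--model-kwargs eps=1e-8 act_layer=gelu' parses to {'eps': 1e-08, 'act_layer': 'gelu'}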
| pytorch-image-models/timm/utils/misc.py/0 | {
"file_path": "pytorch-image-models/timm/utils/misc.py",
"repo_id": "pytorch-image-models",
"token_count": 451
} |
<!---
Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Contribute to smolagents
Everyone is welcome to contribute, and we value everybody's contribution. Code
contributions are not the only way to help the community. Answering questions, helping
others, and improving the documentation are also immensely valuable.
It also helps us if you spread the word! Reference the library in blog posts
about the awesome projects it made possible, shout out on Twitter every time it has
helped you, or simply ⭐️ the repository to say thank you.
However you choose to contribute, please be mindful and respect our
[code of conduct](https://github.com/huggingface/smolagents/blob/main/CODE_OF_CONDUCT.md).
**This guide was heavily inspired by the awesome [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md).**
## Ways to contribute
There are several ways you can contribute to smolagents.
* Fix outstanding issues with the existing code.
* Submit issues related to bugs or desired new features.
* Contribute to the examples or to the documentation.
> All contributions are equally valuable to the community. 🥰
## Fixing outstanding issues
If you notice an issue with the existing code and have a fix in mind, feel free to [start contributing](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request) and open
a Pull Request!
## Submitting a bug-related issue or feature request
Do your best to follow these guidelines when submitting a bug-related issue or a feature
request. It will make it easier for us to come back to you quickly and with good
feedback.
### Did you find a bug?
The smolagents library is robust and reliable thanks to users who report the problems they encounter.
Before you report an issue, we would really appreciate it if you could **make sure the bug was not
already reported** (use the search bar on GitHub under Issues). Your issue should also be related to bugs in the
library itself, and not your code.
Once you've confirmed the bug hasn't already been reported, please include the following information in your issue so
we can quickly resolve it:
* Your **OS type and version**, as well as your environment versions (versions of rust, python, and dependencies).
* A short, self-contained, code snippet that allows us to reproduce the bug.
* The *full* traceback if an exception is raised.
* Attach any other additional information, like screenshots, you think may help.
### Do you want a new feature?
If there is a new feature you'd like to see in smolagents, please open an issue and describe:
1. What is the *motivation* behind this feature? Is it related to a problem or frustration with the library? Is it
a feature related to something you need for a project? Is it something you worked on and think it could benefit
the community?
Whatever it is, we'd love to hear about it!
2. Describe your requested feature in as much detail as possible. The more you can tell us about it, the better
we'll be able to help you.
3. Provide a *code snippet* that demonstrates the feature's usage.
4. If the feature is related to a paper, please include a link.
If your issue is well written we're already 80% of the way there by the time you create it.
## Do you want to add documentation?
We're always looking for improvements to the documentation that make it more clear and accurate. Please let us know
how the documentation can be improved, such as typos and any content that is missing, unclear, or inaccurate. We'll be
happy to make the changes or help you make a contribution if you're interested!
## I want to become a maintainer of the project. How do I get there?
smolagents is a project led and managed by Hugging Face. We are more than
happy to have motivated individuals from other organizations join us as maintainers with the goal of helping smolagents
make a dent in the world of Agents.
If you are such an individual (or organization), please reach out to us and let's collaborate. | smolagents/CONTRIBUTING.md/0 | {
"file_path": "smolagents/CONTRIBUTING.md",
"repo_id": "smolagents",
"token_count": 1156
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Agents
<Tip warning={true}>
Smolagents is an experimental API which is subject to change at any time. Results returned by the agents
can vary as the APIs or underlying models are prone to change.
</Tip>
To learn more about agents and tools, make sure to read the [introductory guide](../index). This page
contains the API docs for the underlying classes.
## Agents
Our agents inherit from [`MultiStepAgent`], which means they can act in multiple steps, each step consisting of one thought, then one tool call and execution. Read more in [this conceptual guide](../conceptual_guides/react).
We provide two types of agents, based on the main [`Agent`] class.
- [`CodeAgent`] is the default agent; it writes its tool calls in Python code.
- [`ToolCallingAgent`] writes its tool calls in JSON.
Both require the arguments `model` and `tools` (a list of tools) at initialization.
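As a minimal sketch (the choice of `HfApiModel` and the task string are illustrative, not requirements):

```python
from smolagents import CodeAgent, HfApiModel

model = HfApiModel()  # any model callable following the messages format works here
agent = CodeAgent(tools=[], model=model)
agent.run("What is the result of 2 power 3.7384?")
```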
### Classes of agents
[[autodoc]] MultiStepAgent
[[autodoc]] CodeAgent
[[autodoc]] ToolCallingAgent
### ManagedAgent
_This class is deprecated since 1.8.0: now you simply need to pass attributes `name` and `description` to a normal agent to make it callable by a manager agent._
### stream_to_gradio
[[autodoc]] stream_to_gradio
### GradioUI
> [!TIP]
> You must have `gradio` installed to use the UI. Please run `pip install smolagents[gradio]` if it is not already installed.
[[autodoc]] GradioUI
| smolagents/docs/source/en/reference/agents.md/0 | {
"file_path": "smolagents/docs/source/en/reference/agents.md",
"repo_id": "smolagents",
"token_count": 573
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Agents
<Tip warning={true}>
Smolagents is an experimental API which is subject to change at any time. Results returned by the agents can vary as the APIs or underlying models are prone to change.
</Tip>
To learn more about agents and tools, make sure to read the [introductory guide](../index).
This page contains the API docs for the underlying classes.
## Agents
Our agents inherit from [`MultiStepAgent`], which means they can act in multiple steps, each step consisting of one thought, then one tool call and execution. Read more in [this conceptual guide](../conceptual_guides/react).
We provide two types of agents, based on the main [`Agent`] class.
- [`CodeAgent`] is the default agent; it writes its tool calls in Python code.
- [`ToolCallingAgent`] writes its tool calls in JSON.
Both require the arguments `model` and `tools` (a list of tools) at initialization.
### Classes of agents
[[autodoc]] MultiStepAgent
[[autodoc]] CodeAgent
[[autodoc]] ToolCallingAgent
### ManagedAgent
[[autodoc]] ManagedAgent
### stream_to_gradio
[[autodoc]] stream_to_gradio
### GradioUI
[[autodoc]] GradioUI
## Models
You are free to create and use your own models to power your agent.
You can use any `model` callable for your agent, as long as:
1. It follows the [messages format](./chat_templating) (`List[Dict[str, str]]`) for its input `messages`, and it returns a `str`.
2. It stops generating outputs *before* the sequences passed in the argument `stop_sequences`.
To define your LLM, you can write a `custom_model` method which accepts a list of [messages](./chat_templating) and returns an object with a `.content` attribute containing the text. This callable also needs to accept a `stop_sequences` argument that indicates when to stop generating.
```python
from huggingface_hub import login, InferenceClient
login("<YOUR_HUGGINGFACEHUB_API_TOKEN>")
model_id = "meta-llama/Llama-3.3-70B-Instruct"
client = InferenceClient(model=model_id)
def custom_model(messages, stop_sequences=["Task"]):
response = client.chat_completion(messages, stop=stop_sequences, max_tokens=1000)
answer = response.choices[0].message
return answer
```
Additionally, `custom_model` can also take a `grammar` argument. If you specify a `grammar` upon agent initialization, this argument will be passed to the calls to the model with the `grammar` you defined, to allow [constrained generation](https://huggingface.co/docs/text-generation-inference/conceptual/guidance) that forces properly formatted agent outputs.
### TransformersModel
For convenience, we have added a `TransformersModel` that implements the points above by building a local `transformers` pipeline for the model_id given at initialization.
```python
from smolagents import TransformersModel
model = TransformersModel(model_id="HuggingFaceTB/SmolLM-135M-Instruct")
print(model([{"role": "user", "content": "Ok!"}], stop_sequences=["great"]))
```
```text
>>> What a
```
[[autodoc]] TransformersModel
### HfApiModel
`HfApiModel` wraps an [HF Inference API](https://huggingface.co/docs/api-inference/index) client for the execution of the LLM.
```python
from smolagents import HfApiModel
messages = [
{"role": "user", "content": "Hello, how are you?"},
{"role": "assistant", "content": "I'm doing great. How can I help you today?"},
{"role": "user", "content": "No need to help, take it easy."},
]
model = HfApiModel()
print(model(messages))
```
```text
>>> Of course! If you change your mind, feel free to reach out. Take care!
```
[[autodoc]] HfApiModel
### LiteLLMModel
`LiteLLMModel` leverages [LiteLLM](https://www.litellm.ai/) to support 100+ LLMs from various providers.
You can pass kwargs upon model initialization that will then be used whenever the model is called; for example, below we pass `temperature`.
```python
from smolagents import LiteLLMModel
messages = [
{"role": "user", "content": "Hello, how are you?"},
{"role": "assistant", "content": "I'm doing great. How can I help you today?"},
{"role": "user", "content": "No need to help, take it easy."},
]
model = LiteLLMModel("anthropic/claude-3-5-sonnet-latest", temperature=0.2, max_tokens=10)
print(model(messages))
```
[[autodoc]] LiteLLMModel
### OpenAiServerModel
This class lets you call any OpenAIServer-compatible model.
Here's how you can set it up (you can customise the `api_base` url to point to another server):
```py
import os
from smolagents import OpenAIServerModel
model = OpenAIServerModel(
model_id="gpt-4o",
api_base="https://api.openai.com/v1",
api_key=os.environ["OPENAI_API_KEY"],
)
``` | smolagents/docs/source/hi/reference/agents.md/0 | {
"file_path": "smolagents/docs/source/hi/reference/agents.md",
"repo_id": "smolagents",
"token_count": 4332
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Tools
<Tip warning={true}>
Smolagents is an experimental API which is subject to change at any time. Results returned by the agents
can vary as the APIs or underlying models are prone to change.
</Tip>
To learn more about agents and tools, make sure to read the [introductory guide](../index). This page
contains the API docs for the underlying classes.
## Tools
### load_tool
[[autodoc]] load_tool
### tool
[[autodoc]] tool
### Tool
[[autodoc]] Tool
### launch_gradio_demo
[[autodoc]] launch_gradio_demo
## Default tools
### PythonInterpreterTool
[[autodoc]] PythonInterpreterTool
### DuckDuckGoSearchTool
[[autodoc]] DuckDuckGoSearchTool
### VisitWebpageTool
[[autodoc]] VisitWebpageTool
## ToolCollection
[[autodoc]] ToolCollection
## Agent Types
Agents can handle any type of object in-between tools; tools, being completely multimodal, can accept and return
text, image, audio, video, among other types. In order to increase compatibility between tools, as well as to
correctly render these returns in ipython (jupyter, colab, ipython notebooks, ...), we implement wrapper classes
around these types.
The wrapped objects should continue behaving as initially; a text object should still behave as a string, an image
object should still behave as a `PIL.Image`.
These types have three specific purposes, illustrated in the short sketch after this list:
- Calling `to_raw` on the type should return the underlying object
- Calling `to_string` on the type should return the object as a string: that can be the string in case of an `AgentText`
but will be the path of the serialized version of the object in other instances
- Displaying it in an ipython kernel should display the object correctly
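As a brief sketch of these behaviors (the `photo.png` file name is hypothetical; `AgentText` and `AgentImage` are documented below):

```python
from smolagents.agent_types import AgentText, AgentImage

text = AgentText("Hello, agent!")
text.to_raw()     # returns the underlying str: "Hello, agent!"
text.to_string()  # returns the same string, since text serializes to itself

image = AgentImage("photo.png")  # constructed here from a local file path
image.to_raw()     # returns the underlying PIL.Image object
image.to_string()  # returns the path of a serialized copy of the image
```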
### AgentText
[[autodoc]] smolagents.agent_types.AgentText
### AgentImage
[[autodoc]] smolagents.agent_types.AgentImage
### AgentAudio
[[autodoc]] smolagents.agent_types.AgentAudio
| smolagents/docs/source/zh/reference/tools.md/0 | {
"file_path": "smolagents/docs/source/zh/reference/tools.md",
"repo_id": "smolagents",
"token_count": 704
} |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.resources
import inspect
import re
import textwrap
import time
from collections import deque
from logging import getLogger
from typing import Any, Callable, Dict, Generator, List, Optional, Set, Tuple, Union
import yaml
from jinja2 import StrictUndefined, Template
from rich.console import Group
from rich.panel import Panel
from rich.rule import Rule
from rich.text import Text
from smolagents.agent_types import AgentAudio, AgentImage, handle_agent_output_types
from smolagents.memory import ActionStep, AgentMemory, PlanningStep, SystemPromptStep, TaskStep, ToolCall
from smolagents.monitoring import (
YELLOW_HEX,
AgentLogger,
LogLevel,
)
from smolagents.utils import (
AgentError,
AgentExecutionError,
AgentGenerationError,
AgentMaxStepsError,
AgentParsingError,
parse_code_blobs,
parse_json_tool_call,
truncate_content,
)
from .agent_types import AgentType
from .default_tools import TOOL_MAPPING, FinalAnswerTool
from .e2b_executor import E2BExecutor
from .local_python_executor import (
BASE_BUILTIN_MODULES,
LocalPythonInterpreter,
fix_final_answer_code,
)
from .models import (
ChatMessage,
MessageRole,
)
from .monitoring import Monitor
from .tools import Tool
logger = getLogger(__name__)
def get_variable_names(template: str) -> Set[str]:
    """Return the set of variable names referenced in a `{{ ... }}` template."""
    pattern = re.compile(r"\{\{([^{}]+)\}\}")
    return {match.group(1).strip() for match in pattern.finditer(template)}
def populate_template(template: str, variables: Dict[str, Any]) -> str:
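    # StrictUndefined makes Jinja raise an error for any template variable that is missing
    # from `variables`, instead of silently rendering it as an empty string.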
compiled_template = Template(template, undefined=StrictUndefined)
try:
return compiled_template.render(**variables)
except Exception as e:
raise Exception(f"Error during jinja template rendering: {type(e).__name__}: {e}")
class MultiStepAgent:
"""
Agent class that solves the given task step by step, using the ReAct framework:
While the objective is not reached, the agent will perform a cycle of action (given by the LLM) and observation (obtained from the environment).
Args:
tools (`list[Tool]`): [`Tool`]s that the agent can use.
model (`Callable[[list[dict[str, str]]], ChatMessage]`): Model that will generate the agent's actions.
prompt_templates (`dict`, *optional*): Prompt templates.
max_steps (`int`, default `6`): Maximum number of steps the agent can take to solve the task.
tool_parser (`Callable`, *optional*): Function used to parse the tool calls from the LLM output.
add_base_tools (`bool`, default `False`): Whether to add the base tools to the agent's tools.
verbosity_level (`LogLevel`, default `LogLevel.INFO`): Level of verbosity of the agent's logs.
grammar (`dict[str, str]`, *optional*): Grammar used to parse the LLM output.
managed_agents (`list`, *optional*): Managed agents that the agent can call.
step_callbacks (`list[Callable]`, *optional*): Callbacks that will be called at each step.
planning_interval (`int`, *optional*): Interval at which the agent will run a planning step.
name (`str`, *optional*): Necessary for a managed agent only - the name by which this agent can be called.
description (`str`, *optional*): Necessary for a managed agent only - the description of this agent.
provide_run_summary (`bool`, *optional*): Whether to provide a run summary when called as a managed agent.
final_answer_checks (`list`, *optional*): List of Callables to run before returning a final answer for checking validity.
"""
def __init__(
self,
tools: List[Tool],
model: Callable[[List[Dict[str, str]]], ChatMessage],
prompt_templates: Optional[dict] = None,
max_steps: int = 6,
tool_parser: Optional[Callable] = None,
add_base_tools: bool = False,
verbosity_level: LogLevel = LogLevel.INFO,
grammar: Optional[Dict[str, str]] = None,
managed_agents: Optional[List] = None,
step_callbacks: Optional[List[Callable]] = None,
planning_interval: Optional[int] = None,
name: Optional[str] = None,
description: Optional[str] = None,
provide_run_summary: bool = False,
final_answer_checks: Optional[List[Callable]] = None,
):
if tool_parser is None:
tool_parser = parse_json_tool_call
self.agent_name = self.__class__.__name__
self.model = model
self.prompt_templates = prompt_templates or {}
self.max_steps = max_steps
self.step_number: int = 0
self.tool_parser = tool_parser
self.grammar = grammar
self.planning_interval = planning_interval
self.state = {}
self.name = name
self.description = description
self.provide_run_summary = provide_run_summary
self.managed_agents = {}
if managed_agents is not None:
for managed_agent in managed_agents:
assert managed_agent.name and managed_agent.description, (
"All managed agents need both a name and a description!"
)
self.managed_agents = {agent.name: agent for agent in managed_agents}
for tool in tools:
assert isinstance(tool, Tool), f"This element is not of class Tool: {str(tool)}"
self.tools = {tool.name: tool for tool in tools}
if add_base_tools:
for tool_name, tool_class in TOOL_MAPPING.items():
if tool_name != "python_interpreter" or self.__class__.__name__ == "ToolCallingAgent":
self.tools[tool_name] = tool_class()
self.tools["final_answer"] = FinalAnswerTool()
self.system_prompt = self.initialize_system_prompt()
self.input_messages = None
self.task = None
self.memory = AgentMemory(self.system_prompt)
self.logger = AgentLogger(level=verbosity_level)
self.monitor = Monitor(self.model, self.logger)
self.step_callbacks = step_callbacks if step_callbacks is not None else []
self.step_callbacks.append(self.monitor.update_metrics)
self.final_answer_checks = final_answer_checks
@property
def logs(self):
logger.warning(
"The 'logs' attribute is deprecated and will soon be removed. Please use 'self.memory.steps' instead."
)
return [self.memory.system_prompt] + self.memory.steps
def initialize_system_prompt(self):
"""To be implemented in child classes"""
pass
def write_memory_to_messages(
self,
summary_mode: Optional[bool] = False,
) -> List[Dict[str, str]]:
"""
Reads past llm_outputs, actions, and observations or errors from the memory into a series of messages
that can be used as input to the LLM. Adds a number of keywords (such as PLAN, error, etc) to help
the LLM.
"""
messages = self.memory.system_prompt.to_messages(summary_mode=summary_mode)
for memory_step in self.memory.steps:
messages.extend(memory_step.to_messages(summary_mode=summary_mode))
return messages
def visualize(self):
"""Creates a rich tree visualization of the agent's structure."""
self.logger.visualize_agent_tree(self)
def extract_action(self, model_output: str, split_token: str) -> Tuple[str, str]:
"""
Parse action from the LLM output
Args:
model_output (`str`): Output of the LLM
split_token (`str`): Separator for the action. Should match the example in the system prompt.
"""
try:
split = model_output.split(split_token)
rationale, action = (
split[-2],
split[-1],
) # NOTE: using indexes starting from the end solves for when you have more than one split_token in the output
except Exception:
raise AgentParsingError(
f"No '{split_token}' token provided in your output.\nYour output:\n{model_output}\n. Be sure to include an action, prefaced with '{split_token}'!",
self.logger,
)
return rationale.strip(), action.strip()
def provide_final_answer(self, task: str, images: Optional[list[str]]) -> str:
"""
Provide the final answer to the task, based on the logs of the agent's interactions.
Args:
task (`str`): Task to perform.
images (`list[str]`, *optional*): Paths to image(s).
Returns:
`str`: Final answer to the task.
"""
messages = [{"role": MessageRole.SYSTEM, "content": []}]
if images:
messages[0]["content"] = [
{
"type": "text",
"text": "An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:",
}
]
messages[0]["content"].append({"type": "image"})
messages += self.write_memory_to_messages()[1:]
messages += [
{
"role": MessageRole.USER,
"content": [
{
"type": "text",
"text": f"Based on the above, please provide an answer to the following user request:\n{task}",
}
],
}
]
else:
messages[0]["content"] = [
{
"type": "text",
"text": "An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:",
}
]
messages += self.write_memory_to_messages()[1:]
messages += [
{
"role": MessageRole.USER,
"content": [
{
"type": "text",
"text": f"Based on the above, please provide an answer to the following user request:\n{task}",
}
],
}
]
try:
chat_message: ChatMessage = self.model(messages)
return chat_message.content
except Exception as e:
return f"Error in generating final LLM output:\n{e}"
def execute_tool_call(self, tool_name: str, arguments: Union[Dict[str, str], str]) -> Any:
"""
Execute tool with the provided input and returns the result.
This method replaces arguments with the actual values from the state if they refer to state variables.
Args:
tool_name (`str`): Name of the Tool to execute (should be one from self.tools).
arguments (Dict[str, str]): Arguments passed to the Tool.
"""
available_tools = {**self.tools, **self.managed_agents}
if tool_name not in available_tools:
error_msg = f"Unknown tool {tool_name}, should be instead one of {list(available_tools.keys())}."
raise AgentExecutionError(error_msg, self.logger)
try:
if isinstance(arguments, str):
if tool_name in self.managed_agents:
observation = available_tools[tool_name].__call__(arguments)
else:
observation = available_tools[tool_name].__call__(arguments, sanitize_inputs_outputs=True)
elif isinstance(arguments, dict):
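                # Substitute state variables: if an argument value names a key in self.state
                # (e.g. 'image.png' stored by a previous step), pass the stored object instead of the string.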
for key, value in arguments.items():
if isinstance(value, str) and value in self.state:
arguments[key] = self.state[value]
if tool_name in self.managed_agents:
observation = available_tools[tool_name].__call__(**arguments)
else:
observation = available_tools[tool_name].__call__(**arguments, sanitize_inputs_outputs=True)
else:
error_msg = f"Arguments passed to tool should be a dict or string: got a {type(arguments)}."
raise AgentExecutionError(error_msg, self.logger)
return observation
except Exception as e:
if tool_name in self.tools:
tool = self.tools[tool_name]
error_msg = (
f"Error whene executing tool {tool_name} with arguments {arguments}: {type(e).__name__}: {e}\nYou should only use this tool with a correct input.\n"
f"As a reminder, this tool's description is the following: '{tool.description}'.\nIt takes inputs: {tool.inputs} and returns output type {tool.output_type}"
)
raise AgentExecutionError(error_msg, self.logger)
elif tool_name in self.managed_agents:
error_msg = (
f"Error in calling team member: {e}\nYou should only ask this team member with a correct request.\n"
f"As a reminder, this team member's description is the following:\n{available_tools[tool_name]}"
)
raise AgentExecutionError(error_msg, self.logger)
def step(self, memory_step: ActionStep) -> Union[None, Any]:
"""To be implemented in children classes. Should return either None if the step is not final."""
pass
def run(
self,
task: str,
stream: bool = False,
reset: bool = True,
images: Optional[List[str]] = None,
additional_args: Optional[Dict] = None,
):
"""
Run the agent for the given task.
Args:
task (`str`): Task to perform.
stream (`bool`): Whether to run in a streaming way.
reset (`bool`): Whether to reset the conversation or keep it going from previous run.
images (`list[str]`, *optional*): Paths to image(s).
additional_args (`dict`): Any other variables that you want to pass to the agent run, for instance images or dataframes. Give them clear names!
Example:
```py
from smolagents import CodeAgent
agent = CodeAgent(tools=[])
agent.run("What is the result of 2 power 3.7384?")
```
"""
self.task = task
if additional_args is not None:
self.state.update(additional_args)
self.task += f"""
You have been provided with these additional arguments, that you can access using the keys as variables in your python code:
{str(additional_args)}."""
self.system_prompt = self.initialize_system_prompt()
self.memory.system_prompt = SystemPromptStep(system_prompt=self.system_prompt)
if reset:
self.memory.reset()
self.monitor.reset()
self.logger.log_task(
content=self.task.strip(),
subtitle=f"{type(self.model).__name__} - {(self.model.model_id if hasattr(self.model, 'model_id') else '')}",
level=LogLevel.INFO,
title=self.name if hasattr(self, "name") else None,
)
self.memory.steps.append(TaskStep(task=self.task, task_images=images))
if stream:
# The steps are returned as they are executed through a generator to iterate on.
return self._run(task=self.task, images=images)
# Outputs are returned only at the end as a string. We only look at the last step
return deque(self._run(task=self.task, images=images), maxlen=1)[0]
def _run(self, task: str, images: List[str] | None = None) -> Generator[ActionStep | AgentType, None, None]:
"""
Run the agent in streaming mode and returns a generator of all the steps.
Args:
task (`str`): Task to perform.
images (`list[str]`): Paths to image(s).
"""
final_answer = None
self.step_number = 1
while final_answer is None and self.step_number <= self.max_steps:
step_start_time = time.time()
memory_step = ActionStep(
step_number=self.step_number,
start_time=step_start_time,
observations_images=images,
)
try:
if self.planning_interval is not None and self.step_number % self.planning_interval == 1:
self.planning_step(
task,
is_first_step=(self.step_number == 1),
step=self.step_number,
)
self.logger.log_rule(f"Step {self.step_number}", level=LogLevel.INFO)
# Run one step!
final_answer = self.step(memory_step)
if final_answer is not None and self.final_answer_checks is not None:
for check_function in self.final_answer_checks:
try:
assert check_function(final_answer, self.memory)
except Exception as e:
final_answer = None
raise AgentError(f"Check {check_function.__name__} failed with error: {e}", self.logger)
except AgentError as e:
memory_step.error = e
finally:
memory_step.end_time = time.time()
memory_step.duration = memory_step.end_time - step_start_time
self.memory.steps.append(memory_step)
for callback in self.step_callbacks:
# For compatibility with old callbacks that don't take the agent as an argument
if len(inspect.signature(callback).parameters) == 1:
callback(memory_step)
else:
callback(memory_step, agent=self)
self.step_number += 1
yield memory_step
if final_answer is None and self.step_number == self.max_steps + 1:
error_message = "Reached max steps."
final_answer = self.provide_final_answer(task, images)
final_memory_step = ActionStep(
step_number=self.step_number, error=AgentMaxStepsError(error_message, self.logger)
)
final_memory_step.action_output = final_answer
final_memory_step.end_time = time.time()
final_memory_step.duration = memory_step.end_time - step_start_time
self.memory.steps.append(final_memory_step)
for callback in self.step_callbacks:
# For compatibility with old callbacks that don't take the agent as an argument
if len(inspect.signature(callback).parameters) == 1:
callback(final_memory_step)
else:
callback(final_memory_step, agent=self)
yield final_memory_step
yield handle_agent_output_types(final_answer)
def planning_step(self, task, is_first_step: bool, step: int) -> None:
"""
Used periodically by the agent to plan the next steps to reach the objective.
Args:
task (`str`): Task to perform.
is_first_step (`bool`): If this step is not the first one, the plan should be an update over a previous plan.
step (`int`): The number of the current step, used as an indication for the LLM.
"""
if is_first_step:
message_prompt_facts = {
"role": MessageRole.SYSTEM,
"content": [{"type": "text", "text": self.prompt_templates["planning"]["initial_facts"]}],
}
input_messages = [message_prompt_facts]
chat_message_facts: ChatMessage = self.model(input_messages)
answer_facts = chat_message_facts.content
message_prompt_plan = {
"role": MessageRole.USER,
"content": [
{
"type": "text",
"text": populate_template(
self.prompt_templates["planning"]["initial_plan"],
variables={
"task": task,
"tools": self.tools,
"managed_agents": self.managed_agents,
"answer_facts": answer_facts,
},
),
}
],
}
chat_message_plan: ChatMessage = self.model(
[message_prompt_plan],
stop_sequences=["<end_plan>"],
)
answer_plan = chat_message_plan.content
final_plan_redaction = f"""Here is the plan of action that I will follow to solve the task:
```
{answer_plan}
```"""
final_facts_redaction = f"""Here are the facts that I know so far:
```
{answer_facts}
```""".strip()
self.memory.steps.append(
PlanningStep(
model_input_messages=input_messages,
plan=final_plan_redaction,
facts=final_facts_redaction,
model_output_message_plan=chat_message_plan,
model_output_message_facts=chat_message_facts,
)
)
self.logger.log(
Rule("[bold]Initial plan", style="orange"),
Text(final_plan_redaction),
level=LogLevel.INFO,
)
else: # update plan
memory_messages = self.write_memory_to_messages(
summary_mode=False
) # This will not log the plan but will log facts
# Redact updated facts
facts_update_pre_messages = {
"role": MessageRole.SYSTEM,
"content": [{"type": "text", "text": self.prompt_templates["planning"]["update_facts_pre_messages"]}],
}
facts_update_post_messages = {
"role": MessageRole.SYSTEM,
"content": [{"type": "text", "text": self.prompt_templates["planning"]["update_facts_post_messages"]}],
}
input_messages = [facts_update_pre_messages] + memory_messages + [facts_update_post_messages]
chat_message_facts: ChatMessage = self.model(input_messages)
facts_update = chat_message_facts.content
# Redact updated plan
update_plan_pre_messages = {
"role": MessageRole.SYSTEM,
"content": [
{
"type": "text",
"text": populate_template(
self.prompt_templates["planning"]["update_plan_pre_messages"], variables={"task": task}
),
}
],
}
update_plan_post_messages = {
"role": MessageRole.SYSTEM,
"content": [
{
"type": "text",
"text": populate_template(
self.prompt_templates["planning"]["update_plan_pre_messages"],
variables={
"task": task,
"tools": self.tools,
"managed_agents": self.managed_agents,
"facts_update": facts_update,
"remaining_steps": (self.max_steps - step),
},
),
}
],
}
chat_message_plan: ChatMessage = self.model(
[update_plan_pre_messages] + memory_messages + [update_plan_post_messages],
stop_sequences=["<end_plan>"],
)
# Log final facts and plan
final_plan_redaction = textwrap.dedent(
f"""I still need to solve the task I was given:
```
{task}
```
Here is my new/updated plan of action to solve the task:
```
{chat_message_plan.content}
```"""
)
final_facts_redaction = textwrap.dedent(
f"""Here is the updated list of the facts that I know:
```
{facts_update}
```"""
)
self.memory.steps.append(
PlanningStep(
model_input_messages=input_messages,
plan=final_plan_redaction,
facts=final_facts_redaction,
model_output_message_plan=chat_message_plan,
model_output_message_facts=chat_message_facts,
)
)
self.logger.log(
Rule("[bold]Updated plan", style="orange"),
Text(final_plan_redaction),
level=LogLevel.INFO,
)
def replay(self, detailed: bool = False):
"""Prints a pretty replay of the agent's steps.
Args:
detailed (bool, optional): If True, also displays the memory at each step. Defaults to False.
Careful: will increase log length exponentially. Use only for debugging.
"""
self.memory.replay(self.logger, detailed=detailed)
def __call__(self, task: str, **kwargs):
"""
This method is called only by a manager agent.
Adds additional prompting for the managed agent, runs it, and wraps the output.
"""
full_task = populate_template(
self.prompt_templates["managed_agent"]["task"],
variables=dict(name=self.name, task=task),
)
report = self.run(full_task, **kwargs)
answer = populate_template(
self.prompt_templates["managed_agent"]["report"], variables=dict(name=self.name, final_answer=report)
)
if self.provide_run_summary:
answer += "\n\nFor more detail, find below a summary of this agent's work:\n<summary_of_work>\n"
for message in self.write_memory_to_messages(summary_mode=True):
content = message["content"]
answer += "\n" + truncate_content(str(content)) + "\n---"
answer += "\n</summary_of_work>"
return answer
class ToolCallingAgent(MultiStepAgent):
"""
This agent uses JSON-like tool calls, using method `model.get_tool_call` to leverage the LLM engine's tool calling capabilities.
Args:
tools (`list[Tool]`): [`Tool`]s that the agent can use.
model (`Callable[[list[dict[str, str]]], ChatMessage]`): Model that will generate the agent's actions.
prompt_templates (`dict`, *optional*): Prompt templates.
planning_interval (`int`, *optional*): Interval at which the agent will run a planning step.
**kwargs: Additional keyword arguments.
"""
def __init__(
self,
tools: List[Tool],
model: Callable[[List[Dict[str, str]]], ChatMessage],
prompt_templates: Optional[dict] = None,
planning_interval: Optional[int] = None,
**kwargs,
):
prompt_templates = prompt_templates or yaml.safe_load(
importlib.resources.files("smolagents.prompts").joinpath("toolcalling_agent.yaml").read_text()
)
super().__init__(
tools=tools,
model=model,
prompt_templates=prompt_templates,
planning_interval=planning_interval,
**kwargs,
)
def initialize_system_prompt(self) -> str:
system_prompt = populate_template(
self.prompt_templates["system_prompt"],
variables={"tools": self.tools, "managed_agents": self.managed_agents},
)
return system_prompt
def step(self, memory_step: ActionStep) -> Union[None, Any]:
"""
Perform one step in the ReAct framework: the agent thinks, acts, and observes the result.
Returns None if the step is not final.
"""
memory_messages = self.write_memory_to_messages()
self.input_messages = memory_messages
# Add new step in logs
memory_step.model_input_messages = memory_messages.copy()
try:
model_message: ChatMessage = self.model(
memory_messages,
tools_to_call_from=list(self.tools.values()),
stop_sequences=["Observation:"],
)
memory_step.model_output_message = model_message
if model_message.tool_calls is None or len(model_message.tool_calls) == 0:
raise Exception("Model did not call any tools. Call `final_answer` tool to return a final answer.")
tool_call = model_message.tool_calls[0]
tool_name, tool_call_id = tool_call.function.name, tool_call.id
tool_arguments = tool_call.function.arguments
except Exception as e:
raise AgentGenerationError(f"Error in generating tool call with model:\n{e}", self.logger) from e
memory_step.tool_calls = [ToolCall(name=tool_name, arguments=tool_arguments, id=tool_call_id)]
# Execute
self.logger.log(
Panel(Text(f"Calling tool: '{tool_name}' with arguments: {tool_arguments}")),
level=LogLevel.INFO,
)
if tool_name == "final_answer":
if isinstance(tool_arguments, dict):
if "answer" in tool_arguments:
answer = tool_arguments["answer"]
else:
answer = tool_arguments
else:
answer = tool_arguments
if (
isinstance(answer, str) and answer in self.state.keys()
): # if the answer is a state variable, return the value
final_answer = self.state[answer]
self.logger.log(
f"[bold {YELLOW_HEX}]Final answer:[/bold {YELLOW_HEX}] Extracting key '{answer}' from state to return value '{final_answer}'.",
level=LogLevel.INFO,
)
else:
final_answer = answer
self.logger.log(
Text(f"Final answer: {final_answer}", style=f"bold {YELLOW_HEX}"),
level=LogLevel.INFO,
)
memory_step.action_output = final_answer
return final_answer
else:
if tool_arguments is None:
tool_arguments = {}
observation = self.execute_tool_call(tool_name, tool_arguments)
observation_type = type(observation)
if observation_type in [AgentImage, AgentAudio]:
if observation_type == AgentImage:
observation_name = "image.png"
elif observation_type == AgentAudio:
observation_name = "audio.mp3"
# TODO: observation naming could allow for different names of same type
self.state[observation_name] = observation
updated_information = f"Stored '{observation_name}' in memory."
else:
updated_information = str(observation).strip()
self.logger.log(
f"Observations: {updated_information.replace('[', '|')}", # escape potential rich-tag-like components
level=LogLevel.INFO,
)
memory_step.observations = updated_information
return None
class CodeAgent(MultiStepAgent):
"""
In this agent, the tool calls will be formulated by the LLM in code format, then parsed and executed.
Args:
tools (`list[Tool]`): [`Tool`]s that the agent can use.
model (`Callable[[list[dict[str, str]]], ChatMessage]`): Model that will generate the agent's actions.
prompt_templates (`dict`, *optional*): Prompt templates.
grammar (`dict[str, str]`, *optional*): Grammar used to parse the LLM output.
additional_authorized_imports (`list[str]`, *optional*): Additional authorized imports for the agent.
planning_interval (`int`, *optional*): Interval at which the agent will run a planning step.
use_e2b_executor (`bool`, default `False`): Whether to use the E2B executor for remote code execution.
max_print_outputs_length (`int`, *optional*): Maximum length of the print outputs.
**kwargs: Additional keyword arguments.
"""
def __init__(
self,
tools: List[Tool],
model: Callable[[List[Dict[str, str]]], ChatMessage],
prompt_templates: Optional[dict] = None,
grammar: Optional[Dict[str, str]] = None,
additional_authorized_imports: Optional[List[str]] = None,
planning_interval: Optional[int] = None,
use_e2b_executor: bool = False,
max_print_outputs_length: Optional[int] = None,
**kwargs,
):
self.additional_authorized_imports = additional_authorized_imports if additional_authorized_imports else []
self.authorized_imports = list(set(BASE_BUILTIN_MODULES) | set(self.additional_authorized_imports))
prompt_templates = prompt_templates or yaml.safe_load(
importlib.resources.files("smolagents.prompts").joinpath("code_agent.yaml").read_text()
)
super().__init__(
tools=tools,
model=model,
prompt_templates=prompt_templates,
grammar=grammar,
planning_interval=planning_interval,
**kwargs,
)
if "*" in self.additional_authorized_imports:
self.logger.log(
"Caution: you set an authorization for all imports, meaning your agent can decide to import any package it deems necessary. This might raise issues if the package is not installed in your environment.",
0,
)
if use_e2b_executor and len(self.managed_agents) > 0:
raise Exception(
f"You passed both {use_e2b_executor=} and some managed agents. Managed agents is not yet supported with remote code execution."
)
all_tools = {**self.tools, **self.managed_agents}
if use_e2b_executor:
self.python_executor = E2BExecutor(
self.additional_authorized_imports,
list(all_tools.values()),
self.logger,
)
else:
self.python_executor = LocalPythonInterpreter(
self.additional_authorized_imports,
all_tools,
max_print_outputs_length=max_print_outputs_length,
)
def initialize_system_prompt(self) -> str:
system_prompt = populate_template(
self.prompt_templates["system_prompt"],
variables={
"tools": self.tools,
"managed_agents": self.managed_agents,
"authorized_imports": (
"You can import from any package you want."
if "*" in self.authorized_imports
else str(self.authorized_imports)
),
},
)
return system_prompt
def step(self, memory_step: ActionStep) -> Union[None, Any]:
"""
Perform one step in the ReAct framework: the agent thinks, acts, and observes the result.
Returns None if the step is not final.
"""
memory_messages = self.write_memory_to_messages()
self.input_messages = memory_messages.copy()
# Add new step in logs
memory_step.model_input_messages = memory_messages.copy()
try:
additional_args = {"grammar": self.grammar} if self.grammar is not None else {}
chat_message: ChatMessage = self.model(
self.input_messages,
stop_sequences=["<end_code>", "Observation:"],
**additional_args,
)
memory_step.model_output_message = chat_message
model_output = chat_message.content
memory_step.model_output = model_output
except Exception as e:
raise AgentGenerationError(f"Error in generating model output:\n{e}", self.logger) from e
self.logger.log_markdown(
content=model_output,
title="Output message of the LLM:",
level=LogLevel.DEBUG,
)
# Parse
try:
code_action = fix_final_answer_code(parse_code_blobs(model_output))
except Exception as e:
error_msg = f"Error in code parsing:\n{e}\nMake sure to provide correct code blobs."
raise AgentParsingError(error_msg, self.logger)
memory_step.tool_calls = [
ToolCall(
name="python_interpreter",
arguments=code_action,
id=f"call_{len(self.memory.steps)}",
)
]
# Execute
self.logger.log_code(title="Executing parsed code:", content=code_action, level=LogLevel.INFO)
is_final_answer = False
try:
output, execution_logs, is_final_answer = self.python_executor(
code_action,
self.state,
)
execution_outputs_console = []
if len(execution_logs) > 0:
execution_outputs_console += [
Text("Execution logs:", style="bold"),
Text(execution_logs),
]
observation = "Execution logs:\n" + execution_logs
except Exception as e:
if hasattr(self.python_executor, "state") and "_print_outputs" in self.python_executor.state:
execution_logs = str(self.python_executor.state["_print_outputs"])
if len(execution_logs) > 0:
execution_outputs_console = [
Text("Execution logs:", style="bold"),
Text(execution_logs),
]
memory_step.observations = "Execution logs:\n" + execution_logs
self.logger.log(Group(*execution_outputs_console), level=LogLevel.INFO)
error_msg = str(e)
if "Import of " in error_msg and " is not allowed" in error_msg:
self.logger.log(
"[bold red]Warning to user: Code execution failed due to an unauthorized import - Consider passing said import under `additional_authorized_imports` when initializing your CodeAgent.",
level=LogLevel.INFO,
)
raise AgentExecutionError(error_msg, self.logger)
truncated_output = truncate_content(str(output))
observation += "Last output from code snippet:\n" + truncated_output
memory_step.observations = observation
execution_outputs_console += [
Text(
f"{('Out - Final answer' if is_final_answer else 'Out')}: {truncated_output}",
style=(f"bold {YELLOW_HEX}" if is_final_answer else ""),
),
]
self.logger.log(Group(*execution_outputs_console), level=LogLevel.INFO)
memory_step.action_output = output
return output if is_final_answer else None
__all__ = ["MultiStepAgent", "CodeAgent", "ToolCallingAgent", "AgentMemory"]
| smolagents/src/smolagents/agents.py/0 | {
"file_path": "smolagents/src/smolagents/agents.py",
"repo_id": "smolagents",
"token_count": 18519
} |
from unittest.mock import patch
import pytest
from smolagents.agents import MultiStepAgent
from smolagents.monitoring import LogLevel
original_multi_step_agent_init = MultiStepAgent.__init__
@pytest.fixture(autouse=True)
def patch_multi_step_agent_with_suppressed_logging():
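    """Autouse fixture: patch MultiStepAgent.__init__ so agents created in tests default to verbosity_level=LogLevel.OFF."""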
with patch.object(MultiStepAgent, "__init__", autospec=True) as mock_init:
def init_with_suppressed_logging(self, *args, verbosity_level=LogLevel.OFF, **kwargs):
original_multi_step_agent_init(self, *args, verbosity_level=verbosity_level, **kwargs)
mock_init.side_effect = init_with_suppressed_logging
yield
| smolagents/tests/conftest.py/0 | {
"file_path": "smolagents/tests/conftest.py",
"repo_id": "smolagents",
"token_count": 230
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import pathlib
import tempfile
import textwrap
import unittest
import pytest
from IPython.core.interactiveshell import InteractiveShell
from smolagents import Tool
from smolagents.tools import tool
from smolagents.utils import get_source, parse_code_blobs
class AgentTextTests(unittest.TestCase):
def test_parse_code_blobs(self):
with pytest.raises(ValueError):
parse_code_blobs("Wrong blob!")
# Parsing markdown with code blobs should work
output = parse_code_blobs("""
Here is how to solve the problem:
Code:
```py
import numpy as np
```<end_code>
""")
assert output == "import numpy as np"
# Parsing code blobs should work
code_blob = "import numpy as np"
output = parse_code_blobs(code_blob)
assert output == code_blob
def test_multiple_code_blobs(self):
test_input = """Here's a function that adds numbers:
```python
def add(a, b):
return a + b
```
And here's a function that multiplies them:
```py
def multiply(a, b):
return a * b
```"""
expected_output = """def add(a, b):
return a + b
def multiply(a, b):
return a * b"""
result = parse_code_blobs(test_input)
assert result == expected_output
@pytest.fixture(scope="function")
def ipython_shell():
"""Reset IPython shell before and after each test."""
shell = InteractiveShell.instance()
shell.reset() # Clean before test
yield shell
shell.reset() # Clean after test
@pytest.mark.parametrize(
"obj_name, code_blob",
[
("test_func", "def test_func():\n return 42"),
("TestClass", "class TestClass:\n ..."),
],
)
def test_get_source_ipython(ipython_shell, obj_name, code_blob):
ipython_shell.run_cell(code_blob, store_history=True)
obj = ipython_shell.user_ns[obj_name]
assert get_source(obj) == code_blob
def test_get_source_standard_class():
class TestClass: ...
source = get_source(TestClass)
assert source == "class TestClass: ..."
assert source == textwrap.dedent(inspect.getsource(TestClass)).strip()
def test_get_source_standard_function():
def test_func(): ...
source = get_source(test_func)
assert source == "def test_func(): ..."
assert source == textwrap.dedent(inspect.getsource(test_func)).strip()
def test_get_source_ipython_errors_empty_cells(ipython_shell):
test_code = textwrap.dedent("""class TestClass:\n ...""").strip()
ipython_shell.user_ns["In"] = [""]
exec(test_code)
with pytest.raises(ValueError, match="No code cells found in IPython session"):
get_source(locals()["TestClass"])
def test_get_source_ipython_errors_definition_not_found(ipython_shell):
test_code = textwrap.dedent("""class TestClass:\n ...""").strip()
ipython_shell.user_ns["In"] = ["", "print('No class definition here')"]
exec(test_code)
with pytest.raises(ValueError, match="Could not find source code for TestClass in IPython history"):
get_source(locals()["TestClass"])
def test_get_source_ipython_errors_type_error():
with pytest.raises(TypeError, match="Expected class or callable"):
get_source(None)
def test_e2e_class_tool_save():
class TestTool(Tool):
name = "test_tool"
description = "Test tool description"
inputs = {
"task": {
"type": "string",
"description": "tool input",
}
}
output_type = "string"
def forward(self, task: str):
import IPython # noqa: F401
return task
test_tool = TestTool()
with tempfile.TemporaryDirectory() as tmp_dir:
test_tool.save(tmp_dir)
assert set(os.listdir(tmp_dir)) == {"requirements.txt", "app.py", "tool.py"}
assert (
pathlib.Path(tmp_dir, "tool.py").read_text()
== """from smolagents.tools import Tool
import IPython
class TestTool(Tool):
name = "test_tool"
description = "Test tool description"
inputs = {'task': {'type': 'string', 'description': 'tool input'}}
output_type = "string"
def forward(self, task: str):
import IPython # noqa: F401
return task
def __init__(self, *args, **kwargs):
self.is_initialized = False
"""
)
requirements = set(pathlib.Path(tmp_dir, "requirements.txt").read_text().split())
assert requirements == {"IPython", "smolagents"}
assert (
pathlib.Path(tmp_dir, "app.py").read_text()
== """from smolagents import launch_gradio_demo
from typing import Optional
from tool import TestTool
tool = TestTool()
launch_gradio_demo(tool)
"""
)
def test_e2e_ipython_class_tool_save():
shell = InteractiveShell.instance()
with tempfile.TemporaryDirectory() as tmp_dir:
code_blob = textwrap.dedent(f"""
from smolagents.tools import Tool
class TestTool(Tool):
name = "test_tool"
description = "Test tool description"
inputs = {{"task": {{"type": "string",
"description": "tool input",
}}
}}
output_type = "string"
def forward(self, task: str):
import IPython # noqa: F401
return task
TestTool().save("{tmp_dir}")
""")
assert shell.run_cell(code_blob, store_history=True).success
assert set(os.listdir(tmp_dir)) == {"requirements.txt", "app.py", "tool.py"}
assert (
pathlib.Path(tmp_dir, "tool.py").read_text()
== """from smolagents.tools import Tool
import IPython
class TestTool(Tool):
name = "test_tool"
description = "Test tool description"
inputs = {'task': {'type': 'string', 'description': 'tool input'}}
output_type = "string"
def forward(self, task: str):
import IPython # noqa: F401
return task
def __init__(self, *args, **kwargs):
self.is_initialized = False
"""
)
requirements = set(pathlib.Path(tmp_dir, "requirements.txt").read_text().split())
assert requirements == {"IPython", "smolagents"}
assert (
pathlib.Path(tmp_dir, "app.py").read_text()
== """from smolagents import launch_gradio_demo
from typing import Optional
from tool import TestTool
tool = TestTool()
launch_gradio_demo(tool)
"""
)
def test_e2e_function_tool_save():
@tool
def test_tool(task: str) -> str:
"""
Test tool description
Args:
task: tool input
"""
import IPython # noqa: F401
return task
with tempfile.TemporaryDirectory() as tmp_dir:
test_tool.save(tmp_dir)
assert set(os.listdir(tmp_dir)) == {"requirements.txt", "app.py", "tool.py"}
assert (
pathlib.Path(tmp_dir, "tool.py").read_text()
== """from smolagents import Tool
from typing import Optional
class SimpleTool(Tool):
name = "test_tool"
description = "Test tool description"
inputs = {"task":{"type":"string","description":"tool input"}}
output_type = "string"
def forward(self, task: str) -> str:
\"""
Test tool description
Args:
task: tool input
\"""
import IPython # noqa: F401
return task"""
)
requirements = set(pathlib.Path(tmp_dir, "requirements.txt").read_text().split())
assert requirements == {"smolagents"} # FIXME: IPython should be in the requirements
assert (
pathlib.Path(tmp_dir, "app.py").read_text()
== """from smolagents import launch_gradio_demo
from typing import Optional
from tool import SimpleTool
tool = SimpleTool()
launch_gradio_demo(tool)
"""
)
def test_e2e_ipython_function_tool_save():
shell = InteractiveShell.instance()
with tempfile.TemporaryDirectory() as tmp_dir:
code_blob = textwrap.dedent(f"""
from smolagents import tool
@tool
def test_tool(task: str) -> str:
\"""
Test tool description
Args:
task: tool input
\"""
import IPython # noqa: F401
return task
test_tool.save("{tmp_dir}")
""")
assert shell.run_cell(code_blob, store_history=True).success
assert set(os.listdir(tmp_dir)) == {"requirements.txt", "app.py", "tool.py"}
assert (
pathlib.Path(tmp_dir, "tool.py").read_text()
== """from smolagents import Tool
from typing import Optional
class SimpleTool(Tool):
name = "test_tool"
description = "Test tool description"
inputs = {"task":{"type":"string","description":"tool input"}}
output_type = "string"
def forward(self, task: str) -> str:
\"""
Test tool description
Args:
task: tool input
\"""
import IPython # noqa: F401
return task"""
)
requirements = set(pathlib.Path(tmp_dir, "requirements.txt").read_text().split())
assert requirements == {"smolagents"} # FIXME: IPython should be in the requirements
assert (
pathlib.Path(tmp_dir, "app.py").read_text()
== """from smolagents import launch_gradio_demo
from typing import Optional
from tool import SimpleTool
tool = SimpleTool()
launch_gradio_demo(tool)
"""
)
| smolagents/tests/test_utils.py/0 | {
"file_path": "smolagents/tests/test_utils.py",
"repo_id": "smolagents",
"token_count": 4135
} |
[package]
name = "text-generation-backends-trtllm"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true
[dependencies]
async-trait = "0.1"
clap = { version = "4.5", features = ["derive"] }
cxx = "1.0"
hashbrown = "0.15"
hf-hub = { workspace = true }
text-generation-router = { path = "../../router" }
tokenizers = { workspace = true }
tokio = { version = "1.43.0", features = ["rt", "rt-multi-thread", "parking_lot", "signal", "sync"] }
tokio-stream = "0.1.17"
thiserror = "1.0.63"
tracing = "0.1"
pyo3 = { workspace = true }
[build-dependencies]
cmake = "0.1"
cxx-build = { version = "1.0", features = ["parallel"] }
pkg-config = "0.3"
| text-generation-inference/backends/trtllm/Cargo.toml/0 | {
"file_path": "text-generation-inference/backends/trtllm/Cargo.toml",
"repo_id": "text-generation-inference",
"token_count": 275
} |
use std::path::{Path, PathBuf};
use clap::Parser;
use hf_hub::api::tokio::{Api, ApiBuilder};
use hf_hub::{Cache, Repo, RepoType};
use tracing::info;
use text_generation_backends_trtllm::errors::TensorRtLlmBackendError;
use text_generation_backends_trtllm::TensorRtLlmBackendV2;
use text_generation_router::server::{
get_hub_model_info, legacy_tokenizer_handle, py_resolve_tokenizer,
};
use text_generation_router::usage_stats::UsageStatsLevel;
use text_generation_router::{server, Tokenizer};
/// App Configuration
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
#[clap(default_value = "128", long, env)]
max_concurrent_requests: usize,
#[clap(default_value = "2", long, env)]
max_best_of: usize,
#[clap(default_value = "4", long, env)]
max_stop_sequences: usize,
#[clap(default_value = "5", long, env)]
max_top_n_tokens: u32,
#[clap(default_value = "1024", long, env)]
max_input_tokens: usize,
#[clap(default_value = "2048", long, env)]
max_total_tokens: usize,
#[clap(default_value = "4096", long, env)]
max_batch_prefill_tokens: u32,
#[clap(long, env)]
max_batch_total_tokens: Option<u32>,
#[clap(default_value = "0.0.0.0", long, env)]
hostname: String,
#[clap(default_value = "3000", long, short, env)]
port: u16,
#[clap(long, env, required = true)]
tokenizer_name: String,
#[clap(long, env)]
tokenizer_config_path: Option<String>,
#[clap(long, env)]
revision: Option<String>,
#[clap(long, env)]
model_id: String,
#[clap(default_value = "2", long, env)]
validation_workers: usize,
#[clap(long, env)]
json_output: bool,
#[clap(long, env)]
otlp_endpoint: Option<String>,
#[clap(default_value = "text-generation-inference.router", long, env)]
otlp_service_name: String,
#[clap(long, env)]
cors_allow_origin: Option<Vec<String>>,
#[clap(default_value = "4", long, env)]
max_client_batch_size: usize,
#[clap(long, env)]
auth_token: Option<String>,
#[clap(long, env, help = "Path to the TensorRT-LLM Orchestrator worker")]
executor_worker: PathBuf,
#[clap(default_value = "on", long, env)]
usage_stats: UsageStatsLevel,
#[clap(default_value = "2000000", long, env)]
payload_limit: usize,
}
async fn get_tokenizer(tokenizer_name: &str, revision: Option<&str>) -> Option<Tokenizer> {
// Parse Huggingface hub token
let authorization_token = std::env::var("HF_TOKEN")
.or_else(|_| std::env::var("HUGGING_FACE_HUB_TOKEN"))
.ok();
// Tokenizer instance
let local_path = Path::new(tokenizer_name);
// Shared API builder initialization
let api_builder = || {
let mut builder = ApiBuilder::new()
.with_progress(false)
.with_token(authorization_token);
if let Ok(cache_dir) = std::env::var("HUGGINGFACE_HUB_CACHE") {
builder = builder.with_cache_dir(cache_dir.into());
}
builder
};
// Decide if we need to use the API based on the revision and local path
let use_api = revision.is_some() || !local_path.exists() || !local_path.is_dir();
// Initialize API if needed
#[derive(Clone)]
enum Type {
Api(Api),
Cache(Cache),
None,
}
let api = if use_api {
if std::env::var("HF_HUB_OFFLINE") == Ok("1".to_string()) {
let cache = std::env::var("HUGGINGFACE_HUB_CACHE")
.map_err(|_| ())
.map(|cache_dir| Cache::new(cache_dir.into()))
.unwrap_or_else(|_| Cache::default());
tracing::warn!("Offline mode active using cache defaults");
Type::Cache(cache)
} else {
tracing::info!("Using the Hugging Face API");
match api_builder().build() {
Ok(api) => Type::Api(api),
Err(_) => {
tracing::warn!("Unable to build the Hugging Face API");
Type::None
}
}
}
} else {
Type::None
};
// Load tokenizer and model info
let (
config_filename,
_tokenizer_config_filename,
_preprocessor_config_filename,
_processor_config_filename,
_model_info,
) = match api {
Type::None => (
Some(local_path.join("config.json")),
Some(local_path.join("tokenizer_config.json")),
Some(local_path.join("preprocessor_config.json")),
Some(local_path.join("processor_config.json")),
None,
),
Type::Api(api) => {
let api_repo = api.repo(Repo::with_revision(
tokenizer_name.to_string(),
RepoType::Model,
revision.unwrap_or_else(|| "main").to_string(),
));
let config_filename = api_repo.get("config.json").await.ok();
let tokenizer_config_filename = api_repo.get("tokenizer_config.json").await.ok();
let preprocessor_config_filename = api_repo.get("preprocessor_config.json").await.ok();
let processor_config_filename = api_repo.get("processor_config.json").await.ok();
let model_info = if let Some(model_info) = get_hub_model_info(&api_repo).await {
Some(model_info)
} else {
tracing::warn!("Could not retrieve model info from the Hugging Face hub.");
None
};
(
config_filename,
tokenizer_config_filename,
preprocessor_config_filename,
processor_config_filename,
model_info,
)
}
Type::Cache(cache) => {
let repo = cache.repo(Repo::with_revision(
tokenizer_name.to_string(),
RepoType::Model,
revision.clone().unwrap_or_else(|| "main").to_string(),
));
(
repo.get("config.json"),
repo.get("tokenizer_config.json"),
repo.get("preprocessor_config.json"),
repo.get("processor_config.json"),
None,
)
}
};
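    // Resolve the tokenizer: try the Python resolver first, fall back to the legacy config-based handle, then prefer a Rust tokenizer from "out/tokenizer.json" when one exists, otherwise keep the Python implementation.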
let tokenizer: Tokenizer = {
use pyo3::prelude::*;
pyo3::Python::with_gil(|py| -> PyResult<()> {
py_resolve_tokenizer(py, &tokenizer_name, revision.as_deref(), false)?;
Ok(())
})
.inspect_err(|err| {
tracing::error!("Failed to import python tokenizer {err}");
})
.or_else(|err| {
let out = legacy_tokenizer_handle(config_filename.as_ref());
out.ok_or(err)
})
.expect("We cannot load a tokenizer");
let filename = "out/tokenizer.json";
if let Ok(tok) = tokenizers::Tokenizer::from_file(filename) {
Tokenizer::Rust(tok)
} else {
Tokenizer::Python {
tokenizer_name: tokenizer_name.to_string(),
revision: revision.map(|revision| revision.to_string()),
trust_remote_code: false,
}
}
};
Some(tokenizer)
}
#[tokio::main]
async fn main() -> Result<(), TensorRtLlmBackendError> {
// Get args
let args = Args::parse();
// Pattern match configuration
let Args {
max_concurrent_requests,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_tokens,
max_total_tokens,
max_batch_prefill_tokens,
max_batch_total_tokens,
hostname,
port,
tokenizer_name,
tokenizer_config_path,
revision,
model_id,
validation_workers,
json_output,
otlp_endpoint,
otlp_service_name,
cors_allow_origin,
max_client_batch_size,
auth_token,
executor_worker,
usage_stats,
payload_limit,
} = args;
// Launch Tokio runtime
text_generation_router::logging::init_logging(otlp_endpoint, otlp_service_name, json_output);
// Validate args
if max_input_tokens >= max_total_tokens {
return Err(TensorRtLlmBackendError::ArgumentValidation(
"`max_input_tokens` must be < `max_total_tokens`".to_string(),
));
}
if max_input_tokens as u32 > max_batch_prefill_tokens {
return Err(TensorRtLlmBackendError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be >= `max_input_tokens`. Given: {max_batch_prefill_tokens} and {max_input_tokens}")));
}
if validation_workers == 0 {
return Err(TensorRtLlmBackendError::ArgumentValidation(
"`validation_workers` must be > 0".to_string(),
));
}
if let Some(ref max_batch_total_tokens) = max_batch_total_tokens {
if max_batch_prefill_tokens > *max_batch_total_tokens {
return Err(TensorRtLlmBackendError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be <= `max_batch_total_tokens`. Given: {max_batch_prefill_tokens} and {max_batch_total_tokens}")));
}
if max_total_tokens as u32 > *max_batch_total_tokens {
return Err(TensorRtLlmBackendError::ArgumentValidation(format!("`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {max_total_tokens} and {max_batch_total_tokens}")));
}
}
if !executor_worker.exists() {
return Err(TensorRtLlmBackendError::ArgumentValidation(format!(
"`executor_work` specified path doesn't exists: {}",
executor_worker.display()
)));
}
// Create the backend
match get_tokenizer(&tokenizer_name, revision.as_deref())
.await
.expect("Failed to retrieve tokenizer implementation")
{
Tokenizer::Python { .. } => Err(TensorRtLlmBackendError::Tokenizer(
"Failed to retrieve Rust based tokenizer".to_string(),
)),
Tokenizer::Rust(tokenizer) => {
info!("Successfully retrieved tokenizer {}", &tokenizer_name);
let backend = TensorRtLlmBackendV2::new(
tokenizer,
model_id,
executor_worker,
max_concurrent_requests,
)?;
info!("Successfully created backend");
// Run server
server::run(
backend,
max_concurrent_requests,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_tokens,
max_total_tokens,
validation_workers,
auth_token,
tokenizer_name,
tokenizer_config_path,
revision,
false,
hostname,
port,
cors_allow_origin,
false,
None,
None,
true,
max_client_batch_size,
usage_stats,
payload_limit,
)
.await?;
Ok(())
}
}
}
| text-generation-inference/backends/trtllm/src/main.rs/0 | {
"file_path": "text-generation-inference/backends/trtllm/src/main.rs",
"repo_id": "text-generation-inference",
"token_count": 5530
} |
use crate::app::Data;
use tabled::settings::Merge;
use tabled::{builder::Builder, settings::Style, Table};
#[allow(clippy::too_many_arguments)]
pub(crate) fn parameters_table(
tokenizer_name: String,
sequence_length: u32,
decode_length: u32,
top_n_tokens: Option<u32>,
n_runs: usize,
warmups: usize,
temperature: Option<f32>,
top_k: Option<u32>,
top_p: Option<f32>,
typical_p: Option<f32>,
repetition_penalty: Option<f32>,
frequency_penalty: Option<f32>,
watermark: bool,
do_sample: bool,
) -> Table {
let mut builder = Builder::default();
builder.set_header(["Parameter", "Value"]);
builder.push_record(["Model", &tokenizer_name]);
builder.push_record(["Sequence Length", &sequence_length.to_string()]);
builder.push_record(["Decode Length", &decode_length.to_string()]);
builder.push_record(["Top N Tokens", &format!("{top_n_tokens:?}")]);
builder.push_record(["N Runs", &n_runs.to_string()]);
builder.push_record(["Warmups", &warmups.to_string()]);
builder.push_record(["Temperature", &format!("{temperature:?}")]);
builder.push_record(["Top K", &format!("{top_k:?}")]);
builder.push_record(["Top P", &format!("{top_p:?}")]);
builder.push_record(["Typical P", &format!("{typical_p:?}")]);
builder.push_record(["Repetition Penalty", &format!("{repetition_penalty:?}")]);
builder.push_record(["Frequency Penalty", &format!("{frequency_penalty:?}")]);
builder.push_record(["Watermark", &watermark.to_string()]);
builder.push_record(["Do Sample", &do_sample.to_string()]);
let mut table = builder.build();
table.with(Style::markdown());
table
}
pub(crate) fn latency_table(data: &Data) -> Table {
let mut builder = Builder::default();
builder.set_header([
"Step",
"Batch Size",
"Average",
"Lowest",
"Highest",
"p50",
"p90",
"p99",
]);
add_latencies(
&mut builder,
"Prefill",
&data.batch_size,
&data.prefill_latencies,
);
add_latencies(
&mut builder,
"Decode (token)",
&data.batch_size,
&data.decode_token_latencies,
);
add_latencies(
&mut builder,
"Decode (total)",
&data.batch_size,
&data.decode_latencies,
);
let mut table = builder.build();
table.with(Style::markdown()).with(Merge::vertical());
table
}
pub(crate) fn throughput_table(data: &Data) -> Table {
let mut builder = Builder::default();
builder.set_header(["Step", "Batch Size", "Average", "Lowest", "Highest"]);
add_throughputs(
&mut builder,
"Prefill",
&data.batch_size,
&data.prefill_throughputs,
);
add_throughputs(
&mut builder,
"Decode",
&data.batch_size,
&data.decode_throughputs,
);
let mut table = builder.build();
table.with(Style::markdown()).with(Merge::vertical());
table
}
fn add_latencies(
builder: &mut Builder,
step: &'static str,
batch_size: &[u32],
batch_latencies: &[Vec<f64>],
) {
for (i, b) in batch_size.iter().enumerate() {
let latencies = &batch_latencies[i];
let (avg, min, max) = avg_min_max(latencies);
let row = [
step,
&b.to_string(),
&format_value(avg, "ms"),
&format_value(min, "ms"),
&format_value(max, "ms"),
&format_value(px(latencies, 50), "ms"),
&format_value(px(latencies, 90), "ms"),
&format_value(px(latencies, 99), "ms"),
];
builder.push_record(row);
}
}
fn add_throughputs(
builder: &mut Builder,
step: &'static str,
batch_size: &[u32],
batch_throughputs: &[Vec<f64>],
) {
for (i, b) in batch_size.iter().enumerate() {
let throughputs = &batch_throughputs[i];
let (avg, min, max) = avg_min_max(throughputs);
let row = [
step,
&b.to_string(),
&format_value(avg, "tokens/secs"),
&format_value(min, "tokens/secs"),
&format_value(max, "tokens/secs"),
];
builder.push_record(row);
}
}
fn avg_min_max(data: &[f64]) -> (f64, f64, f64) {
let average = data.iter().sum::<f64>() / data.len() as f64;
let min = data
.iter()
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&f64::NAN);
let max = data
.iter()
.max_by(|a, b| a.total_cmp(b))
.unwrap_or(&f64::NAN);
(average, *min, *max)
}
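// Index-based percentile helper: returns the element at position p% of the slice (meaningful as a percentile only when the slice is sorted), or NAN if the index is out of range.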
fn px(data: &[f64], p: u32) -> f64 {
let i = (f64::from(p) / 100.0 * data.len() as f64) as usize;
*data.get(i).unwrap_or(&f64::NAN)
}
fn format_value(value: f64, unit: &'static str) -> String {
format!("{:.2} {unit}", value)
}
| text-generation-inference/benchmark/src/table.rs/0 | {
"file_path": "text-generation-inference/benchmark/src/table.rs",
"repo_id": "text-generation-inference",
"token_count": 2288
} |
from enum import Enum
from pydantic import BaseModel, field_validator, ConfigDict
from typing import Optional, List, Union, Any
from text_generation.errors import ValidationError
# enum for grammar type
class GrammarType(str, Enum):
Json = "json"
Regex = "regex"
# Grammar type and value
class Grammar(BaseModel):
# Grammar type
type: GrammarType
# Grammar value
value: Union[str, dict]
class ToolCall(BaseModel):
# Id of the tool call
id: int
# Type of the tool call
type: str
# Function details of the tool call
function: dict
class Chunk(BaseModel):
type: str
text: Optional[str] = None
image_url: Any = None
class Message(BaseModel):
# Role of the message sender
role: str
# Content of the message
content: Optional[Union[str, List[Chunk]]] = None
# Optional name of the message sender
name: Optional[str] = None
# Tool calls associated with the chat completion
tool_calls: Optional[Any] = None
class Tool(BaseModel):
# Type of the tool
type: str
# Function details of the tool
function: dict
class Function(BaseModel):
name: Optional[str]
arguments: str
class ChoiceDeltaToolCall(BaseModel):
index: int
id: str
type: str
function: Function
class ChoiceDelta(BaseModel):
role: str
content: Optional[str] = None
tool_calls: Optional[ChoiceDeltaToolCall] = None
class Choice(BaseModel):
index: int
delta: ChoiceDelta
logprobs: Optional[dict] = None
finish_reason: Optional[str] = None
class CompletionRequest(BaseModel):
# Model identifier
model: str
# Prompt
prompt: str
# The parameter for repetition penalty. 1.0 means no penalty.
# See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
repetition_penalty: Optional[float] = None
# The parameter for frequency penalty. 0.0 means no penalty
# Penalize new tokens based on their existing frequency in the text so far,
# decreasing the model's likelihood to repeat the same line verbatim.
frequency_penalty: Optional[float] = None
# Maximum number of tokens to generate
max_tokens: Optional[int] = None
# Flag to indicate streaming response
stream: bool = False
# Random sampling seed
seed: Optional[int] = None
# Sampling temperature
temperature: Optional[float] = None
# Top-p value for nucleus sampling
top_p: Optional[float] = None
# Stop generating tokens if a member of `stop` is generated
stop: Optional[List[str]] = None
class CompletionComplete(BaseModel):
# Index of the chat completion
index: int
# Message associated with the chat completion
text: str
# Log probabilities for the chat completion
logprobs: Optional[Any]
# Reason for completion
finish_reason: str
class Completion(BaseModel):
# Completion details
id: str
object: str
created: int
model: str
system_fingerprint: str
choices: List[CompletionComplete]
class ChatRequest(BaseModel):
# Model identifier
model: str
# List of messages in the conversation
messages: List[Message]
# The parameter for repetition penalty. 1.0 means no penalty.
# See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
repetition_penalty: Optional[float] = None
# The parameter for frequency penalty. 0.0 means no penalty
# Penalize new tokens based on their existing frequency in the text so far,
# decreasing the model's likelihood to repeat the same line verbatim.
frequency_penalty: Optional[float] = None
# Bias values for token selection
logit_bias: Optional[List[float]] = None
# Whether to return log probabilities
logprobs: Optional[bool] = None
# Number of most likely tokens to return at each position
top_logprobs: Optional[int] = None
# Maximum number of tokens to generate
max_tokens: Optional[int] = None
# Number of chat completion choices to generate
n: Optional[int] = None
# Penalty for presence of new tokens
presence_penalty: Optional[float] = None
# Flag to indicate streaming response
stream: bool = False
# Random sampling seed
seed: Optional[int] = None
# Sampling temperature
temperature: Optional[float] = None
# Top-p value for nucleus sampling
top_p: Optional[float] = None
# List of tools to be used
tools: Optional[List[Tool]] = None
# A prompt to be appended before the tools
tool_prompt: Optional[str] = None
# Choice of tool to be used
tool_choice: Optional[str] = None
# Stop generating tokens if a member of `stop` is generated
stop: Optional[List[str]] = None
class ChatCompletionComplete(BaseModel):
# Index of the chat completion
index: int
# Message associated with the chat completion
message: Message
# Log probabilities for the chat completion
logprobs: Optional[Any]
# Reason for completion
finish_reason: Optional[str]
# Usage details of the chat completion
usage: Optional[Any] = None
class ChatComplete(BaseModel):
# Chat completion details
id: str
object: str
created: int
model: str
system_fingerprint: str
choices: List[ChatCompletionComplete]
usage: Any
class ChatCompletionChunk(BaseModel):
id: str
object: str
created: int
model: str
system_fingerprint: str
choices: List[Choice]
usage: Optional[Any] = None
class Parameters(BaseModel):
# Activate logits sampling
do_sample: bool = False
# Maximum number of generated tokens
max_new_tokens: int = 20
# The parameter for repetition penalty. 1.0 means no penalty.
# See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
repetition_penalty: Optional[float] = None
# The parameter for frequency penalty. 0.0 means no penalty
# Penalize new tokens based on their existing frequency in the text so far,
# decreasing the model's likelihood to repeat the same line verbatim.
frequency_penalty: Optional[float] = None
# Whether to prepend the prompt to the generated text
return_full_text: bool = False
# Stop generating tokens if a member of `stop_sequences` is generated
stop: List[str] = []
# Random sampling seed
seed: Optional[int] = None
# The value used to module the logits distribution.
temperature: Optional[float] = None
# The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_k: Optional[int] = None
# If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
# higher are kept for generation.
top_p: Optional[float] = None
# truncate inputs tokens to the given size
truncate: Optional[int] = None
# Typical Decoding mass
# See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
typical_p: Optional[float] = None
# Generate best_of sequences and return the one with the highest token logprobs
best_of: Optional[int] = None
# Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
watermark: bool = False
# Get generation details
details: bool = False
# Get decoder input token logprobs and ids
decoder_input_details: bool = False
# Return the N most likely tokens at each step
top_n_tokens: Optional[int] = None
# grammar to use for generation
grammar: Optional[Grammar] = None
@field_validator("best_of")
def valid_best_of(cls, field_value, values):
if field_value is not None:
if field_value <= 0:
raise ValidationError("`best_of` must be strictly positive")
if field_value > 1 and values.data["seed"] is not None:
raise ValidationError("`seed` must not be set when `best_of` is > 1")
sampling = (
values.data["do_sample"]
| (values.data["temperature"] is not None)
| (values.data["top_k"] is not None)
| (values.data["top_p"] is not None)
| (values.data["typical_p"] is not None)
)
if field_value > 1 and not sampling:
raise ValidationError("you must use sampling when `best_of` is > 1")
return field_value
@field_validator("repetition_penalty")
def valid_repetition_penalty(cls, v):
if v is not None and v <= 0:
raise ValidationError("`repetition_penalty` must be strictly positive")
return v
@field_validator("frequency_penalty")
def valid_frequency_penalty(cls, v):
if v is not None and v <= 0:
raise ValidationError("`frequency_penalty` must be strictly positive")
return v
@field_validator("seed")
def valid_seed(cls, v):
if v is not None and v < 0:
raise ValidationError("`seed` must be positive")
return v
@field_validator("temperature")
def valid_temp(cls, v):
if v is not None and v <= 0:
raise ValidationError("`temperature` must be strictly positive")
return v
@field_validator("top_k")
def valid_top_k(cls, v):
if v is not None and v <= 0:
raise ValidationError("`top_k` must be strictly positive")
return v
@field_validator("top_p")
def valid_top_p(cls, v):
if v is not None and (v <= 0 or v >= 1.0):
raise ValidationError("`top_p` must be > 0.0 and < 1.0")
return v
@field_validator("truncate")
def valid_truncate(cls, v):
if v is not None and v <= 0:
raise ValidationError("`truncate` must be strictly positive")
return v
@field_validator("typical_p")
def valid_typical_p(cls, v):
if v is not None and (v <= 0 or v >= 1.0):
raise ValidationError("`typical_p` must be > 0.0 and < 1.0")
return v
@field_validator("top_n_tokens")
def valid_top_n_tokens(cls, v):
if v is not None and v <= 0:
raise ValidationError("`top_n_tokens` must be strictly positive")
return v
@field_validator("grammar")
def valid_grammar(cls, v):
if v is not None:
if v.type == GrammarType.Regex and not v.value:
raise ValidationError("`value` cannot be empty for `regex` grammar")
if v.type == GrammarType.Json and not v.value:
raise ValidationError("`value` cannot be empty for `json` grammar")
return v
class Request(BaseModel):
# Prompt
inputs: str
# Generation parameters
parameters: Optional[Parameters] = None
# Whether to stream output tokens
stream: bool = False
@field_validator("inputs")
def valid_input(cls, v):
if not v:
raise ValidationError("`inputs` cannot be empty")
return v
@field_validator("stream")
def valid_best_of_stream(cls, field_value, values):
parameters = values.data["parameters"]
if (
parameters is not None
and parameters.best_of is not None
and parameters.best_of > 1
and field_value
):
raise ValidationError(
"`best_of` != 1 is not supported when `stream` == True"
)
return field_value
# Decoder input tokens
class InputToken(BaseModel):
# Token ID from the model tokenizer
id: int
# Token text
text: str
# Logprob
# Optional since the logprob of the first token cannot be computed
logprob: Optional[float] = None
# Generated tokens
class Token(BaseModel):
# Token ID from the model tokenizer
id: int
# Token text
text: str
# Logprob
logprob: Optional[float] = None
# Is the token a special token
# Can be used to ignore tokens when concatenating
special: bool
# Generation finish reason
class FinishReason(str, Enum):
# number of generated tokens == `max_new_tokens`
Length = "length"
# the model generated its end of sequence token
EndOfSequenceToken = "eos_token"
# the model generated a text included in `stop_sequences`
StopSequence = "stop_sequence"
# Additional sequences when using the `best_of` parameter
class BestOfSequence(BaseModel):
# Generated text
generated_text: str
# Generation finish reason
finish_reason: FinishReason
# Number of generated tokens
generated_tokens: int
# Sampling seed if sampling was activated
seed: Optional[int] = None
# Decoder input tokens, empty if decoder_input_details is False
prefill: List[InputToken]
# Generated tokens
tokens: List[Token]
# Most likely tokens
top_tokens: Optional[List[List[Token]]] = None
# `generate` details
class Details(BaseModel):
# Generation finish reason
finish_reason: FinishReason
# Number of generated tokens
generated_tokens: int
# Sampling seed if sampling was activated
seed: Optional[int] = None
# Decoder input tokens, empty if decoder_input_details is False
prefill: List[InputToken]
# Generated tokens
tokens: List[Token]
# Most likely tokens
top_tokens: Optional[List[List[Token]]] = None
# Additional sequences when using the `best_of` parameter
best_of_sequences: Optional[List[BestOfSequence]] = None
# `generate` return value
class Response(BaseModel):
# Generated text
generated_text: str
# Generation details
details: Details
# `generate_stream` details
class StreamDetails(BaseModel):
# Generation finish reason
finish_reason: FinishReason
# Number of generated tokens
generated_tokens: int
# Sampling seed if sampling was activated
seed: Optional[int] = None
# `generate_stream` return value
class StreamResponse(BaseModel):
# Generated token
token: Token
# Most likely tokens
top_tokens: Optional[List[Token]] = None
# Complete generated text
# Only available when the generation is finished
generated_text: Optional[str] = None
# Generation details
# Only available when the generation is finished
details: Optional[StreamDetails] = None
# Inference API currently deployed model
class DeployedModel(BaseModel):
# Disable warning for use of `model_` prefix in `model_id`. Be mindful about adding members
# with model_ prefixes, since this disables guardrails for colliding fields:
# https://github.com/pydantic/pydantic/issues/9177
model_config = ConfigDict(protected_namespaces=())
model_id: str
sha: str
| text-generation-inference/clients/python/text_generation/types.py/0 | {
"file_path": "text-generation-inference/clients/python/text_generation/types.py",
"repo_id": "text-generation-inference",
"token_count": 5255
} |
# Guidance
Text Generation Inference (TGI) now supports [JSON and regex grammars](#grammar-and-constraints) and [tools and functions](#tools-and-functions) to help developers guide LLM responses to fit their needs.
These feature are available starting from version `1.4.3`. They are accessible via the [`huggingface_hub`](https://pypi.org/project/huggingface-hub/) library. The tool support is compatible with OpenAI's client libraries. The following guide will walk you through the new features and how to use them!
_note: guidance is supported as grammar in the `/generate` endpoint and as tools in the `v1/chat/completions` endpoint._
## How it works
TGI leverages the [outlines](https://github.com/outlines-dev/outlines) library to efficiently parse and compile the grammatical structures and tools specified by users. This integration transforms the defined grammars into an intermediate representation that acts as a framework to guide and constrain content generation, ensuring that outputs adhere to the specified grammatical rules.
If you are interested in the technical details on how outlines is used in TGI, you can check out the [conceptual guidance documentation](../conceptual/guidance).
## Table of Contents 📚
### Grammar and Constraints
- [The Grammar Parameter](#the-grammar-parameter): Shape your AI's responses with precision.
- [Constrain with Pydantic](#constrain-with-pydantic): Define a grammar using Pydantic models.
- [JSON Schema Integration](#json-schema-integration): Fine-grained control over your requests via JSON schema.
- [Using the client](#using-the-client): Use TGI's client libraries to shape the AI's responses.
### Tools and Functions
- [The Tools Parameter](#the-tools-parameter): Enhance the AI's capabilities with predefined functions.
- [Via the client](#text-generation-inference-client): Use TGI's client libraries to interact with the Messages API and Tool functions.
- [OpenAI integration](#openai-integration): Use OpenAI's client libraries to interact with TGI's Messages API and Tool functions.
## Grammar and Constraints 🛣️
### The Grammar Parameter
In TGI `1.4.3`, we've introduced the grammar parameter, which allows you to specify the format of the response you want from the LLM.
Using curl, you can make a request to TGI's `/generate` endpoint with the grammar parameter. This is the most primitive way to interact with the API; using [Pydantic](#constrain-with-pydantic) is recommended for ease of use and readability.
```bash
curl localhost:3000/generate \
-X POST \
-H 'Content-Type: application/json' \
-d '{
"inputs": "I saw a puppy a cat and a raccoon during my bike ride in the park",
"parameters": {
"repetition_penalty": 1.3,
"grammar": {
"type": "json",
"value": {
"properties": {
"location": {
"type": "string"
},
"activity": {
"type": "string"
},
"animals_seen": {
"type": "integer",
"minimum": 1,
"maximum": 5
},
"animals": {
"type": "array",
"items": {
"type": "string"
}
}
},
"required": ["location", "activity", "animals_seen", "animals"]
}
}
}
}'
// {"generated_text":"{ \n\n\"activity\": \"biking\",\n\"animals\": [\"puppy\",\"cat\",\"raccoon\"],\n\"animals_seen\": 3,\n\"location\": \"park\"\n}"}
```
### Hugging Face Hub Python Library
The Hugging Face Hub Python library provides a client that makes it easy to interact with the Messages API. Here's an example of how to use the client to send a request with a grammar parameter.
```python
from huggingface_hub import InferenceClient
client = InferenceClient("http://localhost:3000")
schema = {
"properties": {
"location": {"title": "Location", "type": "string"},
"activity": {"title": "Activity", "type": "string"},
"animals_seen": {
"maximum": 5,
"minimum": 1,
"title": "Animals Seen",
"type": "integer",
},
"animals": {"items": {"type": "string"}, "title": "Animals", "type": "array"},
},
"required": ["location", "activity", "animals_seen", "animals"],
"title": "Animals",
"type": "object",
}
user_input = "I saw a puppy a cat and a raccoon during my bike ride in the park"
resp = client.text_generation(
f"convert to JSON: 'f{user_input}'. please use the following schema: {schema}",
max_new_tokens=100,
seed=42,
grammar={"type": "json", "value": schema},
)
print(resp)
# { "activity": "bike ride", "animals": ["puppy", "cat", "raccoon"], "animals_seen": 3, "location": "park" }
```
A grammar can be defined using Pydantic models, JSON schemas, or regular expressions. The LLM will then generate a response that conforms to the specified grammar.
> Note: A grammar must compile to an intermediate representation to constrain the output. Grammar compilation is computationally expensive and may take a few seconds to complete on the first request. Subsequent requests will use the cached grammar and will be much faster.
### Constrain with Pydantic
Using Pydantic models we can define a similar grammar as the previous example in a shorter and more readable way.
```python
from huggingface_hub import InferenceClient
from pydantic import BaseModel, conint
from typing import List
class Animals(BaseModel):
location: str
activity: str
animals_seen: conint(ge=1, le=5) # Constrained integer type
animals: List[str]
client = InferenceClient("http://localhost:3000")
user_input = "I saw a puppy a cat and a raccoon during my bike ride in the park"
resp = client.text_generation(
f"convert to JSON: 'f{user_input}'. please use the following schema: {Animals.schema()}",
max_new_tokens=100,
seed=42,
grammar={"type": "json", "value": Animals.schema()},
)
print(resp)
# { "activity": "bike ride", "animals": ["puppy", "cat", "raccoon"], "animals_seen": 3, "location": "park" }
```
A grammar can also be defined as a regular expression:
```python
from huggingface_hub import InferenceClient
client = InferenceClient("http://localhost:3000")
section_regex = "(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
regexp = f"HELLO\.{section_regex}\.WORLD\.{section_regex}"
# This is a more realistic example of an ip address regex
# regexp = f"{section_regex}\.{section_regex}\.{section_regex}\.{section_regex}"
resp = client.text_generation(
f"Whats Googles DNS? Please use the following regex: {regexp}",
seed=42,
grammar={
"type": "regex",
"value": regexp,
},
)
print(resp)
# HELLO.255.WORLD.255
```
## Tools and Functions 🛠️
### The Tools Parameter
In addition to the grammar parameter, we've also introduced a set of tools and functions to help you get the most out of the Messages API.
Tools are a set of user-defined functions that can be used in tandem with the chat functionality to enhance the LLM's capabilities. Functions, similar to grammars, are defined as JSON schemas and can be passed as part of the parameters to the Messages API.
```bash
curl localhost:3000/v1/chat/completions \
-X POST \
-H 'Content-Type: application/json' \
-d '{
"model": "tgi",
"messages": [
{
"role": "user",
"content": "What is the weather like in New York?"
}
],
"tools": [
{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "Get the current weather",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA"
},
"format": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
"description": "The temperature unit to use. Infer this from the users location."
}
},
"required": ["location", "format"]
}
}
}
],
"tool_choice": "get_current_weather"
}'
// {"id":"","object":"text_completion","created":1709051640,"model":"HuggingFaceH4/zephyr-7b-beta","system_fingerprint":"1.4.3-native","choices":[{"index":0,"message":{"role":"assistant","tool_calls":{"id":0,"type":"function","function":{"description":null,"name":"tools","parameters":{"format":"celsius","location":"New York"}}}},"logprobs":null,"finish_reason":"eos_token"}],"usage":{"prompt_tokens":157,"completion_tokens":19,"total_tokens":176}}
```
### Chat Completion with Tools
Grammars are supported in the `/generate` endpoint, while tools are supported in the `/chat/completions` endpoint. Here's an example of how to use the client to send a request with a tool parameter.
```python
from huggingface_hub import InferenceClient
client = InferenceClient("http://localhost:3000")
tools = [
{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "Get the current weather",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"format": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
"description": "The temperature unit to use. Infer this from the users location.",
},
},
"required": ["location", "format"],
},
},
},
{
"type": "function",
"function": {
"name": "get_n_day_weather_forecast",
"description": "Get an N-day weather forecast",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"format": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
"description": "The temperature unit to use. Infer this from the users location.",
},
"num_days": {
"type": "integer",
"description": "The number of days to forecast",
},
},
"required": ["location", "format", "num_days"],
},
},
},
]
chat = client.chat_completion(
messages=[
{
"role": "system",
"content": "You're a helpful assistant! Answer the users question best you can.",
},
{
"role": "user",
"content": "What is the weather like in Brooklyn, New York?",
},
],
tools=tools,
seed=42,
max_tokens=100,
)
print(chat.choices[0].message.tool_calls)
# [ChatCompletionOutputToolCall(function=ChatCompletionOutputFunctionDefinition(arguments={'format': 'fahrenheit', 'location': 'Brooklyn, New York', 'num_days': 7}, name='get_n_day_weather_forecast', description=None), id=0, type='function')]
```
### OpenAI integration
TGI exposes an OpenAI-compatible API, which means you can use OpenAI's client libraries to interact with TGI's Messages API and Tool functions.
```python
from openai import OpenAI
# Initialize the client, pointing it to one of the available models
client = OpenAI(
base_url="http://localhost:3000/v1",
api_key="_",
)
# NOTE: tools defined above and removed for brevity
chat_completion = client.chat.completions.create(
model="tgi",
messages=[
{
"role": "system",
"content": "Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous.",
},
{
"role": "user",
"content": "What's the weather like the next 3 days in San Francisco, CA?",
},
],
tools=tools,
tool_choice="auto", # tool selected by model
max_tokens=500,
)
called = chat_completion.choices[0].message.tool_calls
print(called)
# {
# "id": 0,
# "type": "function",
# "function": {
# "description": None,
# "name": "tools",
# "parameters": {
# "format": "celsius",
# "location": "San Francisco, CA",
# "num_days": 3,
# },
# },
# }
```
### Tool Choice Configuration
When configuring how the model interacts with tools during a chat completion, there are several options for determining if or how a tool should be called. These options are controlled by the `tool_choice` parameter, which specifies the behavior of the model in relation to tool usage. The following modes are supported:
1. **`auto`**:
- The model decides whether to call a tool or generate a response message based on the user's input.
- If tools are provided, this is the default mode.
- Example usage:
```python
tool_choice="auto"
```
2. **`none`**:
- The model will never call any tools and will only generate a response message.
- If no tools are provided, this is the default mode.
- Example usage:
```python
tool_choice="none"
```
3. **`required`**:
- The model must call one or more tools and will not generate a response message on its own.
- Example usage:
```python
tool_choice="required"
```
4. **Specific Tool Call by Function Name**:
- You can force the model to call a specific tool either by specifying the tool function directly or by using an object definition.
- Two ways to do this:
1. Provide the function name as a string:
```python
tool_choice="get_current_weather"
```
2. Use the function object format:
```python
tool_choice={
"type": "function",
"function": {
"name": "get_current_weather"
}
}
```
These options allow flexibility when integrating tools with the chat completions endpoint. You can configure the model to either rely on tools automatically or force it to follow a predefined behavior, based on the needs of the task at hand.
---
| **Tool Choice Option** | **Description** | **When to Use** |
| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- |
| `auto` | The model decides whether to call a tool or generate a message. This is the default if tools are provided. | Use when you want the model to decide when a tool is necessary. |
| `none` | The model generates a message without calling any tools. This is the default if no tools are provided. | Use when you do not want the model to call any tools. |
| `required` | The model must call one or more tools and will not generate a message on its own. | Use when a tool call is mandatory, and you do not want a regular message generated. |
| Specific Tool Call (`name` or object) | Force the model to call a specific tool either by specifying its name (`tool_choice="get_current_weather"`) or using an object. | Use when you want to restrict the model to calling a particular tool for the response. |
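As a quick illustration of forcing a tool call, here is a minimal sketch using the `huggingface_hub` client. It assumes a TGI server running at `http://localhost:3000` and a single weather tool similar to the one defined earlier; adapt the tool definition and the `tool_choice` value to your own setup.
```python
from huggingface_hub import InferenceClient
client = InferenceClient("http://localhost:3000")
# A single example tool; any JSON-schema function definition works here.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "format": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "The temperature unit to use.",
                    },
                },
                "required": ["location", "format"],
            },
        },
    }
]
# With tool_choice="required" the model must call a tool instead of answering in plain text.
chat = client.chat_completion(
    messages=[
        {"role": "user", "content": "What is the weather like in Brooklyn, New York?"}
    ],
    tools=tools,
    tool_choice="required",
    max_tokens=100,
)
print(chat.choices[0].message.tool_calls)
```
Swapping `"required"` for `"auto"`, `"none"`, or a specific function name switches between the behaviors summarized in the table above.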
| text-generation-inference/docs/source/basic_tutorials/using_guidance.md/0 | {
"file_path": "text-generation-inference/docs/source/basic_tutorials/using_guidance.md",
"repo_id": "text-generation-inference",
"token_count": 6730
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 5267,
"logprob": -1.1464844,
"special": false,
"text": "?\n"
},
{
"id": 33464,
"logprob": -0.83203125,
"special": false,
"text": "Deep"
},
{
"id": 20909,
"logprob": -0.5625,
"special": false,
"text": " Learning"
},
{
"id": 320,
"logprob": -2.1464844,
"special": false,
"text": " ("
},
{
"id": 16524,
"logprob": 0.0,
"special": false,
"text": "DL"
},
{
"id": 701,
"logprob": -2.2089844,
"special": false,
"text": "),"
},
{
"id": 476,
"logprob": -0.27368164,
"special": false,
"text": " or"
},
{
"id": 20443,
"logprob": -0.09442139,
"special": false,
"text": " artificial"
},
{
"id": 29728,
"logprob": 0.0,
"special": false,
"text": " neural"
},
{
"id": 14155,
"logprob": 0.0,
"special": false,
"text": " networks"
}
],
"top_tokens": null
},
"generated_text": "What is deep learning?\nDeep Learning (DL), or artificial neural networks"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_compressed_tensors_w8a8_int_dynamic_weight/test_compressed_tensors_w8a8_int_dynamic_weight_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_compressed_tensors_w8a8_int_dynamic_weight/test_compressed_tensors_w8a8_int_dynamic_weight_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 862
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 198,
"logprob": -0.68603516,
"special": false,
"text": "\n"
},
{
"id": 198,
"logprob": -0.005393982,
"special": false,
"text": "\n"
},
{
"id": 29744,
"logprob": -0.31079102,
"special": false,
"text": "Deep"
},
{
"id": 4673,
"logprob": -0.08300781,
"special": false,
"text": " learning"
},
{
"id": 318,
"logprob": -0.58984375,
"special": false,
"text": " is"
},
{
"id": 257,
"logprob": -0.953125,
"special": false,
"text": " a"
},
{
"id": 649,
"logprob": -2.0957031,
"special": false,
"text": " new"
},
{
"id": 2214,
"logprob": -1.8095703,
"special": false,
"text": " field"
},
{
"id": 286,
"logprob": -1.0673828,
"special": false,
"text": " of"
},
{
"id": 2267,
"logprob": -0.9375,
"special": false,
"text": " research"
}
],
"top_tokens": null
},
"generated_text": "\n\nDeep learning is a new field of research"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_gpt2/test_flash_gpt2.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_gpt2/test_flash_gpt2.json",
"repo_id": "text-generation-inference",
"token_count": 866
} |
{
"choices": [
{
"delta": {
"content": "",
"role": "assistant",
"tool_calls": null
},
"finish_reason": "stop",
"index": 0,
"logprobs": null
}
],
"created": 1737646031,
"id": "",
"model": "Qwen/Qwen2-VL-7B-Instruct",
"object": "chat.completion.chunk",
"system_fingerprint": "3.0.2-dev0-native",
"usage": null
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2_vl/test_flash_qwen2_vl_simple_streaming.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2_vl/test_flash_qwen2_vl_simple_streaming.json",
"repo_id": "text-generation-inference",
"token_count": 201
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 30,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 6377,
"logprob": -0.14916992,
"special": false,
"text": "{\""
},
{
"id": 29888,
"logprob": -0.13598633,
"special": false,
"text": "f"
},
{
"id": 12935,
"logprob": -0.017669678,
"special": false,
"text": "irs"
},
{
"id": 29873,
"logprob": -0.00085639954,
"special": false,
"text": "t"
},
{
"id": 1170,
"logprob": -0.0054016113,
"special": false,
"text": "Name"
},
{
"id": 4710,
"logprob": -0.13549805,
"special": false,
"text": "\":\""
},
{
"id": 19504,
"logprob": -0.8852539,
"special": false,
"text": "David"
},
{
"id": 3284,
"logprob": -0.16394043,
"special": false,
"text": "\",\""
},
{
"id": 29882,
"logprob": -0.08862305,
"special": false,
"text": "h"
},
{
"id": 711,
"logprob": -0.66259766,
"special": false,
"text": "ob"
},
{
"id": 1609,
"logprob": -5.51939e-05,
"special": false,
"text": "by"
},
{
"id": 4710,
"logprob": -0.23120117,
"special": false,
"text": "\":\""
},
{
"id": 29911,
"logprob": -2.3730469,
"special": false,
"text": "T"
},
{
"id": 11003,
"logprob": -0.032104492,
"special": false,
"text": "rees"
},
{
"id": 3284,
"logprob": -0.22021484,
"special": false,
"text": "\",\""
},
{
"id": 4230,
"logprob": -0.06726074,
"special": false,
"text": "last"
},
{
"id": 1170,
"logprob": -0.003501892,
"special": false,
"text": "Name"
},
{
"id": 4710,
"logprob": -0.0045661926,
"special": false,
"text": "\":\""
},
{
"id": 29950,
"logprob": -0.12512207,
"special": false,
"text": "H"
},
{
"id": 14339,
"logprob": -0.009552002,
"special": false,
"text": "olt"
},
{
"id": 29920,
"logprob": -0.00042438507,
"special": false,
"text": "z"
},
{
"id": 3284,
"logprob": -0.11651611,
"special": false,
"text": "\",\""
},
{
"id": 29876,
"logprob": -0.29736328,
"special": false,
"text": "n"
},
{
"id": 398,
"logprob": -0.003030777,
"special": false,
"text": "um"
},
{
"id": 29907,
"logprob": -0.3774414,
"special": false,
"text": "C"
},
{
"id": 1446,
"logprob": -0.0003130436,
"special": false,
"text": "ats"
},
{
"id": 1115,
"logprob": -0.0021514893,
"special": false,
"text": "\":"
},
{
"id": 29906,
"logprob": -0.071899414,
"special": false,
"text": "2"
},
{
"id": 29913,
"logprob": -0.018997192,
"special": false,
"text": "}"
},
{
"id": 2,
"logprob": 0.0,
"special": true,
"text": "</s>"
}
],
"top_tokens": null
},
"generated_text": "{\"firstName\":\"David\",\"hobby\":\"Trees\",\"lastName\":\"Holtz\",\"numCats\":2}"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_grammar_llama/test_non_flash_llama_grammar_json.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_grammar_llama/test_non_flash_llama_grammar_json.json",
"repo_id": "text-generation-inference",
"token_count": 2413
} |
{
"details": {
"finish_reason": "length",
"generated_tokens": 40,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -0.31347656,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.27441406,
"special": false,
"text": "\n"
},
{
"id": 28737,
"logprob": -2.2285156,
"special": false,
"text": "I"
},
{
"id": 28809,
"logprob": -1.4677734,
"special": false,
"text": "’"
},
{
"id": 28719,
"logprob": -0.31762695,
"special": false,
"text": "m"
},
{
"id": 264,
"logprob": -1.6865234,
"special": false,
"text": " a"
},
{
"id": 1215,
"logprob": -3.2695312,
"special": false,
"text": " very"
},
{
"id": 20640,
"logprob": -3.1230469,
"special": false,
"text": " passionate"
},
{
"id": 1338,
"logprob": -0.48339844,
"special": false,
"text": " person"
},
{
"id": 28723,
"logprob": -0.9970703,
"special": false,
"text": "."
},
{
"id": 315,
"logprob": -0.5498047,
"special": false,
"text": " I"
},
{
"id": 28809,
"logprob": -1.1923828,
"special": false,
"text": "’"
},
{
"id": 28719,
"logprob": -0.080444336,
"special": false,
"text": "m"
},
{
"id": 1215,
"logprob": -1.8271484,
"special": false,
"text": " very"
},
{
"id": 12215,
"logprob": -2.8847656,
"special": false,
"text": " driven"
},
{
"id": 28723,
"logprob": -1.0927734,
"special": false,
"text": "."
},
{
"id": 315,
"logprob": -0.4584961,
"special": false,
"text": " I"
},
{
"id": 28809,
"logprob": -0.5019531,
"special": false,
"text": "’"
},
{
"id": 28719,
"logprob": -0.030715942,
"special": false,
"text": "m"
},
{
"id": 1215,
"logprob": -0.96972656,
"special": false,
"text": " very"
},
{
"id": 7798,
"logprob": -2.8847656,
"special": false,
"text": " determined"
},
{
"id": 28723,
"logprob": -0.27319336,
"special": false,
"text": "."
},
{
"id": 13,
"logprob": -0.56396484,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.011016846,
"special": false,
"text": "\n"
},
{
"id": 3195,
"logprob": -0.7163086,
"special": false,
"text": "What"
},
{
"id": 349,
"logprob": -1.1611328,
"special": false,
"text": " is"
},
{
"id": 574,
"logprob": -0.515625,
"special": false,
"text": " your"
},
{
"id": 6656,
"logprob": -1.0253906,
"special": false,
"text": " favorite"
},
{
"id": 1970,
"logprob": -2.1738281,
"special": false,
"text": " thing"
},
{
"id": 684,
"logprob": -0.48364258,
"special": false,
"text": " about"
},
{
"id": 1250,
"logprob": -1.8876953,
"special": false,
"text": " being"
},
{
"id": 264,
"logprob": -0.41967773,
"special": false,
"text": " a"
},
{
"id": 8626,
"logprob": -2.9160156,
"special": false,
"text": " teacher"
},
{
"id": 28804,
"logprob": -0.11920166,
"special": false,
"text": "?"
},
{
"id": 13,
"logprob": -0.023727417,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.010848999,
"special": false,
"text": "\n"
},
{
"id": 28737,
"logprob": -1.0566406,
"special": false,
"text": "I"
},
{
"id": 2016,
"logprob": -0.7163086,
"special": false,
"text": " love"
},
{
"id": 272,
"logprob": -1.9169922,
"special": false,
"text": " the"
},
{
"id": 1639,
"logprob": -2.03125,
"special": false,
"text": " fact"
}
]
},
"generated_text": "\n\nI’m a very passionate person. I’m very driven. I’m very determined.\n\nWhat is your favorite thing about being a teacher?\n\nI love the fact"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_lora_mistral/test_lora_mistral_without_customer_support_adapter.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_lora_mistral/test_lora_mistral_without_customer_support_adapter.json",
"repo_id": "text-generation-inference",
"token_count": 3126
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 29899,
"logprob": -1.4980469,
"special": false,
"text": "-"
},
{
"id": 1454,
"logprob": -0.19433594,
"special": false,
"text": "for"
},
{
"id": 29899,
"logprob": 0.0,
"special": false,
"text": "-"
},
{
"id": 9342,
"logprob": 0.0,
"special": false,
"text": "comment"
},
{
"id": 29901,
"logprob": 0.0,
"special": false,
"text": ":"
},
{
"id": 396,
"logprob": -0.27392578,
"special": false,
"text": " #"
},
{
"id": 29906,
"logprob": -0.49389648,
"special": false,
"text": "2"
},
{
"id": 29900,
"logprob": -0.81103516,
"special": false,
"text": "0"
},
{
"id": 29896,
"logprob": 0.0,
"special": false,
"text": "1"
},
{
"id": 29955,
"logprob": -1.0800781,
"special": false,
"text": "7"
}
],
"top_tokens": null
},
"generated_text": "Test request-for-comment: #2017"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_server_gptq_quantized/test_server_gptq_quantized_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_server_gptq_quantized/test_server_gptq_quantized_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 853
} |
import pytest
@pytest.fixture(scope="module")
def bloom_560m_sharded_handle(launcher):
with launcher("bigscience/bloom-560m", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def bloom_560m_sharded(bloom_560m_sharded_handle):
await bloom_560m_sharded_handle.health(240)
return bloom_560m_sharded_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_bloom_560m_sharded(bloom_560m_sharded, response_snapshot):
response = await bloom_560m_sharded.generate(
"Pour déguster un ortolan, il faut tout d'abord",
max_new_tokens=10,
top_p=0.9,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_bloom_560m_sharded_load(
bloom_560m_sharded, generate_load, response_snapshot
):
responses = await generate_load(
bloom_560m_sharded,
"Pour déguster un ortolan, il faut tout d'abord",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_bloom_560m_sharded.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_bloom_560m_sharded.py",
"repo_id": "text-generation-inference",
"token_count": 527
} |
import pytest
@pytest.fixture(scope="module")
def flash_gpt2_handle(launcher):
with launcher("openai-community/gpt2", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_gpt2(flash_gpt2_handle):
await flash_gpt2_handle.health(300)
return flash_gpt2_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_gpt2(flash_gpt2, response_snapshot):
response = await flash_gpt2.generate(
"What is deep learning?",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_gpt2_load(flash_gpt2, generate_load, response_snapshot):
responses = await generate_load(
flash_gpt2,
"What is deep learning?",
max_new_tokens=10,
n=4,
)
generated_texts = [r.generated_text for r in responses]
assert len(generated_texts) == 4
assert all(
[text == generated_texts[0] for text in generated_texts]
), generated_texts
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_gpt2.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_gpt2.py",
"repo_id": "text-generation-inference",
"token_count": 476
} |
import pytest
@pytest.fixture(scope="module")
def flash_neox_handle(launcher):
with launcher("stabilityai/stablelm-tuned-alpha-3b", num_shard=1) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_neox(flash_neox_handle):
await flash_neox_handle.health(300)
return flash_neox_handle.client
@pytest.mark.release
@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_neox(flash_neox, response_snapshot):
response = await flash_neox.generate(
"<|USER|>What's your mood today?<|ASSISTANT|>",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_neox_load(flash_neox, generate_load, response_snapshot):
responses = await generate_load(
flash_neox,
"<|USER|>What's your mood today?<|ASSISTANT|>",
max_new_tokens=10,
n=4,
)
generated_texts = [r.generated_text for r in responses]
assert len(generated_texts) == 4
assert all(
[text == generated_texts[0] for text in generated_texts]
), generated_texts
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_neox.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_neox.py",
"repo_id": "text-generation-inference",
"token_count": 514
} |
import pytest
@pytest.fixture(scope="module")
def flash_idefics2_next_handle(launcher):
with launcher(
"HuggingFaceM4/idefics2-8b",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_idefics2_next(flash_idefics2_next_handle):
await flash_idefics2_next_handle.health(300)
return flash_idefics2_next_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_idefics2_next_simple(
flash_idefics2_next, response_snapshot, chicken
):
response = await flash_idefics2_next.generate(
f"User:Write me a short story<end_of_utterance> \nAssistant:",
max_new_tokens=10,
)
assert (
response.generated_text == " A chicken is sitting on a pile of money."
), f"{repr(response.generated_text)}"
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_idefics2_two_images(
flash_idefics2_next, response_snapshot, chicken, cow_beach
):
response = await flash_idefics2_next.generate(
f"User:Where are the cow and chicken?<end_of_utterance> \nAssistant:",
max_new_tokens=20,
)
assert (
response.generated_text
== " The cow is standing on the beach and the chicken is sitting on a pile of money."
), f"{repr(response.generated_text)}"
assert response.details.generated_tokens == 19
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_idefics2_next_all_params(flash_idefics2_next, response_snapshot):
response = await flash_idefics2_next.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_idefics2_next_load(
flash_idefics2_next, generate_load, response_snapshot, chicken
):
responses = await generate_load(
flash_idefics2_next,
f"User:Write me a short story<end_of_utterance> \nAssistant:",
max_new_tokens=10,
n=4,
)
generated_texts = [r.generated_text for r in responses]
assert generated_texts[0] == " A chicken is sitting on a pile of money."
assert len(generated_texts) == 4
assert all([r.generated_text == generated_texts[0] for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_idefics2.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_idefics2.py",
"repo_id": "text-generation-inference",
"token_count": 1159
} |
[tool.poetry]
name = "text-generation-inference-benchmarks"
version = "0.1.0"
description = ""
authors = ["Hugo Larcher <[email protected]>"]
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.11"
docker = "^7.1.0"
loguru = "^0.7.2"
psutil = "^6.0.0"
gputil = "^1.4.0"
pandas = "^2.2.3"
pyarrow = "^17.0.0"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
| text-generation-inference/load_tests/pyproject.toml/0 | {
"file_path": "text-generation-inference/load_tests/pyproject.toml",
"repo_id": "text-generation-inference",
"token_count": 195
} |
use crate::infer::Infer;
use crate::{
default_parameters,
server::{generate_internal, ComputeType},
Deserialize, ErrorResponse, GenerateParameters, GenerateRequest, Serialize, ToSchema,
};
use axum::extract::{Extension, Path};
use axum::http::{HeaderMap, StatusCode};
use axum::response::IntoResponse;
use axum::Json;
use futures::stream::FuturesUnordered;
use futures::TryStreamExt;
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct OutputChunk {
pub name: String,
pub shape: Vec<usize>,
pub datatype: String,
pub data: Vec<u8>,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct InferenceOutput {
pub id: String,
pub outputs: Vec<OutputChunk>,
}
#[derive(Debug, Deserialize, ToSchema)]
pub(crate) struct InferenceRequest {
pub id: String,
#[serde(default = "default_parameters")]
pub parameters: GenerateParameters,
pub inputs: Vec<Input>,
pub outputs: Vec<Output>,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub(crate) struct Input {
pub name: String,
pub shape: Vec<usize>,
pub datatype: String,
pub data: Vec<u8>,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub(crate) struct Output {
pub name: String,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct LiveResponse {
pub live: bool,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct ReadyResponse {
pub live: bool,
}
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct MetadataServerResponse {
pub name: String,
pub version: String,
pub extensions: Vec<String>,
}
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/v2/health/live",
responses(
(status = 200, description = "Service is live", body = LiveReponse),
(status = 404, description = "Service not found", body = ErrorResponse,
example = json!({"error": "No response"}))
)
)]
pub async fn kserve_health_live() -> Json<LiveResponse> {
let data = LiveResponse { live: true };
Json(data)
}
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/v2/health/ready",
responses(
(status = 200, description = "Service is ready", body = ReadyResponse),
(status = 404, description = "Service not found", body = ErrorResponse,
example = json!({"error": "No response"}))
)
)]
pub async fn kserve_health_ready() -> Json<ReadyResponse> {
let data = ReadyResponse { live: true };
Json(data)
}
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/v2",
responses(
(status = 200, description = "Metadata retrieved", body = MetadataServerResponse),
(status = 404, description = "Service not found", body = ErrorResponse,
example = json!({"error": "No response"}))
)
)]
pub async fn kerve_server_metadata() -> Json<MetadataServerResponse> {
let data = MetadataServerResponse {
name: "text-generation-inference".to_string(),
version: env!("CARGO_PKG_VERSION").to_string(),
extensions: vec![
"health".to_string(),
"models".to_string(),
"metrics".to_string(),
],
};
Json(data)
}
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/v2/models/{model_name}/versions/{model_version}",
responses(
(status = 200, description = "Model version metadata retrieved", body = MetadataServerResponse),
(status = 404, description = "Model or version not found", body = ErrorResponse,
example = json!({"error": "No response"}))
)
)]
pub async fn kserve_model_metadata(
Path((model_name, model_version)): Path<(String, String)>,
) -> Json<MetadataServerResponse> {
let data = MetadataServerResponse {
name: model_name,
version: model_version,
extensions: vec!["infer".to_string(), "ready".to_string()],
};
Json(data)
}
#[utoipa::path(
get,
tag = "Text Generation Inference",
path = "/v2/models/{model_name}/versions/{model_version}/ready",
responses(
(status = 200, description = "Model version is ready", body = ReadyResponse),
(status = 404, description = "Model or version not found", body = ErrorResponse,
example = json!({"error": "No response"}))
)
)]
pub async fn kserve_model_metadata_ready(
Path((_model_name, _model_version)): Path<(String, String)>,
) -> Json<ReadyResponse> {
let data = ReadyResponse { live: true };
Json(data)
}
#[utoipa::path(
post,
tag = "Text Generation Inference",
path = "/v2/models/{model_name}/versions/{model_version}/infer",
request_body = Json<InferenceRequest>,
responses(
(status = 200, description = "Inference executed successfully", body = InferenceOutput),
(status = 404, description = "Model or version not found", body = ErrorResponse,
example = json!({"error": "No response"}))
)
)]
pub async fn kserve_model_infer(
infer: Extension<Infer>,
Extension(compute_type): Extension<ComputeType>,
Json(payload): Json<InferenceRequest>,
) -> Result<impl IntoResponse, (StatusCode, Json<ErrorResponse>)> {
let id = payload.id.clone();
let str_inputs = payload
.inputs
.iter()
.map(|input| {
std::str::from_utf8(&input.data).map_err(|e| {
(
StatusCode::UNPROCESSABLE_ENTITY,
Json(ErrorResponse {
error: e.to_string(),
error_type: "utf8".to_string(),
}),
)
})
})
.collect::<Result<Vec<_>, _>>()?;
if str_inputs.len() != payload.outputs.len() {
return Err((
StatusCode::UNPROCESSABLE_ENTITY,
Json(ErrorResponse {
error: "Inputs and outputs length mismatch".to_string(),
error_type: "length mismatch".to_string(),
}),
));
}
let output_chunks = str_inputs
.iter()
.zip(&payload.outputs)
.map(|(str_input, output)| {
let generate_request = GenerateRequest {
inputs: str_input.to_string(),
parameters: payload.parameters.clone(),
add_special_tokens: true,
};
let infer = infer.clone();
let compute_type = compute_type.clone();
let span = tracing::Span::current();
async move {
generate_internal(infer, compute_type, Json(generate_request), span)
.await
.map(|(_, _, Json(generation))| {
let generation_as_bytes = generation.generated_text.as_bytes().to_vec();
OutputChunk {
name: output.name.clone(),
shape: vec![1, generation_as_bytes.len()],
datatype: "BYTES".to_string(),
data: generation_as_bytes,
}
})
.map_err(|_| {
(
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse {
error: "Incomplete generation".into(),
error_type: "Incomplete generation".into(),
}),
)
})
}
})
.collect::<FuturesUnordered<_>>()
.try_collect::<Vec<_>>()
.await?;
let inference_output = InferenceOutput {
id: id.clone(),
outputs: output_chunks,
};
Ok((HeaderMap::new(), Json(inference_output)))
}
| text-generation-inference/router/src/kserve.rs/0 | {
"file_path": "text-generation-inference/router/src/kserve.rs",
"repo_id": "text-generation-inference",
"token_count": 3533
} |
flash_att_v2_commit_cuda := v2.6.1
flash_att_v2_commit_rocm := 47bd46e0204a95762ae48712fd1a3978827c77fd
build-flash-attention-v2-cuda:
pip install -U packaging wheel
pip install flash-attn==$(flash_att_v2_commit_cuda)
install-flash-attention-v2-cuda: build-flash-attention-v2-cuda
echo "Flash v2 installed"
build-flash-attention-v2-rocm:
if [ ! -d 'flash-attention-v2' ]; then \
pip install -U packaging ninja --no-cache-dir && \
git clone https://github.com/mht-sharma/flash-attention.git flash-attention-v2 && \
cd flash-attention-v2 && git fetch && git checkout $(flash_att_v2_commit_rocm) && \
git submodule update --init --recursive && GPU_ARCHS="gfx90a;gfx942" PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py build; \
fi
install-flash-attention-v2-rocm: build-flash-attention-v2-rocm
cd flash-attention-v2 && \
GPU_ARCHS="gfx90a;gfx942" PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py install
| text-generation-inference/server/Makefile-flash-att-v2/0 | {
"file_path": "text-generation-inference/server/Makefile-flash-att-v2",
"repo_id": "text-generation-inference",
"token_count": 397
} |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#ifndef _q4_matmul_cuh
#define _q4_matmul_cuh
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>
#include <ATen/cuda/CUDAContext.h>
#include "q4_matrix.cuh"
#include "../tuning.h"
void q4_matmul_cuda
(
ExLlamaTuning* tuningParams,
const half* x,
const int x_height,
const Q4Matrix* w,
half* out,
bool no_zero,
cudaStream_t alt_stream
);
void q4_matmul_recons_cuda
(
ExLlamaTuning* tuningParams,
const half* x,
const int x_height,
Q4Matrix* w,
half* out,
bool no_zero,
const cublasHandle_t handle
);
#endif
| text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cuh/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cuh",
"repo_id": "text-generation-inference",
"token_count": 322
} |
#include "compat.cuh"
__forceinline__ __device__ half2 dot22_8(half2(&dq)[4], const half* a_ptr, const half2 g_result)
{
half2 result = {};
const half2* a2_ptr = (const half2*)a_ptr;
#pragma unroll
for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result);
return __hadd2(result, g_result);
}
__forceinline__ __device__ float dot22_8_f(half2(&dq)[4], const half* a_ptr)
{
half2 result = {};
const half2* a2_ptr = (const half2*)a_ptr;
#pragma unroll
for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result);
return __half2float(__low2half(result)) + __half2float(__high2half(result));
}
__forceinline__ __device__ half2 dot22_8_h2(half2(&dq)[4], const half* a_ptr)
{
half2 result = {};
const half2* a2_ptr = (const half2*)a_ptr;
#pragma unroll
for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result);
return result;
}
typedef void (*fp_gemm_half_q_half_gptq_kernel)
(
const half*,
const uint32_t*,
const uint32_t*,
const half*,
half*,
const int,
const int,
const int,
const int,
const int,
const uint16_t*,
const int,
const bool,
const half*,
const int
);
template <int m_count, bool use_r_weights, bool mul_r_weights>
__global__ void gemm_half_q_half_gptq_kernel
(
const half* __restrict__ a,
const uint32_t* __restrict__ b_q_weight,
const uint32_t* __restrict__ b_gptq_qzeros,
const half* __restrict__ b_gptq_scales,
half* __restrict__ c,
const int size_m,
const int size_n,
const int size_k,
const int groups,
const int groupsize,
const uint16_t* __restrict__ b_q_perm,
const int rows_4,
const bool clear,
const half* r_weights,
const int r_weights_stride
)
{
MatrixView_half a_(a, size_m, size_k);
MatrixView_half_rw c_(c, size_m, size_n);
MatrixView_q4_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n);
MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n);
int t = threadIdx.x;
// Block
int offset_n = blockIdx.x * GPTQ_BLOCK_KN_SIZE * 4;
int offset_m = blockIdx.y * m_count;
int offset_k = blockIdx.z * GPTQ_BLOCK_KN_SIZE;
int end_n = min(offset_n + GPTQ_BLOCK_KN_SIZE * 4, size_n);
int end_m = min(offset_m + m_count, size_m);
int end_k = min(offset_k + GPTQ_BLOCK_KN_SIZE, size_k);
int n = offset_n + t * 4;
// Read weights
half_uint16 weights[MAX_Q_GEMM_WEIGHTS];
if constexpr (use_r_weights)
{
uint16_t any_w = 0;
const half* w_ptr = r_weights;
for (int m = 0; m < m_count; ++m)
{
weights[m].as_half = *w_ptr;
w_ptr += r_weights_stride;
any_w |= weights[m].as_uint16;
}
if (!any_w) return; // Early exit if all weights are zero -- does not zero output (!!!)
}
// Preload block_a
__shared__ half block_a[m_count][GPTQ_BLOCK_KN_SIZE];
if (offset_k + t < end_k)
{
for (int m = 0; m < m_count; ++m)
{
const half* a_ptr = a_.item_ptr(offset_m + m, 0);
half* block_a_ptr = block_a[m];
half a0;
if (b_q_perm) a0 = a_ptr[b_q_perm[offset_k + t]];
else a0 = a_ptr[offset_k + t];
block_a_ptr[t] = a0;
}
}
// Zero output
if (n >= size_n) return;
if (clear && blockIdx.z == 0) // && (threadIdx.x & 1) == 0)
{
for (int m = 0; m < m_count; m++)
*((uint64_t*)c_.item_ptr(offset_m + m, n)) = 0;
}
__syncthreads();
// Find initial group
int group = offset_k / groupsize;
int nextgroup = offset_k + groupsize;
// a, b offset
int qk = offset_k / (32 / 4);
const uint32_t* b_ptr = b_q_weight + qk * size_n + n;
const half* a_ptr = &block_a[0][0];
int a_stride = GPTQ_BLOCK_KN_SIZE;
// Initial group
int zeros[4];
half2 scales[4];
half2 z1z16[4][2];
half2 y1y16[4][2];
b_gptq_qzeros_.item4(zeros, group, n);
b_gptq_scales_.item4_h2(scales, group, n);
dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]);
dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]);
dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]);
dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]);
// __syncthreads();
// Column result
half2 block_c[m_count][4] = {};
// Dequantize and multiply
int k = offset_k;
while (k < end_k)
{
if (k == nextgroup)
{
group++;
nextgroup += groupsize;
b_gptq_qzeros_.item4(zeros, group, n);
b_gptq_scales_.item4_h2(scales, group, n);
dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]);
dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]);
dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]);
dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]);
}
#pragma unroll
for (int j = 0; j < 4; j++)
{
const int4* b_ptr4 = (int4*) b_ptr;
int4 load_int4 = *b_ptr4;
half2 dq[4][4];
dequant_4bit_8_gptq(load_int4.x, dq[0], z1z16[0], y1y16[0], size_n, false);
dequant_4bit_8_gptq(load_int4.y, dq[1], z1z16[1], y1y16[1], size_n, false);
dequant_4bit_8_gptq(load_int4.z, dq[2], z1z16[2], y1y16[2], size_n, false);
dequant_4bit_8_gptq(load_int4.w, dq[3], z1z16[3], y1y16[3], size_n, false);
#pragma unroll
for (int m = 0; m < m_count; m++)
{
if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; }
block_c[m][0] = __hfma2(dot22_8_h2(dq[0], a_ptr + m * a_stride), scales[0], block_c[m][0]);
block_c[m][1] = __hfma2(dot22_8_h2(dq[1], a_ptr + m * a_stride), scales[1], block_c[m][1]);
block_c[m][2] = __hfma2(dot22_8_h2(dq[2], a_ptr + m * a_stride), scales[2], block_c[m][2]);
block_c[m][3] = __hfma2(dot22_8_h2(dq[3], a_ptr + m * a_stride), scales[3], block_c[m][3]);
}
b_ptr += size_n;
a_ptr += 8;
}
k += 32;
}
for (int m = 0; m < m_count; m++)
{
half2 *out = (half2*) c_.item_ptr(offset_m + m, n);
half result0 = __hadd(__low2half(block_c[m][0]), __high2half(block_c[m][0]));
half result1 = __hadd(__low2half(block_c[m][1]), __high2half(block_c[m][1]));
half result2 = __hadd(__low2half(block_c[m][2]), __high2half(block_c[m][2]));
half result3 = __hadd(__low2half(block_c[m][3]), __high2half(block_c[m][3]));
half2 result01 = __halves2half2(result0, result1);
half2 result23 = __halves2half2(result2, result3);
if constexpr (mul_r_weights)
{
half2 w_mul2 = __half2half2(weights[m].as_half);
result01 = __hmul2(result01, w_mul2);
result23 = __hmul2(result23, w_mul2);
}
atomicAdd(out , result01);
atomicAdd(out + 1, result23);
}
}
template <bool use_r_weights, bool mul_r_weights>
struct map_m_count_gptq {
static constexpr fp_gemm_half_q_half_gptq_kernel pick_gemm_half_q_half_gptq_kernel(int m_count)
{
#if GPTQ_BLOCK_M_SIZE_MAX >= 1
if (m_count == 1) return gemm_half_q_half_gptq_kernel<1, use_r_weights, mul_r_weights>;
#endif
#if GPTQ_BLOCK_M_SIZE_MAX >= 2
if (m_count == 2) return gemm_half_q_half_gptq_kernel<2, use_r_weights, mul_r_weights>;
#endif
#if GPTQ_BLOCK_M_SIZE_MAX >= 3
if (m_count == 3) return gemm_half_q_half_gptq_kernel<3, use_r_weights, mul_r_weights>;
#endif
#if GPTQ_BLOCK_M_SIZE_MAX >= 4
if (m_count == 4) return gemm_half_q_half_gptq_kernel<4, use_r_weights, mul_r_weights>;
#endif
#if GPTQ_BLOCK_M_SIZE_MAX >= 5
if (m_count == 5) return gemm_half_q_half_gptq_kernel<5, use_r_weights, mul_r_weights>;
#endif
#if GPTQ_BLOCK_M_SIZE_MAX >= 6
if (m_count == 6) return gemm_half_q_half_gptq_kernel<6, use_r_weights, mul_r_weights>;
#endif
#if GPTQ_BLOCK_M_SIZE_MAX >= 7
if (m_count == 7) return gemm_half_q_half_gptq_kernel<7, use_r_weights, mul_r_weights>;
#endif
#if GPTQ_BLOCK_M_SIZE_MAX >= 8
if (m_count == 8) return gemm_half_q_half_gptq_kernel<8, use_r_weights, mul_r_weights>;
#endif
return NULL;
}
};
fp_gemm_half_q_half_gptq_kernel pick_gemm_half_q_half_gptq_kernel(const int m_count, bool r_weights, bool mul_r_weights)
{
if (!r_weights && !mul_r_weights) return map_m_count_gptq<false, false>::pick_gemm_half_q_half_gptq_kernel(m_count);
if (!r_weights && mul_r_weights) return map_m_count_gptq<false, true>::pick_gemm_half_q_half_gptq_kernel(m_count);
if ( r_weights && !mul_r_weights) return map_m_count_gptq< true, false>::pick_gemm_half_q_half_gptq_kernel(m_count);
if ( r_weights && mul_r_weights) return map_m_count_gptq< true, true>::pick_gemm_half_q_half_gptq_kernel(m_count);
return NULL;
}
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh",
"repo_id": "text-generation-inference",
"token_count": 4839
} |
import os
from typing import Optional
import torch
from text_generation_server.layers.attention.kv_cache import KVCache, KVScales
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.layers.attention import Seqlen
from text_generation_server.utils.log import log_master
from text_generation_server.models.globals import (
ATTENTION,
BLOCK_SIZE,
)
from loguru import logger
import vllm._custom_ops as ops
major, minor = torch.cuda.get_device_capability()
is_sm75 = major == 7 and minor == 5
_PARTITION_SIZE_V1V2 = 1024
_PARTITION_SIZE_CUSTOM = 256
_GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName
_ON_MI250_MI300 = any(
arch in _GPU_ARCH for arch in ["gfx90a", "gfx940", "gfx941", "gfx942"]
)
use_triton = os.getenv("ROCM_USE_FLASH_ATTN_V2_TRITON", "").lower() in {"true", "1"}
ENGINE = "triton" if use_triton else "ck"
use_rocm_custom_paged_attn = os.getenv("ROCM_USE_CUSTOM_PAGED_ATTN", "1") != "0"
def _use_rocm_custom_paged_attention(
qtype: torch.dtype,
head_size: int,
block_size: int,
gqa_ratio: int,
max_seq_len: int,
) -> bool:
    # The ROCm custom paged attention kernel is not supported on Navi (gfx1*) GPUs.
return (
use_rocm_custom_paged_attn
and _ON_MI250_MI300
and (qtype == torch.half or qtype == torch.bfloat16)
and (head_size == 64 or head_size == 128)
and (block_size == 16 or block_size == 32)
and (gqa_ratio >= 1 and gqa_ratio <= 16)
and max_seq_len <= 131072
)
def paged_attention(
query: torch.Tensor,
kv_cache: KVCache,
kv_head_mapping: torch.Tensor,
softmax_scale: float,
block_tables: torch.Tensor,
seqlen: Seqlen,
max_s: int,
*,
kv_scales: KVScales,
softcap: Optional[float] = None,
):
# Adapted from: https://github.com/vllm-project/vllm/blob/f8a1e39fae05ca610be8d5a78be9d40f5274e5fc/vllm/model_executor/layers/attention.py
# Copyright 2023 The vLLM team. All rights
# reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
if ATTENTION == "flashdecoding":
max_q = 1
max_k = max_s
import flash_attn_2_cuda
if softcap is None:
softcap = 0.0
out = flash_attn_2_cuda.varlen_fwd(
query,
kv_cache.key,
kv_cache.value,
None,
seqlen.cu_seqlen_q,
seqlen.cu_seqlen_k,
None, # pad_k
None,
block_tables,
None,
max_q,
max_k,
0.0, # dropout
softmax_scale,
False, # zero_tensors
True, # causal
-1, # Window_left
-1, # Window right
softcap,
False, # return softmax
None, # generator
)
return out[0]
if softcap is not None:
raise RuntimeError("Paged attention doesn't support softcapping")
# value_cache => [num_blocks, num_heads, head_size, block_size]
# block_size = kv_cache.value.shape[3]
block_size = BLOCK_SIZE
num_seqs, num_heads, head_size = query.shape
num_kv_heads = kv_cache.key.shape[1]
gqa_ratio = num_heads // num_kv_heads
use_custom = _use_rocm_custom_paged_attention(
query.dtype, head_size, block_size, gqa_ratio, max_s
)
if not use_custom:
_PARTITION_SIZE = _PARTITION_SIZE_V1V2
else:
_PARTITION_SIZE = _PARTITION_SIZE_CUSTOM
max_num_partitions = (max_s + _PARTITION_SIZE - 1) // _PARTITION_SIZE
input_lengths = seqlen.input_lengths + seqlen.cache_lengths
out = torch.empty_like(query)
if kv_cache.dtype == torch.float8_e4m3fn:
key = kv_cache.key.view(torch.uint8)
value = kv_cache.value.view(torch.uint8)
kv_cache_dtype = "fp8"
else:
key = kv_cache.key
value = kv_cache.value
kv_cache_dtype = "auto"
# NOTE(woosuk): We use a simple heuristic to decide whether to use
# PagedAttention V1 or V2. If the number of partitions is 1, we use
# V1 to avoid the overhead of reduction. Also, if the number of
# sequences or heads is large, we use V1 since there is enough work
# to parallelize.
use_v1 = (
max_s <= 8192
and (max_num_partitions == 1 or num_seqs * num_heads > 512)
and not use_custom
)
if use_v1:
ops.paged_attention_v1(
out,
query,
key,
value,
num_kv_heads,
softmax_scale,
block_tables,
input_lengths,
block_size,
max_s,
None,
kv_cache_dtype,
kv_scales.key_scale_cpu,
kv_scales.value_scale_cpu,
)
else:
# Run PagedAttention V2.
assert _PARTITION_SIZE % block_size == 0
tmp_output = torch.zeros(
size=(num_seqs, num_heads, max_num_partitions, head_size),
dtype=out.dtype,
device=out.device,
)
exp_sums = torch.zeros(
size=(num_seqs, num_heads, max_num_partitions),
dtype=torch.float32,
device=out.device,
)
max_logits = torch.zeros_like(exp_sums)
if not use_custom:
ops.paged_attention_v2(
out,
exp_sums,
max_logits,
tmp_output,
query,
key,
value,
num_kv_heads,
softmax_scale,
block_tables,
input_lengths,
block_size,
max_s,
None,
kv_cache_dtype,
kv_scales.key_scale_cpu,
kv_scales.value_scale_cpu,
)
else:
ops.paged_attention_rocm(
out,
exp_sums,
max_logits,
tmp_output,
query,
key,
value,
num_kv_heads,
softmax_scale,
block_tables,
input_lengths,
block_size,
max_s,
None,
kv_cache_dtype,
kv_scales.key_scale_cpu,
kv_scales.value_scale_cpu,
None,
_PARTITION_SIZE,
)
return out
if ENGINE != "triton":
try:
import flash_attn_2_cuda
log_master(
logger.info,
"ROCm: using Flash Attention 2 Composable Kernel implementation.",
)
except ImportError as e:
if major >= 8:
architecture_suffix = f"-{SYSTEM}"
raise ImportError(
"Flash Attention V2 is not installed.\n"
"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
f"or install flash attention v2 with `cd server && make install install-flash-attention-v2{architecture_suffix}`"
)
elif is_sm75:
raise ImportError(
"Flash Attention is not installed.\n"
"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
"or install flash attention with `cd server && make install install-flash-attention`"
) from e
else:
for idx in range(torch.cuda.device_count()):
name = torch.cuda.get_device_name(idx)
if "MI210" not in name and "MI250" not in name:
raise ImportError(
f"AMD GPU {torch.cuda.get_device_name(idx)} does not support flash-attention"
)
raise ImportError(
f"AMD GPU with ROCm capability {major} {minor} is not supported"
) from e
SUPPORTS_WINDOWING = False
def attention(
*,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
kv_cache: KVCache,
kv_scales: KVScales,
seqlen: Seqlen,
block_tables: torch.Tensor,
softmax_scale: float,
window_size_left: int = -1,
causal: bool = True,
softcap: Optional[float] = None,
):
if ENGINE == "ck":
if window_size_left <= 0 and window_size_left != -1:
raise ValueError("`window_size_left` must be > 0 or -1")
out = torch.empty_like(query)
if softcap is None:
softcap = 0.0
        # window_size_left does not need to be re-checked here; it is already validated ahead of time at model load.
return flash_attn_2_cuda.varlen_fwd(
query,
# flashdecoding: pass the KV caches, paged: pass the KV.
kv_cache.key if ATTENTION == "flashdecoding" else key,
kv_cache.value if ATTENTION == "flashdecoding" else value,
out,
seqlen.cu_seqlen_q,
seqlen.cu_seqlen_k,
None,
None,
block_tables if ATTENTION == "flashdecoding" else None,
None,
seqlen.max_q,
seqlen.max_k,
0.0,
softmax_scale,
False,
causal,
window_size_left,
0,
softcap,
False,
None,
)[0]
elif ENGINE == "triton":
from .flash_attn_triton import triton_attention
if softcap is not None:
raise NotImplementedError("softcap is only available with CK flash attn")
out = torch.empty_like(query)
        # window_size_left is not supported by the Triton kernel; it is already validated ahead of time at model load.
output, _ = triton_attention(
query,
key,
value,
out,
seqlen.cu_seqlen_q,
seqlen.cu_seqlen_q,
seqlen.max_q,
seqlen.max_k,
causal,
softmax_scale,
)
return output
else:
raise RuntimeError(f"Unknown attention engine {ENGINE}")
__all__ = [
"SUPPORTS_WINDOWING",
"attention",
"paged_attention",
]
| text-generation-inference/server/text_generation_server/layers/attention/rocm.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/attention/rocm.py",
"repo_id": "text-generation-inference",
"token_count": 5501
} |
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import torch
from loguru import logger
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.utils.log import log_once
from text_generation_server.utils.weights import Weight, Weights, WeightsLoader
if SYSTEM == "ipex":
from .ipex import QuantLinear
elif SYSTEM in {"cuda", "rocm"}:
from .triton import QuantLinear
@dataclass
class GPTQWeight(Weight):
qweight: torch.Tensor
qzeros: torch.Tensor
scales: torch.Tensor
g_idx: Optional[torch.Tensor]
bits: int
groupsize: int
use_awq_kernel: bool
use_exllama: bool
def __post_init__(self):
if self.scales.dtype == torch.float:
self.scales = self.scales.half()
@property
def device(self) -> torch.device:
return self.qweight.device
def get_linear(self, bias: torch.Tensor):
if self.use_awq_kernel:
if SYSTEM == "rocm":
raise NotImplementedError(
"AWQ GEMM kernel can't be used on ROCm systems, please use `--quantize gptq` instead "
"to use Exllama/GPTQ kernels for AWQ inference."
)
try:
from text_generation_server.layers.awq.quantize import WQLinear
return WQLinear(
w_bit=self.bits,
group_size=self.groupsize,
qweight=self.qweight,
qzeros=self.qzeros,
scales=self.scales,
bias=bias,
)
except ImportError:
raise NotImplementedError(
"You do not seem to have awq installed, either install it (cd server && make install-awq), or try using GPTQ `---quantize gptq` a conversion AWQ->GPTQ will happen on the fly"
)
elif self.use_exllama:
try:
from text_generation_server.layers.gptq import ExllamaQuantLinear
except ImportError:
raise NotImplementedError(
"Exllama gptq kernels are not installed. Install them `cd server/exllama_kernels && python setup.py install && cd ../exllamav2_kernels && python setup.py install`"
)
return ExllamaQuantLinear(self, bias)
else:
return QuantLinear(
self.qweight,
self.qzeros,
self.scales,
self.g_idx,
bias,
self.bits,
self.groupsize,
)
class GPTQWeightsLoader(WeightsLoader):
"""
Loader for GPTQ- and AWQ-quantized weights.
"""
def __init__(
self,
*,
bits: int,
desc_act: bool,
groupsize: int,
quant_method: str,
quantize: str,
sym: bool,
):
self.bits = bits
self.desc_act = desc_act
self.groupsize = groupsize
self.quant_method = quant_method
self.quantize = quantize
self.sym = sym
def get_weights(self, weights: Weights, prefix: str):
self._get_gptq_params(weights)
use_exllama = True
if self.bits != 4:
use_exllama = False
if self.desc_act:
log_once(logger.warning, "Disabling exllama because desc_act=True")
use_exllama = False
try:
qweight = weights.get_tensor(f"{prefix}.qweight")
except RuntimeError:
raise RuntimeError(
"Cannot load `gptq` weight, make sure the model is already quantized, or quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`"
)
if self.quantize == "gptq" and self.quant_method == "gptq":
g_idx = weights.get_tensor(f"{prefix}.g_idx")
else:
g_idx = None
from text_generation_server.layers.gptq import (
HAS_EXLLAMA,
CAN_EXLLAMA,
GPTQWeight,
)
if use_exllama:
if not HAS_EXLLAMA:
if CAN_EXLLAMA:
log_once(
logger.warning,
"Exllama GPTQ cuda kernels (which are faster) could have been used, but are not currently installed, try using BUILD_EXTENSIONS=True",
)
use_exllama = False
else:
log_once(logger.info, f"Using exllama kernels v{HAS_EXLLAMA}")
qzeros = weights.get_tensor(f"{prefix}.qzeros")
scales = weights.get_tensor(f"{prefix}.scales")
if use_exllama and g_idx is not None:
g_idx = g_idx - g_idx[0]
if self.quantize == "gptq" and self.quant_method == "awq":
log_once(
logger.info, "Converting AWQ model to Exllama/GPTQ packing format."
)
from text_generation_server.layers.awq.conversion_utils import (
fast_awq_to_gptq,
)
qweight, qzeros = fast_awq_to_gptq(qweight, qzeros)
if use_exllama:
g_idx = None
else:
g_idx = (
torch.arange(
qweight.shape[0] * (32 // self.bits),
device=qweight.device,
)
// self.groupsize
).to(dtype=torch.int32)
return GPTQWeight(
qweight=qweight,
qzeros=qzeros,
scales=scales,
g_idx=g_idx,
bits=self.bits,
groupsize=self.groupsize,
            use_awq_kernel=self.quantize == "awq",
            use_exllama=use_exllama,
)
def get_weights_col_packed(
self,
weights: Weights,
prefix: str,
block_sizes: Union[int, List[int]],
):
try:
qweight = weights.get_packed_sharded(
f"{prefix}.qweight", dim=1, block_sizes=block_sizes
)
except RuntimeError:
raise RuntimeError(
f"Cannot load `{self.quantize}` weight, make sure the model is already quantized."
)
scales = weights.get_packed_sharded(
f"{prefix}.scales", dim=1, block_sizes=block_sizes
)
scales = scales.to(dtype=weights.dtype)
self._get_gptq_params(weights)
qzeros = weights.get_packed_sharded(
f"{prefix}.qzeros", dim=1, block_sizes=block_sizes
)
if self.quantize == "gptq" and self.quant_method == "gptq":
g_idx = weights.get_tensor(f"{prefix}.g_idx")
elif self.quantize == "gptq" and self.quant_method == "awq":
log_once(
logger.info, "Converting AWQ model to Exllama/GPTQ packing format."
)
from text_generation_server.layers.awq.conversion_utils import (
fast_awq_to_gptq,
)
qweight, qzeros = fast_awq_to_gptq(qweight, qzeros)
g_idx = (
torch.arange(
qweight.shape[0] * (32 // self.bits),
device=qweight.device,
)
// self.groupsize
).to(dtype=torch.int32)
else:
g_idx = None
return GPTQWeight(
qweight=qweight,
qzeros=qzeros,
scales=scales,
g_idx=g_idx,
bits=self.bits,
groupsize=self.groupsize,
use_awq_kernel=self.quantize == "awq",
use_exllama=False,
)
def get_multi_weights_col(self, weights: Weights, prefixes: List[str], dim: int):
try:
qweight = torch.cat(
[weights.get_sharded(f"{p}.qweight", dim=1) for p in prefixes], dim=1
)
except RuntimeError:
raise RuntimeError(
f"Cannot load `{self.quantize}` weight, make sure the model is already quantized"
)
scales = torch.cat(
[weights.get_sharded(f"{p}.scales", dim=1) for p in prefixes], dim=1
)
self._get_gptq_params(weights)
qzeros = torch.cat(
[weights.get_sharded(f"{p}.qzeros", dim=1) for p in prefixes], dim=1
)
from text_generation_server.layers.gptq import HAS_EXLLAMA
use_exllama = (
self.bits == 4
and HAS_EXLLAMA
and self.quantize == "gptq"
and not self.desc_act
)
if self.quantize == "gptq" and self.quant_method == "gptq":
w = [weights.get_tensor(f"{p}.g_idx") for p in prefixes]
for w2 in w[1:]:
torch.testing.assert_close(w2, w[0])
g_idx = w[0]
elif self.quantize == "gptq" and self.quant_method == "awq":
log_once(
logger.info, "Converting AWQ model to Exllama/GPTQ packing format."
)
from text_generation_server.layers.awq.conversion_utils import (
fast_awq_to_gptq,
)
qweight, qzeros = fast_awq_to_gptq(qweight, qzeros)
if use_exllama:
g_idx = None
else:
g_idx = (
torch.arange(
qweight.shape[0] * (32 // self.bits),
device=qweight.device,
)
// self.groupsize
).to(dtype=torch.int32)
else:
g_idx = None
return GPTQWeight(
qweight=qweight,
qzeros=qzeros,
scales=scales,
g_idx=g_idx,
bits=self.bits,
groupsize=self.groupsize,
use_awq_kernel=self.quantize == "awq",
use_exllama=use_exllama,
)
def get_weights_row(self, weights: Weights, prefix: str):
self._get_gptq_params(weights)
use_exllama = True
desc_act = self.desc_act
if self.bits != 4:
use_exllama = False
if self.desc_act:
log_once(logger.warning, "Disabling exllama because desc_act=True")
use_exllama = False
try:
qweight = weights.get_sharded(f"{prefix}.qweight", dim=0)
except RuntimeError:
raise RuntimeError(
"Cannot load `gptq` weight, make sure the model is already quantized, or quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`"
)
if self.quantize == "gptq" and self.quant_method == "gptq":
g_idx = weights.get_sharded(f"{prefix}.g_idx", dim=0)
else:
g_idx = None
if weights.process_group.size() > 1:
if g_idx is not None:
if (
not torch.equal(
# Remove g_idx[0] to adapt the check with TP>1.
(g_idx - g_idx[0]).cpu(),
torch.tensor(
[i // self.groupsize for i in range(g_idx.shape[0])],
dtype=torch.int32,
),
)
and not (g_idx == 0).all()
):
# Exllama implementation does not support row tensor parallelism with act-order, as
# it would require to reorder input activations that are split unto several GPUs
use_exllama = False
desc_act = True
from text_generation_server.layers.gptq import (
CAN_EXLLAMA,
HAS_EXLLAMA,
GPTQWeight,
)
if use_exllama:
if not HAS_EXLLAMA:
if CAN_EXLLAMA:
log_once(
logger.warning,
"Exllama GPTQ cuda kernels (which are faster) could have been used, but are not currently installed, try using BUILD_EXTENSIONS=True",
)
use_exllama = False
else:
log_once(logger.info, f"Using exllama kernels v{HAS_EXLLAMA}")
if not desc_act and self.groupsize != -1:
qzeros = weights.get_sharded(f"{prefix}.qzeros", dim=0)
scales = weights.get_sharded(f"{prefix}.scales", dim=0)
if g_idx is not None:
# qzeros, scales sharded, and g_idx must be adjusted accordingly
g_idx = g_idx - g_idx[0]
else:
qzeros = weights.get_tensor(f"{prefix}.qzeros")
scales = weights.get_tensor(f"{prefix}.scales")
if self.quantize == "gptq" and self.quant_method == "awq":
log_once(
logger.info, "Converting AWQ model to Exllama/GPTQ packing format."
)
from text_generation_server.layers.awq.conversion_utils import (
fast_awq_to_gptq,
)
qweight, qzeros = fast_awq_to_gptq(qweight, qzeros)
if use_exllama:
g_idx = None
else:
g_idx = (
torch.arange(
qweight.shape[0] * (32 // self.bits),
device=qweight.device,
)
// self.groupsize
).to(dtype=torch.int32)
return GPTQWeight(
qweight=qweight,
qzeros=qzeros,
scales=scales,
g_idx=g_idx,
bits=self.bits,
groupsize=self.groupsize,
use_awq_kernel=self.quantize == "awq",
use_exllama=use_exllama,
)
def _get_gptq_params(self, weights: Weights):
if weights.has_tensor("gptq_bits") and weights.has_tensor("gptq_groupsize"):
self.bits = weights.get_tensor("gptq_bits").item()
self.groupsize = weights.get_tensor("gptq_groupsize").item()
self.desc_act = False
# `server quantize` used asymmetric quantization unconditionally
# before the `gptq_sym` setting tensor was added.
self.sym = (
weights.get_tensor("gptq_sym").item()
if weights.has_tensor("gptq_sym")
else False
)
self.quant_method = "gptq"
# Needs to be at the end because circular import.
try:
major, _minor = torch.cuda.get_device_capability()
except Exception:
major = 1
HAS_EXLLAMA = False
CAN_EXLLAMA = major >= 8 or SYSTEM == "rocm"
V2 = os.getenv("EXLLAMA_VERSION", "2") == "2"
if os.getenv("DISABLE_EXLLAMA") == "True":
HAS_EXLLAMA = False
elif CAN_EXLLAMA:
try:
if V2:
from text_generation_server.layers.gptq.exllamav2 import (
QuantLinear as ExllamaQuantLinear, # noqa: F401
create_exllama_buffers, # noqa: F401
set_device, # noqa: F401
)
HAS_EXLLAMA = "2"
else:
from text_generation_server.layers.gptq.exllama import (
Ex4bitLinear as ExllamaQuantLinear, # noqa: F401
create_exllama_buffers, # noqa: F401
set_device, # noqa: F401
)
HAS_EXLLAMA = "1"
except ImportError:
pass
| text-generation-inference/server/text_generation_server/layers/gptq/__init__.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/gptq/__init__.py",
"repo_id": "text-generation-inference",
"token_count": 8349
} |
import torch
from torch import nn
from typing import Tuple, Optional
from text_generation_server.utils.speculate import get_speculate
from text_generation_server.layers.linear import FastLinear
from text_generation_server.layers.tensor_parallel import (
TensorParallelHead,
TensorParallelColumnLinear,
)
class ResBlock(torch.nn.Module):
def __init__(self, config, prefix, weights):
super().__init__()
self.linear = FastLinear.load(
config, prefix=f"{prefix}.linear", weights=weights, bias=True
)
self.act = torch.nn.SiLU()
def forward(self, x):
return x + self.act(self.linear(x))
class MedusaModel(torch.nn.Module):
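    """Stack of Medusa speculative-decoding heads; one head per speculated token position (see get_speculate())."""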
def __init__(self, config, medusa_config, weights):
super().__init__()
self.heads = torch.nn.ModuleList(
[
MedusaHead(config, medusa_config, prefix=f"{i}", weights=weights)
for i in range(get_speculate())
]
)
def forward(self, x):
if not self.heads:
return None
speculative_logits = torch.stack([head(x) for head in self.heads], dim=1)
return speculative_logits
class MedusaHead(torch.nn.Module):
def __init__(self, config, medusa_config, prefix, weights):
super().__init__()
self.blocks = torch.nn.ModuleList(
[
ResBlock(config, prefix=f"{prefix}.{i}", weights=weights)
for i in range(medusa_config["medusa_num_layers"])
]
)
n = len(self.blocks)
self.out = FastLinear.load(
config, prefix=f"{prefix}.{n}", weights=weights, bias=False
)
def forward(self, x):
for block in self.blocks:
x = block(x)
x = self.out(x)
return x
class MedusaHeadV1(nn.Module):
def __init__(self, lm_head, medusa):
super().__init__()
self.lm_head = lm_head
self.medusa = medusa
@staticmethod
def load(config, prefix: str, weights):
from pathlib import Path
from safetensors import safe_open
import json
speculator = config.speculator
path = speculator["path"]
medusa_config = str(Path(path) / "config.json")
for fname in speculator["model_paths"]:
filename = str(Path(path) / fname)
with open(medusa_config, "r") as f:
medusa_config = json.load(f)
routing = weights.routing
with safe_open(filename, framework="pytorch") as f:
for k in f.keys():
if k in routing and routing[k] != filename:
raise RuntimeError(
f"Key {k} was found in multiple files: {filename} and {routing[k]}"
)
routing[k] = filename
medusa = MedusaModel(config, medusa_config, weights)
lm_head = TensorParallelHead.load(config, prefix, weights)
return MedusaHeadV1(lm_head, medusa)
def forward(
self, input: torch.Tensor
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
logits = self.lm_head(input)
# If we have too many tokens, we skip speculative logits
if input.shape[0] > 128:
return logits, None
speculative_logits = self.medusa(input)
return logits, speculative_logits
class MedusaHeadV2(nn.Module):
def __init__(self, config, prefix, weights):
super().__init__()
from pathlib import Path
from safetensors import safe_open
import json
speculator_path = config.speculator["path"]
medusa_config = str(Path(speculator_path) / "config.json")
filename = str(Path(speculator_path) / "medusa_lm_head.safetensors")
with open(medusa_config, "r") as f:
medusa_config = json.load(f)
routing = weights.routing
with safe_open(filename, framework="pytorch") as f:
for k in f.keys():
if k in routing and routing[k] != filename:
raise RuntimeError(
f"Key {k} was found in multiple files: {filename} and {routing[k]}"
)
routing[k] = filename
self.n_medusa_heads = get_speculate()
assert medusa_config["medusa_num_layers"] == 1
self.linear = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{i}.0.linear" for i in range(self.n_medusa_heads)],
dim=0,
weights=weights,
bias=True,
)
self.process_group = weights.process_group
self.world_size = self.process_group.size()
self.rank = self.process_group.rank()
self.act = torch.nn.SiLU()
self.lm_head = TensorParallelHead.load(config, prefix, weights)
def forward(self, x):
# If we have too many tokens, we skip speculative logits
if x.shape[0] > 128:
logits = self.lm_head(x)
return logits, None
size = x.shape[-1]
block_size = (size + self.world_size - 1) // self.world_size
start = self.rank * block_size
stop = (self.rank + 1) * block_size
x_block = x[:, start:stop]
# Compute all medusa heads at the same time, then reshape and move the n_medusa_heads dim to dim 1
medusa_res = self.act(self.linear(x)).reshape(
*x_block.shape[:-1], self.n_medusa_heads, x_block.shape[-1]
)
# Apply all residual medusa heads
output = x[:, start:stop].unsqueeze(-2) + medusa_res
# Gather medusa heads
world_output = [
torch.empty_like(output) for _ in range(self.process_group.size())
]
torch.distributed.all_gather(world_output, output, group=self.process_group)
world_output = torch.cat(world_output, dim=-1)
# Stack x and medusa residual x
stacked_x = torch.cat([x.unsqueeze(-2), world_output], dim=-2)
# Compute lm head on x + medusa residual x
logits = self.lm_head(stacked_x)
# Finally, split logits from speculative logits
logits, speculative_logits = torch.split(
logits, [1, self.n_medusa_heads], dim=-2
)
# Squeeze added dimension
logits = logits.squeeze(-2)
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/layers/medusa.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/medusa.py",
"repo_id": "text-generation-inference",
"token_count": 2975
} |
# coding=utf-8
# Copyright 2024 Cohere team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed
from torch import nn
from transformers.activations import ACT2FN
from typing import Optional, List, Tuple
from text_generation_server.layers.attention import (
paged_attention,
attention,
Seqlen,
)
from text_generation_server.layers.attention.kv_cache import get_kv_scales
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.layers import (
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
SpeculativeHead,
get_linear,
)
from text_generation_server.layers.layernorm import (
FastLayerNorm,
)
from text_generation_server.layers.rotary import (
PositionRotaryEmbedding,
)
from text_generation_server.utils.weights import UnquantizedWeight
if SYSTEM == "cuda":
import dropout_layer_norm
else:
dropout_layer_norm = None
class CohereRotary(PositionRotaryEmbedding):
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
cos: torch.Tensor,
sin: torch.Tensor,
):
        # Such control flow may add some overhead.
if SYSTEM == "cuda":
import rotary_emb
q1 = query[..., ::2]
q2 = query[..., 1::2]
rotary_emb.apply_rotary(q1, q2, cos, sin, q1, q2, False)
k1 = key[..., ::2]
k2 = key[..., 1::2]
rotary_emb.apply_rotary(k1, k2, cos, sin, k1, k2, False)
elif SYSTEM == "rocm":
import vllm._custom_ops as ops
            # NOTE: On ROCm systems we use a RoPE implementation adapted from vLLM, which launches a single kernel for both query and key, unlike the flash-attn implementation used on NVIDIA systems.
            # When compiling the flash-attn rotary kernel on ROCm, hipcc appears unable to unroll loops, resulting in even slower inference than eager mode: https://github.com/pytorch/pytorch/issues/113773
head_size = query.shape[-1]
# Inplace operation, updating query and key.
ops.rotary_embedding(query, key, head_size, cos, sin, False)
elif SYSTEM == "ipex":
import intel_extension_for_pytorch as ipex
ipex.llm.functional.rotary_embedding(
query, key, sin, cos, query.size(-1), False
)
else:
raise ValueError(
"Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction."
)
class CohereLayerNorm(nn.Module):
def __init__(self, prefix, weights, eps):
super().__init__()
weight = weights.get_sharded(f"{prefix}.weight", dim=0)
self.weight = nn.Parameter(weight)
        # Ones used as a stand-in weight for the fused dropout_layer_norm kernel; the real per-head weight is applied afterwards
self.ones = weight.new_ones(weight.shape[1])
self.eps = eps
def forward(self, hidden_states):
if hidden_states.shape[-1] > 8192 or SYSTEM != "cuda":
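            # Fallback path (large hidden size or non-CUDA): plain PyTorch LayerNorm
            # without bias, with one weight vector applied per head.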
hidden_states = hidden_states.reshape(
-1, self.weight.shape[0], self.weight.shape[1]
)
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
mean = hidden_states.mean(-1, keepdim=True)
hidden_states_minus_mean = hidden_states - mean
variance = hidden_states_minus_mean.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states_minus_mean * torch.rsqrt(variance + self.eps)
hidden_states = self.weight.to(torch.float32) * hidden_states
hidden_states = hidden_states.view(-1, self.weight.shape[1])
return hidden_states.to(input_dtype)
(
hidden_states,
*rest,
) = dropout_layer_norm.dropout_add_ln_fwd(
hidden_states,
None,
self.ones,
None,
None,
None,
None,
None,
0.0,
self.eps,
1.0,
0,
None,
False,
False,
)
# Required to apply one weight matrix per head
hidden_states = hidden_states.view(
-1, self.weight.shape[0], self.weight.shape[1]
)
hidden_states = self.weight * hidden_states
hidden_states = hidden_states.view(-1, self.weight.shape[1])
return hidden_states
def load_attention(config, prefix, weights):
if config.num_attention_heads != config.num_key_value_heads:
return _load_gqa(config, prefix, weights)
else:
return TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=config.attention_bias,
)
def _load_gqa(config, prefix: str, weights):
assert config.hidden_size % config.num_attention_heads == 0
assert config.num_attention_heads % weights.process_group.size() == 0
weight = weights.get_multi_weights_col(
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
)
if isinstance(weight, UnquantizedWeight):
weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device)
head_size = config.hidden_size // config.num_attention_heads
num_heads = config.num_attention_heads // weights.process_group.size()
num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
assert list(weight.weight.shape) == [
(num_heads + 2 * num_key_value_heads) * head_size,
config.hidden_size,
], f"{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
if config.attention_bias:
w = [
weights.get_sharded(f"{p}.bias", dim=0)
for p in [f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"]
]
bias = torch.cat(w, dim=0).to(dtype=weights.dtype).to(device=weights.device)
else:
bias = None
return TensorParallelColumnLinear(get_linear(weight, bias=bias))
class FlashCohereAttention(torch.nn.Module):
def __init__(
self,
prefix: str,
config,
weights,
):
super().__init__()
self.num_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.head_size = self.hidden_size // self.num_heads
self.rotary_emb = CohereRotary.static(
config=config,
dim=self.head_size,
base=config.rope_theta,
device=weights.device,
)
self.softmax_scale = self.head_size**-0.5
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.query_key_value = load_attention(config, prefix, weights)
self.kv_scales = get_kv_scales(weights, f"{prefix}")
self.use_qk_norm = config.use_qk_norm
if self.use_qk_norm:
self.q_norm = CohereLayerNorm(
prefix=f"{prefix}.q_norm",
weights=weights,
eps=config.layer_norm_eps,
)
self.k_norm = CohereLayerNorm(
prefix=f"{prefix}.k_norm",
weights=weights,
eps=config.layer_norm_eps,
)
else:
self.q_norm = None
self.k_norm = None
self.o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=config.attention_bias,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
qkv = self.query_key_value(hidden_states)
query, key, value = qkv.split(
[
self.head_size * self.num_heads,
self.head_size * self.num_key_value_heads,
self.head_size * self.num_key_value_heads,
],
dim=1,
)
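        # Cohere optionally applies per-head LayerNorm to queries and keys
        # (QK-norm) before the rotary embedding.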
if self.use_qk_norm:
query = query.reshape(-1, self.head_size)
key = key.reshape(-1, self.head_size)
query = self.q_norm(query.contiguous())
key = self.k_norm(key.contiguous())
query = query.view(-1, self.num_heads, self.head_size)
key = key.view(-1, self.num_key_value_heads, self.head_size)
value = value.view(-1, self.num_key_value_heads, self.head_size)
self.rotary_emb(query, key, cos, sin)
kv_cache.store(
key=key,
value=value,
slots=slots,
kv_scales=self.kv_scales,
)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query=query,
key=key,
value=value,
kv_cache=kv_cache,
kv_scales=self.kv_scales,
seqlen=seqlen,
block_tables=block_tables,
softmax_scale=self.softmax_scale,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache,
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
kv_scales=self.kv_scales,
)
return self.o_proj(
attn_output.view(-1, self.num_heads * self.head_size), reduce=False
)
class CohereMLP(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
act = config.hidden_act
self.act = (
ACT2FN[act]
if "gelu" not in act
else lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
)
# Fuse gate and up proj
self.gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
weights=weights,
dim=0,
bias=False,
)
self.down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=False,
)
self.intermediate_size = (
config.intermediate_size // weights.process_group.size()
)
def forward(self, hidden_states):
gate_up_states = self.gate_up_proj(hidden_states)
gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size)
return self.down_proj(
self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], reduce=False
)
class FlashCohereLayer(nn.Module):
def __init__(self, prefix: str, layer_id, config, weights):
super().__init__()
prefix = f"{prefix}.layers.{layer_id}"
self.self_attn = FlashCohereAttention(
prefix=f"{prefix}.self_attn", config=config, weights=weights
)
self.mlp = CohereMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
self.input_layernorm = FastLayerNorm.load_no_bias(
prefix=f"{prefix}.input_layernorm",
weights=weights,
eps=config.layer_norm_eps,
)
self.process_group = weights.process_group
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
normed_hidden_states, res = self.input_layernorm(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
normed_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
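        # Parallel residual: attention and MLP both read the same normed input,
        # their outputs are summed, and a single all-reduce is performed per layer.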
mlp_output = self.mlp(normed_hidden_states)
output = attn_output + mlp_output
if self.process_group.size() > 1:
torch.distributed.all_reduce(output, group=self.process_group)
return output, res
class FlashCohereModel(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
process_group = weights.process_group
self.tp_rank = process_group.rank()
self.tp_world_size = process_group.size()
self.embed_tokens = TensorParallelEmbedding(
prefix=f"{prefix}.embed_tokens", weights=weights
)
self.layers = nn.ModuleList(
[
FlashCohereLayer(
prefix,
layer_id,
config,
weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.norm = FastLayerNorm.load_no_bias(
prefix=f"{prefix}.norm", weights=weights, eps=config.layer_norm_eps
)
self.gradient_checkpointing = False
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: torch.Tensor,
max_s: int,
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
        # Get rotary cos and sin for this forward pass once,
        # to avoid indexing into the rotary cache in each layer
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids, max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashCohereForCausalLM(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
if not prefix:
prefix = "model"
else:
prefix = f"{prefix}.model"
self.model = FlashCohereModel(prefix, config, weights)
try:
self.lm_head = SpeculativeHead.load(
config,
prefix="lm_head",
weights=weights,
)
except RuntimeError:
self.lm_head = SpeculativeHead.load(
config,
prefix=f"{prefix}.embed_tokens",
weights=weights,
)
self.logit_scale = config.logit_scale
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
hidden_states = self.model(
input_ids,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits, speculative_logits = self.lm_head(hidden_states)
logits *= self.logit_scale
if speculative_logits is not None:
speculative_logits *= self.logit_scale
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 8924
} |
from typing import List, Optional, Tuple
import torch
import torch.distributed
from torch import nn
from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_utils import PreTrainedModel
from text_generation_server.layers import (
SpeculativeHead,
TensorParallelColumnLinear,
TensorParallelEmbedding,
TensorParallelRowLinear,
get_linear,
)
from text_generation_server.layers.attention.kv_cache import get_kv_scales
from text_generation_server.layers.layernorm import FastLayerNorm
from text_generation_server.layers.rotary import PositionRotaryEmbedding
from text_generation_server.layers.attention import (
attention,
paged_attention,
Seqlen,
)
def load_row(config, prefix: str, weights, bias: bool):
weight = weights.get_weights_row(prefix)
if bias and weights.process_group.rank() == 0:
        # The bias is only loaded on the first rank
bias = weights.get_tensor(f"{prefix}.bias")
else:
bias = None
linear = get_linear(weight, bias)
if config.parallel_attn:
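        # With parallel attention the all-reduce is deferred: attention and MLP
        # outputs are summed in the decoder layer and reduced once.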
return linear
else:
return TensorParallelRowLinear(linear, process_group=weights.process_group)
class RWConfig(PretrainedConfig):
attribute_map = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
"num_key_value_heads": "n_head_kv",
}
def __init__(
self,
model_type="RefinedWeb",
vocab_size=250880,
hidden_size=64,
num_hidden_layers=None,
num_attention_heads=None,
num_ln_in_prallel_attention=None,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
use_cache=True,
bos_token_id=1,
eos_token_id=2,
hidden_dropout=0.0,
attention_dropout=0.0,
num_kv_heads=None,
multi_query=False,
alibi=False,
new_decoder_architecture=None,
bias=False,
parallel_attn=False,
rope_theta=10_000.0,
**kwargs,
):
if alibi:
raise NotImplementedError(
"alibi is not supported by this version of the model"
)
self.model_type = model_type
self.alibi = False
self.rotary = True
self.rope_theta = rope_theta
self.max_position_embeddings = 2048
self.vocab_size = vocab_size
# Backward compatibility with n_embed kwarg
n_embed = kwargs.pop("n_embed", None)
self.hidden_size = hidden_size if n_embed is None else n_embed
self.n_layer = (
num_hidden_layers
if num_hidden_layers is not None
else kwargs.pop("n_layer", 2)
)
self.n_head = (
num_attention_heads
if num_attention_heads is not None
else kwargs.pop("n_head", 8)
)
self.layer_norm_epsilon = layer_norm_epsilon
self.num_ln_in_parallel_attn = num_ln_in_prallel_attention
self.initializer_range = initializer_range
self.use_cache = use_cache
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.bias = bias
self.parallel_attn = parallel_attn
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
if num_kv_heads is not None:
self.n_head_kv = num_kv_heads
else:
old_n_head_kv = kwargs.pop("n_head_kv", None)
if old_n_head_kv is not None:
self.n_head_kv = old_n_head_kv
else:
self.n_head_kv = 1 if multi_query else self.n_head
if new_decoder_architecture is not None:
self.new_decoder_architecture = new_decoder_architecture
elif model_type == "RefinedWeb":
self.new_decoder_architecture = True
else:
self.new_decoder_architecture = False
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class FlashRWAttention(torch.nn.Module):
def __init__(
self,
config,
prefix: str,
weights,
):
super().__init__()
self.num_heads = config.n_head
self.num_heads_kv = config.n_head_kv
self.hidden_size = config.hidden_size
self.head_size = self.hidden_size // self.num_heads
self.rope_theta = config.rope_theta
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.head_size,
base=self.rope_theta,
device=weights.device,
)
self.softmax_scale = self.head_size ** (-0.5)
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.query_key_value = TensorParallelColumnLinear.load(
config,
prefix=f"{prefix}.query_key_value",
weights=weights,
bias=config.bias,
)
self.kv_scales = get_kv_scales(weights, f"{prefix}")
self.dense = load_row(
config, prefix=f"{prefix}.dense", weights=weights, bias=config.bias
)
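        # Multi-query attention: with a single KV head every query head maps to
        # KV head 0; otherwise each query head gets its own KV head.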
if self.num_heads_kv == 1:
self.kv_head_mapping = torch.zeros(
self.num_heads, dtype=torch.int32, device=weights.device
)
else:
self.kv_head_mapping = torch.arange(
0, self.num_heads, dtype=torch.int32, device=weights.device
)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
qkv = self.query_key_value(hidden_states)
# Split query from key_value
query, kv = qkv.split(
[self.head_size * self.num_heads, 2 * self.head_size * self.num_heads_kv],
dim=1,
)
# Prepare query and key_value for indexing
query = query.view(-1, self.num_heads, self.head_size)
kv = kv.view(-1, 2, self.num_heads_kv, self.head_size)
# Inplace rotary
self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin)
kv_cache.store(
key=kv[:, 0],
value=kv[:, 1],
slots=slots,
kv_scales=self.kv_scales,
)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query=query,
key=kv[:, 0],
value=kv[:, 1],
kv_cache=kv_cache,
kv_scales=self.kv_scales,
seqlen=seqlen,
block_tables=block_tables,
softmax_scale=self.softmax_scale,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache,
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
kv_scales=self.kv_scales,
)
return self.dense(attn_output.view(-1, self.num_heads * self.head_size))
class FlashRWLargeAttention(torch.nn.Module):
def __init__(
self,
config,
prefix: str,
weights,
):
super().__init__()
hidden_size = config.hidden_size
num_heads = config.n_head
# num_heads_kv = config.n_head_kv
num_groups = config.n_head_kv
self.hidden_size = hidden_size
self.head_size = hidden_size // num_heads
self.num_groups = num_groups
self.rope_theta = config.rope_theta
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.head_size,
base=self.rope_theta,
device=weights.device,
)
self.softmax_scale = self.head_size ** (-0.5)
# self.num_groups = num_heads // (num_heads_kv * 2)
self.num_heads = num_heads // self.num_groups
# self.num_heads_kv = num_heads_kv // self.num_groups
process_group = weights.process_group
if process_group.size() > self.num_groups:
raise NotImplementedError(
"Tensor Parallelism is not implemented for world_size > n groups"
)
if self.num_groups % process_group.size() != 0:
raise NotImplementedError(
f"Tensor Parallelism is not implemented for {self.num_groups} not divisible by {process_group.size()}"
)
self.num_groups = self.num_groups // process_group.size()
self.query_key_value = TensorParallelColumnLinear.load(
config,
prefix=f"{prefix}.query_key_value",
weights=weights,
bias=config.bias,
)
self.kv_scales = get_kv_scales(weights, f"{prefix}")
self.dense = load_row(
config, prefix=f"{prefix}.dense", weights=weights, bias=config.bias
)
self.kv_head_mapping = torch.arange(
0, self.num_groups, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_heads)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
qkv = self.query_key_value(hidden_states)
qkv = qkv.view(-1, self.num_groups, self.num_heads + 2, self.head_size)
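        # Grouped layout: each group packs its query heads plus one shared key
        # head and one shared value head along dim 2.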
# Split on group dimension
query, kv = qkv.split(
[self.num_heads, 2],
dim=2,
)
# Merge groups and heads
query = query.reshape(-1, self.num_groups * self.num_heads, self.head_size)
# Inplace rotary
self.rotary_emb(query, torch.select(kv, dim=2, index=0), cos, sin)
kv_cache.store(
key=kv[:, :, 0].contiguous(),
value=kv[:, :, 1].contiguous(),
slots=slots,
kv_scales=self.kv_scales,
)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query=query,
key=kv[:, :, 0],
value=kv[:, :, 1],
kv_cache=kv_cache,
kv_scales=self.kv_scales,
seqlen=seqlen,
block_tables=block_tables,
softmax_scale=self.softmax_scale,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache,
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
kv_scales=self.kv_scales,
)
return self.dense(
attn_output.view(-1, self.num_groups * self.num_heads * self.head_size)
)
class FlashMLP(nn.Module):
def __init__(self, config, prefix: str, weights):
super().__init__()
self.act = torch.nn.functional.gelu
self.dense_h_to_4h = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.dense_h_to_4h", weights=weights, bias=config.bias
)
self.dense_4h_to_h = load_row(
config, prefix=f"{prefix}.dense_4h_to_h", weights=weights, bias=config.bias
)
def forward(self, hidden_states):
hidden_states = self.dense_h_to_4h(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dense_4h_to_h(hidden_states)
return hidden_states
class FlashRWLayer(nn.Module):
def __init__(
self,
layer_id,
prefix: str,
config,
weights,
):
super().__init__()
parallel_attn = config.parallel_attn
self.parallel_attn = parallel_attn
prefix = f"{prefix}.h.{layer_id}"
# NOTE: Falcon 180B uses the ln_attn prefix
ln_prefix = "input_layernorm"
if config.num_hidden_layers == 80:
ln_prefix = "ln_attn"
self.input_layernorm = FastLayerNorm.load(
prefix=f"{prefix}.{ln_prefix}",
weights=weights,
eps=config.layer_norm_epsilon,
)
self.self_attention = FlashRWAttention(
config,
prefix=f"{prefix}.self_attention",
weights=weights,
)
self.post_attention_layernorm = (
FastLayerNorm.load(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.layer_norm_epsilon,
)
if not parallel_attn
else None
)
self.mlp = FlashMLP(
config,
prefix=f"{prefix}.mlp",
weights=weights,
)
self.process_group = weights.process_group
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
if self.parallel_attn:
ln_hidden_states, residual = self.input_layernorm(hidden_states, residual)
attn_output = self.self_attention(
ln_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
mlp_output = self.mlp(ln_hidden_states)
intermediate = mlp_output + attn_output
if self.process_group.size() > 1:
torch.distributed.all_reduce(intermediate, group=self.process_group)
return intermediate, residual
else:
hidden_states, residual = self.input_layernorm(hidden_states, residual)
hidden_states = self.self_attention(
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
if self.post_attention_layernorm is not None:
hidden_states, residual = self.post_attention_layernorm(
hidden_states, residual
)
mlp_output = self.mlp(hidden_states)
return mlp_output, residual
class FlashRWLayerNorm(nn.Module):
def __init__(self, config, prefix: str, weights):
super().__init__()
        # Falcon2 includes the number of layer norms in the config;
        # if none is provided, we default to 1
self.num_ln = getattr(config, "num_ln_in_parallel_attn", 1)
# Falcon 180B uses the ln_attn prefix and has 2 layer norms
if config.num_hidden_layers == 80:
self.num_ln = 2
if self.num_ln == 1:
self.input_ln = FastLayerNorm.load(
prefix=f"{prefix}.input_layernorm",
weights=weights,
eps=config.layer_norm_epsilon,
)
elif self.num_ln == 2:
self.ln_attn = FastLayerNorm.load(
prefix=f"{prefix}.ln_attn",
weights=weights,
eps=config.layer_norm_epsilon,
)
self.ln_mlp = FastLayerNorm.load(
prefix=f"{prefix}.ln_mlp",
weights=weights,
eps=config.layer_norm_epsilon,
)
else:
raise ValueError("Number of layer norms can either be 1 or 2.")
def forward(
self,
hidden_states,
residual,
):
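        # Returns (attention input, MLP input, residual). With a single layer norm
        # the same normed tensor feeds both the attention and MLP branches.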
if self.num_ln == 1:
ln_hidden_states, residual = self.input_ln(hidden_states, residual)
return ln_hidden_states, ln_hidden_states, residual
elif self.num_ln == 2:
ln_attn, residual = self.ln_attn(hidden_states, residual)
ln_mlp, _ = self.ln_mlp(residual)
return ln_attn, ln_mlp, residual
class FlashRWLargeLayer(nn.Module):
def __init__(self, layer_id, prefix: str, config, weights):
super().__init__()
prefix = f"{prefix}.h.{layer_id}"
self.ln_layer = FlashRWLayerNorm(config, prefix, weights)
self.self_attention = FlashRWLargeAttention(
config,
prefix=f"{prefix}.self_attention",
weights=weights,
)
        assert config.parallel_attn, "This version doesn't support non-parallel attention (parallel_attn=False)"
self.mlp = FlashMLP(config, prefix=f"{prefix}.mlp", weights=weights)
self.process_group = weights.process_group
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
# Layer norm.
ln_attn, ln_mlp, residual = self.ln_layer(hidden_states, residual)
# Self attention.
attn_output = self.self_attention(
ln_attn,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
# MLP.
mlp_output = self.mlp(ln_mlp)
intermediate = attn_output + mlp_output
if self.process_group.size() > 1:
torch.distributed.all_reduce(intermediate, group=self.process_group)
return intermediate, residual
class FlashRWPreTrainedModel(PreTrainedModel):
config_class = RWConfig
class FlashRWModel(FlashRWPreTrainedModel):
def __init__(self, prefix: str, config, weights):
super().__init__(config)
self.config = config
self.word_embeddings = TensorParallelEmbedding(
prefix=f"{prefix}.word_embeddings", weights=weights
)
if config.new_decoder_architecture:
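            # "RefinedWeb" new decoder architecture: grouped attention layers with
            # a shared key/value pair per group and two layer norms per block.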
self.h = nn.ModuleList(
[
FlashRWLargeLayer(layer_id, prefix, config, weights)
for layer_id in range(config.num_hidden_layers)
]
)
self.cache_size = self.h[0].self_attention.num_groups
else:
self.h = nn.ModuleList(
[
FlashRWLayer(layer_id, prefix, config, weights)
for layer_id in range(config.num_hidden_layers)
]
)
self.cache_size = self.h[0].self_attention.num_heads_kv
self.ln_f = FastLayerNorm.load(
prefix=f"{prefix}.ln_f",
weights=weights,
eps=config.layer_norm_epsilon,
)
self.head_size = self.h[0].self_attention.head_size
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
) -> torch.Tensor:
hidden_states = self.word_embeddings(input_ids)
# Get rotary cos and sin for this forward
# Avoid to index in each layer
cos, sin = self.h[0].self_attention.rotary_emb.get_cos_sin(
position_ids, max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.h):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
)
hidden_states, _ = self.ln_f(hidden_states, residual)
return hidden_states
class FlashRWForCausalLM(FlashRWPreTrainedModel):
def __init__(self, prefix: str, config, weights):
super().__init__(config)
if not prefix:
prefix = "transformer"
else:
prefix = f"{prefix}.transformer"
self.transformer = FlashRWModel(prefix, config, weights)
self.lm_head = SpeculativeHead.load(config, prefix="lm_head", weights=weights)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> torch.Tensor:
hidden_states = self.transformer(
input_ids,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits = self.lm_head(hidden_states)
return logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 11278
} |
# coding=utf-8
# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OPT model."""
import random
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers import OPTConfig
from text_generation_server.layers import (
FastLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
TensorParallelRowLinear,
SpeculativeHead,
)
EPS = 1e-5
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size,
dtype: torch.dtype,
device: torch.device,
past_key_values_length: int = 0,
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full(
(tgt_len, tgt_len),
torch.tensor(torch.finfo(dtype).min, device=device),
device=device,
)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
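    # Positions at or below the diagonal become 0 (attended); positions above the
    # diagonal keep the large negative fill (masked out).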
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat(
[
torch.zeros(
tgt_len, past_key_values_length, dtype=dtype, device=device
),
mask,
],
dim=-1,
)
return mask[None, None, :, :].expand(
bsz, 1, tgt_len, tgt_len + past_key_values_length
)
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(
inverted_mask.to(torch.bool), torch.finfo(dtype).min
)
class OPTLearnedPositionalEmbedding(nn.Module):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, prefix: str, weights):
super().__init__()
self.offset = 2
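        # OPT reserves the first two embedding slots, so position ids are shifted
        # by 2 before the lookup.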
self.weight = nn.Parameter(
weights.get_tensor(
f"{prefix if prefix else ''}decoder.embed_positions.weight"
)
)
def forward(
self, attention_mask: torch.LongTensor, past_key_values_length: int = 0
):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
attention_mask = attention_mask.long()
# create positions depending on attention_mask
positions = (
torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
).long() - 1
# cut positions if `past_key_values_length` is > 0
positions = positions[:, past_key_values_length:]
return torch.nn.functional.embedding(positions + self.offset, self.weight)
class OPTAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
config,
prefix,
weights,
is_decoder: bool = False,
bias: bool = True,
process_group=None,
):
super().__init__()
hidden_size = config.hidden_size
num_heads = config.num_attention_heads
self.hidden_size = hidden_size
self.num_heads = num_heads
self.dropout = config.dropout
self.head_dim = hidden_size // num_heads
if (self.head_dim * num_heads) != self.hidden_size:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
process_group = weights.process_group
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // process_group.size()
self.hidden_size = self.hidden_size // process_group.size()
self.q_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.q_proj", weights=weights, bias=bias
)
self.k_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.k_proj", weights=weights, bias=bias
)
self.v_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.v_proj", weights=weights, bias=bias
)
self.out_proj = TensorParallelRowLinear.load(
config, prefix=f"{prefix}.out_proj", weights=weights, bias=bias
)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return (
tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
.contiguous()
)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = (
attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attention_mask
)
attn_weights = torch.max(
attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
# upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437
if attn_weights.dtype == torch.float16:
attn_weights = nn.functional.softmax(
attn_weights, dim=-1, dtype=torch.float32
).to(torch.float16)
else:
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights_reshaped.view(
bsz * self.num_heads, tgt_len, src_len
)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(
attn_weights, p=self.dropout, training=self.training
)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
        # Use the `hidden_size` stored on the class (already divided by the TP world size) rather than `hidden_states`,
        # because `attn_output` can be partitioned across GPUs when using tensor parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.hidden_size)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
class OPTDecoderLayer(nn.Module):
def __init__(self, layer_id: int, prefix: str, config: OPTConfig, weights):
super().__init__()
self.process_group = weights.process_group
self.hidden_size = config.hidden_size
self.self_attn = OPTAttention(
config,
prefix=f"{prefix}.self_attn",
weights=weights,
is_decoder=True,
bias=config.enable_bias,
)
self.do_layer_norm_before = config.do_layer_norm_before
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.self_attn_layer_norm = nn.LayerNorm.load(
prefix=f"{prefix}.self_attn_layer_norm", weights=weights, eps=EPS
)
self.fc1 = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.fc1", weights=weights, bias=config.enable_bias
)
self.fc2 = TensorParallelRowLinear.load(
config, prefix=f"{prefix}.fc2", weights=weights, bias=config.enable_bias
)
self.final_layer_norm = nn.LayerNorm.load(
prefix=f"{prefix}.final_layer_norm", weights=weights, eps=EPS
)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
) -> Tuple[
torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, hidden_size)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
"""
residual = hidden_states
# 125m, 1.7B, ..., 175B applies layer norm BEFORE attention
if self.do_layer_norm_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(
hidden_states, p=self.dropout, training=self.training
)
hidden_states = residual + hidden_states
# 350m applies layer norm AFTER attention
if not self.do_layer_norm_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
# Fully Connected
hidden_states_shape = hidden_states.shape
hidden_states = hidden_states.reshape(-1, hidden_states.size(-1))
residual = hidden_states
# 125m, 1.7B, ..., 175B applies layer norm BEFORE attention
if self.do_layer_norm_before:
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(
hidden_states, p=self.dropout, training=self.training
)
hidden_states = (residual + hidden_states).view(hidden_states_shape)
# 350m applies layer norm AFTER attention
if not self.do_layer_norm_before:
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
class OPTPreTrainedModel(PreTrainedModel):
config_class = OPTConfig
class OPTDecoder(OPTPreTrainedModel):
def __init__(self, prefix: str, config: OPTConfig, weights):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.vocab_size = config.vocab_size
prefix = prefix + "." if prefix else ""
self.embed_tokens = TensorParallelEmbedding(
prefix=f"{prefix}decoder.embed_tokens", weights=weights
)
self.embed_positions = OPTLearnedPositionalEmbedding(prefix, weights)
if config.word_embed_proj_dim != config.hidden_size:
self.project_out = FastLinear.load(
config,
prefix=f"{prefix}decoder.project_out",
weights=weights,
bias=False,
)
else:
self.project_out = None
if config.word_embed_proj_dim != config.hidden_size:
self.project_in = FastLinear.load(
config,
prefix=f"{prefix}decoder.project_in",
weights=weights,
bias=False,
)
else:
self.project_in = None
# Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility
# with checkpoints that have been fine-tuned before transformers v4.20.1
# see https://github.com/facebookresearch/metaseq/pull/164
if config.do_layer_norm_before and not config._remove_final_layer_norm:
self.final_layer_norm = nn.LayerNorm.load(
prefix=f"{prefix}decoder.final_layer_norm", weights=weights, eps=EPS
)
else:
self.final_layer_norm = None
self.layers = nn.ModuleList(
[
OPTDecoderLayer(
layer_id,
prefix=f"{prefix}decoder.layers.{layer_id}",
config=config,
weights=weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(
self, attention_mask, input_shape, inputs_embeds, past_key_values_length
):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape,
inputs_embeds.dtype,
device=inputs_embeds.device,
past_key_values_length=past_key_values_length,
)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(
attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
).to(inputs_embeds.device)
combined_attention_mask = (
expanded_attn_mask
if combined_attention_mask is None
else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`, and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError(
"You have to specify either decoder_input_ids or decoder_inputs_embeds"
)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
past_key_values_length = (
past_key_values[0][0].shape[2] if past_key_values is not None else 0
)
# required mask seq length can be calculated via length of past
mask_seq_length = past_key_values_length + seq_length
# embed positions
if attention_mask is None:
attention_mask = torch.ones(
batch_size, mask_seq_length, device=inputs_embeds.device
)
causal_attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
pos_embeds = self.embed_positions(attention_mask, past_key_values_length)
if self.project_in is not None:
inputs_embeds = self.project_in(inputs_embeds)
hidden_states = inputs_embeds + pos_embeds
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = () if use_cache else None
# check if head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask], ["head_mask"]):
if attn_mask is not None:
if attn_mask.size()[0] != (len(self.layers)):
raise ValueError(
f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
f" {head_mask.size()[0]}."
)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = (
past_key_values[idx] if past_key_values is not None else None
)
layer_outputs = decoder_layer(
hidden_states,
attention_mask=causal_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if self.final_layer_norm is not None:
hidden_states = self.final_layer_norm(hidden_states)
if self.project_out is not None:
hidden_states = self.project_out(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
if v is not None
)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
class OPTModel(OPTPreTrainedModel):
def __init__(self, prefix: str, config: OPTConfig, weights):
super().__init__(config)
self.decoder = OPTDecoder(prefix, config, weights)
# Initialize weights and apply final processing
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs
return BaseModelOutputWithPast(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
hidden_states=decoder_outputs.hidden_states,
attentions=decoder_outputs.attentions,
)
class OPTForCausalLM(OPTPreTrainedModel):
def __init__(self, prefix, config, weights):
super().__init__(config)
if not prefix and any(s.startswith("model") for s in weights.routing.keys()):
prefix = "model"
self.model = OPTModel(prefix, config, weights)
self.lm_head = SpeculativeHead.load(
config,
prefix=f"{prefix + '.' if prefix else ''}decoder.embed_tokens",
weights=weights,
)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits, speculative_logits = self.lm_head(outputs.last_hidden_state)
loss = None
return (
CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
),
speculative_logits,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
**kwargs,
):
if past_key_values:
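            # With a populated KV cache only the most recent token needs to be fed.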
input_ids = input_ids[:, -1:]
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and past_key_values is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs.update(
{
"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"attention_mask": attention_mask,
}
)
return model_inputs
@staticmethod
def _reorder_cache(past_key_values, beam_idx):
reordered_past = ()
for layer_past in past_key_values:
reordered_past += (
tuple(
past_state.index_select(0, beam_idx) for past_state in layer_past
),
)
return reordered_past
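# Hedged usage sketch (added for illustration, not part of the upstream file):
# shows what `_reorder_cache` does during beam search. The cache shapes and the
# beam permutation below are made up for demonstration only.
if __name__ == "__main__":
    past_key_values = (
        # a single layer with a (key, value) pair; beams live on dim 0
        (torch.arange(6.0).view(3, 2), torch.arange(6.0).view(3, 2) + 100),
    )
    beam_idx = torch.tensor([2, 0, 1])  # beam slot 0 now follows old beam 2, etc.
    reordered = OPTForCausalLM._reorder_cache(past_key_values, beam_idx)
    # Rows of every cached tensor are re-ordered along dim 0 to follow the beams.
    print(reordered[0][0])  # tensor([[4., 5.], [0., 1.], [2., 3.]])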
| text-generation-inference/server/text_generation_server/models/custom_modeling/opt_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/opt_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 15911
} |
import math
from typing import List, Optional
import torch
from opentelemetry import trace
from transformers import AutoTokenizer, AutoModelForCausalLM
import transformers.modeling_utils
from text_generation_server.models.flash_causal_lm import FlashCausalLM
from text_generation_server.utils import initialize_torch_distributed
from text_generation_server.layers.attention import paged_attention, attention, Seqlen
from text_generation_server.layers.attention.kv_cache import KVScales, KVCache
from text_generation_server.models.globals import ATTENTION
tracer = trace.get_tracer(__name__)
def tgi_flash_attention_forward(
module,
query_states: torch.Tensor,
key_states: torch.Tensor,
value_states: torch.Tensor,
attention_mask: Optional[torch.Tensor], # This is a positional arg in Transformers
kv_cache: List[KVCache],
kv_head_mapping: torch.Tensor,
slots: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
seqlen: Seqlen,
block_tables: torch.Tensor,
max_s: int,
kv_scales: KVScales,
softmax_scale: Optional[float] = None,
sliding_window: Optional[int] = None,
softcap: Optional[float] = None,
**kwargs, # This is needed to "absorb" other args passed by Transformers modeling
):
kv_cache = kv_cache[module.layer_idx]
query_states = query_states.transpose(1, 2).squeeze(dim=0)
key_states = key_states.transpose(1, 2).squeeze(dim=0)
value_states = value_states.transpose(1, 2).squeeze(dim=0)
# Take care of updating the cache in-place
kv_cache.store(key=key_states, value=value_states, slots=slots, kv_scales=kv_scales)
_, num_heads, head_dim = query_states.shape
softmax_scale = 1 / math.sqrt(head_dim) if softmax_scale is None else softmax_scale
sliding_window = -1 if sliding_window is None else sliding_window
if cu_seqlen_prefill is not None:
attn_output = attention(
query=query_states,
key=key_states,
value=value_states,
kv_cache=kv_cache,
kv_scales=kv_scales,
seqlen=seqlen,
block_tables=block_tables,
softmax_scale=softmax_scale,
window_size_left=sliding_window,
softcap=softcap,
)
else:
attn_output = paged_attention(
query_states,
kv_cache,
kv_head_mapping,
softmax_scale,
block_tables,
seqlen,
max_s,
kv_scales=kv_scales,
softcap=softcap,
)
attn_output = attn_output.view(-1, num_heads * head_dim)
return attn_output, None
transformers.modeling_utils.ALL_ATTENTION_FUNCTIONS["tgi"] = tgi_flash_attention_forward
class TransformersFlashCausalLM(FlashCausalLM):
def __init__(
self,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
speculator: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
default_dtype=torch.float16,
trust_remote_code: bool = False,
tokenizer_class=AutoTokenizer,
kv_cache_dtype: Optional[torch.dtype] = None,
):
self.quantize = quantize
self.process_group, rank, world_size = initialize_torch_distributed()
if speculator:
raise RuntimeError("Speculator decoding is not enabled for AutoModel")
if torch.cuda.is_available():
device = torch.device(f"cuda:{rank}")
dtype = default_dtype if dtype is None else dtype
elif hasattr(torch, "xpu") and torch.xpu.is_available():
device = torch.device("xpu")
dtype = default_dtype if dtype is None else dtype
else:
raise ValueError(
"Flash `Transformers` modeling backend is not available on cpu."
)
tokenizer = tokenizer_class.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
model = AutoModelForCausalLM.from_pretrained(
model_id,
revision=revision,
torch_dtype=dtype,
load_in_8bit=quantize == "bitsandbytes",
trust_remote_code=trust_remote_code,
attn_implementation="tgi",
device_map=device if world_size == 1 else None,
tp_plan="auto" if world_size > 1 else None,
)
if tokenizer.pad_token_id is None:
if model.config.pad_token_id is not None:
tokenizer.pad_token_id = model.config.pad_token_id
elif model.config.eos_token_id is not None and isinstance(
model.config.eos_token_id, int
):
tokenizer.pad_token_id = model.config.eos_token_id
elif tokenizer.eos_token_id is not None:
tokenizer.pad_token_id = tokenizer.eos_token_id
else:
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
self.num_layers = model.config.num_hidden_layers
self.num_heads = model.config.num_attention_heads // self.process_group.size()
self.num_kv_heads = model.config.num_key_value_heads
self.num_kv_heads = (
self.num_kv_heads // self.process_group.size()
if self.num_kv_heads > 1
else self.num_kv_heads
)
self.head_size = model.config.hidden_size // model.config.num_attention_heads
self.cuda_graphs = {}
self.kv_cache = []
self.kv_cache_dtype = dtype if kv_cache_dtype is None else kv_cache_dtype
if ATTENTION == "flashinfer":
from text_generation_server.layers.attention.flashinfer import (
create_prefill_state,
create_decode_state,
create_prefill_with_paged_kv_state,
)
self.prefill_state = create_prefill_state(device=device)
self.prefill_with_paged_kv_state = create_prefill_with_paged_kv_state(
device=device
)
self.decode_state = create_decode_state(
device=device,
num_heads=self.num_heads,
num_kv_heads=self.num_kv_heads,
)
self.num_groups = self.num_heads // self.num_kv_heads
# Those will never change and will be used in the forwards
self.kv_head_mapping = torch.arange(
0, self.num_kv_heads, dtype=torch.int32, device=device
).repeat_interleave(self.num_groups)
# This means no scale
self.kv_scales = KVScales(
torch.tensor(1.0, device=device),
torch.tensor(1.0, device=device),
)
torch.distributed.barrier(group=self.process_group)
# Skip FlashCausalLM init.
super(FlashCausalLM, self).__init__(
model_id=model_id,
model=model,
tokenizer=tokenizer,
requires_padding=False,
dtype=dtype,
device=device,
rank=rank,
world_size=world_size,
)
# Monkey patch of `self.model.forward` to match `FlashCausalLM`. It avoids duplicating a lot of code
# We first copy the original model.forward because we still need it in the monkey patch
self.model.original_forward = self.model.forward
self.model.forward = self._model_forward
@classmethod
def fallback(
cls,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
speculator: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
):
return cls(
model_id=model_id,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
def _model_forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[KVCache],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
lm_head_indices: Optional[torch.Tensor],
prefill_cache_indices=None, # not used, but passed to match original signature
adapter_data=None, # not supported, but passed to match original signature
):
hidden_states = self.model.model.forward(
input_ids=input_ids.unsqueeze(0), # expand dim to fit Transformers
position_ids=position_ids.unsqueeze(0), # expand dim to fit Transformers
past_key_values=None, # we use self.kv_cache instead of transformers cache object
use_cache=False, # we use self.kv_cache instead of transformers cache object
return_dict=True,
cu_seqlen_prefill=cu_seqlen_prefill,
kv_cache=kv_cache,
block_tables=block_tables,
slots=slots,
seqlen=seqlen,
max_s=max_s,
kv_head_mapping=self.kv_head_mapping,
kv_scales=self.kv_scales,
)[0].squeeze(dim=0)
        # Then compute logits from the lm_head, slicing the hidden states at the requested indices
# NOTE: some logits post-processing (e.g. in gemma2) may be absent here with the split of the modules
# To update with full Transformers support asap
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits = self.model.lm_head(hidden_states)
        # For Granite, until the next transformers version is released and we can use `lm_head_indices` natively
if hasattr(self.model.config, "logits_scaling"):
logits = logits / self.model.config.logits_scaling
# For Cohere for similar reasons
elif hasattr(self.model, "logit_scale"):
logits = logits * self.model.logit_scale
return logits, None
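# Hedged illustration (added for clarity, not part of the upstream file): the
# generic monkey-patch pattern used above, shown on a stand-in object. The
# `DummyModel` class and the doubling behaviour are hypothetical.
if __name__ == "__main__":
    class DummyModel:
        def forward(self, x):
            return x + 1
    model_ = DummyModel()
    model_.original_forward = model_.forward  # keep a handle on the bound original
    model_.forward = lambda x: model_.original_forward(x) * 2  # swap in a custom entry point
    assert model_.forward(3) == 8  # (3 + 1) * 2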
| text-generation-inference/server/text_generation_server/models/transformers_flash_causal_lm.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/transformers_flash_causal_lm.py",
"repo_id": "text-generation-inference",
"token_count": 4705
} |
# coding=utf-8
# From: https://github.com/huggingface/peft/pull/1364
# Copyright 2024-present the HuggingFace Inc. team.
# Modifications by Predibase, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Literal
import torch
def magnitude_based_pruning(tensor: torch.Tensor, density: float) -> torch.Tensor:
"""
Prune the smallest values of the task tensors and retain the top-k values based on the specified fraction
`density`.
Args:
tensor (`torch.Tensor`):The tensor to prune.
density (`float`):The fraction of values to preserve. Should be in [0,1].
"""
mask = torch.zeros_like(tensor).reshape(-1)
k = int(density * tensor.reshape(-1).shape[0])
top_k = torch.topk(tensor.abs().reshape(-1), k=k, largest=True)
mask[top_k[1]] = 1
return tensor * mask.reshape(tensor.shape)
def random_pruning(tensor: torch.Tensor, density: float, rescale: bool) -> torch.Tensor:
"""
    Randomly prune values of the task tensors, keeping each value with probability given by the fraction
    `density`.
Args:
tensor (`torch.Tensor`):The tensor to prune.
density (`float`):The fraction of values to preserve. Should be in [0,1].
rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor.
"""
mask = torch.bernoulli(torch.full_like(input=tensor, fill_value=density))
pruned_tensor = tensor * mask
    if rescale:
        pruned_tensor = torch.div(input=pruned_tensor, other=density)
return pruned_tensor
def prune(
tensor: torch.Tensor,
density: float,
method: Literal["magnitude", "random"],
rescale: bool = False,
) -> torch.Tensor:
"""
Prune the values of task tensors based on the `method`.
Args:
tensor (`torch.Tensor`):The tensor to prune.
density (`float`):The fraction of values to preserve. Should be in [0,1].
method (`str`):The method to use to prune. Should be one of ["magnitude", "random"].
rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor.
"""
if density >= 1:
return tensor
elif density < 0:
raise ValueError("Density should be >= 0, got {density}")
if method == "magnitude":
return magnitude_based_pruning(tensor, density)
elif method == "random":
return random_pruning(tensor, density, rescale=rescale)
else:
raise ValueError(f"Unknown method {method}")
def calculate_majority_sign_mask(
tensor: torch.Tensor, method: Literal["total", "frequency"] = "total"
):
"""
Get the mask of the majority sign across the task tensors. Task tensors are stacked on dimension 0.
Args:
tensor (`torch.Tensor`):The tensor to get the mask from.
method (`str`):The method to use to get the mask. Should be one of ["total", "frequency"].
"""
sign = tensor.sign()
if method == "total":
sign_magnitude = (sign * tensor.abs()).sum(dim=0)
elif method == "frequency":
sign_magnitude = sign.sum(dim=0)
else:
raise RuntimeError(f'Unimplemented mask method "{method}"')
majority_sign = torch.where(sign_magnitude >= 0, 1, -1)
return sign == majority_sign
def disjoint_merge(task_tensors, majority_sign_mask):
mixed_task_tensors = (task_tensors * majority_sign_mask).sum(dim=0)
num_params_preserved = majority_sign_mask.sum(dim=0)
return mixed_task_tensors / torch.clamp(num_params_preserved, min=1.0)
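# Hedged usage sketch (added for illustration, not part of the upstream file):
# how these helpers are typically chained for a TIES-style merge. The tensor
# values and the density are placeholders chosen for demonstration only.
if __name__ == "__main__":
    task_tensors = torch.stack(
        [
            torch.tensor([0.9, -0.1, 0.4, -0.8]),
            torch.tensor([0.7, 0.2, -0.3, -0.6]),
        ]
    )
    # 1. Prune each task tensor, keeping the top 50% of values by magnitude.
    pruned = torch.stack(
        [prune(t, density=0.5, method="magnitude") for t in task_tensors]
    )
    # 2. Resolve sign disagreements across tasks.
    mask = calculate_majority_sign_mask(pruned, method="total")
    # 3. Average only the parameters that agree with the majority sign.
    merged = disjoint_merge(pruned, mask)
    print(merged)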
| text-generation-inference/server/text_generation_server/utils/merges/utils.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/merges/utils.py",
"repo_id": "text-generation-inference",
"token_count": 1422
} |
## How to release
# Before the release
Simple checklist on how to make releases for `tokenizers`.
- Freeze `master` branch.
- Run all tests (Check CI has properly run)
- If any significant work, check benchmarks:
- `cd tokenizers && cargo bench` (needs to be run on latest release tag to measure difference if it's your first time)
- Run all `transformers` tests. (`transformers` is a big user of `tokenizers`; we need
  to make sure we don't break it, and testing is one way to make sure nothing unforeseen
  slips through.)
- Run all fast tests at the VERY least (not just the tokenization tests). (`RUN_PIPELINE_TESTS=1 CUDA_VISIBLE_DEVICES=-1 pytest -sv tests/`)
- When all *fast* tests work, then we can also (it's recommended) run the whole `transformers`
test suite.
- Rebase this [PR](https://github.com/huggingface/transformers/pull/16708).
This will create new docker images ready to run the test suites with `tokenizers` from the main branch.
- Wait for actions to finish
- Rebase this [PR](https://github.com/huggingface/transformers/pull/16712)
This will run the actual full test suite.
- Check the results.
- **If any breaking change has been done**, make sure the version can safely be increased for `transformers` users (the `tokenizers` pin in `transformers` must keep users from upgrading before `transformers` supports the new version). [link](https://github.com/huggingface/transformers/blob/main/setup.py#L154)
  For instance, with a pin of `tokenizers>=0.10,<0.11` we can safely release `0.11` without impacting
  current users (see the sketch after this list).
- Then start a new PR containing all desired code changes from the following steps.
- You will `Create release` after the code modifications are on `master`.
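As a rough sketch of the kind of pin mentioned above (the real line lives in the `transformers` `setup.py` linked above and may differ), the dependency entry looks roughly like:

```python
# Illustrative only -- check the linked setup.py for the actual constraint.
install_requires = [
    "tokenizers>=0.10,<0.11",  # capped below the next potentially breaking release
]
```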
# Rust
- `tokenizers` (rust, python & node) versions don't have to be in sync but it's
very common to release for all versions at once for new features.
- Edit `Cargo.toml` to reflect new version
- Edit `CHANGELOG.md`:
- Add relevant PRs that were added (python PRs do not belong for instance).
- Add links at the end of the files.
- Go to [Releases](https://github.com/huggingface/tokenizers/releases)
- Create new Release:
- Mark it as pre-release
- Use new version name with a new tag (create on publish) `vX.X.X`.
- Copy paste the new part of the `CHANGELOG.md`
- ⚠️ Click on `Publish release`. This will start the whole process of building and uploading
  the new version to `crates.io`; there is no going back after this.
- Go to the [Actions](https://github.com/huggingface/tokenizers/actions) tab and check everything works smoothly.
- If anything fails, you need to fix the CI/CD so it works again; since your package was not properly uploaded to the registry, you can simply try again.
# Python
- Edit `bindings/python/setup.py` to reflect new version.
- Edit `bindings/python/py_src/tokenizers/__init__.py` to reflect new version.
- Edit `CHANGELOG.md`:
- Add relevant PRs that were added (node PRs do not belong for instance).
- Add links at the end of the files.
- Go to [Releases](https://github.com/huggingface/tokenizers/releases)
- Create new Release:
- Mark it as pre-release
- Use new version name with a new tag (create on publish) `python-vX.X.X`.
- Copy paste the new part of the `CHANGELOG.md`
- ⚠️ Click on `Publish release`. This will start the whole process of building and uploading
  the new version to `pypi`; there is no going back after this.
- Go to the [Actions](https://github.com/huggingface/tokenizers/actions) tab and check everything works smoothly.
- If anything fails, you need to fix the CI/CD so it works again; since your package was not properly uploaded to the registry, you can simply try again.
- This CI/CD has 3 distinct builds: `Pypi` (normal), `conda` and `extra`. `Extra` is REALLY slow (~4h); this is normal since it has to rebuild many things, but it makes the wheel available on old Linuxes
# Node
- Edit `bindings/node/package.json` to reflect new version.
- Edit `CHANGELOG.md`:
- Add relevant PRs that were added (python PRs do not belong for instance).
- Add links at the end of the files.
- Go to [Releases](https://github.com/huggingface/tokenizers/releases)
- Create new Release:
- Mark it as pre-release
- Use new version name with a new tag (create on publish) `node-vX.X.X`.
- Copy paste the new part of the `CHANGELOG.md`
- ⚠️ Click on `Publish release`. This will start the whole process of building and uploading
  the new version to `npm`; there is no going back after this.
- Go to the [Actions](https://github.com/huggingface/tokenizers/actions) tab and check everything works smoothly.
- If anything fails, you need to fix the CI/CD so it works again; since your package was not properly uploaded to the registry, you can simply try again.
# Testing the CI/CD for release
If you want to make modifications to the CI/CD of the release GH actions, you need to:
- **Comment the part that uploads the artifacts** to `crates.io`, `PyPi` or `npm`.
- Change the trigger mechanism so it can trigger every time you push to your branch.
- Keep pushing your changes until the artifacts are properly created.
| tokenizers/RELEASE.md/0 | {
"file_path": "tokenizers/RELEASE.md",
"repo_id": "tokenizers",
"token_count": 1519
} |
/* eslint-disable */
var globRequire = require
console.log = (..._args: any[]) => {}
describe('quicktourExample', () => {
function require(mod: string) {
if (mod.startsWith('tokenizers')) {
return globRequire('../../')
} else {
return globRequire(mod)
}
}
it.skip('trains the tokenizer', async () => {
// START init_tokenizer
let { Tokenizer } = require('tokenizers')
let { BPE } = require('tokenizers')
let tokenizer = new Tokenizer(BPE.init({}, [], { unkToken: '[UNK]' }))
// END init_tokenizer
// START init_trainer
let { bpeTrainer } = require('tokenizers')
let trainer = bpeTrainer({
specialTokens: ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]'],
})
// END init_trainer
// START init_pretok
let { whitespacePreTokenizer } = require('tokenizers')
tokenizer.setPreTokenizer(whitespacePreTokenizer())
// END init_pretok
// START train
let files = ['test', 'train', 'valid'].map((split) => `data/wikitext-103-raw/wiki.${split}.raw`)
tokenizer.train(files, trainer)
// END train
// START save
tokenizer.save('data/tokenizer-wiki.json')
// END save
})
it('shows a quicktour example', async () => {
let { Tokenizer } = require('tokenizers')
// START reload_tokenizer
let tokenizer = Tokenizer.fromFile('data/tokenizer-wiki.json')
// END reload_tokenizer
// START encode
var output = await tokenizer.encode("Hello, y'all! How are you 😁 ?")
// END encode
// START print_tokens
console.log(output.getTokens())
// ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"]
// END print_tokens
expect(output.getTokens()).toEqual(['Hello', ',', 'y', "'", 'all', '!', 'How', 'are', 'you', '[UNK]', '?'])
// START print_ids
console.log(output.getIds())
// [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
// END print_ids
expect(output.getIds()).toEqual([27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35])
// START print_offsets
let offsets = output.getOffsets()
console.log(offsets[9])
// (26, 27)
// END print_offsets
expect(offsets[9]).toEqual([26, 27])
// START use_offsets
let { slice } = require('tokenizers')
let sentence = "Hello, y'all! How are you 😁 ?"
let [start, end] = offsets[9]
console.log(slice(sentence, start, end))
// "😁"
// END use_offsets
expect(slice(sentence, start, end)).toEqual('😁')
// START check_sep
console.log(tokenizer.tokenToId('[SEP]'))
// 2
// END check_sep
expect(tokenizer.tokenToId('[SEP]')).toEqual(2)
// START init_template_processing
let { templateProcessing } = require('tokenizers')
tokenizer.setPostProcessor(
templateProcessing('[CLS] $A [SEP]', '[CLS] $A [SEP] $B:1 [SEP]:1', [
['[CLS]', tokenizer.tokenToId('[CLS]')],
['[SEP]', tokenizer.tokenToId('[SEP]')],
]),
)
// END init_template_processing
// START print_special_tokens
var output = await tokenizer.encode("Hello, y'all! How are you 😁 ?")
console.log(output.getTokens())
// ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"]
// END print_special_tokens
expect(output.getTokens()).toEqual([
'[CLS]',
'Hello',
',',
'y',
"'",
'all',
'!',
'How',
'are',
'you',
'[UNK]',
'?',
'[SEP]',
])
// START print_special_tokens_pair
var output = await tokenizer.encode("Hello, y'all!", 'How are you 😁 ?')
console.log(output.getTokens())
// ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"]
// END print_special_tokens_pair
expect(output.getTokens()).toEqual([
'[CLS]',
'Hello',
',',
'y',
"'",
'all',
'!',
'[SEP]',
'How',
'are',
'you',
'[UNK]',
'?',
'[SEP]',
])
// START print_type_ids
console.log(output.getTypeIds())
// [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
// END print_type_ids
expect(output.getTypeIds()).toEqual([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
// START encode_batch
var output = await tokenizer.encodeBatch(["Hello, y'all!", 'How are you 😁 ?'])
// END encode_batch
// START encode_batch_pair
// var output = await tokenizer.encodeBatch(
// [["Hello, y'all!", "How are you 😁 ?"], ["Hello to you too!", "I'm fine, thank you!"]]
// );
// END encode_batch_pair
// START enable_padding
tokenizer.setPadding({ padId: 3, padToken: '[PAD]' })
// END enable_padding
// START print_batch_tokens
var output = await tokenizer.encodeBatch(["Hello, y'all!", 'How are you 😁 ?'])
console.log(output[1].getTokens())
// ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
// END print_batch_tokens
expect(output[1].getTokens()).toEqual(['[CLS]', 'How', 'are', 'you', '[UNK]', '?', '[SEP]', '[PAD]'])
// START print_attention_mask
console.log(output[1].getAttentionMask())
// [1, 1, 1, 1, 1, 1, 1, 0]
// END print_attention_mask
expect(output[1].getAttentionMask()).toEqual([1, 1, 1, 1, 1, 1, 1, 0])
})
})
| tokenizers/bindings/node/examples/documentation/quicktour.test.ts/0 | {
"file_path": "tokenizers/bindings/node/examples/documentation/quicktour.test.ts",
"repo_id": "tokenizers",
"token_count": 2324
} |
{
"name": "tokenizers-linux-x64-gnu",
"version": "0.13.4-rc1",
"os": [
"linux"
],
"cpu": [
"x64"
],
"main": "tokenizers.linux-x64-gnu.node",
"files": [
"tokenizers.linux-x64-gnu.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers",
"libc": [
"glibc"
]
} | tokenizers/bindings/node/npm/linux-x64-gnu/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/linux-x64-gnu/package.json",
"repo_id": "tokenizers",
"token_count": 289
} |
use crate::arc_rwlock_serde;
use napi::bindgen_prelude::*;
use napi_derive::napi;
use serde::{Deserialize, Serialize};
use std::sync::{Arc, RwLock};
use tk::normalizers::NormalizerWrapper;
use tk::NormalizedString;
use tokenizers as tk;
/// Normalizer
#[derive(Debug, Clone, Serialize, Deserialize)]
#[napi]
pub struct Normalizer {
#[serde(flatten, with = "arc_rwlock_serde")]
normalizer: Option<Arc<RwLock<NormalizerWrapper>>>,
}
#[napi]
impl Normalizer {
#[napi]
pub fn normalize_string(&self, sequence: String) -> Result<String> {
use tk::Normalizer;
let mut normalized = NormalizedString::from(sequence);
self
.normalize(&mut normalized)
.map_err(|e| Error::from_reason(format!("{}", e)))?;
Ok(normalized.get().to_string())
}
}
impl tk::Normalizer for Normalizer {
fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> {
self
.normalizer
.as_ref()
.ok_or("Uninitialized Normalizer")?
.read()
.unwrap()
.normalize(normalized)?;
Ok(())
}
}
#[napi]
pub fn prepend_normalizer(prepend: String) -> Normalizer {
Normalizer {
normalizer: Some(Arc::new(RwLock::new(
tk::normalizers::prepend::Prepend::new(prepend).into(),
))),
}
}
#[napi]
pub fn strip_accents_normalizer() -> Normalizer {
Normalizer {
normalizer: Some(Arc::new(RwLock::new(
tk::normalizers::strip::StripAccents.into(),
))),
}
}
#[napi(object)]
#[derive(Default)]
pub struct BertNormalizerOptions {
pub clean_text: Option<bool>,
pub handle_chinese_chars: Option<bool>,
pub strip_accents: Option<bool>,
pub lowercase: Option<bool>,
}
/// bert_normalizer(options?: {
/// cleanText?: bool = true,
/// handleChineseChars?: bool = true,
/// stripAccents?: bool = true,
/// lowercase?: bool = true
/// })
#[napi]
pub fn bert_normalizer(options: Option<BertNormalizerOptions>) -> Normalizer {
let options = options.unwrap_or_default();
Normalizer {
normalizer: Some(Arc::new(RwLock::new(
tk::normalizers::bert::BertNormalizer::new(
options.clean_text.unwrap_or(true),
options.handle_chinese_chars.unwrap_or(true),
options.strip_accents,
options.lowercase.unwrap_or(true),
)
.into(),
))),
}
}
#[napi]
pub fn nfd_normalizer() -> Normalizer {
Normalizer {
normalizer: Some(Arc::new(RwLock::new(tk::normalizers::unicode::NFD.into()))),
}
}
#[napi]
pub fn nfkd_normalizer() -> Normalizer {
Normalizer {
normalizer: Some(Arc::new(RwLock::new(tk::normalizers::unicode::NFKD.into()))),
}
}
#[napi]
pub fn nfc_normalizer() -> Normalizer {
Normalizer {
normalizer: Some(Arc::new(RwLock::new(tk::normalizers::unicode::NFC.into()))),
}
}
#[napi]
pub fn nfkc_normalizer() -> Normalizer {
Normalizer {
normalizer: Some(Arc::new(RwLock::new(tk::normalizers::unicode::NFKC.into()))),
}
}
/// strip(left?: boolean, right?: boolean)
#[napi]
pub fn strip_normalizer(left: Option<bool>, right: Option<bool>) -> Normalizer {
let left = left.unwrap_or(true);
let right = right.unwrap_or(true);
Normalizer {
normalizer: Some(Arc::new(RwLock::new(
tk::normalizers::strip::Strip::new(left, right).into(),
))),
}
}
#[napi]
pub fn sequence_normalizer(normalizers: Vec<&Normalizer>) -> Normalizer {
let mut sequence: Vec<NormalizerWrapper> = Vec::with_capacity(normalizers.len());
normalizers.into_iter().for_each(|normalizer| {
if let Some(normalizer) = &normalizer.normalizer {
sequence.push((**normalizer).read().unwrap().clone())
}
});
Normalizer {
normalizer: Some(Arc::new(RwLock::new(NormalizerWrapper::Sequence(
tk::normalizers::Sequence::new(sequence),
)))),
}
}
#[napi]
pub fn lowercase() -> Normalizer {
Normalizer {
normalizer: Some(Arc::new(RwLock::new(
tk::normalizers::utils::Lowercase.into(),
))),
}
}
#[napi]
pub fn replace(pattern: String, content: String) -> Result<Normalizer> {
Ok(Normalizer {
normalizer: Some(Arc::new(RwLock::new(
tk::normalizers::replace::Replace::new(pattern, content)
.map_err(|e| Error::from_reason(e.to_string()))?
.into(),
))),
})
}
#[napi]
pub fn nmt() -> Normalizer {
Normalizer {
normalizer: Some(Arc::new(RwLock::new(tk::normalizers::unicode::Nmt.into()))),
}
}
#[napi]
pub fn precompiled(bytes: Vec<u8>) -> Result<Normalizer> {
Ok(Normalizer {
normalizer: Some(Arc::new(RwLock::new(
tk::normalizers::precompiled::Precompiled::from(&bytes)
.map_err(|e| Error::from_reason(e.to_string()))?
.into(),
))),
})
}
| tokenizers/bindings/node/src/normalizers.rs/0 | {
"file_path": "tokenizers/bindings/node/src/normalizers.rs",
"repo_id": "tokenizers",
"token_count": 1886
} |
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer
from tokenizers.decoders import Decoder
from tokenizers.models import Model
from tokenizers.normalizers import Normalizer
from tokenizers.pre_tokenizers import PreTokenizer
from tokenizers.processors import PostProcessor
Offsets = Tuple[int, int]
class BaseTokenizer:
def __init__(self, tokenizer: Tokenizer, parameters=None):
self._tokenizer = tokenizer
self._parameters = parameters if parameters is not None else {}
def __repr__(self):
return "Tokenizer(vocabulary_size={}, {})".format(
self._tokenizer.get_vocab_size(),
", ".join(k + "=" + str(v) for k, v in self._parameters.items()),
)
def num_special_tokens_to_add(self, is_pair: bool) -> int:
"""
Return the number of special tokens that would be added for single/pair sentences.
:param is_pair: Boolean indicating if the input would be a single sentence or a pair
:return:
"""
return self._tokenizer.num_special_tokens_to_add(is_pair)
def get_vocab(self, with_added_tokens: bool = True) -> Dict[str, int]:
"""Returns the vocabulary
Args:
with_added_tokens: boolean:
Whether to include the added tokens in the vocabulary
Returns:
The vocabulary
"""
return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens)
def get_added_tokens_decoder(self) -> Dict[int, AddedToken]:
"""Returns the added reverse vocabulary
Returns:
The added vocabulary mapping ints to AddedTokens
"""
return self._tokenizer.get_added_tokens_decoder()
def get_vocab_size(self, with_added_tokens: bool = True) -> int:
"""Return the size of vocabulary, with or without added tokens.
Args:
with_added_tokens: (`optional`) bool:
Whether to count in added special tokens or not
Returns:
Size of vocabulary
"""
return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens)
def enable_padding(
self,
direction: Optional[str] = "right",
pad_to_multiple_of: Optional[int] = None,
pad_id: Optional[int] = 0,
pad_type_id: Optional[int] = 0,
pad_token: Optional[str] = "[PAD]",
length: Optional[int] = None,
):
"""Change the padding strategy
Args:
direction: (`optional`) str:
Can be one of: `right` or `left`
pad_to_multiple_of: (`optional`) unsigned int:
If specified, the padding length should always snap to the next multiple of
the given value. For example if we were going to pad with a length of 250 but
`pad_to_multiple_of=8` then we will pad to 256.
pad_id: (`optional`) unsigned int:
                The index to be used when padding
            pad_type_id: (`optional`) unsigned int:
                The type index to be used when padding
pad_token: (`optional`) str:
The pad token to be used when padding
length: (`optional`) unsigned int:
If specified, the length at which to pad. If not specified
we pad using the size of the longest sequence in a batch
"""
return self._tokenizer.enable_padding(
direction=direction,
pad_to_multiple_of=pad_to_multiple_of,
pad_id=pad_id,
pad_type_id=pad_type_id,
pad_token=pad_token,
length=length,
)
def no_padding(self):
"""Disable padding"""
return self._tokenizer.no_padding()
@property
def padding(self) -> Optional[dict]:
"""Get the current padding parameters
Returns:
None if padding is disabled, a dict with the currently set parameters
if the padding is enabled.
"""
return self._tokenizer.padding
def enable_truncation(self, max_length: int, stride: Optional[int] = 0, strategy: Optional[str] = "longest_first"):
"""Change the truncation options
Args:
max_length: unsigned int:
The maximum length at which to truncate
stride: (`optional`) unsigned int:
The length of the previous first sequence to be included
in the overflowing sequence
strategy: (`optional`) str:
Can be one of `longest_first`, `only_first` or `only_second`
"""
return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)
def no_truncation(self):
"""Disable truncation"""
return self._tokenizer.no_truncation()
@property
def truncation(self) -> Optional[dict]:
"""Get the current truncation parameters
Returns:
None if truncation is disabled, a dict with the current truncation parameters if
truncation is enabled
"""
return self._tokenizer.truncation
def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int:
"""Add the given tokens to the vocabulary
Args:
tokens: List[Union[str, AddedToken]]:
A list of tokens to add to the vocabulary. Each token can either be
a string, or an instance of AddedToken
Returns:
The number of tokens that were added to the vocabulary
"""
return self._tokenizer.add_tokens(tokens)
def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int:
"""Add the given special tokens to the vocabulary, and treat them as special tokens.
The special tokens will never be processed by the model, and will be
removed while decoding.
Args:
            special_tokens: List[Union[str, AddedToken]]:
A list of special tokens to add to the vocabulary. Each token can either be
a string, or an instance of AddedToken
Returns:
The number of tokens that were added to the vocabulary
"""
return self._tokenizer.add_special_tokens(special_tokens)
def normalize(self, sequence: str) -> str:
"""Normalize the given sequence
Args:
sequence: str:
The sequence to normalize
Returns:
The normalized string
"""
return self._tokenizer.normalize(sequence)
def encode(
self,
sequence: InputSequence,
pair: Optional[InputSequence] = None,
is_pretokenized: bool = False,
add_special_tokens: bool = True,
) -> Encoding:
"""Encode the given sequence and pair. This method can process raw text sequences as well
as already pre-tokenized sequences.
Args:
sequence: InputSequence:
The sequence we want to encode. This sequence can be either raw text or
pre-tokenized, according to the `is_pretokenized` argument:
- If `is_pretokenized=False`: `InputSequence` is expected to be `str`
- If `is_pretokenized=True`: `InputSequence` is expected to be
`Union[List[str], Tuple[str]]`
is_pretokenized: bool:
Whether the input is already pre-tokenized.
add_special_tokens: bool:
Whether to add the special tokens while encoding.
Returns:
An Encoding
"""
if sequence is None:
raise ValueError("encode: `sequence` can't be `None`")
return self._tokenizer.encode(sequence, pair, is_pretokenized, add_special_tokens)
def encode_batch(
self,
inputs: List[EncodeInput],
is_pretokenized: bool = False,
add_special_tokens: bool = True,
) -> List[Encoding]:
"""Encode the given inputs. This method accept both raw text sequences as well as already
pre-tokenized sequences.
Args:
inputs: List[EncodeInput]:
A list of single sequences or pair sequences to encode. Each `EncodeInput` is
expected to be of the following form:
`Union[InputSequence, Tuple[InputSequence, InputSequence]]`
Each `InputSequence` can either be raw text or pre-tokenized,
according to the `is_pretokenized` argument:
- If `is_pretokenized=False`: `InputSequence` is expected to be `str`
- If `is_pretokenized=True`: `InputSequence` is expected to be
`Union[List[str], Tuple[str]]`
is_pretokenized: bool:
Whether the input is already pre-tokenized.
add_special_tokens: bool:
Whether to add the special tokens while encoding.
Returns:
A list of Encoding
"""
if inputs is None:
raise ValueError("encode_batch: `inputs` can't be `None`")
return self._tokenizer.encode_batch(inputs, is_pretokenized, add_special_tokens)
def decode(self, ids: List[int], skip_special_tokens: Optional[bool] = True) -> str:
"""Decode the given list of ids to a string sequence
Args:
ids: List[unsigned int]:
A list of ids to be decoded
skip_special_tokens: (`optional`) boolean:
Whether to remove all the special tokens from the output string
Returns:
The decoded string
"""
if ids is None:
raise ValueError("None input is not valid. Should be a list of integers.")
return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)
    def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool] = True) -> List[str]:
"""Decode the list of sequences to a list of string sequences
Args:
sequences: List[List[unsigned int]]:
A list of sequence of ids to be decoded
skip_special_tokens: (`optional`) boolean:
Whether to remove all the special tokens from the output strings
Returns:
A list of decoded strings
"""
if sequences is None:
raise ValueError("None input is not valid. Should be list of list of integers.")
return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens)
def token_to_id(self, token: str) -> Optional[int]:
"""Convert the given token to its corresponding id
Args:
token: str:
The token to convert
Returns:
The corresponding id if it exists, None otherwise
"""
return self._tokenizer.token_to_id(token)
def id_to_token(self, id: int) -> Optional[str]:
"""Convert the given token id to its corresponding string
Args:
            id: int:
The token id to convert
Returns:
The corresponding string if it exists, None otherwise
"""
return self._tokenizer.id_to_token(id)
def save_model(self, directory: str, prefix: Optional[str] = None):
"""Save the current model to the given directory
Args:
directory: str:
A path to the destination directory
prefix: (Optional) str:
An optional prefix, used to prefix each file name
"""
return self._tokenizer.model.save(directory, prefix=prefix)
def save(self, path: str, pretty: bool = True):
"""Save the current Tokenizer at the given path
Args:
path: str:
A path to the destination Tokenizer file
"""
return self._tokenizer.save(path, pretty)
def to_str(self, pretty: bool = False):
"""Get a serialized JSON version of the Tokenizer as a str
Args:
pretty: bool:
Whether the JSON string should be prettified
Returns:
str
"""
return self._tokenizer.to_str(pretty)
def post_process(
self, encoding: Encoding, pair: Optional[Encoding] = None, add_special_tokens: bool = True
) -> Encoding:
"""Apply all the post-processing steps to the given encodings.
The various steps are:
1. Truncate according to global params (provided to `enable_truncation`)
2. Apply the PostProcessor
3. Pad according to global params. (provided to `enable_padding`)
Args:
encoding: Encoding:
The main Encoding to post process
pair: Optional[Encoding]:
An optional pair Encoding
add_special_tokens: bool:
Whether to add special tokens
Returns:
The resulting Encoding
"""
return self._tokenizer.post_process(encoding, pair, add_special_tokens)
@property
def model(self) -> Model:
return self._tokenizer.model
@model.setter
def model(self, model: Model):
self._tokenizer.model = model
@property
def normalizer(self) -> Normalizer:
return self._tokenizer.normalizer
@normalizer.setter
def normalizer(self, normalizer: Normalizer):
self._tokenizer.normalizer = normalizer
@property
def pre_tokenizer(self) -> PreTokenizer:
return self._tokenizer.pre_tokenizer
@pre_tokenizer.setter
def pre_tokenizer(self, pre_tokenizer: PreTokenizer):
self._tokenizer.pre_tokenizer = pre_tokenizer
@property
def post_processor(self) -> PostProcessor:
return self._tokenizer.post_processor
@post_processor.setter
def post_processor(self, post_processor: PostProcessor):
self._tokenizer.post_processor = post_processor
@property
def decoder(self) -> Decoder:
return self._tokenizer.decoder
@decoder.setter
def decoder(self, decoder: Decoder):
self._tokenizer.decoder = decoder
| tokenizers/bindings/python/py_src/tokenizers/implementations/base_tokenizer.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/base_tokenizer.py",
"repo_id": "tokenizers",
"token_count": 6036
} |
import itertools
import os
import re
from string import Template
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple
from tokenizers import Encoding, Tokenizer
dirname = os.path.dirname(__file__)
css_filename = os.path.join(dirname, "visualizer-styles.css")
with open(css_filename) as f:
css = f.read()
class Annotation:
start: int
end: int
    label: str
def __init__(self, start: int, end: int, label: str):
self.start = start
self.end = end
self.label = label
AnnotationList = List[Annotation]
PartialIntList = List[Optional[int]]
class CharStateKey(NamedTuple):
token_ix: Optional[int]
anno_ix: Optional[int]
class CharState:
char_ix: Optional[int]
def __init__(self, char_ix):
self.char_ix = char_ix
self.anno_ix: Optional[int] = None
self.tokens: List[int] = []
@property
def token_ix(self):
return self.tokens[0] if len(self.tokens) > 0 else None
@property
def is_multitoken(self):
"""
BPE tokenizers can output more than one token for a char
"""
return len(self.tokens) > 1
def partition_key(self) -> CharStateKey:
return CharStateKey(
token_ix=self.token_ix,
anno_ix=self.anno_ix,
)
class Aligned:
pass
class EncodingVisualizer:
"""
Build an EncodingVisualizer
Args:
tokenizer (:class:`~tokenizers.Tokenizer`):
A tokenizer instance
default_to_notebook (:obj:`bool`):
Whether to render html output in a notebook by default
annotation_converter (:obj:`Callable`, `optional`):
An optional (lambda) function that takes an annotation in any format and returns
an Annotation object
"""
    unk_token_regex = re.compile(r"(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE)
def __init__(
self,
tokenizer: Tokenizer,
default_to_notebook: bool = True,
annotation_converter: Optional[Callable[[Any], Annotation]] = None,
):
if default_to_notebook:
try:
from IPython.core.display import HTML, display
except ImportError:
raise Exception(
"""We couldn't import IPython utils for html display.
Are you running in a notebook?
You can also pass `default_to_notebook=False` to get back raw HTML
"""
)
self.tokenizer = tokenizer
self.default_to_notebook = default_to_notebook
        self.annotation_converter = annotation_converter
pass
def __call__(
self,
text: str,
annotations: AnnotationList = [],
default_to_notebook: Optional[bool] = None,
) -> Optional[str]:
"""
Build a visualization of the given text
Args:
text (:obj:`str`):
The text to tokenize
annotations (:obj:`List[Annotation]`, `optional`):
                An optional list of annotations of the text. They can either be an Annotation instance
or anything else if you instantiated the visualizer with a converter function
default_to_notebook (:obj:`bool`, `optional`, defaults to `False`):
If True, will render the html in a notebook. Otherwise returns an html string.
Returns:
The HTML string if default_to_notebook is False, otherwise (default) returns None and
renders the HTML in the notebook
"""
final_default_to_notebook = self.default_to_notebook
if default_to_notebook is not None:
final_default_to_notebook = default_to_notebook
if final_default_to_notebook:
try:
from IPython.core.display import HTML, display
except ImportError:
raise Exception(
"""We couldn't import IPython utils for html display.
Are you running in a notebook?"""
)
        if self.annotation_converter is not None:
            annotations = list(map(self.annotation_converter, annotations))
encoding = self.tokenizer.encode(text)
html = EncodingVisualizer.__make_html(text, encoding, annotations)
if final_default_to_notebook:
display(HTML(html))
else:
return html
@staticmethod
def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]:
"""
Generates a color palette for all the labels in a given set of annotations
Args:
annotations (:obj:`Annotation`):
A list of annotations
Returns:
:obj:`dict`: A dictionary mapping labels to colors in HSL format
"""
if len(annotations) == 0:
return {}
labels = set(map(lambda x: x.label, annotations))
num_labels = len(labels)
h_step = int(255 / num_labels)
if h_step < 20:
h_step = 20
s = 32
l = 64 # noqa: E741
h = 10
colors = {}
for label in sorted(labels): # sort so we always get the same colors for a given set of labels
colors[label] = f"hsl({h},{s}%,{l}%"
h += h_step
return colors
@staticmethod
def consecutive_chars_to_html(
consecutive_chars_list: List[CharState],
text: str,
encoding: Encoding,
):
"""
Converts a list of "consecutive chars" into a single HTML element.
Chars are consecutive if they fall under the same word, token and annotation.
The CharState class is a named tuple with a "partition_key" method that makes it easy to
compare if two chars are consecutive.
Args:
consecutive_chars_list (:obj:`List[CharState]`):
A list of CharStates that have been grouped together
text (:obj:`str`):
The original text being processed
encoding (:class:`~tokenizers.Encoding`):
The encoding returned from the tokenizer
Returns:
:obj:`str`: The HTML span for a set of consecutive chars
"""
first = consecutive_chars_list[0]
if first.char_ix is None:
# its a special token
stoken = encoding.tokens[first.token_ix]
# special tokens are represented as empty spans. We use the data attribute and css
# magic to display it
return f'<span class="special-token" data-stoken={stoken}></span>'
# We're not in a special token so this group has a start and end.
last = consecutive_chars_list[-1]
start = first.char_ix
end = last.char_ix + 1
span_text = text[start:end]
css_classes = [] # What css classes will we apply on the resulting span
data_items = {} # What data attributes will we apply on the result span
if first.token_ix is not None:
# We can either be in a token or not (e.g. in white space)
css_classes.append("token")
if first.is_multitoken:
css_classes.append("multi-token")
if first.token_ix % 2:
# We use this to color alternating tokens.
# A token might be split by an annotation that ends in the middle of it, so this
# lets us visually indicate a consecutive token despite its possible splitting in
# the html markup
css_classes.append("odd-token")
else:
# Like above, but a different color so we can see the tokens alternate
css_classes.append("even-token")
if EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix]) is not None:
# This is a special token that is in the text. probably UNK
css_classes.append("special-token")
# TODO is this the right name for the data attribute ?
data_items["stok"] = encoding.tokens[first.token_ix]
else:
# In this case we are looking at a group/single char that is not tokenized.
# e.g. white space
css_classes.append("non-token")
css = f'''class="{" ".join(css_classes)}"'''
data = ""
for key, val in data_items.items():
data += f' data-{key}="{val}"'
return f"<span {css} {data} >{span_text}</span>"
@staticmethod
def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str:
char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations)
current_consecutive_chars = [char_states[0]]
prev_anno_ix = char_states[0].anno_ix
spans = []
label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations)
cur_anno_ix = char_states[0].anno_ix
if cur_anno_ix is not None:
# If we started in an annotation make a span for it
anno = annotations[cur_anno_ix]
label = anno.label
color = label_colors_dict[label]
spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
for cs in char_states[1:]:
cur_anno_ix = cs.anno_ix
if cur_anno_ix != prev_anno_ix:
# If we've transitioned in or out of an annotation
spans.append(
# Create a span from the current consecutive characters
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
current_consecutive_chars = [cs]
if prev_anno_ix is not None:
# if we transitioned out of an annotation close it's span
spans.append("</span>")
if cur_anno_ix is not None:
# If we entered a new annotation make a span for it
anno = annotations[cur_anno_ix]
label = anno.label
color = label_colors_dict[label]
spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
prev_anno_ix = cur_anno_ix
if cs.partition_key() == current_consecutive_chars[0].partition_key():
                # If the current character is in the same "group" as the previous one
current_consecutive_chars.append(cs)
else:
# Otherwise we make a span for the previous group
spans.append(
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
                # And reset the consecutive_char_list to form a new group
current_consecutive_chars = [cs]
# All that's left is to fill out the final span
# TODO I think there is an edge case here where an annotation's span might not close
spans.append(
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
res = HTMLBody(spans) # Send the list of spans to the body of our html
return res
@staticmethod
def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList:
"""
Args:
text (:obj:`str`):
The raw text we want to align to
annotations (:obj:`AnnotationList`):
A (possibly empty) list of annotations
Returns:
A list of length len(text) whose entry at index i is None if there is no annotation on
character i or k, the index of the annotation that covers index i where k is with
respect to the list of annotations
"""
annotation_map = [None] * len(text)
for anno_ix, a in enumerate(annotations):
for i in range(a.start, a.end):
annotation_map[i] = anno_ix
return annotation_map
@staticmethod
def __make_char_states(text: str, encoding: Encoding, annotations: AnnotationList) -> List[CharState]:
"""
        For each character in the original text, we emit a tuple representing its "state":
* which token_ix it corresponds to
* which word_ix it corresponds to
* which annotation_ix it corresponds to
Args:
text (:obj:`str`):
The raw text we want to align to
annotations (:obj:`List[Annotation]`):
A (possibly empty) list of annotations
encoding: (:class:`~tokenizers.Encoding`):
The encoding returned from the tokenizer
Returns:
:obj:`List[CharState]`: A list of CharStates, indicating for each char in the text what
            its state is
"""
annotation_map = EncodingVisualizer.__make_anno_map(text, annotations)
# Todo make this a dataclass or named tuple
char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))]
for token_ix, token in enumerate(encoding.tokens):
offsets = encoding.token_to_chars(token_ix)
if offsets is not None:
start, end = offsets
for i in range(start, end):
char_states[i].tokens.append(token_ix)
for char_ix, anno_ix in enumerate(annotation_map):
char_states[char_ix].anno_ix = anno_ix
return char_states
def HTMLBody(children: List[str], css_styles=css) -> str:
"""
Generates the full html with css from a list of html spans
Args:
children (:obj:`List[str]`):
A list of strings, assumed to be html elements
css_styles (:obj:`str`, `optional`):
Optional alternative implementation of the css
Returns:
:obj:`str`: An HTML string with style markup
"""
children_text = "".join(children)
return f"""
<html>
<head>
<style>
{css_styles}
</style>
</head>
<body>
<div class="tokenized-text" dir=auto>
{children_text}
</div>
</body>
</html>
"""
| tokenizers/bindings/python/py_src/tokenizers/tools/visualizer.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/tools/visualizer.py",
"repo_id": "tokenizers",
"token_count": 6751
} |
use std::convert::TryInto;
use std::sync::Arc;
use std::sync::RwLock;
use crate::encoding::PyEncoding;
use crate::error::ToPyResult;
use pyo3::exceptions;
use pyo3::exceptions::PyException;
use pyo3::prelude::*;
use pyo3::types::*;
use serde::ser::SerializeStruct;
use serde::Deserializer;
use serde::Serializer;
use serde::{Deserialize, Serialize};
use tk::processors::bert::BertProcessing;
use tk::processors::byte_level::ByteLevel;
use tk::processors::roberta::RobertaProcessing;
use tk::processors::template::{SpecialToken, Template};
use tk::processors::PostProcessorWrapper;
use tk::{Encoding, PostProcessor};
use tokenizers as tk;
/// Base class for all post-processors
///
/// This class is not supposed to be instantiated directly. Instead, any implementation of
/// a PostProcessor will return an instance of this class when instantiated.
#[pyclass(
dict,
module = "tokenizers.processors",
name = "PostProcessor",
subclass
)]
#[derive(Clone, Deserialize, Serialize)]
#[serde(transparent)]
pub struct PyPostProcessor {
processor: PyPostProcessorTypeWrapper,
}
impl<I> From<I> for PyPostProcessor
where
I: Into<PostProcessorWrapper>,
{
fn from(processor: I) -> Self {
PyPostProcessor {
processor: processor.into().into(),
}
}
}
impl PyPostProcessor {
pub(crate) fn new(processor: PyPostProcessorTypeWrapper) -> Self {
PyPostProcessor { processor }
}
pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> {
let base = self.clone();
Ok(
match self.processor {
PyPostProcessorTypeWrapper::Sequence(_) => Py::new(py, (PySequence {}, base))?
.into_pyobject(py)?
.into_any()
.into(),
PyPostProcessorTypeWrapper::Single(ref inner) => {
match &*inner.read().map_err(|_| {
PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor")
})? {
PostProcessorWrapper::ByteLevel(_) => Py::new(py, (PyByteLevel {}, base))?
.into_pyobject(py)?
.into_any()
.into(),
PostProcessorWrapper::Bert(_) => Py::new(py, (PyBertProcessing {}, base))?
.into_pyobject(py)?
.into_any()
.into(),
PostProcessorWrapper::Roberta(_) => Py::new(py, (PyRobertaProcessing {}, base))?
.into_pyobject(py)?
.into_any()
.into(),
PostProcessorWrapper::Template(_) => Py::new(py, (PyTemplateProcessing {}, base))?
.into_pyobject(py)?
.into_any()
.into(),
PostProcessorWrapper::Sequence(_) => Py::new(py, (PySequence {}, base))?
.into_pyobject(py)?
.into_any()
.into(),
}
}
}
)
}
}
impl PostProcessor for PyPostProcessor {
// TODO: update signature to `tk::Result<usize>`
fn added_tokens(&self, is_pair: bool) -> usize {
self.processor.added_tokens(is_pair)
}
fn process_encodings(
&self,
encodings: Vec<Encoding>,
add_special_tokens: bool,
) -> tk::Result<Vec<Encoding>> {
self.processor
.process_encodings(encodings, add_special_tokens)
}
}
#[pymethods]
impl PyPostProcessor {
fn __getstate__(&self, py: Python) -> PyResult<PyObject> {
let data = serde_json::to_string(&self.processor).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to pickle PostProcessor: {}",
e
))
})?;
Ok(PyBytes::new(py, data.as_bytes()).into())
}
fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> {
match state.extract::<&[u8]>(py) {
Ok(s) => {
self.processor = serde_json::from_slice(s).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to unpickle PostProcessor: {}",
e
))
})?;
Ok(())
}
Err(e) => Err(e),
}
}
/// Return the number of special tokens that would be added for single/pair sentences.
///
/// Args:
/// is_pair (:obj:`bool`):
/// Whether the input would be a pair of sequences
///
/// Returns:
/// :obj:`int`: The number of tokens to add
#[pyo3(text_signature = "(self, is_pair)")]
fn num_special_tokens_to_add(&self, is_pair: bool) -> PyResult<usize> {
Ok(self.processor.added_tokens(is_pair))
}
/// Post-process the given encodings, generating the final one
///
/// Args:
/// encoding (:class:`~tokenizers.Encoding`):
/// The encoding for the first sequence
///
/// pair (:class:`~tokenizers.Encoding`, `optional`):
/// The encoding for the pair sequence
///
/// add_special_tokens (:obj:`bool`):
/// Whether to add the special tokens
///
/// Return:
/// :class:`~tokenizers.Encoding`: The final encoding
#[pyo3(signature = (encoding, pair = None, add_special_tokens = true))]
#[pyo3(text_signature = "(self, encoding, pair=None, add_special_tokens=True)")]
fn process(
&self,
encoding: &PyEncoding,
pair: Option<&PyEncoding>,
add_special_tokens: bool,
) -> PyResult<PyEncoding> {
let final_encoding = ToPyResult(self.processor.process(
encoding.encoding.clone(),
pair.map(|e| e.encoding.clone()),
add_special_tokens,
))
.into_py()?;
Ok(final_encoding.into())
}
fn __repr__(&self) -> PyResult<String> {
crate::utils::serde_pyo3::repr(self)
.map_err(|e| exceptions::PyException::new_err(e.to_string()))
}
fn __str__(&self) -> PyResult<String> {
crate::utils::serde_pyo3::to_string(self)
.map_err(|e| exceptions::PyException::new_err(e.to_string()))
}
}
macro_rules! getter {
($self: ident, $variant: ident, $($name: tt)+) => {{
let super_ = $self.as_ref();
if let PyPostProcessorTypeWrapper::Single(ref single) = super_.processor {
if let PostProcessorWrapper::$variant(ref post) = *single.read().expect(
"RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor"
) {
post.$($name)+
} else {
unreachable!()
}
} else {
unreachable!()
}
}};
}
macro_rules! setter {
($self: ident, $variant: ident, $name: ident, $value: expr) => {{
let super_ = $self.as_ref();
if let PyPostProcessorTypeWrapper::Single(ref single) = super_.processor {
if let PostProcessorWrapper::$variant(ref mut post) = *single.write().expect(
"RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor",
) {
post.$name = $value;
}
}
}};
($self: ident, $variant: ident, @$name: ident, $value: expr) => {{
let super_ = $self.as_ref();
if let PyPostProcessorTypeWrapper::Single(ref single) = super_.processor {
if let PostProcessorWrapper::$variant(ref mut post) = *single.write().expect(
"RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor",
) {
post.$name($value);
}
}
};};
}
#[derive(Clone)]
pub(crate) enum PyPostProcessorTypeWrapper {
Sequence(Vec<Arc<RwLock<PostProcessorWrapper>>>),
Single(Arc<RwLock<PostProcessorWrapper>>),
}
impl PostProcessor for PyPostProcessorTypeWrapper {
fn added_tokens(&self, is_pair: bool) -> usize {
match self {
PyPostProcessorTypeWrapper::Single(inner) => inner
.read()
.expect("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor")
.added_tokens(is_pair),
PyPostProcessorTypeWrapper::Sequence(inner) => inner.iter().map(|p| {
p.read()
.expect("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor")
.added_tokens(is_pair)
}).sum::<usize>(),
}
}
fn process_encodings(
&self,
mut encodings: Vec<Encoding>,
add_special_tokens: bool,
) -> tk::Result<Vec<Encoding>> {
match self {
PyPostProcessorTypeWrapper::Single(inner) => inner
.read()
.map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor"))?
.process_encodings(encodings, add_special_tokens),
PyPostProcessorTypeWrapper::Sequence(inner) => {
for processor in inner.iter() {
encodings = processor
.read()
.map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor"))?
.process_encodings(encodings, add_special_tokens)?;
}
Ok(encodings)
},
}
}
}
impl<'de> Deserialize<'de> for PyPostProcessorTypeWrapper {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let wrapper = PostProcessorWrapper::deserialize(deserializer)?;
Ok(wrapper.into())
}
}
impl Serialize for PyPostProcessorTypeWrapper {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self {
PyPostProcessorTypeWrapper::Sequence(seq) => {
let mut ser = serializer.serialize_struct("Sequence", 2)?;
ser.serialize_field("type", "Sequence")?;
ser.serialize_field("processors", seq)?;
ser.end()
}
PyPostProcessorTypeWrapper::Single(inner) => inner.serialize(serializer),
}
}
}
impl<I> From<I> for PyPostProcessorTypeWrapper
where
I: Into<PostProcessorWrapper>,
{
fn from(processor: I) -> Self {
let processor = processor.into();
match processor {
PostProcessorWrapper::Sequence(seq) => PyPostProcessorTypeWrapper::Sequence(
seq.into_iter().map(|p| Arc::new(RwLock::new(p))).collect(),
),
_ => PyPostProcessorTypeWrapper::Single(Arc::new(RwLock::new(processor.clone()))),
}
}
}
/// This post-processor takes care of adding the special tokens needed by
/// a Bert model:
///
/// - a SEP token
/// - a CLS token
///
/// Args:
/// sep (:obj:`Tuple[str, int]`):
/// A tuple with the string representation of the SEP token, and its id
///
/// cls (:obj:`Tuple[str, int]`):
/// A tuple with the string representation of the CLS token, and its id
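///
/// Example (a sketch; the token ids shown are illustrative placeholders)::
///
///     from tokenizers.processors import BertProcessing
///     post_processor = BertProcessing(sep=("[SEP]", 102), cls=("[CLS]", 101))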
#[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "BertProcessing")]
pub struct PyBertProcessing {}
#[pymethods]
impl PyBertProcessing {
#[new]
#[pyo3(text_signature = "(self, sep, cls)")]
fn new(sep: (String, u32), cls: (String, u32)) -> (Self, PyPostProcessor) {
(PyBertProcessing {}, BertProcessing::new(sep, cls).into())
}
fn __getnewargs__<'p>(&self, py: Python<'p>) -> PyResult<Bound<'p, PyTuple>> {
PyTuple::new(py, [("", 0), ("", 0)])
}
#[getter]
fn get_sep(self_: PyRef<Self>) -> Result<Bound<'_, PyTuple>, PyErr> {
let py = self_.py();
let (tok, id) = getter!(self_, Bert, get_sep_copy());
PyTuple::new(
py,
Vec::<PyObject>::from([tok.into_pyobject(py)?.into(), id.into_pyobject(py)?.into()]),
)
}
#[setter]
fn set_sep(self_: PyRef<Self>, sep: Bound<'_, PyTuple>) -> PyResult<()> {
let sep = sep.extract()?;
setter!(self_, Bert, sep, sep);
Ok(())
}
#[getter]
fn get_cls(self_: PyRef<Self>) -> Result<Bound<'_, PyTuple>, PyErr> {
let py = self_.py();
let (tok, id) = getter!(self_, Bert, get_cls_copy());
PyTuple::new(
py,
Vec::<PyObject>::from([tok.into_pyobject(py)?.into(), id.into_pyobject(py)?.into()]),
)
}
#[setter]
fn set_cls(self_: PyRef<Self>, cls: Bound<'_, PyTuple>) -> PyResult<()> {
let cls = cls.extract()?;
setter!(self_, Bert, cls, cls);
Ok(())
}
}
/// This post-processor takes care of adding the special tokens needed by
/// a Roberta model:
///
/// - a SEP token
/// - a CLS token
///
/// It also takes care of trimming the offsets.
/// By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
/// want the offsets to include these whitespaces, then this PostProcessor should be initialized
/// with :obj:`trim_offsets=True`
///
/// Args:
/// sep (:obj:`Tuple[str, int]`):
/// A tuple with the string representation of the SEP token, and its id
///
/// cls (:obj:`Tuple[str, int]`):
/// A tuple with the string representation of the CLS token, and its id
///
/// trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`):
/// Whether to trim the whitespaces from the produced offsets.
///
/// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
/// Whether the add_prefix_space option was enabled during pre-tokenization. This
/// is relevant because it defines the way the offsets are trimmed out.
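///
/// Example (a sketch; the tokens and ids are illustrative placeholders)::
///
///     from tokenizers.processors import RobertaProcessing
///     post_processor = RobertaProcessing(
///         sep=("</s>", 2), cls=("<s>", 0), trim_offsets=True, add_prefix_space=True
///     )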
#[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "RobertaProcessing")]
pub struct PyRobertaProcessing {}
#[pymethods]
impl PyRobertaProcessing {
#[new]
#[pyo3(signature = (sep, cls, trim_offsets = true, add_prefix_space = true), text_signature = "(self, sep, cls, trim_offsets=True, add_prefix_space=True)")]
fn new(
sep: (String, u32),
cls: (String, u32),
trim_offsets: bool,
add_prefix_space: bool,
) -> (Self, PyPostProcessor) {
let proc = RobertaProcessing::new(sep, cls)
.trim_offsets(trim_offsets)
.add_prefix_space(add_prefix_space);
(PyRobertaProcessing {}, proc.into())
}
fn __getnewargs__<'p>(&self, py: Python<'p>) -> PyResult<Bound<'p, PyTuple>> {
PyTuple::new(py, [("", 0), ("", 0)])
}
#[getter]
fn get_sep(self_: PyRef<Self>) -> Result<Bound<'_, PyTuple>, PyErr> {
let py = self_.py();
let (tok, id) = getter!(self_, Roberta, get_sep_copy());
PyTuple::new(
py,
Vec::<PyObject>::from([tok.into_pyobject(py)?.into(), id.into_pyobject(py)?.into()]),
)
}
#[setter]
fn set_sep(self_: PyRef<Self>, sep: Bound<'_, PyTuple>) -> PyResult<()> {
let sep = sep.extract()?;
setter!(self_, Roberta, sep, sep);
Ok(())
}
#[getter]
fn get_cls(self_: PyRef<Self>) -> Result<Bound<'_, PyTuple>, PyErr> {
let py = self_.py();
let (tok, id) = getter!(self_, Roberta, get_cls_copy());
PyTuple::new(
py,
Vec::<PyObject>::from([tok.into_pyobject(py)?.into(), id.into_pyobject(py)?.into()]),
)
}
#[setter]
fn set_cls(self_: PyRef<Self>, cls: Bound<'_, PyTuple>) -> PyResult<()> {
let cls = cls.extract()?;
setter!(self_, Roberta, cls, cls);
Ok(())
}
#[getter]
fn get_trim_offsets(self_: PyRef<Self>) -> bool {
getter!(self_, Roberta, trim_offsets)
}
#[setter]
fn set_trim_offsets(self_: PyRef<Self>, trim_offsets: bool) {
setter!(self_, Roberta, trim_offsets, trim_offsets)
}
#[getter]
fn get_add_prefix_space(self_: PyRef<Self>) -> bool {
getter!(self_, Roberta, add_prefix_space)
}
#[setter]
fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) {
setter!(self_, Roberta, add_prefix_space, add_prefix_space)
}
}
/// This post-processor takes care of trimming the offsets.
///
/// By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
/// want the offsets to include these whitespaces, then this PostProcessor must be used.
///
/// Args:
/// trim_offsets (:obj:`bool`):
/// Whether to trim the whitespaces from the produced offsets.
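///
/// Example (a sketch)::
///
///     from tokenizers.processors import ByteLevel
///     post_processor = ByteLevel(trim_offsets=True)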
#[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "ByteLevel")]
pub struct PyByteLevel {}
#[pymethods]
impl PyByteLevel {
#[new]
#[pyo3(signature = (add_prefix_space = None, trim_offsets = None, use_regex = None, **_kwargs), text_signature = "(self, trim_offsets=True)")]
fn new(
add_prefix_space: Option<bool>,
trim_offsets: Option<bool>,
use_regex: Option<bool>,
_kwargs: Option<&Bound<'_, PyDict>>,
) -> (Self, PyPostProcessor) {
let mut byte_level = ByteLevel::default();
if let Some(aps) = add_prefix_space {
byte_level = byte_level.add_prefix_space(aps);
}
if let Some(to) = trim_offsets {
byte_level = byte_level.trim_offsets(to);
}
if let Some(ur) = use_regex {
byte_level = byte_level.use_regex(ur);
}
(PyByteLevel {}, byte_level.into())
}
#[getter]
fn get_add_prefix_space(self_: PyRef<Self>) -> bool {
getter!(self_, ByteLevel, add_prefix_space)
}
#[setter]
fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) {
setter!(self_, ByteLevel, add_prefix_space, add_prefix_space)
}
#[getter]
fn get_trim_offsets(self_: PyRef<Self>) -> bool {
getter!(self_, ByteLevel, trim_offsets)
}
#[setter]
fn set_trim_offsets(self_: PyRef<Self>, trim_offsets: bool) {
setter!(self_, ByteLevel, trim_offsets, trim_offsets)
}
#[getter]
fn get_use_regex(self_: PyRef<Self>) -> bool {
getter!(self_, ByteLevel, use_regex)
}
#[setter]
fn set_use_regex(self_: PyRef<Self>, use_regex: bool) {
setter!(self_, ByteLevel, use_regex, use_regex)
}
}
#[derive(Clone, Debug)]
pub struct PySpecialToken(SpecialToken);
impl From<PySpecialToken> for SpecialToken {
fn from(v: PySpecialToken) -> Self {
v.0
}
}
impl FromPyObject<'_> for PySpecialToken {
fn extract_bound(ob: &Bound<'_, PyAny>) -> PyResult<Self> {
if let Ok(v) = ob.extract::<(String, u32)>() {
Ok(Self(v.into()))
} else if let Ok(v) = ob.extract::<(u32, String)>() {
Ok(Self(v.into()))
} else if let Ok(d) = ob.downcast::<PyDict>() {
let id = d
.get_item("id")?
.ok_or_else(|| exceptions::PyValueError::new_err("`id` must be specified"))?
.extract::<String>()?;
let ids = d
.get_item("ids")?
.ok_or_else(|| exceptions::PyValueError::new_err("`ids` must be specified"))?
.extract::<Vec<u32>>()?;
let tokens = d
.get_item("tokens")?
.ok_or_else(|| exceptions::PyValueError::new_err("`tokens` must be specified"))?
.extract::<Vec<String>>()?;
Ok(Self(
ToPyResult(SpecialToken::new(id, ids, tokens)).into_py()?,
))
} else {
Err(exceptions::PyTypeError::new_err(
"Expected Union[Tuple[str, int], Tuple[int, str], dict]",
))
}
}
}
#[derive(Clone, Debug)]
pub struct PyTemplate(Template);
impl From<PyTemplate> for Template {
fn from(v: PyTemplate) -> Self {
v.0
}
}
impl FromPyObject<'_> for PyTemplate {
fn extract_bound(ob: &Bound<'_, PyAny>) -> PyResult<Self> {
if let Ok(s) = ob.extract::<String>() {
Ok(Self(
s.try_into().map_err(exceptions::PyValueError::new_err)?,
))
} else if let Ok(s) = ob.extract::<Vec<String>>() {
Ok(Self(
s.try_into().map_err(exceptions::PyValueError::new_err)?,
))
} else {
Err(exceptions::PyTypeError::new_err(
"Expected Union[str, List[str]]",
))
}
}
}
/// Provides a way to specify templates in order to add the special tokens to each
/// input sequence as relevant.
///
/// Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to
/// delimit each sequence. :obj:`[CLS]` is always used at the beginning of the first
/// sequence, and :obj:`[SEP]` is added at the end of both the first and the pair
/// sequences. The final result looks like this:
///
/// - Single sequence: :obj:`[CLS] Hello there [SEP]`
/// - Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]`
///
/// With the type ids as follows::
///
/// [CLS] ... [SEP] ... [SEP]
/// 0 0 0 1 1
///
/// You can achieve such behavior using a TemplateProcessing::
///
/// TemplateProcessing(
/// single="[CLS] $0 [SEP]",
/// pair="[CLS] $A [SEP] $B:1 [SEP]:1",
/// special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
/// )
///
/// In this example, each input sequence is identified using a ``$`` construct. This identifier
/// lets us specify each input sequence, and the type_id to use. When nothing is specified,
/// it uses the default values. Here are the different ways to specify it:
///
/// - Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B``
/// - Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ...
/// - Specifying both: ``$A:0``, ``$B:1``, ...
///
/// The same construct is used for special tokens: ``<identifier>(:<type_id>)?``.
///
/// **Warning**: You must ensure that you are giving the correct tokens/ids as these
/// will be added to the Encoding without any further check. If the given ids correspond
/// to something totally different in a `Tokenizer` using this `PostProcessor`, it
/// might lead to unexpected results.
///
/// Args:
/// single (:obj:`Template`):
/// The template used for single sequences
///
/// pair (:obj:`Template`):
/// The template used when both sequences are specified
///
/// special_tokens (:obj:`Tokens`):
/// The list of special tokens used in each sequence
///
/// Types:
///
/// Template (:obj:`str` or :obj:`List`):
/// - If a :obj:`str` is provided, whitespace is used as a delimiter between tokens
/// - If a :obj:`List[str]` is provided, a list of tokens
///
/// Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`):
/// - A :obj:`Tuple` with both a token and its associated ID, in any order
/// - A :obj:`dict` with the following keys:
/// - "id": :obj:`str` => The special token id, as specified in the Template
/// - "ids": :obj:`List[int]` => The associated IDs
/// - "tokens": :obj:`List[str]` => The associated tokens
///
/// The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have
/// the same length.
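///
/// For example, the dict form of a special token could look like this (the id value
/// 101 is an illustrative placeholder)::
///
///     {"id": "[CLS]", "ids": [101], "tokens": ["[CLS]"]}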
#[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "TemplateProcessing")]
pub struct PyTemplateProcessing {}
#[pymethods]
impl PyTemplateProcessing {
#[new]
#[pyo3(signature = (single = None, pair = None, special_tokens = None), text_signature = "(self, single, pair, special_tokens)")]
fn new(
single: Option<PyTemplate>,
pair: Option<PyTemplate>,
special_tokens: Option<Vec<PySpecialToken>>,
) -> PyResult<(Self, PyPostProcessor)> {
let mut builder = tk::processors::template::TemplateProcessing::builder();
if let Some(seq) = single {
builder.single(seq.into());
}
if let Some(seq) = pair {
builder.pair(seq.into());
}
if let Some(sp) = special_tokens {
builder.special_tokens(sp);
}
let processor = builder
.build()
.map_err(|e| exceptions::PyValueError::new_err(e.to_string()))?;
Ok((PyTemplateProcessing {}, processor.into()))
}
#[getter]
fn get_single(self_: PyRef<Self>) -> String {
getter!(self_, Template, get_single())
}
#[setter]
fn set_single(self_: PyRef<Self>, single: PyTemplate) -> PyResult<()> {
let template: Template = Template::from(single);
let super_ = self_.as_ref();
if let PyPostProcessorTypeWrapper::Single(ref inner) = super_.processor {
if let PostProcessorWrapper::Template(ref mut post) = *inner
.write()
.map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor"))? {
post.set_single(template);
}
}
Ok(())
}
}
/// Sequence Processor
///
/// Args:
/// processors (:obj:`List[PostProcessor]`)
/// The processors that need to be chained
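///
/// Example (a sketch chaining two of the processors documented above)::
///
///     from tokenizers import processors
///     post_processor = processors.Sequence([
///         processors.ByteLevel(trim_offsets=False),
///         processors.TemplateProcessing(
///             single="[CLS] $0 [SEP]",
///             special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
///         ),
///     ])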
#[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "Sequence")]
pub struct PySequence {}
#[pymethods]
impl PySequence {
#[new]
#[pyo3(signature = (processors_py), text_signature = "(self, processors)")]
fn new(processors_py: &Bound<'_, PyList>) -> PyResult<(Self, PyPostProcessor)> {
let mut processors = Vec::with_capacity(processors_py.len());
for n in processors_py.iter() {
let processor: PyRef<PyPostProcessor> = n.extract()?;
match &processor.processor {
PyPostProcessorTypeWrapper::Sequence(inner) => {
processors.extend(inner.iter().cloned())
}
PyPostProcessorTypeWrapper::Single(inner) => processors.push(inner.clone()),
}
}
Ok((
PySequence {},
PyPostProcessor::new(PyPostProcessorTypeWrapper::Sequence(processors)),
))
}
fn __getnewargs__<'p>(&self, py: Python<'p>) -> PyResult<Bound<'p, PyTuple>> {
PyTuple::new(py, [PyList::empty(py)])
}
fn __getitem__(self_: PyRef<'_, Self>, py: Python<'_>, index: usize) -> PyResult<Py<PyAny>> {
match &self_.as_ref().processor {
PyPostProcessorTypeWrapper::Sequence(ref inner) => match inner.get(index) {
Some(item) => {
PyPostProcessor::new(PyPostProcessorTypeWrapper::Single(item.clone()))
.get_as_subtype(py)
}
_ => Err(PyErr::new::<pyo3::exceptions::PyIndexError, _>(
"Index not found",
)),
},
_ => Err(PyErr::new::<pyo3::exceptions::PyIndexError, _>(
"This processor is not a Sequence, it does not support __getitem__",
)),
}
}
fn __setitem__(self_: PyRef<'_, Self>, index: usize, value: Bound<'_, PyAny>) -> PyResult<()> {
let processor: PyPostProcessor = value.extract()?;
let PyPostProcessorTypeWrapper::Single(processor) = processor.processor else {
return Err(PyException::new_err("processor should not be a sequence"));
};
match &self_.as_ref().processor {
PyPostProcessorTypeWrapper::Sequence(inner) => match inner.get(index) {
Some(item) => {
*item
.write()
.map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor"))? = processor
.read()
.map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor"))?
.clone();
}
_ => {
return Err(PyErr::new::<pyo3::exceptions::PyIndexError, _>(
"Index not found",
))
}
},
_ => {
return Err(PyException::new_err(
"This processor is not a Sequence, it does not support __setitem__",
))
}
};
Ok(())
}
}
/// Processors Module
#[pymodule]
pub fn processors(m: &Bound<'_, PyModule>) -> PyResult<()> {
m.add_class::<PyPostProcessor>()?;
m.add_class::<PyBertProcessing>()?;
m.add_class::<PyRobertaProcessing>()?;
m.add_class::<PyByteLevel>()?;
m.add_class::<PyTemplateProcessing>()?;
m.add_class::<PySequence>()?;
Ok(())
}
#[cfg(test)]
mod test {
use std::sync::{Arc, RwLock};
use pyo3::prelude::*;
use tk::processors::bert::BertProcessing;
use tk::processors::PostProcessorWrapper;
use crate::processors::{PyPostProcessor, PyPostProcessorTypeWrapper};
#[test]
fn get_subtype() {
Python::with_gil(|py| {
let py_proc = PyPostProcessor::new(PyPostProcessorTypeWrapper::Single(Arc::new(
RwLock::new(BertProcessing::new(("SEP".into(), 0), ("CLS".into(), 1)).into()),
)));
let py_bert = py_proc.get_as_subtype(py).unwrap();
assert_eq!(
"BertProcessing",
py_bert.bind(py).get_type().qualname().unwrap()
);
})
}
#[test]
fn serialize() {
let rs_processing = BertProcessing::new(("SEP".into(), 0), ("CLS".into(), 1));
let rs_wrapper: PostProcessorWrapper = rs_processing.clone().into();
let rs_processing_ser = serde_json::to_string(&rs_processing).unwrap();
let rs_wrapper_ser = serde_json::to_string(&rs_wrapper).unwrap();
let py_processing = PyPostProcessor::new(PyPostProcessorTypeWrapper::Single(Arc::new(
RwLock::new(rs_wrapper),
)));
let py_ser = serde_json::to_string(&py_processing).unwrap();
assert_eq!(py_ser, rs_processing_ser);
assert_eq!(py_ser, rs_wrapper_ser);
let py_processing: PyPostProcessor = serde_json::from_str(&rs_processing_ser).unwrap();
match py_processing.processor {
PyPostProcessorTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() {
PostProcessorWrapper::Bert(_) => (),
_ => panic!("Expected Bert postprocessor."),
},
_ => panic!("Expected a single processor, got a sequence"),
}
let py_processing: PyPostProcessor = serde_json::from_str(&rs_wrapper_ser).unwrap();
match py_processing.processor {
PyPostProcessorTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() {
PostProcessorWrapper::Bert(_) => (),
_ => panic!("Expected Bert postprocessor."),
},
_ => panic!("Expected a single processor, got a sequence"),
};
}
}
| tokenizers/bindings/python/src/processors.rs/0 | {
"file_path": "tokenizers/bindings/python/src/processors.rs",
"repo_id": "tokenizers",
"token_count": 14543
} |
import pickle
import pytest
from tokenizers.models import BPE, Model, WordLevel, WordPiece
from ..utils import bert_files, data_dir, roberta_files
class TestBPE:
def test_instantiate(self, roberta_files):
assert isinstance(BPE(), Model)
assert isinstance(BPE(), BPE)
vocab = {"a": 0, "b": 1, "ab": 2}
merges = [("a", "b")]
assert isinstance(BPE(vocab, merges), Model)
assert isinstance(BPE.from_file(roberta_files["vocab"], roberta_files["merges"]), BPE)
with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"):
BPE(vocab=vocab)
with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"):
BPE(merges=merges)
assert isinstance(
pickle.loads(pickle.dumps(BPE(vocab, merges))),
BPE,
)
# Deprecated calls in 0.9
with pytest.deprecated_call():
assert isinstance(BPE(roberta_files["vocab"], roberta_files["merges"]), Model)
with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"):
BPE(vocab=roberta_files["vocab"])
with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"):
BPE(merges=roberta_files["merges"])
with pytest.deprecated_call():
assert isinstance(
pickle.loads(pickle.dumps(BPE(roberta_files["vocab"], roberta_files["merges"]))),
BPE,
)
def test_can_modify(self):
model = BPE(
dropout=0.5,
unk_token="[UNK]",
continuing_subword_prefix="__prefix__",
end_of_word_suffix="__suffix__",
fuse_unk=False,
)
assert model.dropout == 0.5
assert model.unk_token == "[UNK]"
assert model.continuing_subword_prefix == "__prefix__"
assert model.end_of_word_suffix == "__suffix__"
assert model.fuse_unk == False
assert model.byte_fallback == False
# Modify these
model.dropout = 0.1
assert pytest.approx(model.dropout) == 0.1
model.unk_token = "<unk>"
assert model.unk_token == "<unk>"
model.continuing_subword_prefix = None
assert model.continuing_subword_prefix == None
model.end_of_word_suffix = "suff"
assert model.end_of_word_suffix == "suff"
model.fuse_unk = True
assert model.fuse_unk == True
model.byte_fallback = True
assert model.byte_fallback == True
def test_dropout_zero(self):
model = BPE(dropout=0.0)
assert model.dropout == 0.0
class TestWordPiece:
def test_instantiate(self, bert_files):
assert isinstance(WordPiece(), Model)
assert isinstance(WordPiece(), WordPiece)
vocab = {"a": 0, "b": 1, "ab": 2}
assert isinstance(WordPiece(vocab), Model)
assert isinstance(WordPiece(vocab), WordPiece)
assert isinstance(WordPiece.from_file(bert_files["vocab"]), WordPiece)
assert isinstance(pickle.loads(pickle.dumps(WordPiece(vocab))), WordPiece)
# Deprecated calls in 0.9
with pytest.deprecated_call():
assert isinstance(WordPiece(bert_files["vocab"]), Model)
with pytest.deprecated_call():
assert isinstance(pickle.loads(pickle.dumps(WordPiece(bert_files["vocab"]))), WordPiece)
def test_can_modify(self):
model = WordPiece(
unk_token="<oov>",
continuing_subword_prefix="__prefix__",
max_input_chars_per_word=200,
)
assert model.unk_token == "<oov>"
assert model.continuing_subword_prefix == "__prefix__"
assert model.max_input_chars_per_word == 200
# Modify these
model.unk_token = "<unk>"
assert model.unk_token == "<unk>"
model.continuing_subword_prefix = "$$$"
assert model.continuing_subword_prefix == "$$$"
model.max_input_chars_per_word = 10
assert model.max_input_chars_per_word == 10
class TestWordLevel:
def test_instantiate(self, roberta_files):
assert isinstance(WordLevel(), Model)
assert isinstance(WordLevel(), WordLevel)
vocab = {"a": 0, "b": 1, "ab": 2}
assert isinstance(WordLevel(vocab), Model)
assert isinstance(WordLevel(vocab), WordLevel)
assert isinstance(WordLevel.from_file(roberta_files["vocab"]), WordLevel)
# The WordLevel model expects a vocab.json using the same format as roberta
# so we can just try to load with this file
with pytest.deprecated_call():
assert isinstance(WordLevel(roberta_files["vocab"]), Model)
with pytest.deprecated_call():
assert isinstance(WordLevel(roberta_files["vocab"]), WordLevel)
def test_can_modify(self):
model = WordLevel(unk_token="<oov>")
assert model.unk_token == "<oov>"
# Modify these
model.unk_token = "<unk>"
assert model.unk_token == "<unk>"
| tokenizers/bindings/python/tests/bindings/test_models.py/0 | {
"file_path": "tokenizers/bindings/python/tests/bindings/test_models.py",
"repo_id": "tokenizers",
"token_count": 2304
} |
# Visualizer
<tokenizerslangcontent>
<python>
## Annotation
[[autodoc]] tokenizers.tools.Annotation
## EncodingVisualizer
[[autodoc]] tokenizers.tools.EncodingVisualizer
- __call__
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | tokenizers/docs/source-doc-builder/api/visualizer.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/api/visualizer.mdx",
"repo_id": "tokenizers",
"token_count": 134
} |
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.13.2]
- Python only changes
## [0.13.1]
- [#1072] Fixing Roberta type ids.
## [0.13.0]
- [#1009] `unstable_wasm` feature to support building on Wasm (it's unstable !)
- [#1008] `Decoder` is now a composable trait, but without being backward incompatible
- [#1047, #1051, #1052] `Processor` is now a composable trait, but without being backward incompatible
Both trait changes warrant a "major" number since, despite best efforts to not break backward
compatibility, the code is different enough that we cannot be exactly sure.
## [0.12.1]
- [#938] **Reverted breaking change**. https://github.com/huggingface/transformers/issues/16520
## [0.12.0] YANKED
Bump minor version because of a breaking change.
- [#938] [REVERTED IN 0.12.1] **Breaking change**. Decoder trait is modified to be composable. This is only breaking if you are using decoders on their own. tokenizers should be error free.
- [#939] Making the regex in `ByteLevel` pre_tokenizer optional (necessary for BigScience)
- [#952] Fixed the vocabulary size of UnigramTrainer output (to respect added tokens)
- [#954] Fixed not being able to save vocabularies with holes in vocab (ConvBert). Yell warnings instead, but stop panicking.
- [#961] Added link for Ruby port of `tokenizers`
- [#960] Feature gate for `cli` and its `clap` dependency
## [0.11.3]
- [#919] Fixing single_word AddedToken. (regression from 0.11.2)
- [#916] Deserializing faster `added_tokens` by loading them in batch.
## [0.11.2]
- [#884] Fixing bad deserialization following inclusion of a default for Punctuation
## [0.11.1]
- [#882] Fixing Punctuation deserialize without argument.
- [#868] Fixing missing direction in TruncationParams
- [#860] Adding TruncationSide to TruncationParams
## [0.11.0]
### Fixed
- [#236]: Fix a bug with offsets being shifted when there are sub-sequences (Usually with
special tokens and/or added tokens in the sequence).
- [#286]: Fix various crash when training a BPE model
- [#309]: Fixed a few bugs related to additional vocabulary/tokens
- [#363]: Fix panic from unwrapping `File::open` in `count_words`
### Changed
- [#234]: Completely changed the alignment mappings available on `Encoding`. Previous mappings
were misleading and only providing offsets. New ones provide methods to easily convert between
`char` or `word` (input space) and `token` (output space)
- [#236]: `AddedToken` with special options like `rstrip` will keep the matched whitespaces
in the textual representation of the token, exposed in `tokens` on the `Encoding`. The ID stays
the same as usual. This fixes the offsets for said tokens.
- [#236]: Offsets are now converted back to the original referential before we merge the
sub-sequences together and then do the post-processing. This also fixes some offsets bugs.
- [#236]: ByteLevel PostProcessor now uses the `add_prefix_space` attribute to determine how to
trim offsets.
- Improved `TruncationError` to handle cases where provided max length is too low.
- [#249]: The `encode` and `encode_batch` inputs have been greatly improved, and they now also accept
pre-tokenized inputs.
- [#276]: Improve BPE training speeds, by reading files sequentially, but parallelizing the
processing of each file
- [#280]: Use `onig` for byte-level pre-tokenization to remove all the differences with the original
implementation from GPT-2
- [#309]: Improved the management of the additional vocabulary. This introduces an option
`normalized`, controlling whether a token should be extracted from the normalized version of the
input text.
- [#330]: BertNormalizer now keeps the same behavior than the original implementation when
`strip_accents` is not specified.
- [#355]: Tokenizer does not use any dynamic dispatch anymore.
- [#377]: Use byte offsets everywhere (instead of the char offsets)
### Added
- [#236]: RobertaProcessing is now also taking care of trimming offsets, and works just as ByteLevel
on this front.
- [#272]: Serialization of the `Tokenizer` and all the parts (`PreTokenizer`, `Normalizer`, ...)
using serde. It is now easy to save/load an entire tokenizer.
- [#289]: Ability to pad to a multiple of a specified value. This is especially useful to ensure
activation of the Tensor Cores, while ensuring padding to a multiple of 8.
- [#298]: Ability to get the currently set truncation/padding params
- [#311]: Ability to enable/disable the parallelism using the `TOKENIZERS_PARALLELISM` environment
variable.
- [#403]: Add `TemplateProcessing` `PostProcessor`.
### How to migrate
- Replace any `XXX_to_YYY_offsets()` method call by any of the new ones.
- Specify the `add_prefix_space` and `trim_offsets` options on `RobertaProcessing` if you don't
want the offsets trimmed out (see the sketch after this list).
- Any custom `PostProcessor` now handles offsets relative to the original string (as opposed to the
normalized one).
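A minimal sketch of the second point (not part of the original changelog; the token strings and ids are illustrative, and the `tokenizers::processors::roberta` module path is assumed):
```rust
use tokenizers::processors::roberta::RobertaProcessing;
fn main() {
    // Keep the ByteLevel whitespaces inside the reported offsets (illustrative ids).
    let _post_processor = RobertaProcessing::new(("</s>".into(), 2), ("<s>".into(), 0))
        .trim_offsets(false)
        .add_prefix_space(true);
}
```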
## [0.10.1]
### Fixed
- [#226]: Fix the word indexes when there are special tokens
## [0.10.0]
### Changed
- [#222]: All Tokenizer's subparts must now be `Send + Sync`
### Added
- [#208]: Ability to retrieve the vocabulary from the `Tokenizer` & `Model`
### Fixed
- [#205]: Trim the decoded string in `BPEDecoder`
- [b770f36]: Fix a bug with added tokens generated IDs
## [0.9.0]
### Changed
- Only one progress bar while reading files during training. This is better for use-cases with
a high number of files as it avoids having too many progress bars on screen. Also avoids reading the
size of each file before actually reading these files, as this process could take a really long time.
- [#190]: Improved BPE and WordPiece builders
- [#193]: `encode` and `encode_batch` now take a new argument, specifying whether we should add the
special tokens
- [#197]: The `NormalizedString` has been removed from the `Encoding`. It is now possible to
retrieve it by calling `normalize` on the `Tokenizer`. This brings a reduction of 70% of the memory
footprint
- [#197]: The `NormalizedString` API has been improved. It is now possible to retrieve parts of both
strings using both "normalized" or "original" offsets
- [#197]: The offsets provided on `Encoding` are now relative to the original string, and not the
normalized one anymore
- `AddedToken` are now used for both `add_special_tokens` and `add_tokens`. Also, these AddedToken
have more options to allow various behaviors.
### Added
- [#188]: `impl PostProcessor for ByteLevel`: Handles trimming the offsets if activated. This avoids
the unintuitive inclusion of the whitespaces in the produced offsets, even if these whitespaces are
part of the actual token
- More alignment mappings on the `Encoding`.
- `post_process` can be called on the `Tokenizer`
### Fixed
- [#193]: Fix some issues with the offsets being wrong with the `ByteLevel` BPE:
- when `add_prefix_space` is activated
- [#156]: when a Unicode character gets split-up in multiple byte-level characters
- Fix a bug where offsets were wrong when there was any added tokens in the sequence being encoded.
- [#175]: Fix a bug that prevented the addition of more than a certain amount of tokens (even if not
advised, but that's not the question)
### How to migrate
- Add the `ByteLevel` `PostProcessor` to your byte-level BPE tokenizers if relevant.
## [0.8.0]
### Changed
- [#165]: Big improvements in speed for BPE (Both training and tokenization)
### Fixed
- [#163]: Do not open all files directly while training
- [#156]: There was a bug in ByteLevel PreTokenizer that caused offsets to be wrong if a char got
split up in multiple bytes
- [#174]: The `LongestFirst` truncation strategy had a bug
[#1072]: https://github.com/huggingface/tokenizers/pull/1072
[#956]: https://github.com/huggingface/tokenizers/pull/956
[#1008]: https://github.com/huggingface/tokenizers/pull/1008
[#1009]: https://github.com/huggingface/tokenizers/pull/1009
[#1047]: https://github.com/huggingface/tokenizers/pull/1047
[#1055]: https://github.com/huggingface/tokenizers/pull/1055
[#1051]: https://github.com/huggingface/tokenizers/pull/1051
[#1052]: https://github.com/huggingface/tokenizers/pull/1052
[#938]: https://github.com/huggingface/tokenizers/pull/938
[#939]: https://github.com/huggingface/tokenizers/pull/939
[#952]: https://github.com/huggingface/tokenizers/pull/952
[#954]: https://github.com/huggingface/tokenizers/pull/954
[#961]: https://github.com/huggingface/tokenizers/pull/961
[#960]: https://github.com/huggingface/tokenizers/pull/960
[#919]: https://github.com/huggingface/tokenizers/pull/919
[#916]: https://github.com/huggingface/tokenizers/pull/916
[#884]: https://github.com/huggingface/tokenizers/pull/884
[#882]: https://github.com/huggingface/tokenizers/pull/882
[#868]: https://github.com/huggingface/tokenizers/pull/868
[#860]: https://github.com/huggingface/tokenizers/pull/860
[#403]: https://github.com/huggingface/tokenizers/pull/403
[#377]: https://github.com/huggingface/tokenizers/pull/377
[#355]: https://github.com/huggingface/tokenizers/pull/355
[#363]: https://github.com/huggingface/tokenizers/pull/363
[#330]: https://github.com/huggingface/tokenizers/pull/330
[#311]: https://github.com/huggingface/tokenizers/pull/311
[#309]: https://github.com/huggingface/tokenizers/pull/309
[#298]: https://github.com/huggingface/tokenizers/pull/298
[#289]: https://github.com/huggingface/tokenizers/pull/289
[#286]: https://github.com/huggingface/tokenizers/pull/286
[#280]: https://github.com/huggingface/tokenizers/pull/280
[#276]: https://github.com/huggingface/tokenizers/pull/276
[#272]: https://github.com/huggingface/tokenizers/pull/272
[#249]: https://github.com/huggingface/tokenizers/pull/249
[b770f36]: https://github.com/huggingface/tokenizers/commit/b770f364280af33efeffea8f0003102cda8cf1b7
[#236]: https://github.com/huggingface/tokenizers/pull/236
[#234]: https://github.com/huggingface/tokenizers/pull/234
[#226]: https://github.com/huggingface/tokenizers/pull/226
[#222]: https://github.com/huggingface/tokenizers/pull/222
[#208]: https://github.com/huggingface/tokenizers/pull/208
[#205]: https://github.com/huggingface/tokenizers/issues/205
[#197]: https://github.com/huggingface/tokenizers/pull/197
[#193]: https://github.com/huggingface/tokenizers/pull/193
[#190]: https://github.com/huggingface/tokenizers/pull/190
[#188]: https://github.com/huggingface/tokenizers/pull/188
[#175]: https://github.com/huggingface/tokenizers/issues/175
[#174]: https://github.com/huggingface/tokenizers/issues/174
[#165]: https://github.com/huggingface/tokenizers/pull/165
[#163]: https://github.com/huggingface/tokenizers/issues/163
[#156]: https://github.com/huggingface/tokenizers/pull/156
| tokenizers/tokenizers/CHANGELOG.md/0 | {
"file_path": "tokenizers/tokenizers/CHANGELOG.md",
"repo_id": "tokenizers",
"token_count": 3387
} |
<div align="center">
<h1><code>wasm-pack-template</code></h1>
<strong>A template for kick starting a Rust and WebAssembly project using <a href="https://github.com/rustwasm/wasm-pack">wasm-pack</a>.</strong>
<p>
<a href="https://travis-ci.org/rustwasm/wasm-pack-template"><img src="https://img.shields.io/travis/rustwasm/wasm-pack-template.svg?style=flat-square" alt="Build Status" /></a>
</p>
<h3>
<a href="https://rustwasm.github.io/docs/wasm-pack/tutorials/npm-browser-packages/index.html">Tutorial</a>
<span> | </span>
<a href="https://discordapp.com/channels/442252698964721669/443151097398296587">Chat</a>
</h3>
<sub>Built with 🦀🕸 by <a href="https://rustwasm.github.io/">The Rust and WebAssembly Working Group</a></sub>
</div>
## About
This is an example project showing off a very basic use case: using the `tokenizers`
library from `wasm`.
[**📚 Read this template tutorial! 📚**][template-docs]
This template is designed for compiling Rust libraries into WebAssembly and
publishing the resulting package to NPM.
Be sure to check out [other `wasm-pack` tutorials online][tutorials] for other
templates and usages of `wasm-pack`.
[tutorials]: https://rustwasm.github.io/docs/wasm-pack/tutorials/index.html
[template-docs]: https://rustwasm.github.io/docs/wasm-pack/tutorials/npm-browser-packages/index.html
## 🚴 Usage
### 🐑 Use `cargo generate` to Clone this Template
[Learn more about `cargo generate` here.](https://github.com/ashleygwilliams/cargo-generate)
```
cargo generate --git https://github.com/rustwasm/wasm-pack-template.git --name my-project
cd my-project
```
### 🛠️ Build with `wasm-pack build`
```
wasm-pack build
```
### 🔬 Test in Headless Browsers with `wasm-pack test`
```
wasm-pack test --headless --firefox
```
### 🎁 Publish to NPM with `wasm-pack publish`
```
wasm-pack publish
```
## 🔋 Batteries Included
* [`wasm-bindgen`](https://github.com/rustwasm/wasm-bindgen) for communicating
between WebAssembly and JavaScript.
* [`console_error_panic_hook`](https://github.com/rustwasm/console_error_panic_hook)
for logging panic messages to the developer console.
* [`wee_alloc`](https://github.com/rustwasm/wee_alloc), an allocator optimized
for small code size.
| tokenizers/tokenizers/examples/unstable_wasm/README.md/0 | {
"file_path": "tokenizers/tokenizers/examples/unstable_wasm/README.md",
"repo_id": "tokenizers",
"token_count": 811
} |
use rand::distributions::WeightedIndex;
use rand::prelude::*;
use std::cell::RefCell;
use std::cmp::{min, Ordering};
use std::collections::BinaryHeap;
use std::rc::Rc;
type NodeRef = Rc<RefCell<Node>>;
type HypothesisRef = Rc<RefCell<Hypothesis>>;
type Agenda = BinaryHeap<Hypothesis>;
struct Hypothesis {
node_ref: NodeRef,
next: Option<HypothesisRef>,
fx: f64,
gx: f64,
}
impl Hypothesis {
pub fn new(node_ref: NodeRef, next: Option<HypothesisRef>, fx: f64, gx: f64) -> Self {
Self {
node_ref,
next,
fx,
gx,
}
}
}
impl PartialEq for Hypothesis {
fn eq(&self, other: &Self) -> bool {
self.fx == other.fx
}
}
impl Eq for Hypothesis {}
impl PartialOrd for Hypothesis {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
// TODO Maybe use Ordered Floats (https://docs.rs/ordered-float/1.0.2/ordered_float/)
impl Ord for Hypothesis {
fn cmp(&self, other: &Self) -> Ordering {
if self.fx < other.fx {
Ordering::Less
} else {
Ordering::Greater
}
}
}
/// Structure to implement Viterbi algorithm to find the best encoding, or sample
/// from all possible encodings of a given sentence.
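///
/// A minimal usage sketch (mirroring the unit tests at the bottom of this file):
/// build a lattice over a sentence, insert candidate tokens with their scores,
/// then run Viterbi. The ids 1 and 2 play the role of bos/eos here.
///
/// ```ignore
/// let mut lattice = Lattice::from("ABC", 1, 2);
/// lattice.insert(0, 1, 0.0, 3); // "A"
/// lattice.insert(1, 1, 0.0, 4); // "B"
/// lattice.insert(2, 1, 0.0, 5); // "C"
/// lattice.insert(0, 3, 10.0, 6); // "ABC", highest score
/// assert_eq!(lattice.tokens(), ["ABC"]);
/// ```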
#[derive(Debug)]
pub struct Lattice<'a> {
pub(super) sentence: &'a str,
len: usize,
nodes: Vec<NodeRef>,
pub(super) begin_nodes: Vec<Vec<NodeRef>>,
pub(super) end_nodes: Vec<Vec<NodeRef>>,
_bos_id: usize,
_eos_id: usize,
}
impl std::fmt::Display for Lattice<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let display_pieces = |nodes: &Vec<Vec<NodeRef>>| {
nodes
.iter()
.map(|l| {
l.iter()
.map(|n| self.piece(&n.borrow()))
.collect::<Vec<_>>()
})
.collect::<Vec<_>>()
};
f.debug_struct("Lattice")
.field("sentence", &self.sentence)
.field("begin_nodes", &display_pieces(&self.begin_nodes))
.field("end_nodes", &display_pieces(&self.end_nodes))
.finish()
}
}
/// A node from the lattice, that helps reconstruct the underlying `String`
#[derive(Debug, Clone)]
pub struct Node {
// Vocabulary id
pub(super) id: usize,
// Local lattice identifier
pub(super) node_id: usize,
pos: usize,
length: usize,
prev: Option<NodeRef>,
backtrace_score: f64,
score: f64,
}
impl PartialEq for Node {
fn eq(&self, other: &Node) -> bool {
self.id == other.id
}
}
impl Node {
pub fn new(id: usize, node_id: usize, pos: usize, length: usize, score: f64) -> Self {
Self {
id,
node_id,
pos,
length,
prev: None,
score,
backtrace_score: 0.0,
}
}
}
/// Returns log(exp(x) + exp(y)).
/// if init_mode is true, returns log(exp(y)) == y.
/// log(\sum_i exp(a[i])) can be computed as
/// for (int i = 0; i < a.size(); ++i)
/// x = LogSumExp(x, a[i], i == 0);
fn log_sum_exp(x: f64, y: f64, init_mode: bool) -> f64 {
if init_mode {
y
} else {
let (vmin, vmax) = if x > y { (y, x) } else { (x, y) };
let k_minus_log_epsilon = 50.0;
if vmax > vmin + k_minus_log_epsilon {
vmax
} else {
vmax + ((vmin - vmax).exp() + 1.0).ln()
}
}
}
impl<'a> Lattice<'a> {
pub fn from(sentence: &'a str, bos_id: usize, eos_id: usize) -> Self {
let len = sentence.len();
let k_reserved_node_size = 16;
// We are adding 2 tokens, bos and eos
let mut nodes: Vec<NodeRef> = Vec::with_capacity(k_reserved_node_size);
let mut begin_nodes = vec![Vec::with_capacity(k_reserved_node_size); len + 1];
let mut end_nodes = vec![Vec::with_capacity(k_reserved_node_size); len + 1];
let bos = Rc::new(RefCell::new(Node::new(bos_id, 0, 0, 0, 0.0)));
let eos = Rc::new(RefCell::new(Node::new(eos_id, 1, len, 0, 0.0)));
begin_nodes[len].push(Rc::clone(&eos));
end_nodes[0].push(Rc::clone(&bos));
nodes.push(bos);
nodes.push(eos);
Self {
sentence,
len,
nodes,
begin_nodes,
end_nodes,
_bos_id: bos_id,
_eos_id: eos_id,
}
}
pub fn insert(&mut self, pos: usize, length: usize, score: f64, id: usize) {
let node_id = self.nodes.len();
let node = Rc::new(RefCell::new(Node::new(id, node_id, pos, length, score)));
self.begin_nodes[pos].push(Rc::clone(&node));
self.end_nodes[pos + length].push(Rc::clone(&node));
self.nodes.push(node);
}
pub fn viterbi(&mut self) -> Vec<NodeRef> {
let len = self.len;
let mut pos = 0;
while pos <= len {
if self.begin_nodes[pos].is_empty() {
return vec![];
}
for rnode in &self.begin_nodes[pos] {
rnode.borrow_mut().prev = None;
let mut best_score = 0.0;
let mut best_node: Option<NodeRef> = None;
for lnode in &self.end_nodes[pos] {
let score = lnode.borrow().backtrace_score + rnode.borrow().score;
if best_node.is_none() || score > best_score {
// TODO can we remove this clone ?
best_node = Some(lnode.clone());
best_score = score
}
}
match best_node {
Some(bnode) => {
rnode.borrow_mut().prev = Some(Rc::clone(&bnode));
rnode.borrow_mut().backtrace_score = best_score;
}
None => return vec![],
}
}
if let Some(c) = self.sentence[pos..].chars().next() {
pos += c.len_utf8();
} else {
break;
}
}
let mut results: Vec<NodeRef> = vec![];
let root = self.begin_nodes[len][0].borrow();
let prev = root.prev.as_ref();
if prev.is_none() {
return vec![];
}
let mut node: NodeRef = prev.unwrap().clone();
while node.borrow().prev.is_some() {
results.push(node.clone());
let n = node.borrow().clone();
node = n.prev.as_ref().unwrap().clone();
}
results.reverse();
results
}
pub fn piece(&self, node: &Node) -> String {
self.sentence[node.pos..node.pos + node.length].to_owned()
}
pub fn tokens(&mut self) -> Vec<String> {
self.viterbi()
.iter()
.map(|node| self.piece(&node.borrow()))
.collect()
}
pub fn nbest(&mut self, n: usize) -> Vec<Vec<NodeRef>> {
match n {
0 => vec![],
1 => vec![self.viterbi()],
_ => {
// let k_reserved_hypothesis_size = 512;
let mut agenda: Agenda = BinaryHeap::new();
let mut hypotheses: Vec<Vec<NodeRef>> = vec![];
let eos = self.eos_node();
let score = eos.borrow().score;
let hypo = Hypothesis::new(eos, None, score, score);
agenda.push(hypo);
// Fill backtrace scores
self.viterbi();
while !agenda.is_empty() {
let top = Rc::new(RefCell::new(agenda.pop().unwrap()));
let node = Rc::clone(&top.borrow().node_ref);
if node.borrow().id == self.bos_node().borrow().id {
let mut hypothesis = vec![];
let mut next: HypothesisRef =
Rc::clone(top.borrow().next.as_ref().unwrap());
while next.borrow().next.is_some() {
hypothesis.push(next.borrow().node_ref.clone());
let c: HypothesisRef = next.clone();
// let c: Ref<Hypothesis> = next.clone().borrow();
next = Rc::clone(c.borrow().next.as_ref().unwrap());
}
hypotheses.push(hypothesis);
if hypotheses.len() == n {
return hypotheses;
}
} else {
for lnode in &self.end_nodes[node.borrow().pos] {
let top_gx = top.borrow().gx;
let fx = lnode.borrow().backtrace_score + top_gx;
let gx = lnode.borrow().score + top_gx;
let hyp =
Hypothesis::new(Rc::clone(lnode), Some(Rc::clone(&top)), fx, gx);
agenda.push(hyp);
}
// When the input is too long or contains duplicated phrases,
// `agenda` will get extremely big. Here we avoid this case by
// dynamically shrinking the agenda.
let k_max_agenda_size = 100_000;
let k_min_agenda_size = 512;
if agenda.len() > k_max_agenda_size {
let mut new_agenda = BinaryHeap::new();
let len = min(k_min_agenda_size, n * 10);
for _i in 0..len {
new_agenda.push(agenda.pop().unwrap());
}
agenda = new_agenda;
}
}
}
hypotheses
}
}
}
pub fn nbest_tokens(&mut self, n: usize) -> Vec<Vec<String>> {
self.nbest(n)
.iter()
.map(|v| v.iter().map(|node| self.piece(&node.borrow())).collect())
.collect()
}
pub fn len(&self) -> usize {
self.len
}
pub fn is_empty(&self) -> bool {
self.len == 0
}
pub fn bos_node(&self) -> NodeRef {
Rc::clone(&self.end_nodes[0][0])
}
pub fn eos_node(&self) -> NodeRef {
Rc::clone(&self.begin_nodes[self.len][0])
}
pub fn surface(&self, n: usize) -> &str {
match self.sentence.char_indices().nth(n) {
Some((pos, _)) => &self.sentence[pos..],
None => "",
}
}
pub fn sentence(&self) -> &str {
self.sentence
}
pub fn populate_marginal(&self, freq: f64, expected: &mut [f64]) -> f64 {
let len = self.len();
let n_nodes = self.nodes.len();
let mut alpha = vec![0.0; n_nodes];
let mut beta = vec![0.0; n_nodes];
for pos in 0..=len {
for rnode in &self.begin_nodes[pos] {
for lnode in &self.end_nodes[pos] {
let lid = lnode.borrow().node_id;
let rid = rnode.borrow().node_id;
alpha[rid] = log_sum_exp(
alpha[rid],
lnode.borrow().score + alpha[lid],
*lnode == self.end_nodes[pos][0],
);
}
}
}
for pos in (0..=len).rev() {
// let rpos = len - pos;
for lnode in &self.end_nodes[pos] {
for rnode in &self.begin_nodes[pos] {
let lid = lnode.borrow().node_id;
let rid = rnode.borrow().node_id;
beta[lid] = log_sum_exp(
beta[lid],
rnode.borrow().score + beta[rid],
*rnode == self.begin_nodes[pos][0],
);
}
}
}
let eos_id = self.begin_nodes[len][0].borrow().node_id;
let z = alpha[eos_id];
for pos in 0..len {
for node in &self.begin_nodes[pos] {
let node_id = node.borrow().node_id;
let id = node.borrow().id;
let a = alpha[node_id];
let b = beta[node_id];
let total = a + node.borrow().score + b - z;
let update = freq * total.exp();
expected[id] += update;
}
}
freq * z
}
pub fn sample(&self, theta: f64) -> Vec<NodeRef> {
let len = self.len();
if len == 0 {
return vec![];
}
let mut alpha = vec![0.0; self.nodes.len()];
for pos in 0..=len {
for rnode in &self.begin_nodes[pos] {
for lnode in &self.end_nodes[pos] {
let lid = lnode.borrow().node_id;
let rid = rnode.borrow().node_id;
alpha[rid] = log_sum_exp(
alpha[rid],
theta * (lnode.borrow().score + alpha[lid]),
*lnode == self.end_nodes[pos][0],
);
}
}
}
let mut rng = thread_rng();
let mut results: Vec<NodeRef> = vec![];
let mut probs: Vec<f64> = vec![];
let mut z = alpha[self.eos_node().borrow().node_id];
let mut node = self.eos_node();
loop {
probs.clear();
let pos = node.borrow().pos;
for lnode in &self.end_nodes[pos] {
let lid = lnode.borrow().node_id;
probs.push((alpha[lid] + theta * lnode.borrow().score - z).exp())
}
let dist = WeightedIndex::new(&probs).unwrap();
let index = dist.sample(&mut rng);
node = Rc::clone(&self.end_nodes[pos][index]);
if node == self.bos_node() {
break;
}
z = alpha[node.borrow().node_id];
results.push(Rc::clone(&node));
}
results.reverse();
results
}
pub fn sample_token(&self, theta: f64) -> Vec<String> {
self.sample(theta)
.iter()
.map(|node| self.piece(&node.borrow()))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::*;
use assert_approx_eq::assert_approx_eq;
#[test]
fn set_sentence() {
let lattice = Lattice::from("", 1, 2);
assert_eq!(lattice.len(), 0);
let lattice = Lattice::from("", 1, 2);
assert_eq!(lattice.len(), 0);
assert_eq!(lattice.sentence(), "");
assert_eq!(lattice.surface(0), "");
let lattice = Lattice::from("test", 1, 2);
assert_eq!(lattice.len(), 4);
assert_eq!(lattice.sentence(), "test");
assert_eq!(lattice.surface(0), "test");
assert_eq!(lattice.surface(1), "est");
assert_eq!(lattice.surface(2), "st");
assert_eq!(lattice.surface(3), "t");
let bos = lattice.bos_node();
let eos = lattice.eos_node();
assert_eq!(bos.borrow().id, 1);
assert_eq!(eos.borrow().id, 2);
assert_eq!(
lattice.end_nodes[0].first().unwrap().borrow().id,
bos.borrow().id
);
assert_eq!(
lattice.begin_nodes[4].first().unwrap().borrow().id,
eos.borrow().id
);
let lattice = Lattice::from("テストab", 1, 2);
assert_eq!(lattice.len(), 11);
assert_eq!(lattice.sentence(), "テストab");
assert_eq!(lattice.surface(0), "テストab");
assert_eq!(lattice.surface(1), "ストab");
assert_eq!(lattice.surface(2), "トab");
assert_eq!(lattice.surface(3), "ab");
assert_eq!(lattice.surface(4), "b");
}
#[test]
fn insert_test() {
let mut lattice = Lattice::from("ABあい", 1, 2);
lattice.insert(0, 1, 0.0, 3);
lattice.insert(1, 1, 0.0, 4);
lattice.insert(2, 3, 0.0, 5);
lattice.insert(5, 3, 0.0, 6);
lattice.insert(0, 2, 0.0, 7);
lattice.insert(1, 4, 0.0, 8);
lattice.insert(2, 6, 0.0, 9);
// 0 & 1 are bos and eos
let node0 = lattice.nodes[2].borrow();
let node1 = lattice.nodes[3].borrow();
let node2 = lattice.nodes[4].borrow();
let node3 = lattice.nodes[5].borrow();
let node4 = lattice.nodes[6].borrow();
let node5 = lattice.nodes[7].borrow();
let node6 = lattice.nodes[8].borrow();
assert_eq!(lattice.piece(&node0), "A");
assert_eq!(lattice.piece(&node1), "B");
assert_eq!(lattice.piece(&node2), "あ");
assert_eq!(lattice.piece(&node3), "い");
assert_eq!(lattice.piece(&node4), "AB");
assert_eq!(lattice.piece(&node5), "Bあ");
assert_eq!(lattice.piece(&node6), "あい");
assert_eq!(node0.pos, 0);
assert_eq!(node1.pos, 1);
assert_eq!(node2.pos, 2);
assert_eq!(node3.pos, 5);
assert_eq!(node4.pos, 0);
assert_eq!(node5.pos, 1);
assert_eq!(node6.pos, 2);
assert_eq!(node0.length, 1);
assert_eq!(node1.length, 1);
assert_eq!(node2.length, 3);
assert_eq!(node3.length, 3);
assert_eq!(node4.length, 2);
assert_eq!(node5.length, 4);
assert_eq!(node6.length, 6);
assert_eq!(lattice.bos_node().borrow().id, 1);
assert_eq!(lattice.eos_node().borrow().id, 2);
assert_eq!(node0.id, 3);
assert_eq!(node1.id, 4);
assert_eq!(node2.id, 5);
assert_eq!(node3.id, 6);
assert_eq!(node4.id, 7);
assert_eq!(node5.id, 8);
assert_eq!(node6.id, 9);
assert_eq!(lattice.begin_nodes[0].len(), 2);
assert_eq!(lattice.begin_nodes[1].len(), 2);
assert_eq!(lattice.begin_nodes[2].len(), 2);
assert_eq!(lattice.begin_nodes[5].len(), 1);
assert_eq!(lattice.begin_nodes[8].len(), 1);
assert_eq!(lattice.end_nodes[0].len(), 1);
assert_eq!(lattice.end_nodes[1].len(), 1);
assert_eq!(lattice.end_nodes[2].len(), 2);
assert_eq!(lattice.end_nodes[5].len(), 2);
assert_eq!(lattice.end_nodes[8].len(), 2);
assert_eq!(lattice.begin_nodes[0][0].borrow().id, node0.id);
assert_eq!(lattice.begin_nodes[0][1].borrow().id, node4.id);
assert_eq!(lattice.begin_nodes[1][0].borrow().id, node1.id);
assert_eq!(lattice.begin_nodes[1][1].borrow().id, node5.id);
assert_eq!(lattice.begin_nodes[2][0].borrow().id, node2.id);
assert_eq!(lattice.begin_nodes[2][1].borrow().id, node6.id);
assert_eq!(lattice.begin_nodes[5][0].borrow().id, node3.id);
assert_eq!(
lattice.eos_node().borrow().id,
lattice.begin_nodes[8][0].borrow().id
);
assert_eq!(
lattice.bos_node().borrow().id,
lattice.end_nodes[0][0].borrow().id
);
assert_eq!(node0.id, lattice.end_nodes[1][0].borrow().id);
assert_eq!(node1.id, lattice.end_nodes[2][0].borrow().id);
assert_eq!(node4.id, lattice.end_nodes[2][1].borrow().id);
assert_eq!(node2.id, lattice.end_nodes[5][0].borrow().id);
assert_eq!(node5.id, lattice.end_nodes[5][1].borrow().id);
assert_eq!(node3.id, lattice.end_nodes[8][0].borrow().id);
assert_eq!(node6.id, lattice.end_nodes[8][1].borrow().id);
}
#[test]
fn test_viterbi() {
let mut lattice = Lattice::from("ABC", 1, 2);
assert_eq!(lattice.viterbi(), vec![]);
// Still incomplete
lattice.insert(0, 1, 0.0, 3);
assert_eq!(lattice.viterbi(), vec![]);
lattice.insert(1, 1, 0.0, 4);
lattice.insert(2, 1, 0.0, 5);
// XXX: In sentence piece this is not tested, still incomplete ?
assert_eq!(lattice.viterbi().len(), 3);
}
#[test]
fn test_viterbi2() {
let mut lattice = Lattice::from("ABC", 1, 2);
lattice.insert(0, 1, 0.0, 3);
lattice.insert(1, 1, 0.0, 4);
lattice.insert(2, 1, 0.0, 5);
assert_eq!(lattice.tokens(), ["A", "B", "C"]);
lattice.insert(0, 2, 2.0, 6);
assert_eq!(lattice.tokens(), ["AB", "C"]);
lattice.insert(1, 2, 5.0, 7);
assert_eq!(lattice.tokens(), ["A", "BC"]);
lattice.insert(0, 3, 10.0, 8);
assert_eq!(lattice.tokens(), ["ABC"]);
}
#[test]
fn test_nbest() {
let mut lattice = Lattice::from("ABC", 1, 2);
lattice.insert(0, 1, 0.0, 3);
lattice.insert(1, 1, 0.0, 4);
lattice.insert(2, 1, 0.0, 5);
lattice.insert(0, 2, 2.0, 6);
lattice.insert(1, 2, 5.0, 7);
lattice.insert(0, 3, 10.0, 8);
let nbests = lattice.nbest_tokens(10);
assert_eq!(
nbests,
vec![
vec!["ABC"],
vec!["A", "BC"],
vec!["AB", "C"],
vec!["A", "B", "C"]
]
);
assert!(lattice.nbest_tokens(0).is_empty());
assert_eq!(lattice.nbest_tokens(1), vec![vec!["ABC"]]);
}
#[test]
fn test_log_sum_exp() {
let mut x = 0.0;
let v: Vec<f64> = vec![1.0, 2.0, 3.0];
for (i, y) in v.iter().enumerate() {
x = log_sum_exp(x, *y, i == 0);
}
assert_approx_eq!(x, v.iter().map(|n| n.exp()).sum::<f64>().ln(), 0.001);
}
#[test]
fn test_populate() {
let mut lattice = Lattice::from("ABC", 1, 2);
lattice.insert(0, 1, 1.0, 3); // A
lattice.insert(1, 1, 1.2, 4); // B
lattice.insert(2, 1, 2.5, 5); // C
lattice.insert(0, 2, 3.0, 6); // AB
lattice.insert(1, 2, 4.0, 7); // BC
lattice.insert(0, 3, 2.0, 8); // ABC
let mut probs = vec![0.0; 9];
let p1 = (1.0_f64 + 1.2 + 2.5).exp();
let p2 = (3.0_f64 + 2.5).exp();
let p3 = (1.0_f64 + 4.0).exp();
let p4 = 2.0_f64.exp();
let z = p1 + p2 + p3 + p4;
let log_z = lattice.populate_marginal(1.0, &mut probs);
assert_approx_eq!(log_z, z.ln(), 0.001);
assert_approx_eq!(probs[0], 0.0, 0.001);
assert_approx_eq!(probs[1], 0.0, 0.001);
assert_approx_eq!(probs[2], 0.0, 0.001);
assert_approx_eq!(probs[3], (p1 + p3) / z, 0.001);
assert_approx_eq!(probs[4], (p1) / z, 0.001);
assert_approx_eq!(probs[5], (p1 + p2) / z, 0.001);
assert_approx_eq!(probs[6], (p2) / z, 0.001);
assert_approx_eq!(probs[7], (p3) / z, 0.001);
assert_approx_eq!(probs[8], (p4) / z, 0.001);
}
}
| tokenizers/tokenizers/src/models/unigram/lattice.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/unigram/lattice.rs",
"repo_id": "tokenizers",
"token_count": 12682
} |
use crate::tokenizer::{NormalizedString, Normalizer, Result};
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(tag = "type")]
pub struct Prepend {
pub prepend: String,
}
impl Prepend {
pub fn new(prepend: String) -> Self {
Self { prepend }
}
}
impl Normalizer for Prepend {
/// Prepend the configured string to the normalized string in place
fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
if !normalized.is_empty() {
normalized.prepend(&self.prepend);
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_prepend() {
let original = "Hello";
let normalized = "▁Hello";
assert_ne!(original, normalized);
let mut n = NormalizedString::from(original);
let prepend = Prepend::new("▁".to_string());
prepend.normalize(&mut n).unwrap();
assert_eq!(&n.get(), &normalized);
assert_eq!(
n,
NormalizedString::new(
original.to_string(),
normalized.to_string(),
vec![
(0, 1),
(0, 1),
(0, 1),
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5)
],
0
)
);
assert_eq!(
n.alignments_original(),
vec![(0, 4), (4, 5), (5, 6), (6, 7), (7, 8)]
);
}
}
| tokenizers/tokenizers/src/normalizers/prepend.rs/0 | {
"file_path": "tokenizers/tokenizers/src/normalizers/prepend.rs",
"repo_id": "tokenizers",
"token_count": 856
} |
// Generated by modified Perl script at https://github.com/google/sentencepiece/blob/master/data/gen_unicode_scripts_code.pl
// Unicode scripts : https://gist.github.com/Narsil/07556f26dc84a6baeff4d499e68d3cd2
// Rust adaptation : https://gist.github.com/Narsil/1df9fbbf5296a8d4d62de55dcb2fe700
#[derive(PartialEq, Debug, Clone, Copy, Eq)]
pub enum Script {
Any,
Adlam,
Ahom,
AnatolianHieroglyphs,
Arabic,
Armenian,
Avestan,
Balinese,
Bamum,
BassaVah,
Batak,
Bengali,
Bhaiksuki,
Bopomofo,
Brahmi,
Braille,
Buginese,
Buhid,
CanadianAboriginal,
Carian,
CaucasianAlbanian,
Chakma,
Cham,
Cherokee,
Common,
Coptic,
Cuneiform,
Cypriot,
Cyrillic,
Deseret,
Devanagari,
Duployan,
EgyptianHieroglyphs,
Elbasan,
Ethiopic,
Georgian,
Glagolitic,
Gothic,
Grantha,
Greek,
Gujarati,
Gurmukhi,
Han,
Hangul,
Hanunoo,
Hatran,
Hebrew,
Hiragana,
ImperialAramaic,
Inherited,
InscriptionalPahlavi,
InscriptionalParthian,
Javanese,
Kaithi,
Kannada,
Katakana,
KayahLi,
Kharoshthi,
Khmer,
Khojki,
Khudawadi,
Lao,
Latin,
Lepcha,
Limbu,
LinearA,
LinearB,
Lisu,
Lycian,
Lydian,
Mahajani,
Malayalam,
Mandaic,
Manichaean,
Marchen,
MeeteiMayek,
MendeKikakui,
MeroiticCursive,
MeroiticHieroglyphs,
Miao,
Modi,
Mongolian,
Mro,
Multani,
Myanmar,
Nabataean,
NewTaiLue,
Newa,
Nko,
Ogham,
OlChiki,
OldHungarian,
OldItalic,
OldNorthArabian,
OldPermic,
OldPersian,
OldSouthArabian,
OldTurkic,
Oriya,
Osage,
Osmanya,
PahawhHmong,
Palmyrene,
PauCinHau,
PhagsPa,
Phoenician,
PsalterPahlavi,
Rejang,
Runic,
Samaritan,
Saurashtra,
Sharada,
Shavian,
Siddham,
SignWriting,
Sinhala,
SoraSompeng,
Sundanese,
SylotiNagri,
Syriac,
Tagalog,
Tagbanwa,
TaiLe,
TaiTham,
TaiViet,
Takri,
Tamil,
Tangut,
Telugu,
Thaana,
Thai,
Tibetan,
Tifinagh,
Tirhuta,
Ugaritic,
Vai,
WarangCiti,
Yi,
}
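// Illustrative lookup sketch (added for clarity; not part of the generated
// file). The expected values come straight from ranges in the table below,
// e.g. 0x0041..=0x005A => Latin, 0x0030..=0x0039 => Common and
// 0x4E00..=0x9FD5 => Han.
#[allow(dead_code)]
fn get_script_sketch() {
    assert_eq!(get_script('A'), Script::Latin);
    assert_eq!(get_script('0'), Script::Common);
    assert_eq!(get_script('語'), Script::Han);
}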
pub fn get_script(c: char) -> Script {
match c as u32 {
0x0000..=0x001F => Script::Common,
0x0020 => Script::Common,
0x0021..=0x0023 => Script::Common,
0x0024 => Script::Common,
0x0025..=0x0027 => Script::Common,
0x0028 => Script::Common,
0x0029 => Script::Common,
0x002A => Script::Common,
0x002B => Script::Common,
0x002C => Script::Common,
0x002D => Script::Common,
0x002E..=0x002F => Script::Common,
0x0030..=0x0039 => Script::Common,
0x003A..=0x003B => Script::Common,
0x003C..=0x003E => Script::Common,
0x003F..=0x0040 => Script::Common,
0x005B => Script::Common,
0x005C => Script::Common,
0x005D => Script::Common,
0x005E => Script::Common,
0x005F => Script::Common,
0x0060 => Script::Common,
0x007B => Script::Common,
0x007C => Script::Common,
0x007D => Script::Common,
0x007E => Script::Common,
0x007F..=0x009F => Script::Common,
0x00A0 => Script::Common,
0x00A1 => Script::Common,
0x00A2..=0x00A5 => Script::Common,
0x00A6 => Script::Common,
0x00A7 => Script::Common,
0x00A8 => Script::Common,
0x00A9 => Script::Common,
0x00AB => Script::Common,
0x00AC => Script::Common,
0x00AD => Script::Common,
0x00AE => Script::Common,
0x00AF => Script::Common,
0x00B0 => Script::Common,
0x00B1 => Script::Common,
0x00B2..=0x00B3 => Script::Common,
0x00B4 => Script::Common,
0x00B5 => Script::Common,
0x00B6..=0x00B7 => Script::Common,
0x00B8 => Script::Common,
0x00B9 => Script::Common,
0x00BB => Script::Common,
0x00BC..=0x00BE => Script::Common,
0x00BF => Script::Common,
0x00D7 => Script::Common,
0x00F7 => Script::Common,
0x02B9..=0x02C1 => Script::Common,
0x02C2..=0x02C5 => Script::Common,
0x02C6..=0x02D1 => Script::Common,
0x02D2..=0x02DF => Script::Common,
0x02E5..=0x02E9 => Script::Common,
0x02EC => Script::Common,
0x02ED => Script::Common,
0x02EE => Script::Common,
0x02EF..=0x02FF => Script::Common,
0x0374 => Script::Common,
0x037E => Script::Common,
0x0385 => Script::Common,
0x0387 => Script::Common,
0x0589 => Script::Common,
0x0605 => Script::Common,
0x060C => Script::Common,
0x061B => Script::Common,
0x061C => Script::Common,
0x061F => Script::Common,
0x0640 => Script::Common,
0x06DD => Script::Common,
0x08E2 => Script::Common,
0x0964..=0x0965 => Script::Common,
0x0E3F => Script::Common,
0x0FD5..=0x0FD8 => Script::Common,
0x10FB => Script::Common,
0x16EB..=0x16ED => Script::Common,
0x1735..=0x1736 => Script::Common,
0x1802..=0x1803 => Script::Common,
0x1805 => Script::Common,
0x1CD3 => Script::Common,
0x1CE1 => Script::Common,
0x1CE9..=0x1CEC => Script::Common,
0x1CEE..=0x1CF1 => Script::Common,
0x1CF2..=0x1CF3 => Script::Common,
0x1CF5..=0x1CF6 => Script::Common,
0x2000..=0x200A => Script::Common,
0x200B => Script::Common,
0x200E..=0x200F => Script::Common,
0x2010..=0x2015 => Script::Common,
0x2016..=0x2017 => Script::Common,
0x2018 => Script::Common,
0x2019 => Script::Common,
0x201A => Script::Common,
0x201B..=0x201C => Script::Common,
0x201D => Script::Common,
0x201E => Script::Common,
0x201F => Script::Common,
0x2020..=0x2027 => Script::Common,
0x2028 => Script::Common,
0x2029 => Script::Common,
0x202A..=0x202E => Script::Common,
0x202F => Script::Common,
0x2030..=0x2038 => Script::Common,
0x2039 => Script::Common,
0x203A => Script::Common,
0x203B..=0x203E => Script::Common,
0x203F..=0x2040 => Script::Common,
0x2041..=0x2043 => Script::Common,
0x2044 => Script::Common,
0x2045 => Script::Common,
0x2046 => Script::Common,
0x2047..=0x2051 => Script::Common,
0x2052 => Script::Common,
0x2053 => Script::Common,
0x2054 => Script::Common,
0x2055..=0x205E => Script::Common,
0x205F => Script::Common,
0x2060..=0x2064 => Script::Common,
0x2066..=0x206F => Script::Common,
0x2070 => Script::Common,
0x2074..=0x2079 => Script::Common,
0x207A..=0x207C => Script::Common,
0x207D => Script::Common,
0x207E => Script::Common,
0x2080..=0x2089 => Script::Common,
0x208A..=0x208C => Script::Common,
0x208D => Script::Common,
0x208E => Script::Common,
0x20A0..=0x20BE => Script::Common,
0x2100..=0x2101 => Script::Common,
0x2102 => Script::Common,
0x2103..=0x2106 => Script::Common,
0x2107 => Script::Common,
0x2108..=0x2109 => Script::Common,
0x210A..=0x2113 => Script::Common,
0x2114 => Script::Common,
0x2115 => Script::Common,
0x2116..=0x2117 => Script::Common,
0x2118 => Script::Common,
0x2119..=0x211D => Script::Common,
0x211E..=0x2123 => Script::Common,
0x2124 => Script::Common,
0x2125 => Script::Common,
0x2127 => Script::Common,
0x2128 => Script::Common,
0x2129 => Script::Common,
0x212C..=0x212D => Script::Common,
0x212E => Script::Common,
0x212F..=0x2131 => Script::Common,
0x2133..=0x2134 => Script::Common,
0x2135..=0x2138 => Script::Common,
0x2139 => Script::Common,
0x213A..=0x213B => Script::Common,
0x213C..=0x213F => Script::Common,
0x2140..=0x2144 => Script::Common,
0x2145..=0x2149 => Script::Common,
0x214A => Script::Common,
0x214B => Script::Common,
0x214C..=0x214D => Script::Common,
0x214F => Script::Common,
0x2150..=0x215F => Script::Common,
0x2189 => Script::Common,
0x218A..=0x218B => Script::Common,
0x2190..=0x2194 => Script::Common,
0x2195..=0x2199 => Script::Common,
0x219A..=0x219B => Script::Common,
0x219C..=0x219F => Script::Common,
0x21A0 => Script::Common,
0x21A1..=0x21A2 => Script::Common,
0x21A3 => Script::Common,
0x21A4..=0x21A5 => Script::Common,
0x21A6 => Script::Common,
0x21A7..=0x21AD => Script::Common,
0x21AE => Script::Common,
0x21AF..=0x21CD => Script::Common,
0x21CE..=0x21CF => Script::Common,
0x21D0..=0x21D1 => Script::Common,
0x21D2 => Script::Common,
0x21D3 => Script::Common,
0x21D4 => Script::Common,
0x21D5..=0x21F3 => Script::Common,
0x21F4..=0x22FF => Script::Common,
0x2300..=0x2307 => Script::Common,
0x2308 => Script::Common,
0x2309 => Script::Common,
0x230A => Script::Common,
0x230B => Script::Common,
0x230C..=0x231F => Script::Common,
0x2320..=0x2321 => Script::Common,
0x2322..=0x2328 => Script::Common,
0x2329 => Script::Common,
0x232A => Script::Common,
0x232B..=0x237B => Script::Common,
0x237C => Script::Common,
0x237D..=0x239A => Script::Common,
0x239B..=0x23B3 => Script::Common,
0x23B4..=0x23DB => Script::Common,
0x23DC..=0x23E1 => Script::Common,
0x23E2..=0x23FE => Script::Common,
0x2400..=0x2426 => Script::Common,
0x2440..=0x244A => Script::Common,
0x2460..=0x249B => Script::Common,
0x249C..=0x24E9 => Script::Common,
0x24EA..=0x24FF => Script::Common,
0x2500..=0x25B6 => Script::Common,
0x25B7 => Script::Common,
0x25B8..=0x25C0 => Script::Common,
0x25C1 => Script::Common,
0x25C2..=0x25F7 => Script::Common,
0x25F8..=0x25FF => Script::Common,
0x2600..=0x266E => Script::Common,
0x266F => Script::Common,
0x2670..=0x2767 => Script::Common,
0x2768 => Script::Common,
0x2769 => Script::Common,
0x276A => Script::Common,
0x276B => Script::Common,
0x276C => Script::Common,
0x276D => Script::Common,
0x276E => Script::Common,
0x276F => Script::Common,
0x2770 => Script::Common,
0x2771 => Script::Common,
0x2772 => Script::Common,
0x2773 => Script::Common,
0x2774 => Script::Common,
0x2775 => Script::Common,
0x2776..=0x2793 => Script::Common,
0x2794..=0x27BF => Script::Common,
0x27C0..=0x27C4 => Script::Common,
0x27C5 => Script::Common,
0x27C6 => Script::Common,
0x27C7..=0x27E5 => Script::Common,
0x27E6 => Script::Common,
0x27E7 => Script::Common,
0x27E8 => Script::Common,
0x27E9 => Script::Common,
0x27EA => Script::Common,
0x27EB => Script::Common,
0x27EC => Script::Common,
0x27ED => Script::Common,
0x27EE => Script::Common,
0x27EF => Script::Common,
0x27F0..=0x27FF => Script::Common,
0x2900..=0x2982 => Script::Common,
0x2983 => Script::Common,
0x2984 => Script::Common,
0x2985 => Script::Common,
0x2986 => Script::Common,
0x2987 => Script::Common,
0x2988 => Script::Common,
0x2989 => Script::Common,
0x298A => Script::Common,
0x298B => Script::Common,
0x298C => Script::Common,
0x298D => Script::Common,
0x298E => Script::Common,
0x298F => Script::Common,
0x2990 => Script::Common,
0x2991 => Script::Common,
0x2992 => Script::Common,
0x2993 => Script::Common,
0x2994 => Script::Common,
0x2995 => Script::Common,
0x2996 => Script::Common,
0x2997 => Script::Common,
0x2998 => Script::Common,
0x2999..=0x29D7 => Script::Common,
0x29D8 => Script::Common,
0x29D9 => Script::Common,
0x29DA => Script::Common,
0x29DB => Script::Common,
0x29DC..=0x29FB => Script::Common,
0x29FC => Script::Common,
0x29FD => Script::Common,
0x29FE..=0x2AFF => Script::Common,
0x2B00..=0x2B2F => Script::Common,
0x2B30..=0x2B44 => Script::Common,
0x2B45..=0x2B46 => Script::Common,
0x2B47..=0x2B4C => Script::Common,
0x2B4D..=0x2B73 => Script::Common,
0x2B76..=0x2B95 => Script::Common,
0x2B98..=0x2BB9 => Script::Common,
0x2BBD..=0x2BC8 => Script::Common,
0x2BCA..=0x2BD1 => Script::Common,
0x2BEC..=0x2BEF => Script::Common,
0x2E00..=0x2E01 => Script::Common,
0x2E02 => Script::Common,
0x2E03 => Script::Common,
0x2E04 => Script::Common,
0x2E05 => Script::Common,
0x2E06..=0x2E08 => Script::Common,
0x2E09 => Script::Common,
0x2E0A => Script::Common,
0x2E0B => Script::Common,
0x2E0C => Script::Common,
0x2E0D => Script::Common,
0x2E0E..=0x2E16 => Script::Common,
0x2E17 => Script::Common,
0x2E18..=0x2E19 => Script::Common,
0x2E1A => Script::Common,
0x2E1B => Script::Common,
0x2E1C => Script::Common,
0x2E1D => Script::Common,
0x2E1E..=0x2E1F => Script::Common,
0x2E20 => Script::Common,
0x2E21 => Script::Common,
0x2E22 => Script::Common,
0x2E23 => Script::Common,
0x2E24 => Script::Common,
0x2E25 => Script::Common,
0x2E26 => Script::Common,
0x2E27 => Script::Common,
0x2E28 => Script::Common,
0x2E29 => Script::Common,
0x2E2A..=0x2E2E => Script::Common,
0x2E2F => Script::Common,
0x2E30..=0x2E39 => Script::Common,
0x2E3A..=0x2E3B => Script::Common,
0x2E3C..=0x2E3F => Script::Common,
0x2E40 => Script::Common,
0x2E41 => Script::Common,
0x2E42 => Script::Common,
0x2E43..=0x2E44 => Script::Common,
0x2FF0..=0x2FFB => Script::Common,
0x3000 => Script::Common,
0x3001..=0x3003 => Script::Common,
0x3004 => Script::Common,
0x3006 => Script::Common,
0x3008 => Script::Common,
0x3009 => Script::Common,
0x300A => Script::Common,
0x300B => Script::Common,
0x300C => Script::Common,
0x300D => Script::Common,
0x300E => Script::Common,
0x300F => Script::Common,
0x3010 => Script::Common,
0x3011 => Script::Common,
0x3012..=0x3013 => Script::Common,
0x3014 => Script::Common,
0x3015 => Script::Common,
0x3016 => Script::Common,
0x3017 => Script::Common,
0x3018 => Script::Common,
0x3019 => Script::Common,
0x301A => Script::Common,
0x301B => Script::Common,
0x301C => Script::Common,
0x301D => Script::Common,
0x301E..=0x301F => Script::Common,
0x3020 => Script::Common,
0x3030 => Script::Common,
0x3031..=0x3035 => Script::Common,
0x3036..=0x3037 => Script::Common,
0x303C => Script::Common,
0x303D => Script::Common,
0x303E..=0x303F => Script::Common,
0x309B..=0x309C => Script::Common,
0x30A0 => Script::Common,
0x30FB => Script::Common,
0x30FC => Script::Common,
0x3190..=0x3191 => Script::Common,
0x3192..=0x3195 => Script::Common,
0x3196..=0x319F => Script::Common,
0x31C0..=0x31E3 => Script::Common,
0x3220..=0x3229 => Script::Common,
0x322A..=0x3247 => Script::Common,
0x3248..=0x324F => Script::Common,
0x3250 => Script::Common,
0x3251..=0x325F => Script::Common,
0x327F => Script::Common,
0x3280..=0x3289 => Script::Common,
0x328A..=0x32B0 => Script::Common,
0x32B1..=0x32BF => Script::Common,
0x32C0..=0x32CF => Script::Common,
0x3358..=0x33FF => Script::Common,
0x4DC0..=0x4DFF => Script::Common,
0xA700..=0xA716 => Script::Common,
0xA717..=0xA71F => Script::Common,
0xA720..=0xA721 => Script::Common,
0xA788 => Script::Common,
0xA789..=0xA78A => Script::Common,
0xA830..=0xA835 => Script::Common,
0xA836..=0xA837 => Script::Common,
0xA838 => Script::Common,
0xA839 => Script::Common,
0xA92E => Script::Common,
0xA9CF => Script::Common,
0xAB5B => Script::Common,
0xFD3E => Script::Common,
0xFD3F => Script::Common,
0xFE10..=0xFE16 => Script::Common,
0xFE17 => Script::Common,
0xFE18 => Script::Common,
0xFE19 => Script::Common,
0xFE30 => Script::Common,
0xFE31..=0xFE32 => Script::Common,
0xFE33..=0xFE34 => Script::Common,
0xFE35 => Script::Common,
0xFE36 => Script::Common,
0xFE37 => Script::Common,
0xFE38 => Script::Common,
0xFE39 => Script::Common,
0xFE3A => Script::Common,
0xFE3B => Script::Common,
0xFE3C => Script::Common,
0xFE3D => Script::Common,
0xFE3E => Script::Common,
0xFE3F => Script::Common,
0xFE40 => Script::Common,
0xFE41 => Script::Common,
0xFE42 => Script::Common,
0xFE43 => Script::Common,
0xFE44 => Script::Common,
0xFE45..=0xFE46 => Script::Common,
0xFE47 => Script::Common,
0xFE48 => Script::Common,
0xFE49..=0xFE4C => Script::Common,
0xFE4D..=0xFE4F => Script::Common,
0xFE50..=0xFE52 => Script::Common,
0xFE54..=0xFE57 => Script::Common,
0xFE58 => Script::Common,
0xFE59 => Script::Common,
0xFE5A => Script::Common,
0xFE5B => Script::Common,
0xFE5C => Script::Common,
0xFE5D => Script::Common,
0xFE5E => Script::Common,
0xFE5F..=0xFE61 => Script::Common,
0xFE62 => Script::Common,
0xFE63 => Script::Common,
0xFE64..=0xFE66 => Script::Common,
0xFE68 => Script::Common,
0xFE69 => Script::Common,
0xFE6A..=0xFE6B => Script::Common,
0xFEFF => Script::Common,
0xFF01..=0xFF03 => Script::Common,
0xFF04 => Script::Common,
0xFF05..=0xFF07 => Script::Common,
0xFF08 => Script::Common,
0xFF09 => Script::Common,
0xFF0A => Script::Common,
0xFF0B => Script::Common,
0xFF0C => Script::Common,
0xFF0D => Script::Common,
0xFF0E..=0xFF0F => Script::Common,
0xFF10..=0xFF19 => Script::Common,
0xFF1A..=0xFF1B => Script::Common,
0xFF1C..=0xFF1E => Script::Common,
0xFF1F..=0xFF20 => Script::Common,
0xFF3B => Script::Common,
0xFF3C => Script::Common,
0xFF3D => Script::Common,
0xFF3E => Script::Common,
0xFF3F => Script::Common,
0xFF40 => Script::Common,
0xFF5B => Script::Common,
0xFF5C => Script::Common,
0xFF5D => Script::Common,
0xFF5E => Script::Common,
0xFF5F => Script::Common,
0xFF60 => Script::Common,
0xFF61 => Script::Common,
0xFF62 => Script::Common,
0xFF63 => Script::Common,
0xFF64..=0xFF65 => Script::Common,
0xFF70 => Script::Common,
0xFF9E..=0xFF9F => Script::Common,
0xFFE0..=0xFFE1 => Script::Common,
0xFFE2 => Script::Common,
0xFFE3 => Script::Common,
0xFFE4 => Script::Common,
0xFFE5..=0xFFE6 => Script::Common,
0xFFE8 => Script::Common,
0xFFE9..=0xFFEC => Script::Common,
0xFFED..=0xFFEE => Script::Common,
0xFFF9..=0xFFFB => Script::Common,
0xFFFC..=0xFFFD => Script::Common,
0x10100..=0x10102 => Script::Common,
0x10107..=0x10133 => Script::Common,
0x10137..=0x1013F => Script::Common,
0x10190..=0x1019B => Script::Common,
0x101D0..=0x101FC => Script::Common,
0x102E1..=0x102FB => Script::Common,
0x1BCA0..=0x1BCA3 => Script::Common,
0x1D000..=0x1D0F5 => Script::Common,
0x1D100..=0x1D126 => Script::Common,
0x1D129..=0x1D164 => Script::Common,
0x1D165..=0x1D166 => Script::Common,
0x1D16A..=0x1D16C => Script::Common,
0x1D16D..=0x1D172 => Script::Common,
0x1D173..=0x1D17A => Script::Common,
0x1D183..=0x1D184 => Script::Common,
0x1D18C..=0x1D1A9 => Script::Common,
0x1D1AE..=0x1D1E8 => Script::Common,
0x1D300..=0x1D356 => Script::Common,
0x1D360..=0x1D371 => Script::Common,
0x1D400..=0x1D454 => Script::Common,
0x1D456..=0x1D49C => Script::Common,
0x1D49E..=0x1D49F => Script::Common,
0x1D4A2 => Script::Common,
0x1D4A5..=0x1D4A6 => Script::Common,
0x1D4A9..=0x1D4AC => Script::Common,
0x1D4AE..=0x1D4B9 => Script::Common,
0x1D4BB => Script::Common,
0x1D4BD..=0x1D4C3 => Script::Common,
0x1D4C5..=0x1D505 => Script::Common,
0x1D507..=0x1D50A => Script::Common,
0x1D50D..=0x1D514 => Script::Common,
0x1D516..=0x1D51C => Script::Common,
0x1D51E..=0x1D539 => Script::Common,
0x1D53B..=0x1D53E => Script::Common,
0x1D540..=0x1D544 => Script::Common,
0x1D546 => Script::Common,
0x1D54A..=0x1D550 => Script::Common,
0x1D552..=0x1D6A5 => Script::Common,
0x1D6A8..=0x1D6C0 => Script::Common,
0x1D6C1 => Script::Common,
0x1D6C2..=0x1D6DA => Script::Common,
0x1D6DB => Script::Common,
0x1D6DC..=0x1D6FA => Script::Common,
0x1D6FB => Script::Common,
0x1D6FC..=0x1D714 => Script::Common,
0x1D715 => Script::Common,
0x1D716..=0x1D734 => Script::Common,
0x1D735 => Script::Common,
0x1D736..=0x1D74E => Script::Common,
0x1D74F => Script::Common,
0x1D750..=0x1D76E => Script::Common,
0x1D76F => Script::Common,
0x1D770..=0x1D788 => Script::Common,
0x1D789 => Script::Common,
0x1D78A..=0x1D7A8 => Script::Common,
0x1D7A9 => Script::Common,
0x1D7AA..=0x1D7C2 => Script::Common,
0x1D7C3 => Script::Common,
0x1D7C4..=0x1D7CB => Script::Common,
0x1D7CE..=0x1D7FF => Script::Common,
0x1F000..=0x1F02B => Script::Common,
0x1F030..=0x1F093 => Script::Common,
0x1F0A0..=0x1F0AE => Script::Common,
0x1F0B1..=0x1F0BF => Script::Common,
0x1F0C1..=0x1F0CF => Script::Common,
0x1F0D1..=0x1F0F5 => Script::Common,
0x1F100..=0x1F10C => Script::Common,
0x1F110..=0x1F12E => Script::Common,
0x1F130..=0x1F16B => Script::Common,
0x1F170..=0x1F1AC => Script::Common,
0x1F1E6..=0x1F1FF => Script::Common,
0x1F201..=0x1F202 => Script::Common,
0x1F210..=0x1F23B => Script::Common,
0x1F240..=0x1F248 => Script::Common,
0x1F250..=0x1F251 => Script::Common,
0x1F300..=0x1F3FA => Script::Common,
0x1F3FB..=0x1F3FF => Script::Common,
0x1F400..=0x1F6D2 => Script::Common,
0x1F6E0..=0x1F6EC => Script::Common,
0x1F6F0..=0x1F6F6 => Script::Common,
0x1F700..=0x1F773 => Script::Common,
0x1F780..=0x1F7D4 => Script::Common,
0x1F800..=0x1F80B => Script::Common,
0x1F810..=0x1F847 => Script::Common,
0x1F850..=0x1F859 => Script::Common,
0x1F860..=0x1F887 => Script::Common,
0x1F890..=0x1F8AD => Script::Common,
0x1F910..=0x1F91E => Script::Common,
0x1F920..=0x1F927 => Script::Common,
0x1F930 => Script::Common,
0x1F933..=0x1F93E => Script::Common,
0x1F940..=0x1F94B => Script::Common,
0x1F950..=0x1F95E => Script::Common,
0x1F980..=0x1F991 => Script::Common,
0x1F9C0 => Script::Common,
0xE0001 => Script::Common,
0xE0020..=0xE007F => Script::Common,
0x0041..=0x005A => Script::Latin,
0x0061..=0x007A => Script::Latin,
0x00AA => Script::Latin,
0x00BA => Script::Latin,
0x00C0..=0x00D6 => Script::Latin,
0x00D8..=0x00F6 => Script::Latin,
0x00F8..=0x01BA => Script::Latin,
0x01BB => Script::Latin,
0x01BC..=0x01BF => Script::Latin,
0x01C0..=0x01C3 => Script::Latin,
0x01C4..=0x0293 => Script::Latin,
0x0294 => Script::Latin,
0x0295..=0x02AF => Script::Latin,
0x02B0..=0x02B8 => Script::Latin,
0x02E0..=0x02E4 => Script::Latin,
0x1D00..=0x1D25 => Script::Latin,
0x1D2C..=0x1D5C => Script::Latin,
0x1D62..=0x1D65 => Script::Latin,
0x1D6B..=0x1D77 => Script::Latin,
0x1D79..=0x1D9A => Script::Latin,
0x1D9B..=0x1DBE => Script::Latin,
0x1E00..=0x1EFF => Script::Latin,
0x2071 => Script::Latin,
0x207F => Script::Latin,
0x2090..=0x209C => Script::Latin,
0x212A..=0x212B => Script::Latin,
0x2132 => Script::Latin,
0x214E => Script::Latin,
0x2160..=0x2182 => Script::Latin,
0x2183..=0x2184 => Script::Latin,
0x2185..=0x2188 => Script::Latin,
0x2C60..=0x2C7B => Script::Latin,
0x2C7C..=0x2C7D => Script::Latin,
0x2C7E..=0x2C7F => Script::Latin,
0xA722..=0xA76F => Script::Latin,
0xA770 => Script::Latin,
0xA771..=0xA787 => Script::Latin,
0xA78B..=0xA78E => Script::Latin,
0xA78F => Script::Latin,
0xA790..=0xA7AE => Script::Latin,
0xA7B0..=0xA7B7 => Script::Latin,
0xA7F7 => Script::Latin,
0xA7F8..=0xA7F9 => Script::Latin,
0xA7FA => Script::Latin,
0xA7FB..=0xA7FF => Script::Latin,
0xAB30..=0xAB5A => Script::Latin,
0xAB5C..=0xAB5F => Script::Latin,
0xAB60..=0xAB64 => Script::Latin,
0xFB00..=0xFB06 => Script::Latin,
0xFF21..=0xFF3A => Script::Latin,
0xFF41..=0xFF5A => Script::Latin,
0x0370..=0x0373 => Script::Greek,
0x0375 => Script::Greek,
0x0376..=0x0377 => Script::Greek,
0x037A => Script::Greek,
0x037B..=0x037D => Script::Greek,
0x037F => Script::Greek,
0x0384 => Script::Greek,
0x0386 => Script::Greek,
0x0388..=0x038A => Script::Greek,
0x038C => Script::Greek,
0x038E..=0x03A1 => Script::Greek,
0x03A3..=0x03E1 => Script::Greek,
0x03F0..=0x03F5 => Script::Greek,
0x03F6 => Script::Greek,
0x03F7..=0x03FF => Script::Greek,
0x1D26..=0x1D2A => Script::Greek,
0x1D5D..=0x1D61 => Script::Greek,
0x1D66..=0x1D6A => Script::Greek,
0x1DBF => Script::Greek,
0x1F00..=0x1F15 => Script::Greek,
0x1F18..=0x1F1D => Script::Greek,
0x1F20..=0x1F45 => Script::Greek,
0x1F48..=0x1F4D => Script::Greek,
0x1F50..=0x1F57 => Script::Greek,
0x1F59 => Script::Greek,
0x1F5B => Script::Greek,
0x1F5D => Script::Greek,
0x1F5F..=0x1F7D => Script::Greek,
0x1F80..=0x1FB4 => Script::Greek,
0x1FB6..=0x1FBC => Script::Greek,
0x1FBD => Script::Greek,
0x1FBE => Script::Greek,
0x1FBF..=0x1FC1 => Script::Greek,
0x1FC2..=0x1FC4 => Script::Greek,
0x1FC6..=0x1FCC => Script::Greek,
0x1FCD..=0x1FCF => Script::Greek,
0x1FD0..=0x1FD3 => Script::Greek,
0x1FD6..=0x1FDB => Script::Greek,
0x1FDD..=0x1FDF => Script::Greek,
0x1FE0..=0x1FEC => Script::Greek,
0x1FED..=0x1FEF => Script::Greek,
0x1FF2..=0x1FF4 => Script::Greek,
0x1FF6..=0x1FFC => Script::Greek,
0x1FFD..=0x1FFE => Script::Greek,
0x2126 => Script::Greek,
0xAB65 => Script::Greek,
0x10140..=0x10174 => Script::Greek,
0x10175..=0x10178 => Script::Greek,
0x10179..=0x10189 => Script::Greek,
0x1018A..=0x1018B => Script::Greek,
0x1018C..=0x1018E => Script::Greek,
0x101A0 => Script::Greek,
0x1D200..=0x1D241 => Script::Greek,
0x1D242..=0x1D244 => Script::Greek,
0x1D245 => Script::Greek,
0x0400..=0x0481 => Script::Cyrillic,
0x0482 => Script::Cyrillic,
0x0483..=0x0484 => Script::Cyrillic,
0x0487 => Script::Cyrillic,
0x0488..=0x0489 => Script::Cyrillic,
0x048A..=0x052F => Script::Cyrillic,
0x1C80..=0x1C88 => Script::Cyrillic,
0x1D2B => Script::Cyrillic,
0x1D78 => Script::Cyrillic,
0x2DE0..=0x2DFF => Script::Cyrillic,
0xA640..=0xA66D => Script::Cyrillic,
0xA66E => Script::Cyrillic,
0xA66F => Script::Cyrillic,
0xA670..=0xA672 => Script::Cyrillic,
0xA673 => Script::Cyrillic,
0xA674..=0xA67D => Script::Cyrillic,
0xA67E => Script::Cyrillic,
0xA67F => Script::Cyrillic,
0xA680..=0xA69B => Script::Cyrillic,
0xA69C..=0xA69D => Script::Cyrillic,
0xA69E..=0xA69F => Script::Cyrillic,
0xFE2E..=0xFE2F => Script::Cyrillic,
0x0531..=0x0556 => Script::Armenian,
0x0559 => Script::Armenian,
0x055A..=0x055F => Script::Armenian,
0x0561..=0x0587 => Script::Armenian,
0x058A => Script::Armenian,
0x058D..=0x058E => Script::Armenian,
0x058F => Script::Armenian,
0xFB13..=0xFB17 => Script::Armenian,
0x0591..=0x05BD => Script::Hebrew,
0x05BE => Script::Hebrew,
0x05BF => Script::Hebrew,
0x05C0 => Script::Hebrew,
0x05C1..=0x05C2 => Script::Hebrew,
0x05C3 => Script::Hebrew,
0x05C4..=0x05C5 => Script::Hebrew,
0x05C6 => Script::Hebrew,
0x05C7 => Script::Hebrew,
0x05D0..=0x05EA => Script::Hebrew,
0x05F0..=0x05F2 => Script::Hebrew,
0x05F3..=0x05F4 => Script::Hebrew,
0xFB1D => Script::Hebrew,
0xFB1E => Script::Hebrew,
0xFB1F..=0xFB28 => Script::Hebrew,
0xFB29 => Script::Hebrew,
0xFB2A..=0xFB36 => Script::Hebrew,
0xFB38..=0xFB3C => Script::Hebrew,
0xFB3E => Script::Hebrew,
0xFB40..=0xFB41 => Script::Hebrew,
0xFB43..=0xFB44 => Script::Hebrew,
0xFB46..=0xFB4F => Script::Hebrew,
0x0600..=0x0604 => Script::Arabic,
0x0606..=0x0608 => Script::Arabic,
0x0609..=0x060A => Script::Arabic,
0x060B => Script::Arabic,
0x060D => Script::Arabic,
0x060E..=0x060F => Script::Arabic,
0x0610..=0x061A => Script::Arabic,
0x061E => Script::Arabic,
0x0620..=0x063F => Script::Arabic,
0x0641..=0x064A => Script::Arabic,
0x0656..=0x065F => Script::Arabic,
0x0660..=0x0669 => Script::Arabic,
0x066A..=0x066D => Script::Arabic,
0x066E..=0x066F => Script::Arabic,
0x0671..=0x06D3 => Script::Arabic,
0x06D4 => Script::Arabic,
0x06D5 => Script::Arabic,
0x06D6..=0x06DC => Script::Arabic,
0x06DE => Script::Arabic,
0x06DF..=0x06E4 => Script::Arabic,
0x06E5..=0x06E6 => Script::Arabic,
0x06E7..=0x06E8 => Script::Arabic,
0x06E9 => Script::Arabic,
0x06EA..=0x06ED => Script::Arabic,
0x06EE..=0x06EF => Script::Arabic,
0x06F0..=0x06F9 => Script::Arabic,
0x06FA..=0x06FC => Script::Arabic,
0x06FD..=0x06FE => Script::Arabic,
0x06FF => Script::Arabic,
0x0750..=0x077F => Script::Arabic,
0x08A0..=0x08B4 => Script::Arabic,
0x08B6..=0x08BD => Script::Arabic,
0x08D4..=0x08E1 => Script::Arabic,
0x08E3..=0x08FF => Script::Arabic,
0xFB50..=0xFBB1 => Script::Arabic,
0xFBB2..=0xFBC1 => Script::Arabic,
0xFBD3..=0xFD3D => Script::Arabic,
0xFD50..=0xFD8F => Script::Arabic,
0xFD92..=0xFDC7 => Script::Arabic,
0xFDF0..=0xFDFB => Script::Arabic,
0xFDFC => Script::Arabic,
0xFDFD => Script::Arabic,
0xFE70..=0xFE74 => Script::Arabic,
0xFE76..=0xFEFC => Script::Arabic,
0x10E60..=0x10E7E => Script::Arabic,
0x1EE00..=0x1EE03 => Script::Arabic,
0x1EE05..=0x1EE1F => Script::Arabic,
0x1EE21..=0x1EE22 => Script::Arabic,
0x1EE24 => Script::Arabic,
0x1EE27 => Script::Arabic,
0x1EE29..=0x1EE32 => Script::Arabic,
0x1EE34..=0x1EE37 => Script::Arabic,
0x1EE39 => Script::Arabic,
0x1EE3B => Script::Arabic,
0x1EE42 => Script::Arabic,
0x1EE47 => Script::Arabic,
0x1EE49 => Script::Arabic,
0x1EE4B => Script::Arabic,
0x1EE4D..=0x1EE4F => Script::Arabic,
0x1EE51..=0x1EE52 => Script::Arabic,
0x1EE54 => Script::Arabic,
0x1EE57 => Script::Arabic,
0x1EE59 => Script::Arabic,
0x1EE5B => Script::Arabic,
0x1EE5D => Script::Arabic,
0x1EE5F => Script::Arabic,
0x1EE61..=0x1EE62 => Script::Arabic,
0x1EE64 => Script::Arabic,
0x1EE67..=0x1EE6A => Script::Arabic,
0x1EE6C..=0x1EE72 => Script::Arabic,
0x1EE74..=0x1EE77 => Script::Arabic,
0x1EE79..=0x1EE7C => Script::Arabic,
0x1EE7E => Script::Arabic,
0x1EE80..=0x1EE89 => Script::Arabic,
0x1EE8B..=0x1EE9B => Script::Arabic,
0x1EEA1..=0x1EEA3 => Script::Arabic,
0x1EEA5..=0x1EEA9 => Script::Arabic,
0x1EEAB..=0x1EEBB => Script::Arabic,
0x1EEF0..=0x1EEF1 => Script::Arabic,
0x0700..=0x070D => Script::Syriac,
0x070F => Script::Syriac,
0x0710 => Script::Syriac,
0x0711 => Script::Syriac,
0x0712..=0x072F => Script::Syriac,
0x0730..=0x074A => Script::Syriac,
0x074D..=0x074F => Script::Syriac,
0x0780..=0x07A5 => Script::Thaana,
0x07A6..=0x07B0 => Script::Thaana,
0x07B1 => Script::Thaana,
0x0900..=0x0902 => Script::Devanagari,
0x0903 => Script::Devanagari,
0x0904..=0x0939 => Script::Devanagari,
0x093A => Script::Devanagari,
0x093B => Script::Devanagari,
0x093C => Script::Devanagari,
0x093D => Script::Devanagari,
0x093E..=0x0940 => Script::Devanagari,
0x0941..=0x0948 => Script::Devanagari,
0x0949..=0x094C => Script::Devanagari,
0x094D => Script::Devanagari,
0x094E..=0x094F => Script::Devanagari,
0x0950 => Script::Devanagari,
0x0953..=0x0957 => Script::Devanagari,
0x0958..=0x0961 => Script::Devanagari,
0x0962..=0x0963 => Script::Devanagari,
0x0966..=0x096F => Script::Devanagari,
0x0970 => Script::Devanagari,
0x0971 => Script::Devanagari,
0x0972..=0x097F => Script::Devanagari,
0xA8E0..=0xA8F1 => Script::Devanagari,
0xA8F2..=0xA8F7 => Script::Devanagari,
0xA8F8..=0xA8FA => Script::Devanagari,
0xA8FB => Script::Devanagari,
0xA8FC => Script::Devanagari,
0xA8FD => Script::Devanagari,
0x0980 => Script::Bengali,
0x0981 => Script::Bengali,
0x0982..=0x0983 => Script::Bengali,
0x0985..=0x098C => Script::Bengali,
0x098F..=0x0990 => Script::Bengali,
0x0993..=0x09A8 => Script::Bengali,
0x09AA..=0x09B0 => Script::Bengali,
0x09B2 => Script::Bengali,
0x09B6..=0x09B9 => Script::Bengali,
0x09BC => Script::Bengali,
0x09BD => Script::Bengali,
0x09BE..=0x09C0 => Script::Bengali,
0x09C1..=0x09C4 => Script::Bengali,
0x09C7..=0x09C8 => Script::Bengali,
0x09CB..=0x09CC => Script::Bengali,
0x09CD => Script::Bengali,
0x09CE => Script::Bengali,
0x09D7 => Script::Bengali,
0x09DC..=0x09DD => Script::Bengali,
0x09DF..=0x09E1 => Script::Bengali,
0x09E2..=0x09E3 => Script::Bengali,
0x09E6..=0x09EF => Script::Bengali,
0x09F0..=0x09F1 => Script::Bengali,
0x09F2..=0x09F3 => Script::Bengali,
0x09F4..=0x09F9 => Script::Bengali,
0x09FA => Script::Bengali,
0x09FB => Script::Bengali,
0x0A01..=0x0A02 => Script::Gurmukhi,
0x0A03 => Script::Gurmukhi,
0x0A05..=0x0A0A => Script::Gurmukhi,
0x0A0F..=0x0A10 => Script::Gurmukhi,
0x0A13..=0x0A28 => Script::Gurmukhi,
0x0A2A..=0x0A30 => Script::Gurmukhi,
0x0A32..=0x0A33 => Script::Gurmukhi,
0x0A35..=0x0A36 => Script::Gurmukhi,
0x0A38..=0x0A39 => Script::Gurmukhi,
0x0A3C => Script::Gurmukhi,
0x0A3E..=0x0A40 => Script::Gurmukhi,
0x0A41..=0x0A42 => Script::Gurmukhi,
0x0A47..=0x0A48 => Script::Gurmukhi,
0x0A4B..=0x0A4D => Script::Gurmukhi,
0x0A51 => Script::Gurmukhi,
0x0A59..=0x0A5C => Script::Gurmukhi,
0x0A5E => Script::Gurmukhi,
0x0A66..=0x0A6F => Script::Gurmukhi,
0x0A70..=0x0A71 => Script::Gurmukhi,
0x0A72..=0x0A74 => Script::Gurmukhi,
0x0A75 => Script::Gurmukhi,
0x0A81..=0x0A82 => Script::Gujarati,
0x0A83 => Script::Gujarati,
0x0A85..=0x0A8D => Script::Gujarati,
0x0A8F..=0x0A91 => Script::Gujarati,
0x0A93..=0x0AA8 => Script::Gujarati,
0x0AAA..=0x0AB0 => Script::Gujarati,
0x0AB2..=0x0AB3 => Script::Gujarati,
0x0AB5..=0x0AB9 => Script::Gujarati,
0x0ABC => Script::Gujarati,
0x0ABD => Script::Gujarati,
0x0ABE..=0x0AC0 => Script::Gujarati,
0x0AC1..=0x0AC5 => Script::Gujarati,
0x0AC7..=0x0AC8 => Script::Gujarati,
0x0AC9 => Script::Gujarati,
0x0ACB..=0x0ACC => Script::Gujarati,
0x0ACD => Script::Gujarati,
0x0AD0 => Script::Gujarati,
0x0AE0..=0x0AE1 => Script::Gujarati,
0x0AE2..=0x0AE3 => Script::Gujarati,
0x0AE6..=0x0AEF => Script::Gujarati,
0x0AF0 => Script::Gujarati,
0x0AF1 => Script::Gujarati,
0x0AF9 => Script::Gujarati,
0x0B01 => Script::Oriya,
0x0B02..=0x0B03 => Script::Oriya,
0x0B05..=0x0B0C => Script::Oriya,
0x0B0F..=0x0B10 => Script::Oriya,
0x0B13..=0x0B28 => Script::Oriya,
0x0B2A..=0x0B30 => Script::Oriya,
0x0B32..=0x0B33 => Script::Oriya,
0x0B35..=0x0B39 => Script::Oriya,
0x0B3C => Script::Oriya,
0x0B3D => Script::Oriya,
0x0B3E => Script::Oriya,
0x0B3F => Script::Oriya,
0x0B40 => Script::Oriya,
0x0B41..=0x0B44 => Script::Oriya,
0x0B47..=0x0B48 => Script::Oriya,
0x0B4B..=0x0B4C => Script::Oriya,
0x0B4D => Script::Oriya,
0x0B56 => Script::Oriya,
0x0B57 => Script::Oriya,
0x0B5C..=0x0B5D => Script::Oriya,
0x0B5F..=0x0B61 => Script::Oriya,
0x0B62..=0x0B63 => Script::Oriya,
0x0B66..=0x0B6F => Script::Oriya,
0x0B70 => Script::Oriya,
0x0B71 => Script::Oriya,
0x0B72..=0x0B77 => Script::Oriya,
0x0B82 => Script::Tamil,
0x0B83 => Script::Tamil,
0x0B85..=0x0B8A => Script::Tamil,
0x0B8E..=0x0B90 => Script::Tamil,
0x0B92..=0x0B95 => Script::Tamil,
0x0B99..=0x0B9A => Script::Tamil,
0x0B9C => Script::Tamil,
0x0B9E..=0x0B9F => Script::Tamil,
0x0BA3..=0x0BA4 => Script::Tamil,
0x0BA8..=0x0BAA => Script::Tamil,
0x0BAE..=0x0BB9 => Script::Tamil,
0x0BBE..=0x0BBF => Script::Tamil,
0x0BC0 => Script::Tamil,
0x0BC1..=0x0BC2 => Script::Tamil,
0x0BC6..=0x0BC8 => Script::Tamil,
0x0BCA..=0x0BCC => Script::Tamil,
0x0BCD => Script::Tamil,
0x0BD0 => Script::Tamil,
0x0BD7 => Script::Tamil,
0x0BE6..=0x0BEF => Script::Tamil,
0x0BF0..=0x0BF2 => Script::Tamil,
0x0BF3..=0x0BF8 => Script::Tamil,
0x0BF9 => Script::Tamil,
0x0BFA => Script::Tamil,
0x0C00 => Script::Telugu,
0x0C01..=0x0C03 => Script::Telugu,
0x0C05..=0x0C0C => Script::Telugu,
0x0C0E..=0x0C10 => Script::Telugu,
0x0C12..=0x0C28 => Script::Telugu,
0x0C2A..=0x0C39 => Script::Telugu,
0x0C3D => Script::Telugu,
0x0C3E..=0x0C40 => Script::Telugu,
0x0C41..=0x0C44 => Script::Telugu,
0x0C46..=0x0C48 => Script::Telugu,
0x0C4A..=0x0C4D => Script::Telugu,
0x0C55..=0x0C56 => Script::Telugu,
0x0C58..=0x0C5A => Script::Telugu,
0x0C60..=0x0C61 => Script::Telugu,
0x0C62..=0x0C63 => Script::Telugu,
0x0C66..=0x0C6F => Script::Telugu,
0x0C78..=0x0C7E => Script::Telugu,
0x0C7F => Script::Telugu,
0x0C80 => Script::Kannada,
0x0C81 => Script::Kannada,
0x0C82..=0x0C83 => Script::Kannada,
0x0C85..=0x0C8C => Script::Kannada,
0x0C8E..=0x0C90 => Script::Kannada,
0x0C92..=0x0CA8 => Script::Kannada,
0x0CAA..=0x0CB3 => Script::Kannada,
0x0CB5..=0x0CB9 => Script::Kannada,
0x0CBC => Script::Kannada,
0x0CBD => Script::Kannada,
0x0CBE => Script::Kannada,
0x0CBF => Script::Kannada,
0x0CC0..=0x0CC4 => Script::Kannada,
0x0CC6 => Script::Kannada,
0x0CC7..=0x0CC8 => Script::Kannada,
0x0CCA..=0x0CCB => Script::Kannada,
0x0CCC..=0x0CCD => Script::Kannada,
0x0CD5..=0x0CD6 => Script::Kannada,
0x0CDE => Script::Kannada,
0x0CE0..=0x0CE1 => Script::Kannada,
0x0CE2..=0x0CE3 => Script::Kannada,
0x0CE6..=0x0CEF => Script::Kannada,
0x0CF1..=0x0CF2 => Script::Kannada,
0x0D01 => Script::Malayalam,
0x0D02..=0x0D03 => Script::Malayalam,
0x0D05..=0x0D0C => Script::Malayalam,
0x0D0E..=0x0D10 => Script::Malayalam,
0x0D12..=0x0D3A => Script::Malayalam,
0x0D3D => Script::Malayalam,
0x0D3E..=0x0D40 => Script::Malayalam,
0x0D41..=0x0D44 => Script::Malayalam,
0x0D46..=0x0D48 => Script::Malayalam,
0x0D4A..=0x0D4C => Script::Malayalam,
0x0D4D => Script::Malayalam,
0x0D4E => Script::Malayalam,
0x0D4F => Script::Malayalam,
0x0D54..=0x0D56 => Script::Malayalam,
0x0D57 => Script::Malayalam,
0x0D58..=0x0D5E => Script::Malayalam,
0x0D5F..=0x0D61 => Script::Malayalam,
0x0D62..=0x0D63 => Script::Malayalam,
0x0D66..=0x0D6F => Script::Malayalam,
0x0D70..=0x0D78 => Script::Malayalam,
0x0D79 => Script::Malayalam,
0x0D7A..=0x0D7F => Script::Malayalam,
0x0D82..=0x0D83 => Script::Sinhala,
0x0D85..=0x0D96 => Script::Sinhala,
0x0D9A..=0x0DB1 => Script::Sinhala,
0x0DB3..=0x0DBB => Script::Sinhala,
0x0DBD => Script::Sinhala,
0x0DC0..=0x0DC6 => Script::Sinhala,
0x0DCA => Script::Sinhala,
0x0DCF..=0x0DD1 => Script::Sinhala,
0x0DD2..=0x0DD4 => Script::Sinhala,
0x0DD6 => Script::Sinhala,
0x0DD8..=0x0DDF => Script::Sinhala,
0x0DE6..=0x0DEF => Script::Sinhala,
0x0DF2..=0x0DF3 => Script::Sinhala,
0x0DF4 => Script::Sinhala,
0x111E1..=0x111F4 => Script::Sinhala,
0x0E01..=0x0E30 => Script::Thai,
0x0E31 => Script::Thai,
0x0E32..=0x0E33 => Script::Thai,
0x0E34..=0x0E3A => Script::Thai,
0x0E40..=0x0E45 => Script::Thai,
0x0E46 => Script::Thai,
0x0E47..=0x0E4E => Script::Thai,
0x0E4F => Script::Thai,
0x0E50..=0x0E59 => Script::Thai,
0x0E5A..=0x0E5B => Script::Thai,
0x0E81..=0x0E82 => Script::Lao,
0x0E84 => Script::Lao,
0x0E87..=0x0E88 => Script::Lao,
0x0E8A => Script::Lao,
0x0E8D => Script::Lao,
0x0E94..=0x0E97 => Script::Lao,
0x0E99..=0x0E9F => Script::Lao,
0x0EA1..=0x0EA3 => Script::Lao,
0x0EA5 => Script::Lao,
0x0EA7 => Script::Lao,
0x0EAA..=0x0EAB => Script::Lao,
0x0EAD..=0x0EB0 => Script::Lao,
0x0EB1 => Script::Lao,
0x0EB2..=0x0EB3 => Script::Lao,
0x0EB4..=0x0EB9 => Script::Lao,
0x0EBB..=0x0EBC => Script::Lao,
0x0EBD => Script::Lao,
0x0EC0..=0x0EC4 => Script::Lao,
0x0EC6 => Script::Lao,
0x0EC8..=0x0ECD => Script::Lao,
0x0ED0..=0x0ED9 => Script::Lao,
0x0EDC..=0x0EDF => Script::Lao,
0x0F00 => Script::Tibetan,
0x0F01..=0x0F03 => Script::Tibetan,
0x0F04..=0x0F12 => Script::Tibetan,
0x0F13 => Script::Tibetan,
0x0F14 => Script::Tibetan,
0x0F15..=0x0F17 => Script::Tibetan,
0x0F18..=0x0F19 => Script::Tibetan,
0x0F1A..=0x0F1F => Script::Tibetan,
0x0F20..=0x0F29 => Script::Tibetan,
0x0F2A..=0x0F33 => Script::Tibetan,
0x0F34 => Script::Tibetan,
0x0F35 => Script::Tibetan,
0x0F36 => Script::Tibetan,
0x0F37 => Script::Tibetan,
0x0F38 => Script::Tibetan,
0x0F39 => Script::Tibetan,
0x0F3A => Script::Tibetan,
0x0F3B => Script::Tibetan,
0x0F3C => Script::Tibetan,
0x0F3D => Script::Tibetan,
0x0F3E..=0x0F3F => Script::Tibetan,
0x0F40..=0x0F47 => Script::Tibetan,
0x0F49..=0x0F6C => Script::Tibetan,
0x0F71..=0x0F7E => Script::Tibetan,
0x0F7F => Script::Tibetan,
0x0F80..=0x0F84 => Script::Tibetan,
0x0F85 => Script::Tibetan,
0x0F86..=0x0F87 => Script::Tibetan,
0x0F88..=0x0F8C => Script::Tibetan,
0x0F8D..=0x0F97 => Script::Tibetan,
0x0F99..=0x0FBC => Script::Tibetan,
0x0FBE..=0x0FC5 => Script::Tibetan,
0x0FC6 => Script::Tibetan,
0x0FC7..=0x0FCC => Script::Tibetan,
0x0FCE..=0x0FCF => Script::Tibetan,
0x0FD0..=0x0FD4 => Script::Tibetan,
0x0FD9..=0x0FDA => Script::Tibetan,
0x1000..=0x102A => Script::Myanmar,
0x102B..=0x102C => Script::Myanmar,
0x102D..=0x1030 => Script::Myanmar,
0x1031 => Script::Myanmar,
0x1032..=0x1037 => Script::Myanmar,
0x1038 => Script::Myanmar,
0x1039..=0x103A => Script::Myanmar,
0x103B..=0x103C => Script::Myanmar,
0x103D..=0x103E => Script::Myanmar,
0x103F => Script::Myanmar,
0x1040..=0x1049 => Script::Myanmar,
0x104A..=0x104F => Script::Myanmar,
0x1050..=0x1055 => Script::Myanmar,
0x1056..=0x1057 => Script::Myanmar,
0x1058..=0x1059 => Script::Myanmar,
0x105A..=0x105D => Script::Myanmar,
0x105E..=0x1060 => Script::Myanmar,
0x1061 => Script::Myanmar,
0x1062..=0x1064 => Script::Myanmar,
0x1065..=0x1066 => Script::Myanmar,
0x1067..=0x106D => Script::Myanmar,
0x106E..=0x1070 => Script::Myanmar,
0x1071..=0x1074 => Script::Myanmar,
0x1075..=0x1081 => Script::Myanmar,
0x1082 => Script::Myanmar,
0x1083..=0x1084 => Script::Myanmar,
0x1085..=0x1086 => Script::Myanmar,
0x1087..=0x108C => Script::Myanmar,
0x108D => Script::Myanmar,
0x108E => Script::Myanmar,
0x108F => Script::Myanmar,
0x1090..=0x1099 => Script::Myanmar,
0x109A..=0x109C => Script::Myanmar,
0x109D => Script::Myanmar,
0x109E..=0x109F => Script::Myanmar,
0xA9E0..=0xA9E4 => Script::Myanmar,
0xA9E5 => Script::Myanmar,
0xA9E6 => Script::Myanmar,
0xA9E7..=0xA9EF => Script::Myanmar,
0xA9F0..=0xA9F9 => Script::Myanmar,
0xA9FA..=0xA9FE => Script::Myanmar,
0xAA60..=0xAA6F => Script::Myanmar,
0xAA70 => Script::Myanmar,
0xAA71..=0xAA76 => Script::Myanmar,
0xAA77..=0xAA79 => Script::Myanmar,
0xAA7A => Script::Myanmar,
0xAA7B => Script::Myanmar,
0xAA7C => Script::Myanmar,
0xAA7D => Script::Myanmar,
0xAA7E..=0xAA7F => Script::Myanmar,
0x10A0..=0x10C5 => Script::Georgian,
0x10C7 => Script::Georgian,
0x10CD => Script::Georgian,
0x10D0..=0x10FA => Script::Georgian,
0x10FC => Script::Georgian,
0x10FD..=0x10FF => Script::Georgian,
0x2D00..=0x2D25 => Script::Georgian,
0x2D27 => Script::Georgian,
0x2D2D => Script::Georgian,
0x1100..=0x11FF => Script::Hangul,
0x302E..=0x302F => Script::Hangul,
0x3131..=0x318E => Script::Hangul,
0x3200..=0x321E => Script::Hangul,
0x3260..=0x327E => Script::Hangul,
0xA960..=0xA97C => Script::Hangul,
0xAC00..=0xD7A3 => Script::Hangul,
0xD7B0..=0xD7C6 => Script::Hangul,
0xD7CB..=0xD7FB => Script::Hangul,
0xFFA0..=0xFFBE => Script::Hangul,
0xFFC2..=0xFFC7 => Script::Hangul,
0xFFCA..=0xFFCF => Script::Hangul,
0xFFD2..=0xFFD7 => Script::Hangul,
0xFFDA..=0xFFDC => Script::Hangul,
0x1200..=0x1248 => Script::Ethiopic,
0x124A..=0x124D => Script::Ethiopic,
0x1250..=0x1256 => Script::Ethiopic,
0x1258 => Script::Ethiopic,
0x125A..=0x125D => Script::Ethiopic,
0x1260..=0x1288 => Script::Ethiopic,
0x128A..=0x128D => Script::Ethiopic,
0x1290..=0x12B0 => Script::Ethiopic,
0x12B2..=0x12B5 => Script::Ethiopic,
0x12B8..=0x12BE => Script::Ethiopic,
0x12C0 => Script::Ethiopic,
0x12C2..=0x12C5 => Script::Ethiopic,
0x12C8..=0x12D6 => Script::Ethiopic,
0x12D8..=0x1310 => Script::Ethiopic,
0x1312..=0x1315 => Script::Ethiopic,
0x1318..=0x135A => Script::Ethiopic,
0x135D..=0x135F => Script::Ethiopic,
0x1360..=0x1368 => Script::Ethiopic,
0x1369..=0x137C => Script::Ethiopic,
0x1380..=0x138F => Script::Ethiopic,
0x1390..=0x1399 => Script::Ethiopic,
0x2D80..=0x2D96 => Script::Ethiopic,
0x2DA0..=0x2DA6 => Script::Ethiopic,
0x2DA8..=0x2DAE => Script::Ethiopic,
0x2DB0..=0x2DB6 => Script::Ethiopic,
0x2DB8..=0x2DBE => Script::Ethiopic,
0x2DC0..=0x2DC6 => Script::Ethiopic,
0x2DC8..=0x2DCE => Script::Ethiopic,
0x2DD0..=0x2DD6 => Script::Ethiopic,
0x2DD8..=0x2DDE => Script::Ethiopic,
0xAB01..=0xAB06 => Script::Ethiopic,
0xAB09..=0xAB0E => Script::Ethiopic,
0xAB11..=0xAB16 => Script::Ethiopic,
0xAB20..=0xAB26 => Script::Ethiopic,
0xAB28..=0xAB2E => Script::Ethiopic,
0x13A0..=0x13F5 => Script::Cherokee,
0x13F8..=0x13FD => Script::Cherokee,
0xAB70..=0xABBF => Script::Cherokee,
0x1400 => Script::CanadianAboriginal,
0x1401..=0x166C => Script::CanadianAboriginal,
0x166D..=0x166E => Script::CanadianAboriginal,
0x166F..=0x167F => Script::CanadianAboriginal,
0x18B0..=0x18F5 => Script::CanadianAboriginal,
0x1680 => Script::Ogham,
0x1681..=0x169A => Script::Ogham,
0x169B => Script::Ogham,
0x169C => Script::Ogham,
0x16A0..=0x16EA => Script::Runic,
0x16EE..=0x16F0 => Script::Runic,
0x16F1..=0x16F8 => Script::Runic,
0x1780..=0x17B3 => Script::Khmer,
0x17B4..=0x17B5 => Script::Khmer,
0x17B6 => Script::Khmer,
0x17B7..=0x17BD => Script::Khmer,
0x17BE..=0x17C5 => Script::Khmer,
0x17C6 => Script::Khmer,
0x17C7..=0x17C8 => Script::Khmer,
0x17C9..=0x17D3 => Script::Khmer,
0x17D4..=0x17D6 => Script::Khmer,
0x17D7 => Script::Khmer,
0x17D8..=0x17DA => Script::Khmer,
0x17DB => Script::Khmer,
0x17DC => Script::Khmer,
0x17DD => Script::Khmer,
0x17E0..=0x17E9 => Script::Khmer,
0x17F0..=0x17F9 => Script::Khmer,
0x19E0..=0x19FF => Script::Khmer,
0x1800..=0x1801 => Script::Mongolian,
0x1804 => Script::Mongolian,
0x1806 => Script::Mongolian,
0x1807..=0x180A => Script::Mongolian,
0x180B..=0x180D => Script::Mongolian,
0x180E => Script::Mongolian,
0x1810..=0x1819 => Script::Mongolian,
0x1820..=0x1842 => Script::Mongolian,
0x1843 => Script::Mongolian,
0x1844..=0x1877 => Script::Mongolian,
0x1880..=0x1884 => Script::Mongolian,
0x1885..=0x1886 => Script::Mongolian,
0x1887..=0x18A8 => Script::Mongolian,
0x18A9 => Script::Mongolian,
0x18AA => Script::Mongolian,
0x11660..=0x1166C => Script::Mongolian,
0x3041..=0x3096 => Script::Hiragana,
0x309D..=0x309E => Script::Hiragana,
0x309F => Script::Hiragana,
0x1B001 => Script::Hiragana,
0x1F200 => Script::Hiragana,
0x30A1..=0x30FA => Script::Katakana,
0x30FD..=0x30FE => Script::Katakana,
0x30FF => Script::Katakana,
0x31F0..=0x31FF => Script::Katakana,
0x32D0..=0x32FE => Script::Katakana,
0x3300..=0x3357 => Script::Katakana,
0xFF66..=0xFF6F => Script::Katakana,
0xFF71..=0xFF9D => Script::Katakana,
0x1B000 => Script::Katakana,
0x02EA..=0x02EB => Script::Bopomofo,
0x3105..=0x312D => Script::Bopomofo,
0x31A0..=0x31BA => Script::Bopomofo,
0x2E80..=0x2E99 => Script::Han,
0x2E9B..=0x2EF3 => Script::Han,
0x2F00..=0x2FD5 => Script::Han,
0x3005 => Script::Han,
0x3007 => Script::Han,
0x3021..=0x3029 => Script::Han,
0x3038..=0x303A => Script::Han,
0x303B => Script::Han,
0x3400..=0x4DB5 => Script::Han,
0x4E00..=0x9FD5 => Script::Han,
0xF900..=0xFA6D => Script::Han,
0xFA70..=0xFAD9 => Script::Han,
0x20000..=0x2A6D6 => Script::Han,
0x2A700..=0x2B734 => Script::Han,
0x2B740..=0x2B81D => Script::Han,
0x2B820..=0x2CEA1 => Script::Han,
0x2F800..=0x2FA1D => Script::Han,
0xA000..=0xA014 => Script::Yi,
0xA015 => Script::Yi,
0xA016..=0xA48C => Script::Yi,
0xA490..=0xA4C6 => Script::Yi,
0x10300..=0x1031F => Script::OldItalic,
0x10320..=0x10323 => Script::OldItalic,
0x10330..=0x10340 => Script::Gothic,
0x10341 => Script::Gothic,
0x10342..=0x10349 => Script::Gothic,
0x1034A => Script::Gothic,
0x10400..=0x1044F => Script::Deseret,
0x0300..=0x036F => Script::Inherited,
0x0485..=0x0486 => Script::Inherited,
0x064B..=0x0655 => Script::Inherited,
0x0670 => Script::Inherited,
0x0951..=0x0952 => Script::Inherited,
0x1AB0..=0x1ABD => Script::Inherited,
0x1ABE => Script::Inherited,
0x1CD0..=0x1CD2 => Script::Inherited,
0x1CD4..=0x1CE0 => Script::Inherited,
0x1CE2..=0x1CE8 => Script::Inherited,
0x1CED => Script::Inherited,
0x1CF4 => Script::Inherited,
0x1CF8..=0x1CF9 => Script::Inherited,
0x1DC0..=0x1DF5 => Script::Inherited,
0x1DFB..=0x1DFF => Script::Inherited,
0x200C..=0x200D => Script::Inherited,
0x20D0..=0x20DC => Script::Inherited,
0x20DD..=0x20E0 => Script::Inherited,
0x20E1 => Script::Inherited,
0x20E2..=0x20E4 => Script::Inherited,
0x20E5..=0x20F0 => Script::Inherited,
0x302A..=0x302D => Script::Inherited,
0x3099..=0x309A => Script::Inherited,
0xFE00..=0xFE0F => Script::Inherited,
0xFE20..=0xFE2D => Script::Inherited,
0x101FD => Script::Inherited,
0x102E0 => Script::Inherited,
0x1D167..=0x1D169 => Script::Inherited,
0x1D17B..=0x1D182 => Script::Inherited,
0x1D185..=0x1D18B => Script::Inherited,
0x1D1AA..=0x1D1AD => Script::Inherited,
0xE0100..=0xE01EF => Script::Inherited,
0x1700..=0x170C => Script::Tagalog,
0x170E..=0x1711 => Script::Tagalog,
0x1712..=0x1714 => Script::Tagalog,
0x1720..=0x1731 => Script::Hanunoo,
0x1732..=0x1734 => Script::Hanunoo,
0x1740..=0x1751 => Script::Buhid,
0x1752..=0x1753 => Script::Buhid,
0x1760..=0x176C => Script::Tagbanwa,
0x176E..=0x1770 => Script::Tagbanwa,
0x1772..=0x1773 => Script::Tagbanwa,
0x1900..=0x191E => Script::Limbu,
0x1920..=0x1922 => Script::Limbu,
0x1923..=0x1926 => Script::Limbu,
0x1927..=0x1928 => Script::Limbu,
0x1929..=0x192B => Script::Limbu,
0x1930..=0x1931 => Script::Limbu,
0x1932 => Script::Limbu,
0x1933..=0x1938 => Script::Limbu,
0x1939..=0x193B => Script::Limbu,
0x1940 => Script::Limbu,
0x1944..=0x1945 => Script::Limbu,
0x1946..=0x194F => Script::Limbu,
0x1950..=0x196D => Script::TaiLe,
0x1970..=0x1974 => Script::TaiLe,
0x10000..=0x1000B => Script::LinearB,
0x1000D..=0x10026 => Script::LinearB,
0x10028..=0x1003A => Script::LinearB,
0x1003C..=0x1003D => Script::LinearB,
0x1003F..=0x1004D => Script::LinearB,
0x10050..=0x1005D => Script::LinearB,
0x10080..=0x100FA => Script::LinearB,
0x10380..=0x1039D => Script::Ugaritic,
0x1039F => Script::Ugaritic,
0x10450..=0x1047F => Script::Shavian,
0x10480..=0x1049D => Script::Osmanya,
0x104A0..=0x104A9 => Script::Osmanya,
0x10800..=0x10805 => Script::Cypriot,
0x10808 => Script::Cypriot,
0x1080A..=0x10835 => Script::Cypriot,
0x10837..=0x10838 => Script::Cypriot,
0x1083C => Script::Cypriot,
0x1083F => Script::Cypriot,
0x2800..=0x28FF => Script::Braille,
0x1A00..=0x1A16 => Script::Buginese,
0x1A17..=0x1A18 => Script::Buginese,
0x1A19..=0x1A1A => Script::Buginese,
0x1A1B => Script::Buginese,
0x1A1E..=0x1A1F => Script::Buginese,
0x03E2..=0x03EF => Script::Coptic,
0x2C80..=0x2CE4 => Script::Coptic,
0x2CE5..=0x2CEA => Script::Coptic,
0x2CEB..=0x2CEE => Script::Coptic,
0x2CEF..=0x2CF1 => Script::Coptic,
0x2CF2..=0x2CF3 => Script::Coptic,
0x2CF9..=0x2CFC => Script::Coptic,
0x2CFD => Script::Coptic,
0x2CFE..=0x2CFF => Script::Coptic,
0x1980..=0x19AB => Script::NewTaiLue,
0x19B0..=0x19C9 => Script::NewTaiLue,
0x19D0..=0x19D9 => Script::NewTaiLue,
0x19DA => Script::NewTaiLue,
0x19DE..=0x19DF => Script::NewTaiLue,
0x2C00..=0x2C2E => Script::Glagolitic,
0x2C30..=0x2C5E => Script::Glagolitic,
0x1E000..=0x1E006 => Script::Glagolitic,
0x1E008..=0x1E018 => Script::Glagolitic,
0x1E01B..=0x1E021 => Script::Glagolitic,
0x1E023..=0x1E024 => Script::Glagolitic,
0x1E026..=0x1E02A => Script::Glagolitic,
0x2D30..=0x2D67 => Script::Tifinagh,
0x2D6F => Script::Tifinagh,
0x2D70 => Script::Tifinagh,
0x2D7F => Script::Tifinagh,
0xA800..=0xA801 => Script::SylotiNagri,
0xA802 => Script::SylotiNagri,
0xA803..=0xA805 => Script::SylotiNagri,
0xA806 => Script::SylotiNagri,
0xA807..=0xA80A => Script::SylotiNagri,
0xA80B => Script::SylotiNagri,
0xA80C..=0xA822 => Script::SylotiNagri,
0xA823..=0xA824 => Script::SylotiNagri,
0xA825..=0xA826 => Script::SylotiNagri,
0xA827 => Script::SylotiNagri,
0xA828..=0xA82B => Script::SylotiNagri,
0x103A0..=0x103C3 => Script::OldPersian,
0x103C8..=0x103CF => Script::OldPersian,
0x103D0 => Script::OldPersian,
0x103D1..=0x103D5 => Script::OldPersian,
0x10A00 => Script::Kharoshthi,
0x10A01..=0x10A03 => Script::Kharoshthi,
0x10A05..=0x10A06 => Script::Kharoshthi,
0x10A0C..=0x10A0F => Script::Kharoshthi,
0x10A10..=0x10A13 => Script::Kharoshthi,
0x10A15..=0x10A17 => Script::Kharoshthi,
0x10A19..=0x10A33 => Script::Kharoshthi,
0x10A38..=0x10A3A => Script::Kharoshthi,
0x10A3F => Script::Kharoshthi,
0x10A40..=0x10A47 => Script::Kharoshthi,
0x10A50..=0x10A58 => Script::Kharoshthi,
0x1B00..=0x1B03 => Script::Balinese,
0x1B04 => Script::Balinese,
0x1B05..=0x1B33 => Script::Balinese,
0x1B34 => Script::Balinese,
0x1B35 => Script::Balinese,
0x1B36..=0x1B3A => Script::Balinese,
0x1B3B => Script::Balinese,
0x1B3C => Script::Balinese,
0x1B3D..=0x1B41 => Script::Balinese,
0x1B42 => Script::Balinese,
0x1B43..=0x1B44 => Script::Balinese,
0x1B45..=0x1B4B => Script::Balinese,
0x1B50..=0x1B59 => Script::Balinese,
0x1B5A..=0x1B60 => Script::Balinese,
0x1B61..=0x1B6A => Script::Balinese,
0x1B6B..=0x1B73 => Script::Balinese,
0x1B74..=0x1B7C => Script::Balinese,
0x12000..=0x12399 => Script::Cuneiform,
0x12400..=0x1246E => Script::Cuneiform,
0x12470..=0x12474 => Script::Cuneiform,
0x12480..=0x12543 => Script::Cuneiform,
0x10900..=0x10915 => Script::Phoenician,
0x10916..=0x1091B => Script::Phoenician,
0x1091F => Script::Phoenician,
0xA840..=0xA873 => Script::PhagsPa,
0xA874..=0xA877 => Script::PhagsPa,
0x07C0..=0x07C9 => Script::Nko,
0x07CA..=0x07EA => Script::Nko,
0x07EB..=0x07F3 => Script::Nko,
0x07F4..=0x07F5 => Script::Nko,
0x07F6 => Script::Nko,
0x07F7..=0x07F9 => Script::Nko,
0x07FA => Script::Nko,
0x1B80..=0x1B81 => Script::Sundanese,
0x1B82 => Script::Sundanese,
0x1B83..=0x1BA0 => Script::Sundanese,
0x1BA1 => Script::Sundanese,
0x1BA2..=0x1BA5 => Script::Sundanese,
0x1BA6..=0x1BA7 => Script::Sundanese,
0x1BA8..=0x1BA9 => Script::Sundanese,
0x1BAA => Script::Sundanese,
0x1BAB..=0x1BAD => Script::Sundanese,
0x1BAE..=0x1BAF => Script::Sundanese,
0x1BB0..=0x1BB9 => Script::Sundanese,
0x1BBA..=0x1BBF => Script::Sundanese,
0x1CC0..=0x1CC7 => Script::Sundanese,
0x1C00..=0x1C23 => Script::Lepcha,
0x1C24..=0x1C2B => Script::Lepcha,
0x1C2C..=0x1C33 => Script::Lepcha,
0x1C34..=0x1C35 => Script::Lepcha,
0x1C36..=0x1C37 => Script::Lepcha,
0x1C3B..=0x1C3F => Script::Lepcha,
0x1C40..=0x1C49 => Script::Lepcha,
0x1C4D..=0x1C4F => Script::Lepcha,
0x1C50..=0x1C59 => Script::OlChiki,
0x1C5A..=0x1C77 => Script::OlChiki,
0x1C78..=0x1C7D => Script::OlChiki,
0x1C7E..=0x1C7F => Script::OlChiki,
0xA500..=0xA60B => Script::Vai,
0xA60C => Script::Vai,
0xA60D..=0xA60F => Script::Vai,
0xA610..=0xA61F => Script::Vai,
0xA620..=0xA629 => Script::Vai,
0xA62A..=0xA62B => Script::Vai,
0xA880..=0xA881 => Script::Saurashtra,
0xA882..=0xA8B3 => Script::Saurashtra,
0xA8B4..=0xA8C3 => Script::Saurashtra,
0xA8C4..=0xA8C5 => Script::Saurashtra,
0xA8CE..=0xA8CF => Script::Saurashtra,
0xA8D0..=0xA8D9 => Script::Saurashtra,
0xA900..=0xA909 => Script::KayahLi,
0xA90A..=0xA925 => Script::KayahLi,
0xA926..=0xA92D => Script::KayahLi,
0xA92F => Script::KayahLi,
0xA930..=0xA946 => Script::Rejang,
0xA947..=0xA951 => Script::Rejang,
0xA952..=0xA953 => Script::Rejang,
0xA95F => Script::Rejang,
0x10280..=0x1029C => Script::Lycian,
0x102A0..=0x102D0 => Script::Carian,
0x10920..=0x10939 => Script::Lydian,
0x1093F => Script::Lydian,
0xAA00..=0xAA28 => Script::Cham,
0xAA29..=0xAA2E => Script::Cham,
0xAA2F..=0xAA30 => Script::Cham,
0xAA31..=0xAA32 => Script::Cham,
0xAA33..=0xAA34 => Script::Cham,
0xAA35..=0xAA36 => Script::Cham,
0xAA40..=0xAA42 => Script::Cham,
0xAA43 => Script::Cham,
0xAA44..=0xAA4B => Script::Cham,
0xAA4C => Script::Cham,
0xAA4D => Script::Cham,
0xAA50..=0xAA59 => Script::Cham,
0xAA5C..=0xAA5F => Script::Cham,
0x1A20..=0x1A54 => Script::TaiTham,
0x1A55 => Script::TaiTham,
0x1A56 => Script::TaiTham,
0x1A57 => Script::TaiTham,
0x1A58..=0x1A5E => Script::TaiTham,
0x1A60 => Script::TaiTham,
0x1A61 => Script::TaiTham,
0x1A62 => Script::TaiTham,
0x1A63..=0x1A64 => Script::TaiTham,
0x1A65..=0x1A6C => Script::TaiTham,
0x1A6D..=0x1A72 => Script::TaiTham,
0x1A73..=0x1A7C => Script::TaiTham,
0x1A7F => Script::TaiTham,
0x1A80..=0x1A89 => Script::TaiTham,
0x1A90..=0x1A99 => Script::TaiTham,
0x1AA0..=0x1AA6 => Script::TaiTham,
0x1AA7 => Script::TaiTham,
0x1AA8..=0x1AAD => Script::TaiTham,
0xAA80..=0xAAAF => Script::TaiViet,
0xAAB0 => Script::TaiViet,
0xAAB1 => Script::TaiViet,
0xAAB2..=0xAAB4 => Script::TaiViet,
0xAAB5..=0xAAB6 => Script::TaiViet,
0xAAB7..=0xAAB8 => Script::TaiViet,
0xAAB9..=0xAABD => Script::TaiViet,
0xAABE..=0xAABF => Script::TaiViet,
0xAAC0 => Script::TaiViet,
0xAAC1 => Script::TaiViet,
0xAAC2 => Script::TaiViet,
0xAADB..=0xAADC => Script::TaiViet,
0xAADD => Script::TaiViet,
0xAADE..=0xAADF => Script::TaiViet,
0x10B00..=0x10B35 => Script::Avestan,
0x10B39..=0x10B3F => Script::Avestan,
0x13000..=0x1342E => Script::EgyptianHieroglyphs,
0x0800..=0x0815 => Script::Samaritan,
0x0816..=0x0819 => Script::Samaritan,
0x081A => Script::Samaritan,
0x081B..=0x0823 => Script::Samaritan,
0x0824 => Script::Samaritan,
0x0825..=0x0827 => Script::Samaritan,
0x0828 => Script::Samaritan,
0x0829..=0x082D => Script::Samaritan,
0x0830..=0x083E => Script::Samaritan,
0xA4D0..=0xA4F7 => Script::Lisu,
0xA4F8..=0xA4FD => Script::Lisu,
0xA4FE..=0xA4FF => Script::Lisu,
0xA6A0..=0xA6E5 => Script::Bamum,
0xA6E6..=0xA6EF => Script::Bamum,
0xA6F0..=0xA6F1 => Script::Bamum,
0xA6F2..=0xA6F7 => Script::Bamum,
0x16800..=0x16A38 => Script::Bamum,
0xA980..=0xA982 => Script::Javanese,
0xA983 => Script::Javanese,
0xA984..=0xA9B2 => Script::Javanese,
0xA9B3 => Script::Javanese,
0xA9B4..=0xA9B5 => Script::Javanese,
0xA9B6..=0xA9B9 => Script::Javanese,
0xA9BA..=0xA9BB => Script::Javanese,
0xA9BC => Script::Javanese,
0xA9BD..=0xA9C0 => Script::Javanese,
0xA9C1..=0xA9CD => Script::Javanese,
0xA9D0..=0xA9D9 => Script::Javanese,
0xA9DE..=0xA9DF => Script::Javanese,
0xAAE0..=0xAAEA => Script::MeeteiMayek,
0xAAEB => Script::MeeteiMayek,
0xAAEC..=0xAAED => Script::MeeteiMayek,
0xAAEE..=0xAAEF => Script::MeeteiMayek,
0xAAF0..=0xAAF1 => Script::MeeteiMayek,
0xAAF2 => Script::MeeteiMayek,
0xAAF3..=0xAAF4 => Script::MeeteiMayek,
0xAAF5 => Script::MeeteiMayek,
0xAAF6 => Script::MeeteiMayek,
0xABC0..=0xABE2 => Script::MeeteiMayek,
0xABE3..=0xABE4 => Script::MeeteiMayek,
0xABE5 => Script::MeeteiMayek,
0xABE6..=0xABE7 => Script::MeeteiMayek,
0xABE8 => Script::MeeteiMayek,
0xABE9..=0xABEA => Script::MeeteiMayek,
0xABEB => Script::MeeteiMayek,
0xABEC => Script::MeeteiMayek,
0xABED => Script::MeeteiMayek,
0xABF0..=0xABF9 => Script::MeeteiMayek,
0x10840..=0x10855 => Script::ImperialAramaic,
0x10857 => Script::ImperialAramaic,
0x10858..=0x1085F => Script::ImperialAramaic,
0x10A60..=0x10A7C => Script::OldSouthArabian,
0x10A7D..=0x10A7E => Script::OldSouthArabian,
0x10A7F => Script::OldSouthArabian,
0x10B40..=0x10B55 => Script::InscriptionalParthian,
0x10B58..=0x10B5F => Script::InscriptionalParthian,
0x10B60..=0x10B72 => Script::InscriptionalPahlavi,
0x10B78..=0x10B7F => Script::InscriptionalPahlavi,
0x10C00..=0x10C48 => Script::OldTurkic,
0x11080..=0x11081 => Script::Kaithi,
0x11082 => Script::Kaithi,
0x11083..=0x110AF => Script::Kaithi,
0x110B0..=0x110B2 => Script::Kaithi,
0x110B3..=0x110B6 => Script::Kaithi,
0x110B7..=0x110B8 => Script::Kaithi,
0x110B9..=0x110BA => Script::Kaithi,
0x110BB..=0x110BC => Script::Kaithi,
0x110BD => Script::Kaithi,
0x110BE..=0x110C1 => Script::Kaithi,
0x1BC0..=0x1BE5 => Script::Batak,
0x1BE6 => Script::Batak,
0x1BE7 => Script::Batak,
0x1BE8..=0x1BE9 => Script::Batak,
0x1BEA..=0x1BEC => Script::Batak,
0x1BED => Script::Batak,
0x1BEE => Script::Batak,
0x1BEF..=0x1BF1 => Script::Batak,
0x1BF2..=0x1BF3 => Script::Batak,
0x1BFC..=0x1BFF => Script::Batak,
0x11000 => Script::Brahmi,
0x11001 => Script::Brahmi,
0x11002 => Script::Brahmi,
0x11003..=0x11037 => Script::Brahmi,
0x11038..=0x11046 => Script::Brahmi,
0x11047..=0x1104D => Script::Brahmi,
0x11052..=0x11065 => Script::Brahmi,
0x11066..=0x1106F => Script::Brahmi,
0x1107F => Script::Brahmi,
0x0840..=0x0858 => Script::Mandaic,
0x0859..=0x085B => Script::Mandaic,
0x085E => Script::Mandaic,
0x11100..=0x11102 => Script::Chakma,
0x11103..=0x11126 => Script::Chakma,
0x11127..=0x1112B => Script::Chakma,
0x1112C => Script::Chakma,
0x1112D..=0x11134 => Script::Chakma,
0x11136..=0x1113F => Script::Chakma,
0x11140..=0x11143 => Script::Chakma,
0x109A0..=0x109B7 => Script::MeroiticCursive,
0x109BC..=0x109BD => Script::MeroiticCursive,
0x109BE..=0x109BF => Script::MeroiticCursive,
0x109C0..=0x109CF => Script::MeroiticCursive,
0x109D2..=0x109FF => Script::MeroiticCursive,
0x10980..=0x1099F => Script::MeroiticHieroglyphs,
0x16F00..=0x16F44 => Script::Miao,
0x16F50 => Script::Miao,
0x16F51..=0x16F7E => Script::Miao,
0x16F8F..=0x16F92 => Script::Miao,
0x16F93..=0x16F9F => Script::Miao,
0x11180..=0x11181 => Script::Sharada,
0x11182 => Script::Sharada,
0x11183..=0x111B2 => Script::Sharada,
0x111B3..=0x111B5 => Script::Sharada,
0x111B6..=0x111BE => Script::Sharada,
0x111BF..=0x111C0 => Script::Sharada,
0x111C1..=0x111C4 => Script::Sharada,
0x111C5..=0x111C9 => Script::Sharada,
0x111CA..=0x111CC => Script::Sharada,
0x111CD => Script::Sharada,
0x111D0..=0x111D9 => Script::Sharada,
0x111DA => Script::Sharada,
0x111DB => Script::Sharada,
0x111DC => Script::Sharada,
0x111DD..=0x111DF => Script::Sharada,
0x110D0..=0x110E8 => Script::SoraSompeng,
0x110F0..=0x110F9 => Script::SoraSompeng,
0x11680..=0x116AA => Script::Takri,
0x116AB => Script::Takri,
0x116AC => Script::Takri,
0x116AD => Script::Takri,
0x116AE..=0x116AF => Script::Takri,
0x116B0..=0x116B5 => Script::Takri,
0x116B6 => Script::Takri,
0x116B7 => Script::Takri,
0x116C0..=0x116C9 => Script::Takri,
0x10530..=0x10563 => Script::CaucasianAlbanian,
0x1056F => Script::CaucasianAlbanian,
0x16AD0..=0x16AED => Script::BassaVah,
0x16AF0..=0x16AF4 => Script::BassaVah,
0x16AF5 => Script::BassaVah,
0x1BC00..=0x1BC6A => Script::Duployan,
0x1BC70..=0x1BC7C => Script::Duployan,
0x1BC80..=0x1BC88 => Script::Duployan,
0x1BC90..=0x1BC99 => Script::Duployan,
0x1BC9C => Script::Duployan,
0x1BC9D..=0x1BC9E => Script::Duployan,
0x1BC9F => Script::Duployan,
0x10500..=0x10527 => Script::Elbasan,
0x11300..=0x11301 => Script::Grantha,
0x11302..=0x11303 => Script::Grantha,
0x11305..=0x1130C => Script::Grantha,
0x1130F..=0x11310 => Script::Grantha,
0x11313..=0x11328 => Script::Grantha,
0x1132A..=0x11330 => Script::Grantha,
0x11332..=0x11333 => Script::Grantha,
0x11335..=0x11339 => Script::Grantha,
0x1133C => Script::Grantha,
0x1133D => Script::Grantha,
0x1133E..=0x1133F => Script::Grantha,
0x11340 => Script::Grantha,
0x11341..=0x11344 => Script::Grantha,
0x11347..=0x11348 => Script::Grantha,
0x1134B..=0x1134D => Script::Grantha,
0x11350 => Script::Grantha,
0x11357 => Script::Grantha,
0x1135D..=0x11361 => Script::Grantha,
0x11362..=0x11363 => Script::Grantha,
0x11366..=0x1136C => Script::Grantha,
0x11370..=0x11374 => Script::Grantha,
0x16B00..=0x16B2F => Script::PahawhHmong,
0x16B30..=0x16B36 => Script::PahawhHmong,
0x16B37..=0x16B3B => Script::PahawhHmong,
0x16B3C..=0x16B3F => Script::PahawhHmong,
0x16B40..=0x16B43 => Script::PahawhHmong,
0x16B44 => Script::PahawhHmong,
0x16B45 => Script::PahawhHmong,
0x16B50..=0x16B59 => Script::PahawhHmong,
0x16B5B..=0x16B61 => Script::PahawhHmong,
0x16B63..=0x16B77 => Script::PahawhHmong,
0x16B7D..=0x16B8F => Script::PahawhHmong,
0x11200..=0x11211 => Script::Khojki,
0x11213..=0x1122B => Script::Khojki,
0x1122C..=0x1122E => Script::Khojki,
0x1122F..=0x11231 => Script::Khojki,
0x11232..=0x11233 => Script::Khojki,
0x11234 => Script::Khojki,
0x11235 => Script::Khojki,
0x11236..=0x11237 => Script::Khojki,
0x11238..=0x1123D => Script::Khojki,
0x1123E => Script::Khojki,
0x10600..=0x10736 => Script::LinearA,
0x10740..=0x10755 => Script::LinearA,
0x10760..=0x10767 => Script::LinearA,
0x11150..=0x11172 => Script::Mahajani,
0x11173 => Script::Mahajani,
0x11174..=0x11175 => Script::Mahajani,
0x11176 => Script::Mahajani,
0x10AC0..=0x10AC7 => Script::Manichaean,
0x10AC8 => Script::Manichaean,
0x10AC9..=0x10AE4 => Script::Manichaean,
0x10AE5..=0x10AE6 => Script::Manichaean,
0x10AEB..=0x10AEF => Script::Manichaean,
0x10AF0..=0x10AF6 => Script::Manichaean,
0x1E800..=0x1E8C4 => Script::MendeKikakui,
0x1E8C7..=0x1E8CF => Script::MendeKikakui,
0x1E8D0..=0x1E8D6 => Script::MendeKikakui,
0x11600..=0x1162F => Script::Modi,
0x11630..=0x11632 => Script::Modi,
0x11633..=0x1163A => Script::Modi,
0x1163B..=0x1163C => Script::Modi,
0x1163D => Script::Modi,
0x1163E => Script::Modi,
0x1163F..=0x11640 => Script::Modi,
0x11641..=0x11643 => Script::Modi,
0x11644 => Script::Modi,
0x11650..=0x11659 => Script::Modi,
0x16A40..=0x16A5E => Script::Mro,
0x16A60..=0x16A69 => Script::Mro,
0x16A6E..=0x16A6F => Script::Mro,
0x10A80..=0x10A9C => Script::OldNorthArabian,
0x10A9D..=0x10A9F => Script::OldNorthArabian,
0x10880..=0x1089E => Script::Nabataean,
0x108A7..=0x108AF => Script::Nabataean,
0x10860..=0x10876 => Script::Palmyrene,
0x10877..=0x10878 => Script::Palmyrene,
0x10879..=0x1087F => Script::Palmyrene,
0x11AC0..=0x11AF8 => Script::PauCinHau,
0x10350..=0x10375 => Script::OldPermic,
0x10376..=0x1037A => Script::OldPermic,
0x10B80..=0x10B91 => Script::PsalterPahlavi,
0x10B99..=0x10B9C => Script::PsalterPahlavi,
0x10BA9..=0x10BAF => Script::PsalterPahlavi,
0x11580..=0x115AE => Script::Siddham,
0x115AF..=0x115B1 => Script::Siddham,
0x115B2..=0x115B5 => Script::Siddham,
0x115B8..=0x115BB => Script::Siddham,
0x115BC..=0x115BD => Script::Siddham,
0x115BE => Script::Siddham,
0x115BF..=0x115C0 => Script::Siddham,
0x115C1..=0x115D7 => Script::Siddham,
0x115D8..=0x115DB => Script::Siddham,
0x115DC..=0x115DD => Script::Siddham,
0x112B0..=0x112DE => Script::Khudawadi,
0x112DF => Script::Khudawadi,
0x112E0..=0x112E2 => Script::Khudawadi,
0x112E3..=0x112EA => Script::Khudawadi,
0x112F0..=0x112F9 => Script::Khudawadi,
0x11480..=0x114AF => Script::Tirhuta,
0x114B0..=0x114B2 => Script::Tirhuta,
0x114B3..=0x114B8 => Script::Tirhuta,
0x114B9 => Script::Tirhuta,
0x114BA => Script::Tirhuta,
0x114BB..=0x114BE => Script::Tirhuta,
0x114BF..=0x114C0 => Script::Tirhuta,
0x114C1 => Script::Tirhuta,
0x114C2..=0x114C3 => Script::Tirhuta,
0x114C4..=0x114C5 => Script::Tirhuta,
0x114C6 => Script::Tirhuta,
0x114C7 => Script::Tirhuta,
0x114D0..=0x114D9 => Script::Tirhuta,
0x118A0..=0x118DF => Script::WarangCiti,
0x118E0..=0x118E9 => Script::WarangCiti,
0x118EA..=0x118F2 => Script::WarangCiti,
0x118FF => Script::WarangCiti,
0x11700..=0x11719 => Script::Ahom,
0x1171D..=0x1171F => Script::Ahom,
0x11720..=0x11721 => Script::Ahom,
0x11722..=0x11725 => Script::Ahom,
0x11726 => Script::Ahom,
0x11727..=0x1172B => Script::Ahom,
0x11730..=0x11739 => Script::Ahom,
0x1173A..=0x1173B => Script::Ahom,
0x1173C..=0x1173E => Script::Ahom,
0x1173F => Script::Ahom,
0x14400..=0x14646 => Script::AnatolianHieroglyphs,
0x108E0..=0x108F2 => Script::Hatran,
0x108F4..=0x108F5 => Script::Hatran,
0x108FB..=0x108FF => Script::Hatran,
0x11280..=0x11286 => Script::Multani,
0x11288 => Script::Multani,
0x1128A..=0x1128D => Script::Multani,
0x1128F..=0x1129D => Script::Multani,
0x1129F..=0x112A8 => Script::Multani,
0x112A9 => Script::Multani,
0x10C80..=0x10CB2 => Script::OldHungarian,
0x10CC0..=0x10CF2 => Script::OldHungarian,
0x10CFA..=0x10CFF => Script::OldHungarian,
0x1D800..=0x1D9FF => Script::SignWriting,
0x1DA00..=0x1DA36 => Script::SignWriting,
0x1DA37..=0x1DA3A => Script::SignWriting,
0x1DA3B..=0x1DA6C => Script::SignWriting,
0x1DA6D..=0x1DA74 => Script::SignWriting,
0x1DA75 => Script::SignWriting,
0x1DA76..=0x1DA83 => Script::SignWriting,
0x1DA84 => Script::SignWriting,
0x1DA85..=0x1DA86 => Script::SignWriting,
0x1DA87..=0x1DA8B => Script::SignWriting,
0x1DA9B..=0x1DA9F => Script::SignWriting,
0x1DAA1..=0x1DAAF => Script::SignWriting,
0x1E900..=0x1E943 => Script::Adlam,
0x1E944..=0x1E94A => Script::Adlam,
0x1E950..=0x1E959 => Script::Adlam,
0x1E95E..=0x1E95F => Script::Adlam,
0x11C00..=0x11C08 => Script::Bhaiksuki,
0x11C0A..=0x11C2E => Script::Bhaiksuki,
0x11C2F => Script::Bhaiksuki,
0x11C30..=0x11C36 => Script::Bhaiksuki,
0x11C38..=0x11C3D => Script::Bhaiksuki,
0x11C3E => Script::Bhaiksuki,
0x11C3F => Script::Bhaiksuki,
0x11C40 => Script::Bhaiksuki,
0x11C41..=0x11C45 => Script::Bhaiksuki,
0x11C50..=0x11C59 => Script::Bhaiksuki,
0x11C5A..=0x11C6C => Script::Bhaiksuki,
0x11C70..=0x11C71 => Script::Marchen,
0x11C72..=0x11C8F => Script::Marchen,
0x11C92..=0x11CA7 => Script::Marchen,
0x11CA9 => Script::Marchen,
0x11CAA..=0x11CB0 => Script::Marchen,
0x11CB1 => Script::Marchen,
0x11CB2..=0x11CB3 => Script::Marchen,
0x11CB4 => Script::Marchen,
0x11CB5..=0x11CB6 => Script::Marchen,
0x11400..=0x11434 => Script::Newa,
0x11435..=0x11437 => Script::Newa,
0x11438..=0x1143F => Script::Newa,
0x11440..=0x11441 => Script::Newa,
0x11442..=0x11444 => Script::Newa,
0x11445 => Script::Newa,
0x11446 => Script::Newa,
0x11447..=0x1144A => Script::Newa,
0x1144B..=0x1144F => Script::Newa,
0x11450..=0x11459 => Script::Newa,
0x1145B => Script::Newa,
0x1145D => Script::Newa,
0x104B0..=0x104D3 => Script::Osage,
0x104D8..=0x104FB => Script::Osage,
0x16FE0 => Script::Tangut,
0x17000..=0x187EC => Script::Tangut,
0x18800..=0x18AF2 => Script::Tangut,
_ => Script::Any,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_unicode_script() {
assert_eq!(Script::Han, get_script('京'));
assert_eq!(Script::Han, get_script('太'));
assert_eq!(Script::Hiragana, get_script('い'));
assert_eq!(Script::Katakana, get_script('グ'));
assert_eq!(Script::Common, get_script('ー'));
assert_eq!(Script::Latin, get_script('a'));
assert_eq!(Script::Latin, get_script('A'));
assert_eq!(Script::Common, get_script('0'));
assert_eq!(Script::Common, get_script('$'));
assert_eq!(Script::Common, get_script('@'));
assert_eq!(Script::Common, get_script('-'));
assert_eq!(Script::Common, get_script(' '));
assert_eq!(Script::Common, get_script('�'));
}
}
| tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/scripts.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/scripts.rs",
"repo_id": "tokenizers",
"token_count": 46440
} |
use crate::Result;
use hf_hub::{api::sync::ApiBuilder, Repo, RepoType};
use std::collections::HashMap;
use std::path::PathBuf;
/// Defines the additional parameters available for the `from_pretrained` function
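///
/// A small construction sketch (illustrative, not from the original source);
/// the `revision` value is an arbitrary example, and unspecified fields fall
/// back to their defaults:
///
/// ```ignore
/// let params = FromPretrainedParameters {
///     revision: "refs/pr/1".into(),
///     ..Default::default()
/// };
/// ```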
#[derive(Debug, Clone)]
pub struct FromPretrainedParameters {
pub revision: String,
pub user_agent: HashMap<String, String>,
pub token: Option<String>,
}
impl Default for FromPretrainedParameters {
fn default() -> Self {
Self {
revision: "main".into(),
user_agent: HashMap::new(),
token: None,
}
}
}
/// Downloads and caches the identified tokenizer if it exists on
/// the Hugging Face Hub, and returns a local path to the file
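///
/// A minimal usage sketch (illustrative; the model id is only an example and
/// the call needs network access, so the block is marked `ignore` rather than
/// run as a doc-test):
///
/// ```ignore
/// let path = from_pretrained("bert-base-uncased", None).unwrap();
/// println!("tokenizer.json cached at {}", path.display());
/// ```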
pub fn from_pretrained<S: AsRef<str>>(
identifier: S,
params: Option<FromPretrainedParameters>,
) -> Result<PathBuf> {
let identifier: String = identifier.as_ref().to_string();
let valid_chars = ['-', '_', '.', '/'];
let is_valid_char = |x: char| x.is_alphanumeric() || valid_chars.contains(&x);
let valid = identifier.chars().all(is_valid_char);
let valid_chars_stringified = valid_chars
.iter()
.fold(vec![], |mut buf, x| {
buf.push(format!("'{}'", x));
buf
})
.join(", "); // "'/', '-', '_', '.'"
if !valid {
return Err(format!(
"Model \"{}\" contains invalid characters, expected only alphanumeric or {valid_chars_stringified}",
identifier
)
.into());
}
let params = params.unwrap_or_default();
let revision = ¶ms.revision;
let valid_revision = revision.chars().all(is_valid_char);
if !valid_revision {
return Err(format!(
"Revision \"{}\" contains invalid characters, expected only alphanumeric or {valid_chars_stringified}",
revision
)
.into());
}
let mut builder = ApiBuilder::new();
if let Some(token) = params.token {
builder = builder.with_token(Some(token));
}
let api = builder.build()?;
let repo = Repo::with_revision(identifier, RepoType::Model, params.revision);
let api = api.repo(repo);
Ok(api.get("tokenizer.json")?)
}
| tokenizers/tokenizers/src/utils/from_pretrained.rs/0 | {
"file_path": "tokenizers/tokenizers/src/utils/from_pretrained.rs",
"repo_id": "tokenizers",
"token_count": 906
} |
#[cfg(not(debug_assertions))]
use assert_approx_eq::assert_approx_eq;
use std::collections::HashMap;
use std::fs::read_to_string;
use std::path::Path;
#[cfg(not(debug_assertions))]
use tokenizers::models::unigram::Lattice;
use tokenizers::models::unigram::Unigram;
use tokenizers::models::unigram::UnigramTrainer;
use tokenizers::tokenizer::Model;
#[test]
fn test_unigram_from_file() {
let model = Unigram::load(Path::new("data/unigram.json")).unwrap();
let string = "吾輩《わがはい》は猫である。名前はまだ無い。";
assert_eq!(
model
.tokenize(string)
.unwrap()
.iter()
.map(|tok| tok.value.clone())
.collect::<Vec<_>>(),
vec![
"吾輩",
"《",
"わが",
"はい",
"》",
"は",
"猫",
"である",
"。",
"名前",
"はまだ",
"無い",
"。"
]
);
}
#[test]
fn test_train_unigram_from_file() {
let content = read_to_string("data/small.txt").unwrap();
let mut word_counts = HashMap::new();
content.split_whitespace().for_each(|word| {
// This is important for the test of char vs u8
let word = format!("▁{word}");
*word_counts.entry(word).or_insert(0) += 1;
});
// println!("Words counts {:?}", word_counts);
let trainer = UnigramTrainer::builder()
.show_progress(false)
.unk_token(Some("<UNK>".into()))
.build()
.unwrap();
let mut model = Unigram::default();
let sentences: Vec<_> = word_counts
.iter()
.map(|(s, i)| (s.to_owned(), *i))
.collect();
trainer.do_train(sentences, &mut model).unwrap();
assert_eq!(model.get_vocab_size(), 719);
}
#[cfg(not(debug_assertions))]
#[test]
fn test_sample() {
let mut lattice = Lattice::from("ABC", 0, 2);
lattice.insert(0, 1, 1.0, 3); // A
lattice.insert(1, 1, 1.2, 4); // B
lattice.insert(2, 1, 1.5, 5); // C
lattice.insert(0, 2, 1.6, 6); // AB
lattice.insert(1, 2, 1.7, 7); // BC
lattice.insert(0, 3, 1.8, 8); // ABC
let thetas: Vec<f64> = vec![0.0, 0.01, 0.5, 0.7, 1.0];
for theta in thetas {
let mut probs: HashMap<String, f64> = HashMap::new();
probs.insert("A B C".to_string(), (theta * (1.0 + 1.2 + 1.5)).exp());
probs.insert("AB C".to_string(), (theta * (1.6 + 1.5)).exp());
probs.insert("A BC".to_string(), (theta * (1.0 + 1.7)).exp());
probs.insert("ABC".to_string(), (theta * (1.8)).exp());
// Computes expected probabilities.
let mut z = 0.0;
for (_, p) in probs.iter() {
z += p;
}
for (_, p) in probs.iter_mut() {
*p /= z;
}
let n_trials = 10_000;
let mut freq: HashMap<String, u32> = HashMap::new();
for _ in 0..n_trials {
let string = lattice.sample_token(theta).join(" ");
*freq.entry(string).or_insert(0) += 1;
}
assert_eq!(freq.len(), probs.len());
for (s, p) in probs.iter() {
assert_approx_eq!(1.0 * (freq[s] as f64) / (n_trials as f64), p, 0.03)
}
}
}
| tokenizers/tokenizers/tests/unigram.rs/0 | {
"file_path": "tokenizers/tokenizers/tests/unigram.rs",
"repo_id": "tokenizers",
"token_count": 1697
} |
# Running models on WebGPU
WebGPU is a new web standard for accelerated graphics and compute. The [API](https://developer.mozilla.org/en-US/docs/Web/API/WebGPU_API) enables web developers to use the underlying system's GPU to carry out high-performance computations directly in the browser. WebGPU is the successor to [WebGL](https://developer.mozilla.org/en-US/docs/Web/API/WebGL_API) and provides significantly better performance, because it allows for more direct interaction with modern GPUs. Lastly, it supports general-purpose GPU computations, which makes it just perfect for machine learning!
> [!WARNING]
> As of October 2024, global WebGPU support is around 70% (according to [caniuse.com](https://caniuse.com/webgpu)), meaning some users may not be able to use the API (a feature-detection sketch follows this note).
>
> If the following demos do not work in your browser, you may need to enable it using a feature flag:
>
> - Firefox: with the `dom.webgpu.enabled` flag (see [here](https://developer.mozilla.org/en-US/docs/Mozilla/Firefox/Experimental_features#:~:text=tested%20by%20Firefox.-,WebGPU%20API,-The%20WebGPU%20API)).
> - Safari: with the `WebGPU` feature flag (see [here](https://webkit.org/blog/14879/webgpu-now-available-for-testing-in-safari-technology-preview/)).
> - Older Chromium browsers (on Windows, macOS, Linux): with the `enable-unsafe-webgpu` flag (see [here](https://developer.chrome.com/docs/web-platform/webgpu/troubleshooting-tips)).
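If you want to guard against browsers where WebGPU is unavailable, you can feature-detect it at runtime before loading a model. The sketch below is not part of the original guide: it checks the standard `navigator.gpu` entry point and only requests the WebGPU backend when it is present, leaving `device` undefined otherwise on the assumption that the library then falls back to its default backend.
```js
import { pipeline } from "@huggingface/transformers";

// `navigator.gpu` is the WebGPU API entry point; it is undefined in browsers
// (or contexts) without WebGPU support.
const device = "gpu" in navigator ? "webgpu" : undefined;

// Same feature-extraction pipeline as the first example below, but the
// backend is chosen at runtime instead of being hard-coded.
const extractor = await pipeline(
  "feature-extraction",
  "mixedbread-ai/mxbai-embed-xsmall-v1",
  { device },
);
```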
## Usage in Transformers.js v3
Thanks to our collaboration with [ONNX Runtime Web](https://www.npmjs.com/package/onnxruntime-web), enabling WebGPU acceleration is as simple as setting `device: 'webgpu'` when loading a model. Let's see some examples!
**Example:** Compute text embeddings on WebGPU ([demo](https://v2.scrimba.com/s06a2smeej))
```js
import { pipeline } from "@huggingface/transformers";
// Create a feature-extraction pipeline
const extractor = await pipeline(
"feature-extraction",
"mixedbread-ai/mxbai-embed-xsmall-v1",
{ device: "webgpu" },
);
// Compute embeddings
const texts = ["Hello world!", "This is an example sentence."];
const embeddings = await extractor(texts, { pooling: "mean", normalize: true });
console.log(embeddings.tolist());
// [
// [-0.016986183822155, 0.03228696808218956, -0.0013630966423079371, ... ],
// [0.09050482511520386, 0.07207386940717697, 0.05762749910354614, ... ],
// ]
```
**Example:** Perform automatic speech recognition with OpenAI whisper on WebGPU ([demo](https://v2.scrimba.com/s0oi76h82g))
```js
import { pipeline } from "@huggingface/transformers";
// Create automatic speech recognition pipeline
const transcriber = await pipeline(
"automatic-speech-recognition",
"onnx-community/whisper-tiny.en",
{ device: "webgpu" },
);
// Transcribe audio from a URL
const url = "https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav";
const output = await transcriber(url);
console.log(output);
// { text: ' And so my fellow Americans ask not what your country can do for you, ask what you can do for your country.' }
```
**Example:** Perform image classification with MobileNetV4 on WebGPU ([demo](https://v2.scrimba.com/s0fv2uab1t))
```js
import { pipeline } from "@huggingface/transformers";
// Create image classification pipeline
const classifier = await pipeline(
"image-classification",
"onnx-community/mobilenetv4_conv_small.e2400_r224_in1k",
{ device: "webgpu" },
);
// Classify an image from a URL
const url = "https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/tiger.jpg";
const output = await classifier(url);
console.log(output);
// [
// { label: 'tiger, Panthera tigris', score: 0.6149784922599792 },
// { label: 'tiger cat', score: 0.30281734466552734 },
// { label: 'tabby, tabby cat', score: 0.0019135422771796584 },
// { label: 'lynx, catamount', score: 0.0012161266058683395 },
// { label: 'Egyptian cat', score: 0.0011465961579233408 }
// ]
```
## Reporting bugs and providing feedback
Due to the experimental nature of WebGPU, especially in non-Chromium browsers, you may experience issues when trying to run a model (even if it can run in WASM). If you do, please open [an issue on GitHub](https://github.com/huggingface/transformers.js/issues/new?title=[WebGPU]%20Error%20running%20MODEL_GOES_HERE&assignees=&labels=bug,webgpu&projects=&template=1_bug-report.yml) and we'll do our best to address it. Thanks!
| transformers.js/docs/source/guides/webgpu.md/0 | {
"file_path": "transformers.js/docs/source/guides/webgpu.md",
"repo_id": "transformers.js",
"token_count": 1452
} |
function formatBytes(bytes, decimals = 0) {
const sizes = ["Bytes", "KB", "MB", "GB", "TB"];
if (bytes === 0) return "0 Bytes";
const i = Math.floor(Math.log(bytes) / Math.log(1000));
const rounded = (bytes / Math.pow(1000, i)).toFixed(decimals);
return rounded + " " + sizes[i];
}
export default function Progress({ data }) {
const progress = data.progress ?? 0;
const text = data.file;
const a = formatBytes(data.loaded);
const b = formatBytes(data.total);
return (
<div className="progress-container">
<div className='progress-bar' style={{ 'width': `${progress}%` }}>{text} ({`${a} / ${b}`})</div>
</div>
);
} | transformers.js/examples/code-completion/src/components/Progress.jsx/0 | {
"file_path": "transformers.js/examples/code-completion/src/components/Progress.jsx",
"repo_id": "transformers.js",
"token_count": 278
} |
import { env, AutoTokenizer, AutoModelForSequenceClassification } from '@xenova/transformers';
// Skip local model check since we are downloading the model from the Hugging Face Hub.
env.allowLocalModels = false;
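// Use the Singleton pattern to lazily construct the tokenizer and model only
// once, then reuse them for every message the worker receives.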
class CrossEncoderSingleton {
static model_id = 'mixedbread-ai/mxbai-rerank-xsmall-v1';
static model = null;
static tokenizer = null;
static async getInstance(progress_callback) {
if (!this.tokenizer) {
this.tokenizer = AutoTokenizer.from_pretrained(this.model_id);
}
if (!this.model) {
this.model = AutoModelForSequenceClassification.from_pretrained(this.model_id, {
quantized: true,
progress_callback,
});
}
return Promise.all([this.tokenizer, this.model]);
}
}
// Listen for messages from the main thread
self.addEventListener('message', async (event) => {
// Retrieve the pipeline. When called for the first time,
// this will load the pipeline and save it for future use.
const [tokenizer, model] = await CrossEncoderSingleton.getInstance(x => {
// We also add a progress callback to the pipeline so that we can
// track model loading.
self.postMessage(x);
});
const { query, documents } = event.data;
const docs = documents.trim().split('\n');
const inputs = tokenizer(
new Array(docs.length).fill(query),
{
text_pair: docs,
padding: true,
truncation: true,
}
)
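// Run the cross-encoder on every (query, document) pair, convert the logits
// to relevance probabilities with a sigmoid, then sort the documents by
// descending score.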
const { logits } = await model(inputs);
const output = logits
.sigmoid()
.tolist()
.map(([score], i) => ({
corpus_id: i,
score,
text: docs[i],
}))
.sort((a, b) => b.score - a.score);
// Send the output back to the main thread
self.postMessage({ status: 'complete', output });
});
| transformers.js/examples/cross-encoder/src/worker.js/0 | {
"file_path": "transformers.js/examples/cross-encoder/src/worker.js",
"repo_id": "transformers.js",
"token_count": 786
} |
// Adapted from https://www.npmjs.com/package/audiobuffer-to-wav
export function encodeWAV(samples, sampleRate = 16000) {
let offset = 44;
const buffer = new ArrayBuffer(offset + samples.length * 4);
const view = new DataView(buffer);
/* RIFF identifier */
writeString(view, 0, 'RIFF')
/* RIFF chunk length */
view.setUint32(4, 36 + samples.length * 4, true)
/* RIFF type */
writeString(view, 8, 'WAVE')
/* format chunk identifier */
writeString(view, 12, 'fmt ')
/* format chunk length */
view.setUint32(16, 16, true)
/* sample format (raw) */
view.setUint16(20, 3, true)
/* channel count */
view.setUint16(22, 1, true)
/* sample rate */
view.setUint32(24, sampleRate, true)
/* byte rate (sample rate * block align) */
view.setUint32(28, sampleRate * 4, true)
/* block align (channel count * bytes per sample) */
view.setUint16(32, 4, true)
/* bits per sample */
view.setUint16(34, 32, true)
/* data chunk identifier */
writeString(view, 36, 'data')
/* data chunk length */
view.setUint32(40, samples.length * 4, true)
for (let i = 0; i < samples.length; ++i, offset += 4) {
view.setFloat32(offset, samples[i], true)
}
return buffer
}
function writeString(view, offset, string) {
for (let i = 0; i < string.length; ++i) {
view.setUint8(offset + i, string.charCodeAt(i))
}
}
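// Upload the encoded WAV to Hugging Face's temporary upload endpoint, then
// open a pre-filled "new discussion" page on the musicgen-web Space so the
// result can be shared.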
export async function share(body, settings) {
const response = await fetch('https://huggingface.co/uploads', { method: 'POST', body });
if (!response.ok) throw new Error(`Failed to upload audio: ${response.statusText}`);
const url = await response.text();
const params = new URLSearchParams({
title: `🎵 ${settings.prompt}`,
description: `<audio controls src="${url}"></audio>\n${JSON.stringify(settings, null, 2)}`,
});
const shareURL = `https://huggingface.co/spaces/Xenova/musicgen-web/discussions/new?${params.toString()}`;
window.open(shareURL, '_blank');
} | transformers.js/examples/musicgen-web/src/utils.js/0 | {
"file_path": "transformers.js/examples/musicgen-web/src/utils.js",
"repo_id": "transformers.js",
"token_count": 787
} |